diff --git a/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/How-to-Build-LLM-Apps-that-can-See-Hear-Speak.ipynb b/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/How-to-Build-LLM-Apps-that-can-See-Hear-Speak.ipynb
new file mode 100644
index 0000000..a8636d7
--- /dev/null
+++ b/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/How-to-Build-LLM-Apps-that-can-See-Hear-Speak.ipynb
@@ -0,0 +1,669 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "215c4617-6ebd-43fc-aa25-904fec4bdf5e",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "# How to Build LLM Apps that can See, Hear, Speak"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a2a7fa17-ff2a-4abf-922d-af00514ab87e",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-09-28T12:52:11.717083Z",
+ "iopub.status.busy": "2023-09-28T12:52:11.716833Z",
+ "iopub.status.idle": "2023-09-28T12:52:11.719792Z",
+ "shell.execute_reply": "2023-09-28T12:52:11.719167Z",
+ "shell.execute_reply.started": "2023-09-28T12:52:11.717067Z"
+ },
+ "language": "python"
+ },
+ "source": "\n- [Back to Contents](#contents)\n# Demo Architecture\n![Image Alt Text](https://drive.google.com/uc?id=14LYlMDIroDDXHel4hCaOR1EFg_BuYb9P)"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f035321e-d2b9-4576-9219-fe2f89e7cbe2",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "!python --version"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c9938d9e-5f18-4c25-9dbe-207dc8c6f91b",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-09-28T13:01:44.430458Z",
+ "iopub.status.busy": "2023-09-28T13:01:44.430181Z",
+ "iopub.status.idle": "2023-09-28T13:01:44.434484Z",
+ "shell.execute_reply": "2023-09-28T13:01:44.433724Z",
+ "shell.execute_reply.started": "2023-09-28T13:01:44.430442Z"
+ },
+ "language": "python"
+ },
+ "source": "\n# Contents: \n- [Demo Architecture](#architecture)\n- [Step 1: SingleStore DDLs](#ddl)\n- [Step 2: Packages and imports](#imports)\n- [Step 3: Ingest from data sources](#ingest)\n- [Step 4: Connect SingleStore to Open AI's LLM with Langchain](#connect_s2)\n- [Step 5: Add Voice Recognition and Speech](#speech)\n- [Step 6: Tying it together with Image data](#image)\n- [Conclusion](#conclusion)"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "96d5cbe6-59c0-4cbf-bb7d-75683e24c51a",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "markdown",
+ "id": "72d63784-9e1b-47fb-a962-70da21b7128c",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "\n- [Back to Contents](#contents)\n- # Setup SingleStore DDLs"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5cdc0633-6480-478d-a9fd-9af7ee2b2c24",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "Create and use the database llm_webinar"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "35d1dd68-4280-481d-aefd-954c609b184d",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nCREATE DATABASE llm_webinar;"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a846eff5-74e7-410a-a51a-2a58da119e81",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2023-09-27T17:12:51.870607Z",
+ "iopub.status.busy": "2023-09-27T17:12:51.870412Z",
+ "iopub.status.idle": "2023-09-27T17:12:51.874780Z",
+ "shell.execute_reply": "2023-09-27T17:12:51.873989Z",
+ "shell.execute_reply.started": "2023-09-27T17:12:51.870588Z"
+ },
+ "language": "python"
+ },
+ "source": "Create tables"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "728002a2-663e-48fd-8153-9c1a50b03bbb",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nCREATE TABLE `stockTable` (\n`ticker` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,\n`created_at` datetime DEFAULT NULL,\n`open` float DEFAULT NULL,\n`high` float DEFAULT NULL,\n`low` float DEFAULT NULL,\n`close` float DEFAULT NULL,\n`volume` int(11) DEFAULT NULL,\nSORT KEY (ticker, created_at desc),\nSHARD KEY (ticker)\n);\n\nCREATE TABLE newsSentiment (\n title TEXT CHARACTER SET utf8mb4,\n url TEXT,\n time_published DATETIME,\n authors TEXT,\n summary TEXT CHARACTER SET utf8mb4,\n banner_image TEXT,\n source TEXT,\n category_within_source TEXT,\n source_domain TEXT,\n topic TEXT,\n topic_relevance_score TEXT,\n overall_sentiment_score REAL,\n overall_sentiment_label TEXT,\n `ticker` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,\n ticker_relevance_score DECIMAL(10, 6),\n ticker_sentiment_score DECIMAL(10, 6),\n ticker_sentiment_label TEXT,\n SORT KEY (`ticker`,`time_published` DESC),\n SHARD KEY `__SHARDKEY` (`ticker`,`time_published` DESC),\n KEY(ticker) USING HASH,\n KEY(authors) USING HASH,\n KEY(source) USING HASH,\n KEY(overall_sentiment_label) USING HASH,\n KEY(ticker_sentiment_label) USING HASH \n);\n\nCREATE ROWSTORE REFERENCE TABLE companyInfo (\n ticker VARCHAR(10) PRIMARY KEY,\n AssetType VARCHAR(50),\n Name VARCHAR(100),\n Description TEXT,\n CIK VARCHAR(10),\n Exchange VARCHAR(10),\n Currency VARCHAR(10),\n Country VARCHAR(50),\n Sector VARCHAR(50),\n Industry VARCHAR(250),\n Address VARCHAR(100),\n FiscalYearEnd VARCHAR(20),\n LatestQuarter DATE,\n MarketCapitalization BIGINT,\n EBITDA BIGINT,\n PERatio DECIMAL(10, 2),\n PEGRatio DECIMAL(10, 3),\n BookValue DECIMAL(10, 2),\n DividendPerShare DECIMAL(10, 2),\n DividendYield DECIMAL(10, 4),\n EPS DECIMAL(10, 2),\n RevenuePerShareTTM DECIMAL(10, 2),\n ProfitMargin DECIMAL(10, 4),\n OperatingMarginTTM DECIMAL(10, 4),\n ReturnOnAssetsTTM DECIMAL(10, 4),\n ReturnOnEquityTTM DECIMAL(10, 4),\n RevenueTTM BIGINT,\n GrossProfitTTM BIGINT,\n 
DilutedEPSTTM DECIMAL(10, 2),\n QuarterlyEarningsGrowthYOY DECIMAL(10, 3),\n QuarterlyRevenueGrowthYOY DECIMAL(10, 3),\n AnalystTargetPrice DECIMAL(10, 2),\n TrailingPE DECIMAL(10, 2),\n ForwardPE DECIMAL(10, 2),\n PriceToSalesRatioTTM DECIMAL(10, 3),\n PriceToBookRatio DECIMAL(10, 2),\n EVToRevenue DECIMAL(10, 3),\n EVToEBITDA DECIMAL(10, 2),\n Beta DECIMAL(10, 3),\n 52WeekHigh DECIMAL(10, 2),\n 52WeekLow DECIMAL(10, 2),\n 50DayMovingAverage DECIMAL(10, 2),\n 200DayMovingAverage DECIMAL(10, 2),\n SharesOutstanding BIGINT,\n DividendDate DATE,\n ExDividendDate DATE\n);\n\nCREATE TABLE `embeddings` (\n`id` bigint(11) NOT NULL AUTO_INCREMENT,\n`category` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,\n`question` longtext CHARACTER SET utf8 COLLATE utf8_general_ci,\n`question_embedding` longblob,\n`answer` longtext CHARACTER SET utf8 COLLATE utf8_general_ci,\n`answer_embedding` longblob,\n`created_at` datetime DEFAULT NULL,\nUNIQUE KEY `PRIMARY` (`id`) USING HASH,\nSHARD KEY `__SHARDKEY` (`id`),\nKEY `category` (`category`) USING HASH,\nSORT KEY `__UNORDERED` (`created_at` DESC)\n);"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "58f31787-6d9a-42cb-8bec-548e429f39de",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nSHOW TABLES;"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4fb79318-199c-4d33-97d9-3b8d381dcaba",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "\n- [Back to Contents](#contents)\n# Install packages and imports"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "986a23fe-91d6-40cf-8ccf-8ff37f639014",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "!pip install elevenlabs --quiet\n!pip install openai --quiet\n!pip install matplotlib --quiet\n!pip install scipy --quiet\n!pip install scikit-learn --quiet\n!pip install singlestoredb --quiet\n!pip install langchain --quiet"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "819b9d83-28f6-49de-be48-82d29b315ab0",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "import requests\nimport time\nfrom datetime import datetime, timedelta\nfrom datetime import datetime\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nimport singlestoredb as s2\nimport getpass\nimport openai\nfrom langchain.sql_database import SQLDatabase\nfrom langchain.llms.openai import OpenAI\nfrom langchain.agents.agent_toolkits import SQLDatabaseToolkit\nfrom langchain.agents import create_sql_agent\nfrom openai.embeddings_utils import get_embeddings\nimport numpy as np"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "27b4b29e-b0c9-401e-a969-9fc770f7f6bd",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Set API keys"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ce7134f0-4ba9-4a8c-b53d-1e42cfaf8d98",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "alpha_vantage_apikey = getpass.getpass(\"enter alphavantage apikey here\")\nopenai_apikey = getpass.getpass(\"enter openai apikey here\")\nelevenlabs_apikey = getpass.getpass(\"enter elevenlabs apikey here\")"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "105bdb05-3cb5-4c1e-9cd1-b741bd3ab87a",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "\n- [Back to Contents](#contents)\n# Ingest from data sources"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9c43af07-1bd3-43e8-9911-355966072ebe",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Bring past two months of stock data"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c9595c3a-8b00-4d44-a6ab-32d2848459ad",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "# set up connection to SingleStore and the ticker list\ns2_conn = s2.connect(connection_url)\nticker_list = ['TSLA', 'AMZN', 'PLTR']"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2f2c5126-d102-4904-83f6-3a30bc4906b5",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "from datetime import datetime\n\ndef get_past_months(num_months):\n today = datetime.today()\n months = []\n\n for months_ago in range(0, num_months):\n target_date = today - relativedelta(months=months_ago)\n months.append(target_date.strftime('%Y-%m'))\n\n return months\n\nnum_months = 2 # Number of months \nyear_month_list = get_past_months(num_months)\nprint(year_month_list)\n\n# pull intraday data for each stock and write to SingleStore\nfor ticker in ticker_list:\n print(ticker)\n data_list = []\n for year_month in year_month_list:\n print(year_month)\n\n intraday_price_url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval=5min&month={}&outputsize=full&apikey={}\".format(ticker, year_month, alpha_vantage_apikey)\n r = requests.get(intraday_price_url)\n\n try: \n data = r.json()['Time Series (5min)']\n except:\n time.sleep(1) # required to not hit API limits\n continue\n \n for key in data:\n document = data[key]\n document['datetime'] = key\n document['ticker'] = ticker\n\n document['open'] = document['1. open']\n document['high'] = document['2. high']\n document['low'] = document['3. low']\n document['close'] = document['4. close']\n document['volume'] = document['5. volume']\n\n document['open'] = float(document['open'])\n document['high'] = float(document['high'])\n document['low'] = float(document['low'])\n document['close'] = float(document['close'])\n document['volume'] = int(document['volume'])\n\n \n del document['1. open']\n del document['2. high']\n del document['3. low']\n del document['4. close']\n del document['5. 
volume']\n\n data_list += [document]\n \n # Inside your loop, create the params dictionary with the correct values\n params = {\n 'datetime': document['datetime'],\n 'ticker': ticker,\n 'open': document['open'],\n 'high': document['high'],\n 'low': document['low'],\n 'close': document['close'],\n 'volume': document['volume']\n }\n\n # Construct and execute the SQL statement\n table_name = 'stockTable'\n stmt = f\"INSERT INTO {table_name} (created_at, ticker, open, high, low, close, volume) VALUES (%(datetime)s, %(ticker)s, %(open)s, %(high)s, %(low)s, %(close)s, %(volume)s)\"\n\n with s2_conn.cursor() as cur:\n cur.execute(stmt, params)\n # time.sleep(1) # required to not hit API limits"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb534c0e-bfd9-47f6-a2be-7bd160ab9ab2",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nselect count(*) from stockTable"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5e142437-c49b-4484-9475-35105cde45b2",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "# Bring in Company data"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a4c9d7e8-5cd7-47d4-a188-cb660bd9c798",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "# pull intraday data for each stock and write to SingleStore\nfor ticker in ticker_list:\n print(ticker)\n data_list = []\n # for year_month in year_month_list:\n\n company_overview = \"https://www.alphavantage.co/query?function=OVERVIEW&symbol={}&outputsize=full&apikey={}\".format(ticker, alpha_vantage_apikey)\n r = requests.get(company_overview)\n\n try: \n data = r.json()\n except:\n time.sleep(3) # required to not hit API limits\n continue\n \n data['CIK'] = int(data['CIK'])\n data['MarketCapitalization']= float(data['MarketCapitalization'])\n # Assuming data['EBITDA'] is a string containing 'None'\n ebitda_str = data['EBITDA']\n if ebitda_str.lower() == 'none':\n # Handle the case where EBITDA is 'None', for example, you can set it to 0\n data['EBITDA'] = 0.0\n else:\n # Convert the EBITDA string to a float\n data['EBITDA'] = float(ebitda_str)\n \n PERatio_flt = data['PERatio']\n if PERatio_flt.lower() == 'none':\n # Handle the case where EVToRevenue is '-'\n data['PERatio'] = 0.0 # You can use any default value that makes sense\n else:\n # Convert the EVToRevenue string to a float\n data['PERatio'] = float(PERatio_flt)\n \n data['PEGRatio']= float(data['PEGRatio'])\n data['BookValue']= float(data['BookValue'])\n data['DividendPerShare']= float(data['DividendPerShare'])\n data['DividendYield']= float(data['DividendYield'])\n data['EPS']= float(data['EPS'])\n data['RevenuePerShareTTM']= float(data['RevenuePerShareTTM'])\n data['ProfitMargin']= float(data['ProfitMargin'])\n data['OperatingMarginTTM']= float(data['OperatingMarginTTM'])\n data['ReturnOnAssetsTTM']= float(data['ReturnOnAssetsTTM'])\n data['ReturnOnEquityTTM']= float(data['ReturnOnEquityTTM'])\n data['RevenueTTM']= int(data['RevenueTTM'])\n data['GrossProfitTTM']= int(data['GrossProfitTTM'])\n data['DilutedEPSTTM']= float(data['DilutedEPSTTM'])\n data['QuarterlyEarningsGrowthYOY']= float(data['QuarterlyEarningsGrowthYOY'])\n data['QuarterlyRevenueGrowthYOY']= 
float(data['QuarterlyRevenueGrowthYOY'])\n data['AnalystTargetPrice']= float(data['AnalystTargetPrice'])\n # Assuming data['TrailingPE'] is a string containing '-'\n trailing_pe_str = data['TrailingPE']\n if trailing_pe_str == '-':\n # Handle the case where TrailingPE is '-'\n data['TrailingPE'] = 0.0 # You can use any default value that makes sense\n else:\n try:\n # Attempt to convert the TrailingPE string to a float\n data['TrailingPE'] = float(trailing_pe_str)\n except ValueError:\n # Handle the case where the conversion fails (e.g., if it contains invalid characters)\n data['TrailingPE'] = 0.0 # Set to a default value or handle as needed\n\n\n \n data['ForwardPE']= float(data['ForwardPE'])\n data['PriceToSalesRatioTTM']= float(data['PriceToSalesRatioTTM'])\n # Assuming data['EVToRevenue'] is a string containing '-'\n PriceToBookRatio_flt = data['PriceToBookRatio']\n if PriceToBookRatio_flt == '-':\n # Handle the case where EVToRevenue is '-'\n data['PriceToBookRatio'] = 0.0 # You can use any default value that makes sense\n else:\n # Convert the EVToRevenue string to a float\n data['PriceToBookRatio'] = float(PriceToBookRatio_flt)\n\n \n # Assuming data['EVToRevenue'] is a string containing '-'\n ev_to_revenue_str = data['EVToRevenue']\n if ev_to_revenue_str == '-':\n # Handle the case where EVToRevenue is '-'\n data['EVToRevenue'] = 0.0 # You can use any default value that makes sense\n else:\n # Convert the EVToRevenue string to a float\n data['EVToRevenue'] = float(ev_to_revenue_str)\n \n # data['EVToEBITDA']= float(data['EVToEBITDA'])\n # Assuming data['EVToRevenue'] is a string containing '-'\n ev_to_EBITDA_str = data['EVToEBITDA']\n if ev_to_revenue_str == '-':\n # Handle the case where EVToRevenue is '-'\n data['EVToEBITDA'] = 0.0 # You can use any default value that makes sense\n else:\n # Convert the EVToRevenue string to a float\n data['EVToEBITDA'] = float(ev_to_EBITDA_str)\n\n \n data['Beta']= float(data['Beta'])\n data['52WeekHigh']= 
float(data['52WeekHigh'])\n data['52WeekLow']= float(data['52WeekLow'])\n data['50DayMovingAverage']= float(data['50DayMovingAverage'])\n data['200DayMovingAverage']= float(data['200DayMovingAverage'])\n data['SharesOutstanding']= int(data['SharesOutstanding'])\n # description_embedding = [np.array(x, '\n- [Back to Demo Architecture](#architecture)\n# Connect SingleStore to Open AI's LLM with Langchain"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1dd25cf2-bd39-4f25-b3a5-fe128c372e97",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "os.environ[\"OPENAI_API_KEY\"] = openai_apikey\nembedding_model = 'text-embedding-ada-002'\ngpt_model = 'gpt-3.5-turbo-16k'\n\n# Create the agent executor\ndb = SQLDatabase.from_uri(connection_url, include_tables=['embeddings', 'companyInfo', 'newsSentiment', 'stockTable'], sample_rows_in_table_info=2)\nllm = OpenAI(openai_api_key=os.environ[\"OPENAI_API_KEY\"], temperature=0, verbose=True)\ntoolkit = SQLDatabaseToolkit(db=db, llm=llm)\n\nagent_executor = create_sql_agent(\n llm=OpenAI(temperature=0),\n toolkit=toolkit,\n verbose=True,\n prefix= '''\n You are an agent designed to interact with a SQL database called SingleStore. This sometimes has Shard and Sort keys in the table schemas, which you can ignore. \n \\nGiven an input question, create a syntactically correct MySQL query to run, then look at the results of the query and return the answer. \n \\n If you are asked about similarity questions, you should use the DOT_PRODUCT function.\n \n \\nHere are a few examples of how to use the DOT_PRODUCT function:\n \\nExample 1:\n Q: how similar are the questions and answers?\n A: The query used to find this is:\n \n select question, answer, dot_product(question_embedding, answer_embedding) as similarity from embeddings;\n \n \\nExample 2:\n Q: What are the most similar questions in the embeddings table, not including itself?\n A: The query used to find this answer is:\n \n SELECT q1.question as question1, q2.question as question2, DOT_PRODUCT(q1.question_embedding, q2.question_embedding) :> float as score\n FROM embeddings q1, embeddings q2 \n WHERE question1 != question2 \n ORDER BY score DESC LIMIT 5;\n \n \\nExample 3:\n Q: In the embeddings table, which rows are from the chatbot?\n A: The query used to find this answer is:\n \n SELECT category, question, answer FROM embeddings\n WHERE category = 'chatbot';\n\n \\nIf you are asked to describe the database, you should run the query SHOW TABLES \n \\nUnless the user specifies a specific number of 
examples they wish to obtain, always limit your query to at most {top_k} results.\n \\n The question embeddings and answer embeddings are very long, so do not show them unless specifically asked to.\n \\nYou can order the results by a relevant column to return the most interesting examples in the database.\n \\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\n \\nYou have access to tools for interacting with the database.\\nOnly use the below tools. \n Only use the information returned by the below tools to construct your final answer.\n \\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again up to 3 times.\n \\n\\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n \\n\\nIf the question does not seem related to the database, just return \"I don\\'t know\" as the answer.\\n,\n \n ''',\n format_instructions='''Use the following format:\\n\n \\nQuestion: the input question you must answer\n \\nThought: you should always think about what to do\n \\nAction: the action to take, should be one of [{tool_names}]\n \\nAction Input: the input to the action\n \\nObservation: the result of the action\n \\nThought: I now know the final answer\n \\nFinal Answer: the final answer to the original input question\n \\nSQL Query used to get the Answer: the final sql query used for the final answer'\n ''',\n top_k=3,\n max_iterations=5\n)"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a993b299-200e-48e0-886a-6eba2daafe35",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Create function that processes user question with a check in Semantic Cache Layer"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "be7f7f12-13ee-42bc-a6c7-c5743642bd62",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "table_name = 'embeddings'\nsimilarity_threshold = .97\n\ndef process_user_question(question):\n print(f'\\nQuestion asked: {question}')\n category = 'chatbot'\n \n # Get vector embedding from the original question and calculate the elapsed time\n start_time = time.time()\n question_embedding= [np.array(x, ' float as score from embeddings where category=\"chatbot\" order by score desc limit 1;'\n \n\n with s2_conn.cursor() as cur:\n start_time = time.time()\n cur.execute(stmt, params)\n row = cur.fetchone()\n elapsed_time = (time.time() - start_time) * 1000\n print(f\"Execution time for checking existing questions: {elapsed_time:.2f} milliseconds\")\n \n try:\n question2, answer, score = row\n print(f\"\\nClosest Matching row:\\nQuestion: {question2}\\nAnswer: {answer}\\nSimilarity Score: {score}\")\n\n if score > similarity_threshold:\n print('Action to take: Using existing answer')\n return answer\n\n else:\n print('Action to take: Running agent_executor')\n start_time = time.time()\n answer2 = agent_executor.run(question)\n elapsed_time = (time.time() - start_time) * 1000\n print(f\"agent_executor execution time: {elapsed_time:.2f} milliseconds\")\n\n # Get current time\n created_at = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Get the answer embedding and calculate the elapsed time\n start_time = time.time()\n answer_embedding = [np.array(x, '\n- [Back to Contents](#contents)\n# Add Voice Recognition and Speech"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f4ad3d62-554d-43b9-af75-862efee6e001",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Select a voice"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cc2390de-f4e3-4d33-9593-f829a7d40d85",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "from elevenlabs import generate, stream, voices\nfrom elevenlabs import set_api_key\nfrom IPython.display import Audio\nfrom IPython.display import display\nimport requests"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9b098298-ce68-46ba-b962-bc08226ca955",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "voices = voices()\nvoices[0]"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c2001b20-6d6b-4a34-8716-7f295b7abc53",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "CHUNK_SIZE = 1024\nurl = \"https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM/stream\"\n\nheaders = {\n \"Accept\": \"audio/mpeg\",\n \"Content-Type\": \"application/json\",\n \"xi-api-key\": elevenlabs_apikey\n}\n\ndata = {\n \"text\": answer,\n \"model_id\": \"eleven_monolingual_v1\",\n \"voice_settings\": {\n \"stability\": 0.5,\n \"similarity_boost\": 0.5\n }\n}\n\nresponse = requests.post(url, json=data, headers=headers, stream=True)\n\n# create an audio file\nwith open('output.mp3', 'wb') as f:\n for chunk in response.iter_content(chunk_size=CHUNK_SIZE):\n if chunk:\n f.write(chunk)\n"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3ba4d543-6df1-4089-b054-7c149d9a3324",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "!ls"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8bbffa96-ed40-40ea-b424-a32b5f1b9a9f",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "audio_file = 'output.mp3'\n\naudio = Audio(filename=audio_file, autoplay =True)\ndisplay(audio)"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6d904504-c20b-4ca9-bc36-8c9288c4c4f4",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Transcribe the audio file"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9f991542-c92a-498c-ba7e-c37af7bbc491",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "openai.api_key = openai_apikey\naudio_file= open(\"output.mp3\", \"rb\")\ntranscript = openai.Audio.transcribe(\"whisper-1\", audio_file)\nprint(transcript[\"text\"])"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c6d005ea-9979-4c62-b09b-0888ec17da12",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "\n- [Back to Demo Architecture](#architecture)\n# Tying it together with Image data"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "290da6ad-3ea5-426e-8d6c-3b226f00cad4",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "# Most recent news article for TSLA\nquestion_3 = \"\"\"What is the most recent news article for Amazon where the topic_relevance_score is greater than 90%? \nInclude the url, time published and banner image.\"\"\"\nanswer = process_user_question(question_3)\nprint(f'The answer is: {answer}')"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7c18a63c-13c4-40e8-b235-55bc6f04fd28",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nSELECT title, url, time_published, banner_image FROM newsSentiment WHERE ticker = 'AMZN' AND topic_relevance_score > 0.9 ORDER BY time_published DESC LIMIT 3"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a3076d45-a48e-4a90-811f-e4fb9e01c7e6",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Load the image"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "672be099-e988-4739-9932-0a6f23bcc844",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom io import BytesIO\nbanner_image_url = \"https://staticx-tuner.zacks.com/images/default_article_images/default341.jpg\"\nresponse = requests.get(banner_image_url)\n\nif response.status_code == 200:\n img = mpimg.imread(BytesIO(response.content), format='JPG')\n imgplot = plt.imshow(img)\n plt.show()\nelse:\n print(f\"Failed to retrieve the image. Status code: {response.status_code}\")"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7b4528e9-4545-41d0-9a36-b84141e9850c",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "### Set up the huggingface transformer"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6848e943-4cd8-445e-840d-f9d13b4391f6",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "transformers_version = \"v4.29.0\" #@param [\"main\", \"v4.29.0\"] {allow-input: true}\n\nprint(f\"Setting up everything with transformers version {transformers_version}\")\n\n!pip install huggingface_hub>=0.14.1 git+https://github.com/huggingface/transformers@$transformers_version -q diffusers accelerate datasets torch soundfile sentencepiece opencv-python openai\n"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7b77f62b-3f57-4335-bae4-27f5f74abf70",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "import IPython\nimport soundfile as sf\n\ndef play_audio(audio):\n sf.write(\"speech_converted.wav\", audio.numpy(), samplerate=16000)\n return IPython.display.Audio(\"speech_converted.wav\")\n\nfrom huggingface_hub import notebook_login\nnotebook_login()"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6b2ceeee-6d95-4aa5-83fd-9651be326e59",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "agent_name = \"OpenAI (API Key)\" #@param [\"StarCoder (HF Token)\", \"OpenAssistant (HF Token)\", \"OpenAI (API Key)\"]\n\nif agent_name == \"StarCoder (HF Token)\":\n from transformers.tools import HfAgent\n agent = HfAgent(\"https://api-inference.huggingface.co/models/bigcode/starcoder\")\n print(\"StarCoder is initialized 💪\")\nelif agent_name == \"OpenAssistant (HF Token)\":\n from transformers.tools import HfAgent\n agent = HfAgent(url_endpoint=\"https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5\")\n print(\"OpenAssistant is initialized 💪\")\nif agent_name == \"OpenAI (API Key)\":\n from transformers.tools import OpenAiAgent\n pswd = openai_apikey\n agent = OpenAiAgent(model=\"text-davinci-003\", api_key=pswd)\n print(\"OpenAI is initialized 💪\")"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eed4814a-8a7e-41f6-956b-51c7debc1fff",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "caption = agent.run(\"Can you caption the `image`?\", image=img)"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "71e8f5dd-ab27-415e-a0b6-423c497232ef",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "data = {\n \"text\": caption,\n \"model_id\": \"eleven_monolingual_v1\",\n \"voice_settings\": {\n \"stability\": 0.5,\n \"similarity_boost\": 0.5\n }\n}\n\nresponse = requests.post(url, json=data, headers=headers)\nwith open('output.mp3', 'wb') as f:\n for chunk in response.iter_content(chunk_size=CHUNK_SIZE):\n if chunk:\n f.write(chunk)\n\naudio_file = 'output.mp3'\n\naudio = Audio(filename=audio_file, autoplay =True)\ndisplay(audio)"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "de4c4db5-d068-41ef-aa85-0657c6c450a9",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "\n- [Back to Contents](#contents)\n# Conclusion"
+ },
+ {
+ "cell_type": "markdown",
+ "id": "faff70cf-728f-4bb3-b744-ba7efb195e10",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "- Handle transactional and analytical queries with your vector data\n- no need to export data out of SingleStore to another vector db\n- Scan vectors fast with exact nearest neighbor. (DOT_PRODUCT, EUCLIDEAN_DISTANCE, and VECTOR_SUB are high-perf functions using single-instruction-multiple-data (SIMD) processor instructions)\n- Ability to stream data directly into SingleStore\n- Use SingleStore as Semantic Cache Layer leveraging the Plancache. No need for a cache layer.\n- Easily scale the workspace for your workload\n- handle reads and writes in parallel\n- Use of external functions. \n"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "13bff7c9-a7c5-4807-84df-e9f7490f4627",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "21e59bb1-bf4f-4aa7-bf7a-92f8e4f19dd7",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b07e2d81-473d-4035-9cbf-fe66007362bf",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "808e304b-2515-449a-8cec-8f3a3a38ed4b",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "93e4e0da-a887-4cd7-b70c-475710ece772",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ },
+ {
+ "cell_type": "markdown",
+ "id": "17b69461-5ceb-42ff-9986-2092b5670f58",
+ "metadata": {
+ "language": "python"
+ },
+ "source": "# Reset Demo"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4ae2913a-ee2b-40f0-94b3-cb75c9891aea",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": "%%sql\nDROP DATABASE llm_webinar;"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eeb7d04b-df99-457b-96c3-9b0967f00c6c",
+ "metadata": {
+ "language": "python"
+ },
+ "outputs": [],
+ "source": ""
+ }
+ ],
+ "metadata": {
+ "jupyterlab": {
+ "notebooks": {
+ "version_major": 6,
+ "version_minor": 4
+ }
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimeType": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ },
+ "singlestore_cell_default_language": "python",
+ "singlestore_connection": {
+ "connectionID": "ee66326b-0803-4fdb-8b7f-d6415f18d4c8",
+ "defaultDatabase": "llm_webinar"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/meta.toml b/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/meta.toml
new file mode 100644
index 0000000..dec289e
--- /dev/null
+++ b/notebooks/How-to-Build-LLM-Apps-that-can-See-Hear-Speak/meta.toml
@@ -0,0 +1,8 @@
+[meta]
+title="How to Build LLM Apps that can See Hear Speak"
+description="""\
+ Using OpenAI to build an app that can take images, audio, and text data to generate output.
+ """
+icon="chart-network"
+tags=["Advanced", "OpenAI", "genAI"]
+destinations=["spaces"]