diff --git a/.github/scripts/spellcheck_conf/wordlist.txt b/.github/scripts/spellcheck_conf/wordlist.txt index 58afaca5bdd2943b0af6b369f8b589aed8c0c8f6..493e6f9760dc6c784c212ae223a847ad96f2cfa6 100644 --- a/.github/scripts/spellcheck_conf/wordlist.txt +++ b/.github/scripts/spellcheck_conf/wordlist.txt @@ -1508,3 +1508,24 @@ xTTS TogetherAI Vercel's echarts +pydantic +Deloitte +Deloitte's +Felicis +Gmail +LangSmith +Letta +NLU +Norvig's +OAuth +Ollama's +Weng +dropdown +globals +gmail +multiagent +yyy +jpeg +toend +codellama +DIFFLOG diff --git a/docs/FAQ.md b/docs/FAQ.md index 6dc3fd91b40acf42015b7c78ef7300470ea8a039..fa5e7de8221a0095c12eb33a5987b83e30325699 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -29,20 +29,20 @@ Here we discuss frequently asked questions that may occur and we found useful al 7. How to handle CUDA memory fragmentations during fine-tuning that may lead into an OOM? - In some cases you may experience that after model checkpointing specially with FSDP (this usually does not happen with PEFT methods), the reserved and allocated CUDA memory has increased. This might be due to CUDA memory fragmentations. PyTorch recenly added an enviroment variable that helps to better manage memory fragmentation (this feature in available on PyTorch nightlies at the time of writing this doc July 30 2023). You can set this in your main training script as follows: + In some cases you may experience that after model checkpointing, especially with FSDP (this usually does not happen with PEFT methods), the reserved and allocated CUDA memory has increased. This might be due to CUDA memory fragmentation. PyTorch recently added an environment variable that helps to better manage memory fragmentation (this feature is available on PyTorch nightlies at the time of writing this doc, July 30 2023). You can set this in your main training script as follows: ```bash os.environ['PYTORCH_CUDA_ALLOC_CONF']='expandable_segments:True' ``` - We also added this enviroment variable in `setup_environ_flags` of the [train_utils.py](../src/llama_recipes/utils/train_utils.py), feel free to uncomment it if required. + We also added this environment variable in `setup_environ_flags` of the [train_utils.py](../src/llama_recipes/utils/train_utils.py), feel free to uncomment it if required. 8. Additional debugging flags? The environment variable `TORCH_DISTRIBUTED_DEBUG` can be used to trigger additional useful logging and collective synchronization checks to ensure all ranks are synchronized appropriately. `TORCH_DISTRIBUTED_DEBUG` can be set to either OFF (default), INFO, or DETAIL depending on the debugging level required. Please note that the most verbose option, DETAIL may impact the application performance and thus should only be used when debugging issues. - We also added this enviroment variable in `setup_environ_flags` of the [train_utils.py](../src/llama_recipes/utils/train_utils.py), feel free to uncomment it if required. + We also added this environment variable in `setup_environ_flags` of the [train_utils.py](../src/llama_recipes/utils/train_utils.py), feel free to uncomment it if required. 9. I am getting import errors when running inference.
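For reference, a minimal sketch of what FAQ items 7 and 8 above describe might look like the following at the top of a training entry point. This is an illustrative snippet, not code from `train_utils.py`, and the choice of `INFO` for the debug level is an assumption:

```python
import os

# Item 7: let the CUDA caching allocator use expandable segments to reduce
# fragmentation (available on PyTorch nightlies when the FAQ was written).
# This must be set before the first CUDA allocation happens.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Item 8: extra logging and collective synchronization checks for distributed
# runs. Valid values are OFF (default), INFO, and DETAIL; DETAIL is verbose
# and can slow training, so use it only while debugging.
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO"

import torch  # imported after the environment variables are set

def main():
    # ... build the model, wrap it with FSDP, run the training loop ...
    pass

if __name__ == "__main__":
    main()
```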
diff --git a/recipes/quickstart/images/a_colorful_llama_doing_ai_programming.jpeg b/docs/img/a_colorful_llama_doing_ai_programming.jpeg similarity index 100% rename from recipes/quickstart/images/a_colorful_llama_doing_ai_programming.jpeg rename to docs/img/a_colorful_llama_doing_ai_programming.jpeg diff --git a/recipes/quickstart/images/cat.jpeg b/docs/img/cat.jpeg similarity index 100% rename from recipes/quickstart/images/cat.jpeg rename to docs/img/cat.jpeg diff --git a/recipes/quickstart/images/gnocchi_alla_romana.jpeg b/docs/img/gnocchi_alla_romana.jpeg similarity index 100% rename from recipes/quickstart/images/gnocchi_alla_romana.jpeg rename to docs/img/gnocchi_alla_romana.jpeg diff --git a/recipes/quickstart/images/grocery_shopping_bascket_with_salmon_in_package.jpeg b/docs/img/grocery_shopping_bascket_with_salmon_in_package.jpeg similarity index 100% rename from recipes/quickstart/images/grocery_shopping_bascket_with_salmon_in_package.jpeg rename to docs/img/grocery_shopping_bascket_with_salmon_in_package.jpeg diff --git a/recipes/quickstart/images/llama-mobile-confirmed.png b/docs/img/llama-mobile-confirmed.png similarity index 100% rename from recipes/quickstart/images/llama-mobile-confirmed.png rename to docs/img/llama-mobile-confirmed.png diff --git a/recipes/quickstart/images/llama-recipes.png b/docs/img/llama-recipes.png similarity index 100% rename from recipes/quickstart/images/llama-recipes.png rename to docs/img/llama-recipes.png diff --git a/recipes/quickstart/images/llama_stack.png b/docs/img/llama_stack.png similarity index 100% rename from recipes/quickstart/images/llama_stack.png rename to docs/img/llama_stack.png diff --git a/recipes/quickstart/images/meta_release.png b/docs/img/meta_release.png similarity index 100% rename from recipes/quickstart/images/meta_release.png rename to docs/img/meta_release.png diff --git a/docs/img/resized_image.jpg b/docs/img/resized_image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..339f1da6e879d36c4547394bc495df0944316ddc Binary files /dev/null and b/docs/img/resized_image.jpg differ diff --git a/recipes/quickstart/images/thumbnail_IMG_1329.jpg b/docs/img/thumbnail_IMG_1329.jpg similarity index 100% rename from recipes/quickstart/images/thumbnail_IMG_1329.jpg rename to docs/img/thumbnail_IMG_1329.jpg diff --git a/recipes/quickstart/images/thumbnail_IMG_1440.jpg b/docs/img/thumbnail_IMG_1440.jpg similarity index 100% rename from recipes/quickstart/images/thumbnail_IMG_1440.jpg rename to docs/img/thumbnail_IMG_1440.jpg diff --git a/recipes/quickstart/images/thumbnail_IMG_6385.jpg b/docs/img/thumbnail_IMG_6385.jpg similarity index 100% rename from recipes/quickstart/images/thumbnail_IMG_6385.jpg rename to docs/img/thumbnail_IMG_6385.jpg diff --git a/docs/multi_gpu.md b/docs/multi_gpu.md index 820595dcf3bdd6169dba4ac56c1fb3209aeb5ee8..7c797ddc8bb8b06b64c2c2b57bc49289fd473cfc 100644 --- a/docs/multi_gpu.md +++ b/docs/multi_gpu.md @@ -174,7 +174,7 @@ It lets us specify the training settings for everything from `model_name` to `da * `mixed_precision` boolean flag to specify using mixed precision, defatults to true. - * `use_fp16` boolean flag to specify using FP16 for mixed precision, defatults to False. We recommond not setting this flag, and only set `mixed_precision` that will use `BF16`, this will help with speed and memory savings while avoiding challenges of scaler accuracies with `FP16`. + * `use_fp16` boolean flag to specify using FP16 for mixed precision, defaults to False.
We recommend not setting this flag and instead setting only `mixed_precision`, which will use `BF16`; this helps with speed and memory savings while avoiding the scaler accuracy challenges of `FP16`. * `sharding_strategy` this specifies the sharding strategy for FSDP, it can be: * `FULL_SHARD` that shards model parameters, gradients and optimizer states, results in the most memory savings. @@ -187,7 +187,7 @@ It lets us specify the training settings for everything from `model_name` to `da * `checkpoint_type` specifies the state dict checkpoint type for saving the model. `FULL_STATE_DICT` streams state_dict of each model shard from a rank to CPU and assembels the full state_dict on CPU. `SHARDED_STATE_DICT` saves one checkpoint per rank, and enables the re-loading the model in a different world size. -* `fsdp_activation_checkpointing` enables activation checkpoining for FSDP, this saves significant amount of memory with the trade off of recomputing itermediate activations during the backward pass. The saved memory can be re-invested in higher batch sizes to increase the throughput. We recommond you use this option. +* `fsdp_activation_checkpointing` enables activation checkpointing for FSDP, this saves a significant amount of memory with the trade-off of recomputing intermediate activations during the backward pass. The saved memory can be re-invested in higher batch sizes to increase the throughput. We recommend you use this option. * `fsdp_config.pure_bf16` it moves the model to `BFloat16` and if `optimizer` is set to `anyprecision` then optimizer states will be kept in `BFloat16` as well. You can use this option if necessary. diff --git a/recipes/3p_integrations/aws/prompt_engineering_with_llama_2_on_amazon_bedrock.ipynb b/recipes/3p_integrations/aws/prompt_engineering_with_llama_2_on_amazon_bedrock.ipynb index 6fd1b0d86e299c571a9daf021693c15409653341..df8b61126a5382886adad0788b2adf6cfc0674d7 100644 --- a/recipes/3p_integrations/aws/prompt_engineering_with_llama_2_on_amazon_bedrock.ipynb +++ b/recipes/3p_integrations/aws/prompt_engineering_with_llama_2_on_amazon_bedrock.ipynb @@ -758,7 +758,7 @@ "\n", "Adding specific examples of your desired output generally results in more accurate, consistent output. This technique is called \"few-shot prompting\".\n", "\n", - "In this example, the generated response follows our desired format that offers a more nuanced sentiment classifer that gives a positive, neutral, and negative response confidence percentage.\n", + "In this example, the generated response follows our desired format that offers a more nuanced sentiment classifier that gives a positive, neutral, and negative response confidence percentage.\n", "\n", "See also: [Zhao et al. (2021)](https://arxiv.org/abs/2102.09690), [Liu et al. (2021)](https://arxiv.org/abs/2101.06804), [Su et al. (2022)](https://arxiv.org/abs/2209.01975), [Rubin et al. (2022)](https://arxiv.org/abs/2112.08633).\n", "\n" @@ -1045,7 +1045,7 @@ "source": [ "### Self-Consistency\n", "\n", - "LLMs are probablistic, so even with Chain-of-Thought, a single generation might produce incorrect results. Self-Consistency ([Wang et al. (2022)](https://arxiv.org/abs/2203.11171)) introduces enhanced accuracy by selecting the most frequent answer from multiple generations (at the cost of higher compute):" + "LLMs are probabilistic, so even with Chain-of-Thought, a single generation might produce incorrect results. Self-Consistency ([Wang et al.
(2022)](https://arxiv.org/abs/2203.11171)) introduces enhanced accuracy by selecting the most frequent answer from multiple generations (at the cost of higher compute):" ] }, { @@ -1179,7 +1179,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Retrieval-Augmented Generation, or RAG, describes the practice of including information in the prompt you've retrived from an external database ([Lewis et al. (2020)](https://arxiv.org/abs/2005.11401v4)). It's an effective way to incorporate facts into your LLM application and is more affordable than fine-tuning which may be costly and negatively impact the foundational model's capabilities.\n", + "Retrieval-Augmented Generation, or RAG, describes the practice of including information in the prompt you've retrieved from an external database ([Lewis et al. (2020)](https://arxiv.org/abs/2005.11401v4)). It's an effective way to incorporate facts into your LLM application and is more affordable than fine-tuning, which may be costly and negatively impact the foundational model's capabilities.\n", "\n", "This could be as simple as a lookup table or as sophisticated as a [vector database]([FAISS](https://github.com/facebookresearch/faiss)) containing all of your company's knowledge:" ] } diff --git a/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md b/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md index 9e8aa7d6e499d2eb2fe873f6a8d53414683e0e40..9f6355c9b88f833ed7ac527eced7ff82c95106a6 100644 --- a/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md +++ b/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/README.md @@ -1,6 +1,6 @@ # Presidential Speeches RAG with Pinecone -This repository contains a command line application that allows users to ask questions about US presidental speeches by applying Retrieval-Augmented Generation (RAG) over a Pinecone vector database. The application uses RAG to answer the user's question by retrieving the most relevant presidential speeches and using them to supplant the LLM response. +This repository contains a command line application that allows users to ask questions about US presidential speeches by applying Retrieval-Augmented Generation (RAG) over a Pinecone vector database. The application uses RAG to answer the user's question by retrieving the most relevant presidential speeches and using them to supplement the LLM response.
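To make that retrieve-then-generate flow concrete, the sketch below shows one way such an application could be wired together. It is a simplified illustration rather than this template's actual `main.py`, and the index name, embedding model, metadata field, and Groq model id are all assumptions:

```python
import os

from pinecone import Pinecone
from sentence_transformers import SentenceTransformer
from groq import Groq

encoder = SentenceTransformer("all-MiniLM-L6-v2")  # embeds the user question
index = Pinecone(api_key=os.environ["PINECONE_API_KEY"]).Index("presidential-speeches")
client = Groq(api_key=os.environ["GROQ_API_KEY"])

def answer(user_question: str) -> str:
    # 1. Retrieve the most relevant speech excerpts from the vector database.
    results = index.query(
        vector=encoder.encode(user_question).tolist(),
        top_k=3,
        include_metadata=True,
    )
    relevant_excerpts = "\n\n".join(m["metadata"]["text"] for m in results["matches"])

    # 2. Supplement the LLM prompt with the retrieved excerpts.
    chat_completion = client.chat.completions.create(
        model="llama-3.1-8b-instant",
        messages=[
            {"role": "system", "content": "Answer using only the provided speech excerpts."},
            {
                "role": "user",
                "content": "User Question: " + user_question
                + "\n\nRelevant Speech Excerpt(s):\n\n" + relevant_excerpts,
            },
        ],
    )
    return chat_completion.choices[0].message.content
```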
## Features diff --git a/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py b/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py index a2bb2eea5903ab7114bc25b29a0139ec7d0d51c7..aa3b206addce8a92306bb62111fd4397ca914a82 100644 --- a/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py +++ b/recipes/3p_integrations/groq/groq-example-templates/presidential-speeches-rag-with-pinecone/main.py @@ -55,7 +55,7 @@ def presidential_speech_chat_completion(client, model, user_question, relevant_e }, { "role": "user", - "content": "User Question: " + user_question + "\n\nRelevant Speech Exerpt(s):\n\n" + relevant_excerpts, + "content": "User Question: " + user_question + "\n\nRelevant Speech Excerpt(s):\n\n" + relevant_excerpts, } ], model = model diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set-v2.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set-v2.jsonl index 508431e28dd345e318fb56190eec8b4d4a0c7dfc..f0a7eb840844a8665a54cf4221e111b1fb4b0e98 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set-v2.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set-v2.jsonl @@ -29,8 +29,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql" : "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql" : "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql" : "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER 
BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql" : "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql" : "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set.jsonl index efa0534190478ed49d6b54615121b23182af5160..399fa7b7bb302fd3d25c2aa6ac31f5f365886f3b 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/gold-test-set.jsonl @@ -9,8 +9,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql" : "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql" : "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql" : "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql" : "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql" : "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git 
a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_large_filtered_cleaned.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_large_filtered_cleaned.jsonl index 53a6cf033ca11ebf4c53528d0c3e3d86eab9262a..27ee07b34ff1e1fa08ea68032b70860d669bba35 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_large_filtered_cleaned.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_large_filtered_cleaned.jsonl @@ -209,8 +209,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_v2_large_filtered_cleaned.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_v2_large_filtered_cleaned.jsonl index 824e964719beded15b0fe3dcaf544f969a1c42c9..1166db156d720d0a54ae9831cd4d62849473be86 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_v2_large_filtered_cleaned.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/archive/generated_queries_v2_large_filtered_cleaned.jsonl @@ -117,8 +117,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "sql": "SELECT name, pos, 
MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "sql" : "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "sql" : "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "sql" : "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "sql" : "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "sql" : "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "sql" : "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "sql" : "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries.jsonl index 0f0eb4b1cd35266ca49643917897fbc950f45179..16c8dd45738a2cfd1dba7f68777a387a192366c2 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries.jsonl @@ -118,8 +118,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis 
Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} @@ -148,8 +148,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} 
{"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large.jsonl index 5601bef59d556c4f5a53ad4816b173c52bd43745..77fcdf3fdfb4b3c23724d4b95bc26c30d9b2c1a8 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large.jsonl @@ -1108,8 +1108,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus 
Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} @@ -1138,8 +1138,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large_filtered.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large_filtered.jsonl index 58bddd51ee969bb1c6643b6c05f93a0ff41031f6..a1381d256013beea2d24c969629e73e61eb8290e 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large_filtered.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_large_filtered.jsonl @@ -321,8 +321,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary 
FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2.jsonl index f204e69b9f5868ce8e4760d6ca43351a34f643ff..afeb3eae35ed7ca1f1eb5767a2285cbb395607c2 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2.jsonl @@ -160,8 +160,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') 
AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} @@ -215,8 +215,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck 
Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2_large.jsonl b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2_large.jsonl index 65ce3babeb464ba37490953f18c1ad1647772c1e..3724164d6a9fdaf93d29a81cafa59585825d9cba 100644 --- a/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2_large.jsonl +++ b/recipes/3p_integrations/lamini/text2sql_memory_tuning/data/training_data/generated_queries_v2_large.jsonl @@ -1188,8 +1188,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", 
"answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} @@ -1243,8 +1243,8 @@ {"question": "Would you please let me know what the highest paid players are for each position?", "answer": "The highest paid players are Nikola Jokic (C), Paul George (F), Norman Powell (G), Kevin Durant (PF), Stephen Curry (PG), LeBron James (SF), Bradley Beal (SG).", "sql": "SELECT name, pos, MAX(CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER)) as max_salary FROM nba_roster WHERE SALARY!= '--' GROUP BY POS;"} {"question": "Is Jalen Johnson 23 years old?", "answer": "No, Jalen Johnson is 21 years old", "sql": "Select name, age from nba_roster where name='Jalen Johnson';"} {"question": "Who is the oldest player on the Brooklyn Nets?", "answer": "Spencer Dinwiddie, Dorian Finney-Smith, Royce O'Neale", "sql": "SELECT NAME FROM nba_roster WHERE TEAM = 'Brooklyn Nets' AND AGE = (SELECT MAX(AGE) FROM nba_roster WHERE TEAM = 'Brooklyn Nets');"} -{"question": "Who has the higest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} -{"question": "Which player has the higest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Who has the highest salary on the Memphis Grizzlies?", "answer": "Ja Morant", "sql": "select salary, name from nba_roster where team='Memphis Grizzlies' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} +{"question": "Which player has the highest salary on the Cleveland Cavaliers?", "answer": "Darius Garland", "sql": "select salary, name from nba_roster where team='Cleveland Cavaliers' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "Who is the highest paid center on the Dallas Mavericks?", "answer": "Dereck Lively II", "sql": "select salary, name from nba_roster where team='Dallas Mavericks' and POS='C' and SALARY!= '--' ORDER BY CAST(REPLACE(REPLACE(SALARY, '$', ''), ',','') AS INTEGER) DESC LIMIT 1;"} {"question": "How much is Marcus Smart getting paid?", "answer": "$18,833,712", "sql": "select salary from nba_roster where name='Marcus Smart';"} {"question": "What's the average age of the Trail Blazers?", "answer": "24", "sql": "select avg(age) from nba_roster where team='Portland Trail Blazers';"} diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md index deeee9a9cdd1317c0f406ecfa410701305891719..ea9c3d849c3b91e549605faf4ecfea6f25b9439a 100644 --- a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md +++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md @@ -1,6 +1,6 @@ # Building Agentic RAG with Llamaindex -The folder here containts the Llama 3 ported notebooks of the DLAI short course [Building Agentic RAG with Llamaindex](https://www.deeplearning.ai/short-courses/building-agentic-rag-with-llamaindex/). +The folder here contains the Llama 3 ported notebooks of the DLAI short course [Building Agentic RAG with Llamaindex](https://www.deeplearning.ai/short-courses/building-agentic-rag-with-llamaindex/). 1. 
[Building Agentic RAG with Llamaindex L1 Router Engine](../../../quickstart/agents/DeepLearningai_Course_Notebooks/Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb) shows how to implement a simple agentic RAG, a router that will pick up one of several query tools (question answering or summarization) to execute a query on a single document. Note this notebook is located in the `quickstart` folder. diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/README.md b/recipes/3p_integrations/modal/many-llamas-human-eval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c3c1b661918a415a20cac455a1e2f6250294100 --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/README.md @@ -0,0 +1,71 @@ +# Many-Llamas Human-Eval + +In this directory, we run an experiment answering the question: + +*If we run enough Llama models in parallel, can they outperform GPT-4o on HumanEval?* + +It seeks to increase model performance not through scaling parameters, but by scaling compute time. + +### Technical Blog + +This experiment was built by the team at [Modal](https://modal.com) and is described in the following blog post: + +[Beat GPT-4o at Python by searching with 100 small Llamas](https://modal.com/blog/llama-human-eval) + +The experiment has since been upgraded to use the [Llama 3.2 3B Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) model, and is runnable end-to-end using the Modal serverless platform. + +## Run it yourself + +### Install the Modal CLI +From within your virtual environment, run: +```bash +pip install modal +``` +And if you're new to Modal, authenticate with: +```bash +modal setup +# or if that doesn't work, try +# python -m modal setup +``` + +That's all! + +This CLI will execute your modal apps, which build and run containers on the cloud, on your GPU of choice. + +### HuggingFace Pull Access + +To download the model, you'll first need to accept the [Llama 3.2 License](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on HuggingFace and be approved for access. + +Then, create a [modal secret](https://modal.com/secrets) named `huggingface`, to which you'll add your HF_TOKEN as an environment variable. + +### Run The Experiment + +This command will run every step for you: +```bash +bash run_e2e.sh +``` + +Or if you prefer to run it manually, you can step through each of the modal commands in [the script](./run_e2e.sh). + +This will execute: +1. Downloading the Llama 3.2 3B Instruct model to a cloud volume +2. Deploying a vLLM inference server to GPUs +3. Running hundreds of parallel generations on the HumanEval test set +4. Running the evaluation script to compute pass@k and fail@k +5. Generating graphs of pass@k and fail@k + +### Results + +The resulting plots of the evals will be saved locally to: +- `/tmp/plot-pass-k.jpeg` +- `/tmp/plot-fail-k.jpeg` + +`/tmp/plot-pass-k.jpeg` shows pass@k for the Llama 3.2 3B Instruct model vs pass@1 for GPT-4o. + + + +You'll see that at 100 generations, the Llama model is able to perform on par with GPT-4o. At higher scale, the Llama model will outperform GPT-4o. + +`/tmp/plot-fail-k.jpeg` shows fail@k on a log scale, demonstrating the smooth scaling of this method.
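Since step 4 above reports pass@k, it may help to recall how that metric is computed. The snippet below is a small, self-contained sketch of the standard unbiased pass@k estimator from the HumanEval paper (Chen et al., 2021); the function name and the toy numbers are illustrative, not taken from this recipe's eval script:

```python
import math

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k: chance that at least one of k samples drawn from
    n generations is correct, given that c of the n generations passed."""
    if n - c < k:
        return 1.0
    # pass@k = 1 - C(n-c, k) / C(n, k), computed as a stable running product
    return 1.0 - math.prod((n - c - i) / (n - i) for i in range(k))

# Toy example: 1000 generations for a task, 40 of which pass the tests
print(f"pass@1   ~ {pass_at_k(1000, 40, 1):.3f}")
print(f"pass@100 ~ {pass_at_k(1000, 40, 100):.3f}")
```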
+ + diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/download.py b/recipes/3p_integrations/modal/many-llamas-human-eval/download.py new file mode 100644 index 0000000000000000000000000000000000000000..d96f36537cee37b1b208f05d5712876fe304021d --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/download.py @@ -0,0 +1,64 @@ +# ## Downloading Llama 3.2 3B Instruct Model +# This script uses a Modal Function to download the model into a cloud Volume. +# +# Run it with: +# modal run download + +import modal + +MODELS_DIR = "/llamas" +DEFAULT_NAME = "meta-llama/Llama-3.2-3B-Instruct" + +MINUTES = 60 +HOURS = 60 * MINUTES + +# Create a modal Volume to store the model +volume = modal.Volume.from_name("llamas", create_if_missing=True) + +# This defines the image to use for the modal function +image = ( + modal.Image.debian_slim(python_version="3.10") + .pip_install( + [ + "huggingface_hub", # download models from the Hugging Face Hub + "hf-transfer", # download models faster with Rust + ] + ) + .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"}) +) + +# We run the function from a modal App, which will have our HF_SECRET env var set. +# Add your HuggingFace secret access token here: https://modal.com/secrets +# secret name: huggingface +# env var name: HF_TOKEN +app = modal.App(image=image, secrets=[modal.Secret.from_name("huggingface")]) + +# This function will be ran in the cloud, with the volume mounted. +@app.function(volumes={MODELS_DIR: volume}, timeout=4 * HOURS) +def download_model(model_name, force_download=False): + from huggingface_hub import snapshot_download + + volume.reload() + + snapshot_download( + model_name, + local_dir=MODELS_DIR + "/" + model_name, + ignore_patterns=[ + "*.pt", + "*.bin", + "*.pth", + "original/*", + ], # Ensure safetensors + force_download=force_download, + ) + + volume.commit() + + print("Model successfully downloaded") + +@app.local_entrypoint() +def main( + model_name: str = DEFAULT_NAME, + force_download: bool = False, +): + download_model.remote(model_name, force_download) diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/eval.py b/recipes/3p_integrations/modal/many-llamas-human-eval/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..5d2c135bed12c14fc7147fc3f6840e367a0778e1 --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/eval.py @@ -0,0 +1,96 @@ +# ## Evaluating HumanEval Results using Modal Sandboxes +# This script will take generated results and evaluate them. +# We use Modal Sandboxes to safely evaluate LLM-generated results. 
+# +# Run it with: +# modal run eval + +from pathlib import Path + +import modal + +app = modal.App("many-llamas-human-eval") + +volume = modal.Volume.from_name("humaneval", create_if_missing=True) + +sandbox_image = ( + modal.Image.debian_slim() + .apt_install("git") + .run_commands( + "git clone https://github.com/modal-labs/human-eval.git", + "pip install -e human-eval", + ) +) + +MINUTES = 60 + +@app.function(volumes={"/humaneval": volume}, timeout=10 * MINUTES) +def eval_single_task(sample_file_path: str, problem_file_path: str): + with modal.Volume.ephemeral() as vol: + with vol.batch_upload() as batch: + batch.put_file(sample_file_path, "samples.jsonl") + batch.put_file(problem_file_path, "problems.jsonl") + + print(f"Starting sandbox for {sample_file_path}") + sandbox = modal.Sandbox.create( + "bash", + "-c", + "evaluate_functional_correctness vol/samples.jsonl --problem_file=vol/problems.jsonl --n_workers=32", + image=sandbox_image, + volumes={"/vol": vol}, + timeout=10 * MINUTES, + cpu=32, + ) + + try: + sandbox.wait() + print(f"Finished sandbox for {sample_file_path}") + except FunctionTimeoutError: + print("Sandbox timed out") + + if sandbox.returncode == 0: + print(sandbox.stdout.read()) + data = b"" + for chunk in vol.read_file("samples.jsonl_results.jsonl"): + data += chunk + with open(f"{sample_file_path}_results.jsonl", "wb") as f: + f.write(data) + else: + print(f"Tests failed with code {sandbox.returncode}") + print(sandbox.stderr.read()) + + +@app.function(volumes={"/humaneval": volume}, timeout=10 * MINUTES) +def eval_all_tasks(): + import os + + volume.reload() + + # Find all files matching /humaneval/{env}/{run}/{id}.jsonl + envs = [element for element in Path("/humaneval").iterdir() if element.is_dir()] + for env in envs: + print(f"looking in {env}") + problem_file = env / "data.jsonl" + + pattern = "*/*.jsonl" + handles = [] + for file_path in env.glob(pattern): + # Skip files that end with _results.jsonl + if str(file_path).endswith("_results.jsonl"): + continue + + print(f"Checking {file_path}") + # Check if the corresponding results file exists + results_file = f"{file_path}_results.jsonl" + if not os.path.exists(results_file): + # If it doesn't exist, run do_eval + print("Spawning on", file_path, problem_file) + handles.append(eval_single_task.spawn(file_path, problem_file)) + + for handle in handles: + handle.get() + + +@app.local_entrypoint() +def main(): + eval_all_tasks.remote() diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/generate.py b/recipes/3p_integrations/modal/many-llamas-human-eval/generate.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea6cd9ce584eca754be572649bb3a46997c33fa --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/generate.py @@ -0,0 +1,248 @@ +# ## Generating HumanEval Results with our Llama 3.2 3B Instruct Model +# This app starts many parallel clients to send requests to the vLLM server. +# +# For each of the tasks in the HumanEval test set, we'll run a client to request 1000 completions. +# Results are saved to our mounted volume. 
+# +# Run it with: +# modal run generate --data-dir test --no-dry-run --n 1000 --subsample 100 + +from datetime import datetime +import json +from pathlib import Path +from dataclasses import dataclass, asdict + +import modal + +# This defines the image to use for running openai clients in parallel +image = modal.Image.debian_slim(python_version="3.11").pip_install( + "openai==1.38.0", "datasets==2.20.0" +) + +app = modal.App("many-llamas-human-eval", image=image) + +volume = modal.Volume.from_name("humaneval", create_if_missing=True) +DATA_DIR = Path("/mnt/humaneval") + +default_system_prompt = "Write the body for the Python function provided in the prompt below. Do not write anything else. Your output will be directly concatenated with the prompt and the resulting function executed against tests." + +MINUTES = 60 # seconds +HOURS = 60 * MINUTES + +@dataclass +class CompletionParams: + model: str = None + max_tokens: int = 1024 + temperature: float = 0.7 + top_p: float = 0.9 + frequency_penalty: float = 0 + presence_penalty: float = 0 + n: int = 1 + stop: str = None + seed: int = None + +@dataclass +class ClientParams: + app_name: str = "many-llamas-human-eval" + workspace: str = None + api_key: str = "super-secret-token" # match the secret in inference.py + + @property + def url(self): + return f"https://{self.workspace}--{self.app_name}-serve.modal.run/v1" + + +@app.local_entrypoint() +def main( + app_name: str = "many-llamas-human-eval", + workspace: str = None, + api_key: str = "super-secret-token", + model: str = None, + max_tokens: int = 1024, + temperature: float = 0.7, + top_p: float = 0.9, + frequency_penalty: float = 0, + presence_penalty: float = 0, + n: int = 1, + stop: str = None, + seed: int = None, + data_dir: str = "dev-llm", + subsample: int = 1, # percent of the test split to read + system_prompt: str = default_system_prompt, + dry_run: bool = True, +): + if workspace is None: + workspace = modal.config._profile + + client_params = ClientParams(app_name, workspace, api_key) + + completion_params = CompletionParams( + model=model, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + n=n, + stop=stop, + seed=seed, + ) + + # Run a remote download function to save the HumanEval dataset in the cloud volume + save_dataset.remote(path=data_dir, subsample=subsample) + + # Run a remote generation function + results = run_human_eval.remote( + client_params=client_params, + completion_params=completion_params, + system_prompt=system_prompt, + data_dir=data_dir, + dry_run=dry_run, + ) + if results: + with open("/tmp/results.jsonl", "w") as f: + f.writelines(json.dumps(result) + "\n" for result in results) + print(f"results saved locally to {f.name}") + +# This is the parent function that spawns a client for each eval task +@app.function(volumes={DATA_DIR: volume}, timeout=1 * HOURS) +def run_human_eval( + client_params: ClientParams, + completion_params: CompletionParams, + data_dir="dev-llm", + system_prompt: str = default_system_prompt, + dry_run=True, +): + dataset = load_dataset(data_dir) + + timestamp = datetime.utcnow().isoformat() + "Z" + output_dir = Path(DATA_DIR) / data_dir / f"run-{timestamp}" + output_dir.mkdir(parents=True, exist_ok=True) + handles = [] + print(f"Eval set contains {len(dataset)} items") + + # For each eval item in the dataset, spawn a parallel openAI client worker that generates n completions each + print(Colors.BOLD, f"Spawning clients for each eval item. 
You may notice a brief wait while the inference server(s) boot.", Colors.END, sep="") + for i, item in enumerate(dataset): + handles.append( + run_item.spawn( + item, + client_params, + completion_params, + system_prompt, + output_dir, + dry_run, + ) + ) + + for handle in handles: + result = handle.get() + + if not dry_run: + return result + +# This function is responsible for generating n completions for a single eval item +# It calls into our deployed vLLM server and saves results to the cloud volume +@app.function(volumes={DATA_DIR: volume}, timeout=1 * HOURS) +def run_item( + item: dict, + client_params: ClientParams, + completion_params: CompletionParams, + system_prompt: str, + output_dir: Path, + dry_run: bool, +): + client = create_client(client_params) + if not completion_params.model: + model = client.models.list().data[0] + model = model.id + completion_params.model = model + + prompt = item["prompt"] + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": prompt}, + ] + + per_request = 250 + ct, completions = completion_params.n, [] + if not dry_run: + while ct > 0: + response = get_completion( + client, + messages=messages, + **asdict(completion_params) | dict(n=min(ct, per_request)), + ) + if response: + completions += [ + { + "task_id": item["task_id"], + "completion": choice.message.content, + } + for choice in response.choices + ] + ct -= per_request + + index = item["task_id"].split("/")[-1] + output_path = output_dir / f"{index}.jsonl" + output_path.parent.mkdir(parents=True, exist_ok=True) + with open(output_path, "w") as f: + f.writelines(json.dumps(completion) + "\n" for completion in completions) + + print(Colors.GREEN + f"Completions saved to {output_path}" + Colors.END) + + +class Colors: + """ANSI color codes""" + + GREEN = "\033[0;32m" + RED = "\033[0;31m" + BLUE = "\033[0;34m" + GRAY = "\033[0;90m" + BOLD = "\033[1m" + END = "\033[0m" + + +def get_completion(client, **kwargs): + try: + response = client.chat.completions.create(**kwargs) + return response + except Exception as e: + print(Colors.RED, f"Error during API call: {e}", Colors.END, sep="") + return None + + +def create_client(client_params: ClientParams): + from openai import OpenAI + + client = OpenAI(api_key=client_params.api_key) + client.base_url = client_params.url + + return client + +# This function downloads the HumanEval dataset +@app.function(volumes={DATA_DIR: volume}) +def save_dataset(path="dev-llm", subsample: int = 1): + import datasets + + path = DATA_DIR / path + + ds = datasets.load_dataset( + "openai/openai_humaneval", + # reads 0% to subsample% of the test split + split=datasets.ReadInstruction("test", to=subsample, unit="%"), + ) + + ds.to_json(path / "data.jsonl") + + volume.commit() + + +def load_dataset(path="dev-llm"): + import datasets + + path = DATA_DIR / path + + ds = datasets.load_dataset(path=str(path), data_files="data.jsonl") + + return ds["train"] diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/inference.py b/recipes/3p_integrations/modal/many-llamas-human-eval/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..45bb60420f55d42aad65641b5a9643424e129860 --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/inference.py @@ -0,0 +1,149 @@ +# ## Serving Llama 3.2 3B Instruct Model With vLLM +# This app runs a vLLM server on an A100 GPU. 
+# +# Run it with: +# modal deploy inference + +import modal + +# This defines the image to use for the vLLM server container +vllm_image = modal.Image.debian_slim(python_version="3.10").pip_install( + "vllm==0.5.3post1" +) + + +MODELS_DIR = "/llamas" +MODEL_NAME = "meta-llama/Llama-3.2-3B-Instruct" + +# Ensure the model is downloaded and the volume exists +try: + volume = modal.Volume.lookup("llamas", create_if_missing=False) +except modal.exception.NotFoundError: + raise Exception("Download models first with modal run download") + +app = modal.App("many-llamas-human-eval") + +N_GPU = 1 # tip: for best results, first upgrade to more powerful GPUs, and only then increase GPU count +TOKEN = ( + "super-secret-token" # auth token. for production use, replace with a modal.Secret +) + +MINUTES = 60 # seconds +HOURS = 60 * MINUTES + +@app.function( + image=vllm_image, + gpu=modal.gpu.A100(count=N_GPU, size="40GB"), + container_idle_timeout=5 * MINUTES, + timeout=24 * HOURS, + allow_concurrent_inputs=20, # VLLM will batch requests so many can be received at once + volumes={MODELS_DIR: volume}, + concurrency_limit=10, # max 10 GPUs +) +@modal.asgi_app() +def serve(): + import fastapi + import vllm.entrypoints.openai.api_server as api_server + from vllm.engine.arg_utils import AsyncEngineArgs + from vllm.engine.async_llm_engine import AsyncLLMEngine + from vllm.entrypoints.logger import RequestLogger + from vllm.entrypoints.openai.serving_chat import OpenAIServingChat + from vllm.entrypoints.openai.serving_completion import ( + OpenAIServingCompletion, + ) + from vllm.usage.usage_lib import UsageContext + + volume.reload() # ensure we have the latest version of the weights + + # create a fastAPI app that uses vLLM's OpenAI-compatible router + web_app = fastapi.FastAPI( + title=f"OpenAI-compatible {MODEL_NAME} server", + description="Run an OpenAI-compatible LLM server with vLLM on modal.com", + version="0.0.1", + docs_url="/docs", + ) + + # security: CORS middleware for external requests + http_bearer = fastapi.security.HTTPBearer( + scheme_name="Bearer Token", + description="See code for authentication details.", + ) + web_app.add_middleware( + fastapi.middleware.cors.CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + # security: inject dependency on authed routes + async def is_authenticated(api_key: str = fastapi.Security(http_bearer)): + if api_key.credentials != TOKEN: + raise fastapi.HTTPException( + status_code=fastapi.status.HTTP_401_UNAUTHORIZED, + detail="Invalid authentication credentials", + ) + return {"username": "authenticated_user"} + + router = fastapi.APIRouter(dependencies=[fastapi.Depends(is_authenticated)]) + + # wrap vllm's router in auth router + router.include_router(api_server.router) + # add authed vllm to our fastAPI app + web_app.include_router(router) + + engine_args = AsyncEngineArgs( + model=MODELS_DIR + "/" + MODEL_NAME, + tensor_parallel_size=N_GPU, + gpu_memory_utilization=0.90, + max_model_len=2048, + enforce_eager=False, # capture the graph for faster inference, but slower cold starts (30s > 20s) + ) + + engine = AsyncLLMEngine.from_engine_args( + engine_args, usage_context=UsageContext.OPENAI_API_SERVER + ) + + model_config = get_model_config(engine) + + request_logger = RequestLogger(max_log_len=2048) + + api_server.openai_serving_chat = OpenAIServingChat( + engine, + model_config=model_config, + served_model_names=[MODEL_NAME], + chat_template=None, + response_role="assistant", + 
lora_modules=[], + prompt_adapters=[], + request_logger=request_logger, + ) + api_server.openai_serving_completion = OpenAIServingCompletion( + engine, + model_config=model_config, + served_model_names=[MODEL_NAME], + lora_modules=[], + prompt_adapters=[], + request_logger=request_logger, + ) + + return web_app + + +def get_model_config(engine): + import asyncio + + try: # adapted from vLLM source -- https://github.com/vllm-project/vllm/blob/507ef787d85dec24490069ffceacbd6b161f4f72/vllm/entrypoints/openai/api_server.py#L235C1-L247C1 + event_loop = asyncio.get_running_loop() + except RuntimeError: + event_loop = None + + if event_loop is not None and event_loop.is_running(): + # If the current is instanced by Ray Serve, + # there is already a running event loop + model_config = event_loop.run_until_complete(engine.get_model_config()) + else: + # When using single vLLM without engine_use_ray + model_config = asyncio.run(engine.get_model_config()) + + return model_config diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/plot.py b/recipes/3p_integrations/modal/many-llamas-human-eval/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..db225fb13eaedc21f3c633539e3ee8e37d74c874 --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/plot.py @@ -0,0 +1,194 @@ +# ## Plotting HumanEval Results +# This script will calculate pass@k and fail@k for our experiment and plot them. +# +# Run it with: +# modal run plot + +import io +import json +from pathlib import Path +from typing import List, Union +import itertools + +import modal + +try: + volume = modal.Volume.lookup("humaneval", create_if_missing=False) +except modal.exception.NotFoundError: + raise Exception("Generate results first with modal run generate --data-dir test --no-dry-run --n 1000 --subsample 100") + + +image = modal.Image.debian_slim(python_version="3.11").pip_install( + "numpy==1.26.4", + "pandas==2.2.3", + "matplotlib==3.9.2", + "seaborn==0.13.2", +) + +app = modal.App("many-llamas-human-eval", image=image) + +DATA_DIR = Path("/mnt/humaneval") + +with image.imports(): + import numpy as np + import pandas as pd + import matplotlib.pyplot as plt + import seaborn as sns + +@app.function(volumes={DATA_DIR: volume}) +def render_plots(): + run_dirs = list(sorted((DATA_DIR / "test").glob("run-*"))) + + for run_dir in reversed(run_dirs): + if len(list(run_dir.iterdir())) < 150: + print(f"skipping incomplete run {run_dir}") + else: + break + + all_result_paths = list(run_dir.glob("*.jsonl_results.jsonl")) + + data = [] + for path in all_result_paths: + data += [json.loads(line) for line in path.read_text(encoding='utf-8').splitlines()] + + for element in data: + del element["completion"] + + df = pd.DataFrame.from_records(data) + + gb = df.groupby("task_id") + passes = gb["passed"].sum() + + def estimate_pass_at_k( + num_samples: Union[int, List[int], np.ndarray], + num_correct: Union[List[int], np.ndarray], + k: int + ) -> np.ndarray: + """ + Estimates pass@k of each problem and returns them in an array. + """ + + def estimator(n: int, c: int, k: int) -> float: + """ + Calculates 1 - comb(n - c, k) / comb(n, k). 
+ """ + if n - c < k: + return 1.0 + return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)) + + if isinstance(num_samples, int): + num_samples_it = itertools.repeat(num_samples, len(num_correct)) + else: + assert len(num_samples) == len(num_correct) + num_samples_it = iter(num_samples) + + return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)]) + + pass_at_ks = {} + + for k in [1, 10, 100, 1000]: + pass_at_ks[k] = estimate_pass_at_k(1000, passes, k) + + pass_at_k = {k: np.mean(v) for k, v in pass_at_ks.items()} + + plot_df = pd.DataFrame( + {"k": pass_at_k.keys(), + "pass@k": pass_at_k.values()} + ) + plot_df["fail@k"] = 1 - plot_df["pass@k"] + + sns.set_theme(style='dark') + plt.style.use("dark_background") + + plt.rcParams['font.sans-serif'] = ["Inter", "Arial", "DejaVu Sans", "Liberation Sans", "Bitstream Vera Sans", "sans-serif"] + + sns.despine() + + sns.set_context("talk", rc={"lines.linewidth": 2.5}) + + gpt4o_benchmark = 0.902 + + # First plot + plt.figure(figsize=(10, 6)) + fg = sns.lineplot( + x="k", + y="pass@k", + data=plot_df, + color="#7FEE64", + linewidth=6, + alpha=0.9, + label="Llama 3.2 3B Instruct pass@k" + ) + + initial_lim = fg.axes.get_xlim() + fg.axes.hlines( + gpt4o_benchmark, *initial_lim, + linestyle="--", + alpha=0.6, + zorder=-1, + label="GPT-4o fail@1" + ) + fg.axes.set_xlim(*initial_lim) + fg.axes.set_ylabel("") + fg.axes.set_ylim(0, 1) + plt.tight_layout(pad=1.2) + plt.legend() + + # Save the first plot as bytes + img_buffer = io.BytesIO() + plt.savefig(img_buffer, format='jpeg') + plot_1_img_bytes = img_buffer.getvalue() + plt.close() + + # Second plot + plt.figure(figsize=(10, 6)) + fg = sns.lineplot( + x="k", + y="fail@k", + data=plot_df, + color="#7FEE64", + linewidth=6, + alpha=0.9, + label="Llama 3.2 3B Instruct fail@k" + ) + + initial_lim = fg.axes.get_xlim() + fg.axes.hlines( + 1 - gpt4o_benchmark, *initial_lim, + linestyle="--", + alpha=0.6, + zorder=-1, + label="GPT-4o fail@1" + ) + fg.axes.set_xlim(*initial_lim) + fg.axes.set_ylabel("") + fg.axes.set_yscale("log") + fg.axes.set_xscale("log") + fg.axes.set_xlim(0.5, 2000) + fg.axes.set_ylim(1e-2, 1e0) + plt.tight_layout(pad=1.2) + plt.legend() + + # Save the second plot as bytes + img_buffer = io.BytesIO() + plt.savefig(img_buffer, format='jpeg') + plot_2_img_bytes = img_buffer.getvalue() + plt.close() + + return [plot_1_img_bytes, plot_2_img_bytes] + +@app.local_entrypoint() +def main(): + plots = render_plots.remote() + + assert len(plots) == 2 + + with open ("/tmp/plot-pass-k.jpeg", "wb") as f: + f.write(plots[0]) + + with open ("/tmp/plot-fail-k.jpeg", "wb") as f: + f.write(plots[1]) + + print("Plots saved to:") + print(" /tmp/plot-pass-k.jpeg") + print(" /tmp/plot-fail-k.jpeg") \ No newline at end of file diff --git a/recipes/3p_integrations/modal/many-llamas-human-eval/run_e2e.sh b/recipes/3p_integrations/modal/many-llamas-human-eval/run_e2e.sh new file mode 100644 index 0000000000000000000000000000000000000000..d544425b5517564381ece2eeedba20d0a4c4ad83 --- /dev/null +++ b/recipes/3p_integrations/modal/many-llamas-human-eval/run_e2e.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +command -v modal >/dev/null 2>&1 || { echo >&2 "modal command not found. Install modal first! 
Aborting."; exit 1; } + +echo 'downloading LLaMA 3.2 3B Instruct model' +echo 'make sure to create a Secret called huggingface on Modal and accept the LLaMA 3.2 license' +modal run download.py + +echo 'deploying vLLM inference server' +modal deploy inference.py + +echo 'running HumanEval generation' +modal run generate.py --data-dir test --no-dry-run --n 1000 --subsample 100 + +echo 'running HumanEval evaluation' +modal run eval.py + +echo 'generating graphs for pass@k and fail@k' +modal run plot.py \ No newline at end of file diff --git a/recipes/3p_integrations/octoai/video_summary.ipynb b/recipes/3p_integrations/octoai/video_summary.ipynb index 93432dda1b6491a374c914816d4b9e53f071e6b9..aaa51509ea622527463382e8df1eeef4a955785f 100644 --- a/recipes/3p_integrations/octoai/video_summary.ipynb +++ b/recipes/3p_integrations/octoai/video_summary.ipynb @@ -8,7 +8,7 @@ "## This demo app shows:\n", "* How to use LangChain's YoutubeLoader to retrieve the caption in a YouTube video\n", "* How to ask Llama 3 to summarize the content (per the Llama's input size limit) of the video in a naive way using LangChain's stuff method\n", - "* How to bypass the limit of Llama 3's max input token size by using a more sophisticated way using LangChain's map_reduce and refine methods - see [here](https://python.langchain.com/docs/use_cases/summarization) for more info" + "* How to bypass the limit of Llama 3's max input token size by using a more sophisticated way using LangChain's map_reduce and refine methods - see [here](https://python.langchain.com/docs/tutorials/summarization/) for more info" ] }, { @@ -22,7 +22,7 @@ "- [tiktoken](https://github.com/openai/tiktoken) BytePair Encoding tokenizer\n", "- [pytube](https://pytube.io/en/latest/) Utility for downloading YouTube videos\n", "\n", - "**Note** This example uses OctoAI to host the Llama 3 model. If you have not set up/or used OctoAI before, we suggest you take a look at the [HelloLlamaCloud](HelloLlamaCloud.ipynb) example for information on how to set up OctoAI before continuing with this example.\n", + "**Note** This example uses OctoAI to host the Llama 3 model. If you have not set up/or used OctoAI before, we suggest you take a look at the [hello_llama_cloud](hello_llama_cloud.ipynb) example for information on how to set up OctoAI before continuing with this example.\n", "If you do not want to use OctoAI, you will need to make some changes to this notebook as you go along." 
] }, @@ -205,7 +205,7 @@ "id": "e112845f-de16-4c2f-8afe-6cca31f6fa38", "metadata": {}, "source": [ - "To fix this, you can use LangChain's load_summarize_chain method (detail [here](https://python.langchain.com/docs/use_cases/summarization)).\n", + "To fix this, you can use LangChain's load_summarize_chain method (detail [here](https://python.langchain.com/docs/tutorials/summarization/)).\n", "\n", "First you'll create splits or sub-documents of the original content, then use the LangChain's `load_summarize_chain` with the `refine` or `map_reduce type`.\n", "\n", @@ -221,7 +221,6 @@ "source": [ "import os\n", "os.environ[\"LANGCHAIN_API_KEY\"] = \"your_langchain_api_key\"\n", - "os.environ[\"LANGCHAIN_API_KEY\"] = \"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\"\n", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "os.environ[\"LANGCHAIN_PROJECT\"] = \"Video Summary with Llama 3\"" ] diff --git a/recipes/3p_integrations/togetherai/multimodal_RAG_with_nvidia_investor_slide_deck.ipynb b/recipes/3p_integrations/togetherai/multimodal_RAG_with_nvidia_investor_slide_deck.ipynb index 20dd8215280c39656b18eed91f503b522ea17e7d..0aeedf8463a12d6a1a6592c3859eabd2eb55614e 100644 --- a/recipes/3p_integrations/togetherai/multimodal_RAG_with_nvidia_investor_slide_deck.ipynb +++ b/recipes/3p_integrations/togetherai/multimodal_RAG_with_nvidia_investor_slide_deck.ipynb @@ -635,7 +635,7 @@ } ], "source": [ - "# Dowload and rename the last presentation from Nvidia to investors\n", + "# Download and rename the last presentation from Nvidia to investors\n", "!wget https://s201.q4cdn.com/141608511/files/doc_presentations/2023/Oct/01/ndr_presentation_oct_2023_final.pdf\n", "!mv ndr_presentation_oct_2023_final.pdf nvidia_presentation.pdf" ] @@ -811,7 +811,7 @@ "source": [ "### How does this work? What happens under the hood between the different pages and query token?\n", "\n", - "The interaction operation between page image patch and query text token representations to score each page of the document is what allows this great retreival performance.\n", + "The interaction operation between page image patch and query text token representations to score each page of the document is what allows this great retrieval performance.\n", "\n", "Typically each image is resized and cut into patch sizes of 16x16 pixels. These patches are then embedded into 128 dimensional vectors which are stored and used to perform the MaxSim and late interaction operations between the image and text tokens. ColPali is a multi-vector approach because it produces multiple vectors for each image/query; one vector for each token instead of just one vector for all tokens. 
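To make the late-interaction scoring concrete, here is a small NumPy sketch of the MaxSim operation described above: every query-token vector picks its best-matching patch vector, and those maxima are summed. The shapes are placeholders chosen to match the description (128-dimensional vectors), not the notebook's actual tensors:

```python
import numpy as np

def maxsim_score(query_vecs: np.ndarray, patch_vecs: np.ndarray) -> float:
    """ColBERT/ColPali-style late interaction.
    query_vecs: (num_query_tokens, 128), patch_vecs: (num_patches, 128), L2-normalized."""
    sims = query_vecs @ patch_vecs.T      # cosine similarity of every token/patch pair
    return float(sims.max(axis=1).sum())  # best patch per query token, summed over tokens

rng = np.random.default_rng(0)
q = rng.standard_normal((8, 128))
q /= np.linalg.norm(q, axis=1, keepdims=True)
p = rng.standard_normal((1030, 128))
p /= np.linalg.norm(p, axis=1, keepdims=True)
print(maxsim_score(q, p))  # the page with the highest score is retrieved
```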
\n", "\n", @@ -878,7 +878,7 @@ }, "outputs": [], "source": [ - "# Since we stored the collection along with the index we have the base64 images of all PDF pages aswell!\n", + "# Since we stored the collection along with the index we have the base64 images of all PDF pages as well!\n", "model.search(query, k=1)" ] }, @@ -949,7 +949,7 @@ "source": [ "Here we can see that the combination of ColQwen2 as a image retriever and Llama-3.2 90B Vision is a powerful duo for multimodal RAG applications specially with PDFs.\n", "\n", - "Not only was ColQwen2 able to retrieve the correct page that had the right answer on it but then Llama-3.2 90B Vision was also able to find exactly where on the page this answer was, ignoring all the irrelvant details!\n", + "Not only was ColQwen2 able to retrieve the correct page that had the right answer on it but then Llama-3.2 90B Vision was also able to find exactly where on the page this answer was, ignoring all the irrelevant details!\n", "\n", "Voila!🎉🎉\n", "\n", diff --git a/recipes/3p_integrations/togetherai/structured_text_extraction_from_images.ipynb b/recipes/3p_integrations/togetherai/structured_text_extraction_from_images.ipynb index a4e2071e7191eb272d61d192d2c37c47c4c0ca9f..5a5074c6974b3abc98127f1b18a6d853da71c1dd 100644 --- a/recipes/3p_integrations/togetherai/structured_text_extraction_from_images.ipynb +++ b/recipes/3p_integrations/togetherai/structured_text_extraction_from_images.ipynb @@ -143,7 +143,7 @@ "id": "8aPkxE7MnbkX" }, "source": [ - "## Lets bring in the reciept that we want to extract information from\n", + "## Lets bring in the receipt that we want to extract information from\n", "\n", "Notice that this is a real receipt with multiple portions that are not relevant to the line item extraction structure we've outlined above.\n", "\n", @@ -243,7 +243,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Notice that the model is not perfect and wasn't able to extract out some line items. It's hard for most models to perform this zero-shot extraction of data from images. A way to improve this is to finetune the model using [Visual Intruction Tuning](https://arxiv.org/abs/2304.08485)." + "Notice that the model is not perfect and wasn't able to extract out some line items. It's hard for most models to perform this zero-shot extraction of data from images. A way to improve this is to finetune the model using [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485)." ] }, { @@ -401,7 +401,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Althought with some missed line items we were able to extract out structured JSON from an image in a zero shot manner! To improve the results for your pipeline and make them production ready I recommend you [finetune](https://docs.together.ai/docs/fine-tuning-overview) the vision model on your own dataset!\n", + "Although with some missed line items we were able to extract out structured JSON from an image in a zero shot manner! To improve the results for your pipeline and make them production ready I recommend you [finetune](https://docs.together.ai/docs/fine-tuning-overview) the vision model on your own dataset!\n", "\n", "Learn more about how to use JSON mode in the [docs](https://docs.together.ai/docs/json-mode) here!" 
] diff --git a/recipes/3p_integrations/vllm/README.md b/recipes/3p_integrations/vllm/README.md index fa1f35d1fa0ebe000cd83ae0c2f7218b286d2c32..cfc25b2ea8e8597e88ec246229350b83ced68ea2 100644 --- a/recipes/3p_integrations/vllm/README.md +++ b/recipes/3p_integrations/vllm/README.md @@ -27,12 +27,12 @@ To launch the inference simply execute the following command changing the tp_siz python inference.py --model_name $MODEL_PATH --peft_model_name $PEFT_MODEL_PATH --tp_size 8 --user_prompt "Hello my name is" ``` The script will ask for another prompt ina loop after completing the generation which you can exit by simply pressing enter and leaving the prompt empty. -When using multiple gpus the model will automatically be split accross the available GPUs using tensor parallelism. +When using multiple gpus the model will automatically be split across the available GPUs using tensor parallelism. ## Multi-node multi-gpu inference The FP8 quantized variants of Meta Llama (i.e. meta-llama/Meta-Llama-3.1-405B-FP8 and meta-llama/Meta-Llama-3.1-405B-Instruct-FP8) can be executed on a single node with 8x80GB H100 using the script located in this folder. To run the unquantized Meta Llama 405B variants (i.e. meta-llama/Meta-Llama-3.1-405B and meta-llama/Meta-Llama-3.1-405B-Instruct) we need multi-node inference. -vLLM allows this by leveraging pipeline parallelism accros nodes while still applying tensor parallelism insid each node. +vLLM allows this by leveraging pipeline parallelism across nodes while still applying tensor parallelism inside each node. To start a multi-node inference we first need to set up a ray serves which well be leveraged by vLLM to execute the model across node boundaries. ```bash diff --git a/recipes/experimental/long_context/H2O/README.md b/recipes/experimental/long_context/H2O/README.md index 20167f50db59c3c0963f116515b758f0bffc2eb6..c2097435b87fa55012f008ba7914f4b3b7240e41 100644 --- a/recipes/experimental/long_context/H2O/README.md +++ b/recipes/experimental/long_context/H2O/README.md @@ -2,7 +2,7 @@ ### Overview: -Heavy-Hitter Oracle (H2O) is an efficient inference framework of LLMs. During the generative inference of transfomers, the size of KV cache grows linearly with the sequence length (prompt length + generation length) during long context generation. And the size KV cache is usually significantly larger than the model parameters, contrains the inference throughput. H2O identifies the critical KV pairs and evicts other unnecessary ones, maintaining a small cache size thus improving the throughput. +Heavy-Hitter Oracle (H2O) is an efficient inference framework of LLMs. During the generative inference of transformers, the size of KV cache grows linearly with the sequence length (prompt length + generation length) during long context generation. And the size KV cache is usually significantly larger than the model parameters, contrains the inference throughput. H2O identifies the critical KV pairs and evicts other unnecessary ones, maintaining a small cache size thus improving the throughput. Besides, LLMs usually have poor generation to long sequence during inference. H2O handles this issue by maintaining only heavy-hitter tokens and the most recent tokens. Incorporated with the positional rolling strategy (reassigning the position of each kv with the position in the kv cache instead of the original sequence), H2O can process sequence length much longer than the pretrained context window. 
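As a rough illustration of the policy described above (an illustrative sketch, not the implementation in this folder), an H2O-style cache keeps the KV entries with the largest accumulated attention scores plus a window of the most recent tokens; the budget numbers below are arbitrary:

```python
import numpy as np

def h2o_keep_indices(acc_attention: np.ndarray, num_heavy: int, num_recent: int) -> np.ndarray:
    """acc_attention[i] = total attention mass cached token i has received so far.
    Keep the num_recent newest tokens plus the num_heavy highest-scoring older tokens."""
    seq_len = acc_attention.shape[0]
    recent = np.arange(max(0, seq_len - num_recent), seq_len)
    older = np.arange(0, max(0, seq_len - num_recent))
    heavy = older[np.argsort(acc_attention[older])[::-1][:num_heavy]]
    return np.sort(np.concatenate([heavy, recent]))

scores = np.random.rand(32)                                 # toy accumulated attention scores
print(h2o_keep_indices(scores, num_heavy=4, num_recent=8))  # 12 KV entries survive eviction
```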
Different from other approaches, like [Positional Interpolation](https://arxiv.org/abs/2306.15595), H2O is a KV cache policy and do not involve any training process for long context processing.
diff --git a/recipes/experimental/long_context/H2O/src/streaming.sh b/recipes/experimental/long_context/H2O/src/streaming.sh
index f7a3ea069ea7c3d6a5f54588a67b56329f05fd29..1e31b447f6fab6d463c0cbf795bdf3871c5de886 100644
--- a/recipes/experimental/long_context/H2O/src/streaming.sh
+++ b/recipes/experimental/long_context/H2O/src/streaming.sh
@@ -12,7 +12,7 @@ elif [[ ${method} == 'full' ]]; then
        --input-path data \
        --model-name lmsys/vicuna-13b-v1.5
 else
-    echo 'unknown argment for method'
+    echo 'unknown argument for method'
 fi
diff --git a/recipes/quickstart/Prompt_Engineering_with_Llama_3.ipynb b/recipes/quickstart/Prompt_Engineering_with_Llama_3.ipynb
index f9e70566614b086c1be0db9463da04bba754ef08..bc90afbe4affd84b88a7898cfbfc6628f0df8b9a 100644
--- a/recipes/quickstart/Prompt_Engineering_with_Llama_3.ipynb
+++ b/recipes/quickstart/Prompt_Engineering_with_Llama_3.ipynb
@@ -152,7 +152,7 @@
    "source": [
     "## Notebook Setup\n",
     "\n",
-    "The following APIs will be used to call LLMs throughout the guide. As an example, we'll call Llama 3.1 chat using [Grok](https://console.groq.com/playground?model=llama3-70b-8192).\n",
+    "The following APIs will be used to call LLMs throughout the guide. As an example, we'll call Llama 3.1 chat using [Groq](https://console.groq.com/playground?model=llama3-70b-8192).\n",
     "\n",
     "To install prerequisites run:"
    ]
   },
diff --git a/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_101.ipynb b/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_101.ipynb
index ee6d89e97b4fad89bcf9d9fc9e00425ca6926804..5b77a99a66fcfe7d8fb4098925e1f93d739f366f 100644
--- a/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_101.ipynb
+++ b/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_101.ipynb
@@ -22,7 +22,7 @@
     "- Understand how the tool calls are handled under the hood\n",
     "- 3.2 Model Tool Calling Format and Behaviour\n",
     "\n",
-    "In Part 2, we will learn how to build system that can get us comparision between 2 papers"
+    "In Part 2, we will learn how to build a system that can get us a comparison between 2 papers"
    ]
   },
   {
@@ -400,7 +400,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "If everything is setup correctly-the model should now wrap function calls with the `|<python_tag>|` following the actualy function call. \n",
+    "If everything is set up correctly, the model should now wrap function calls with the `<|python_tag|>` tag followed by the actual function call. \n",
     "\n",
     "This can allow you to manage your function calling logic accordingly. \n",
     "\n",
@@ -660,11 +660,11 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Life is great because Llama Team writes great docs for us, so we can conviently copy-pasta examples from there :)\n",
+    "Life is great because Llama Team writes great docs for us, so we can conveniently copy-pasta examples from there :)\n",
     "\n",
     "[Here](https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_2#-tool-calling-(1b/3b)-) are the docs for your reference that we will be using. \n",
     "\n",
-    "Excercise for viewer: Use `llama-toolchain` again to verify like we did earlier and then start the prompt engineering for the small Llamas."
+    "Exercise for viewer: Use `llama-toolchain` again to verify like we did earlier and then start the prompt engineering for the small Llamas."
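To make the tool-call format above concrete, here is a tiny, hypothetical parser for completions that use the `<|python_tag|>` convention; the function name and the sample string are invented for illustration, and the notebook's own handling may differ:

```python
from typing import Optional

def extract_tool_call(raw_completion: str) -> Optional[str]:
    """Return the function-call string from a completion that uses <|python_tag|>,
    or None if the model replied with plain text instead of a tool call."""
    tag = "<|python_tag|>"
    if tag not in raw_completion:
        return None
    call = raw_completion.split(tag, 1)[1]
    return call.split("<|eom_id|>", 1)[0].strip()

# Hypothetical completion text, for illustration only
print(extract_tool_call("<|python_tag|>get_current_weather(city='Madrid')<|eom_id|>"))
```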
] }, { diff --git a/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_201.ipynb b/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_201.ipynb index eb54362f7e5394a948ffc767ea8b60a1c96eb852..bc9a3d3943baf1572b4bf11e28401c82c4d46bca 100644 --- a/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_201.ipynb +++ b/recipes/quickstart/agents/Agents_Tutorial/Tool_Calling_201.ipynb @@ -403,7 +403,7 @@ "source": [ "def get_arxiv_ids(web_results: dict, temperature: int = 0, max_tokens=512):\n", " # Initialize chat history with a specific prompt to extract arXiv IDs\n", - " arxiv_id_chat_history = [{\"role\": \"system\", \"content\": \"Given this input, give me the arXiv ID of the papers. The input has the query and web results. DO NOT WRITE ANYTHING ELSE IN YOUR RESPONSE: ONLY THE ARXIV ID ONCE, the web search will have it repeated mutliple times, just return the it once and where its actually the arxiv ID\"}, {\"role\": \"user\", \"content\": f\"Here is the query and results{web_results}\"}]\n", + " arxiv_id_chat_history = [{\"role\": \"system\", \"content\": \"Given this input, give me the arXiv ID of the papers. The input has the query and web results. DO NOT WRITE ANYTHING ELSE IN YOUR RESPONSE: ONLY THE ARXIV ID ONCE, the web search will have it repeated multiple times, just return the it once and where its actually the arxiv ID\"}, {\"role\": \"user\", \"content\": f\"Here is the query and results{web_results}\"}]\n", "\n", " # Call the model to process the input and extract arXiv IDs\n", " response = client.chat.completions.create(\n", diff --git a/recipes/quickstart/build_with_Llama_3_2.ipynb b/recipes/quickstart/build_with_Llama_3_2.ipynb index 59aadf3cc1ecc6ccc312c2ff91f2e3215fef1f89..f50dbaa90e97c12ee9ae6afb6243b98fc676f789 100644 --- a/recipes/quickstart/build_with_Llama_3_2.ipynb +++ b/recipes/quickstart/build_with_Llama_3_2.ipynb @@ -417,7 +417,7 @@ " plt.axis('off')\n", " plt.show()\n", "\n", - "display_local_image(\"images/a_colorful_llama_doing_ai_programming.jpeg\")" + "display_local_image(\"../../docs/img/a_colorful_llama_doing_ai_programming.jpeg\")" ] }, { @@ -441,7 +441,7 @@ " with open(image_path, \"rb\") as img:\n", " return base64.b64encode(img.read()).decode('utf-8')\n", "\n", - "base64_image = encode_image(\"images/a_colorful_llama_doing_ai_programming.jpeg\")" + "base64_image = encode_image(\"../../docs/img/a_colorful_llama_doing_ai_programming.jpeg\")" ] }, { @@ -525,8 +525,8 @@ } ], "source": [ - "display_local_image(\"images/grocery_shopping_bascket_with_salmon_in_package.jpeg\")\n", - "base64_image = encode_image(\"images/grocery_shopping_bascket_with_salmon_in_package.jpeg\")" + "display_local_image(\"../../docs/img/grocery_shopping_bascket_with_salmon_in_package.jpeg\")\n", + "base64_image = encode_image(\"../../docs/img/grocery_shopping_bascket_with_salmon_in_package.jpeg\")" ] }, { @@ -1066,8 +1066,8 @@ } ], "source": [ - "display_local_image(\"images/thumbnail_IMG_1329.jpg\")\n", - "img = Image.open(\"images/thumbnail_IMG_1329.jpg\")\n", + "display_local_image(\"../../docs/img/thumbnail_IMG_1329.jpg\")\n", + "img = Image.open(\"../../docs/img/thumbnail_IMG_1329.jpg\")\n", "width, height = img.size\n", "print(\"Image dimensions:\", width, height)" ] @@ -1112,7 +1112,7 @@ " # Resize the image while maintaining aspect ratio\n", " resized_img = img.resize((new_width, new_height))\n", "\n", - " resized_img.save(\"images/resized_image.jpg\")\n", + " resized_img.save(\"../../docs/img/resized_image.jpg\")\n", "\n", " print(\"Original size:\", original_width, 
\"x\", original_height)\n", " print(\"New size:\", new_width, \"x\", new_height)\n", @@ -1121,7 +1121,7 @@ " \n", "max_dimension = 1120\n", "resized_img = resize_image(img)\n", - "base64_image = encode_image(\"images/resized_image.jpg\")" + "base64_image = encode_image(\"../../docs/img/resized_image.jpg\")" ] }, { @@ -1223,8 +1223,8 @@ } ], "source": [ - "display_local_image(\"images/thumbnail_IMG_6385.jpg\")\n", - "img = Image.open(\"images/thumbnail_IMG_6385.jpg\")\n", + "display_local_image(\"../../docs/img/thumbnail_IMG_6385.jpg\")\n", + "img = Image.open(\"../../docs/img/thumbnail_IMG_6385.jpg\")\n", "width, height = img.size\n", "print(\"Image dimensions:\", width, height)" ] @@ -1247,7 +1247,7 @@ "source": [ "max_dimension = 1120\n", "resized_img = resize_image(img)\n", - "base64_image = encode_image(\"images/resized_image.jpg\")" + "base64_image = encode_image(\"../../docs/img/resized_image.jpg\")" ] }, { @@ -1320,7 +1320,7 @@ } ], "source": [ - "base64_image = encode_image(\"images/resized_image.jpg\")\n", + "base64_image = encode_image(\"../../docs/img/resized_image.jpg\")\n", "\n", "messages = [\n", " {\n", @@ -1381,8 +1381,8 @@ } ], "source": [ - "display_local_image(\"images/meta_release.png\")\n", - "base64_image = encode_image(\"images/meta_release.png\")" + "display_local_image(\"../../docs/img/meta_release.png\")\n", + "base64_image = encode_image(\"../../docs/img/meta_release.png\")" ] }, { @@ -1448,8 +1448,8 @@ } ], "source": [ - "display_local_image(\"images/llama_stack.png\")\n", - "base64_image = encode_image(\"images/llama_stack.png\")" + "display_local_image(\"../../docs/img/llama_stack.png\")\n", + "base64_image = encode_image(\"../../docs/img/llama_stack.png\")" ] }, { @@ -1553,8 +1553,8 @@ } ], "source": [ - "display_local_image(\"images/thumbnail_IMG_1440.jpg\")\n", - "img = Image.open(\"images/thumbnail_IMG_1440.jpg\")\n", + "display_local_image(\"../../docs/img/thumbnail_IMG_1440.jpg\")\n", + "img = Image.open(\"../../docs/img/thumbnail_IMG_1440.jpg\")\n", "width, height = img.size\n", "print(\"Image dimensions:\", width, height)" ] @@ -1577,7 +1577,7 @@ "source": [ "max_dimension = 1120\n", "resized_img = resize_image(img)\n", - "base64_image = encode_image(\"images/resized_image.jpg\")" + "base64_image = encode_image(\"../../docs/img/resized_image.jpg\")" ] }, { @@ -1686,11 +1686,11 @@ } ], "source": [ - "display_local_image(\"images/thumbnail_IMG_6385.jpg\")\n", - "img = Image.open(\"images/thumbnail_IMG_6385.jpg\")\n", + "display_local_image(\"../../docs/img/thumbnail_IMG_6385.jpg\")\n", + "img = Image.open(\"../../docs/img/thumbnail_IMG_6385.jpg\")\n", "max_dimension = 1120\n", "resized_img = resize_image(img)\n", - "base64_image = encode_image(\"images/resized_image.jpg\")" + "base64_image = encode_image(\"../../docs/img/resized_image.jpg\")" ] }, { @@ -2488,7 +2488,7 @@ } ], "source": [ - "display_local_image(\"images/cat.jpeg\")" + "display_local_image(\"../../docs/img/cat.jpeg\")" ] }, { @@ -2626,7 +2626,7 @@ " if log is not None:\n", " log.print()\n", "\n", - "await run_main(\"images/cat.jpeg\",\n", + "await run_main(\"../../docs/img/cat.jpeg\",\n", " \"What cat breed is this? 
Tell me in detail about the breed.\")" ] }, @@ -2656,8 +2656,8 @@ } ], "source": [ - "display_local_image(\"images/gnocchi_alla_romana.jpeg\")\n", - "img = Image.open(\"images/gnocchi_alla_romana.jpeg\")\n", + "display_local_image(\"../../docs/img/gnocchi_alla_romana.jpeg\")\n", + "img = Image.open(\"../../docs/img/gnocchi_alla_romana.jpeg\")\n", "resized_img = resize_image(img)" ] }, @@ -2708,7 +2708,7 @@ } ], "source": [ - "await run_main(\"images/resized_image.jpg\",\n", + "await run_main(\"../../docs/img/resized_image.jpg\",\n", " \"What's the name of this dish? How can I make it?\")" ] }, @@ -2739,7 +2739,7 @@ "### ExecuTorch (XNNPACK framework)\n", "In this workshop we will walk you through the end to end workflow for building an android demo app using CPU on device via XNNPACK framework.\n", "To do so we need to follow these steps:\n", - "<img src=\"images/llama-mobile-confirmed.png\" alt=\"\" /> \n", + "<img src=\"../../docs/img/llama-mobile-confirmed.png\" alt=\"\" /> \n", "\n", "\n", "For detailed explanation of each of these steps please see this [link](https://github.com/pytorch/executorch/blob/main/examples/demo-apps/android/LlamaDemo/docs/delegates/xnnpack_README.md). Alternatively, you can follow this [tutorial](https://github.com/pytorch/executorch/blob/main/examples/demo-apps/apple_ios/LLaMA/docs/delegates/xnnpack_README.md) for running Llama 3.2 lightweight models on your iOS device!" diff --git a/recipes/quickstart/finetuning/README.md b/recipes/quickstart/finetuning/README.md index 46d58aa6cfd58ae8387cefb9a3ba29d963556bce..c7933474b24433989519a51e5c74274e8a6bcce1 100644 --- a/recipes/quickstart/finetuning/README.md +++ b/recipes/quickstart/finetuning/README.md @@ -79,7 +79,7 @@ It lets us specify the training settings for everything from `model_name` to `da * `mixed_precision` boolean flag to specify using mixed precision, defatults to true. - * `use_fp16` boolean flag to specify using FP16 for mixed precision, defatults to False. We recommond not setting this flag, and only set `mixed_precision` that will use `BF16`, this will help with speed and memory savings while avoiding challenges of scaler accuracies with `FP16`. + * `use_fp16` boolean flag to specify using FP16 for mixed precision, defatults to False. We recommend not setting this flag, and only set `mixed_precision` that will use `BF16`, this will help with speed and memory savings while avoiding challenges of scaler accuracies with `FP16`. * `sharding_strategy` this specifies the sharding strategy for FSDP, it can be: * `FULL_SHARD` that shards model parameters, gradients and optimizer states, results in the most memory savings. @@ -92,7 +92,7 @@ It lets us specify the training settings for everything from `model_name` to `da * `checkpoint_type` specifies the state dict checkpoint type for saving the model. `FULL_STATE_DICT` streams state_dict of each model shard from a rank to CPU and assembels the full state_dict on CPU. `SHARDED_STATE_DICT` saves one checkpoint per rank, and enables the re-loading the model in a different world size. -* `fsdp_activation_checkpointing` enables activation checkpoining for FSDP, this saves significant amount of memory with the trade off of recomputing itermediate activations during the backward pass. The saved memory can be re-invested in higher batch sizes to increase the throughput. We recommond you use this option. 
+* `fsdp_activation_checkpointing` enables activation checkpoining for FSDP, this saves significant amount of memory with the trade off of recomputing itermediate activations during the backward pass. The saved memory can be re-invested in higher batch sizes to increase the throughput. We recommend you use this option. * `pure_bf16` it moves the model to `BFloat16` and if `optimizer` is set to `anyprecision` then optimizer states will be kept in `BFloat16` as well. You can use this option if necessary. diff --git a/recipes/quickstart/inference/local_inference/README.md b/recipes/quickstart/inference/local_inference/README.md index 8e27304a257cd1a059f467688a2dbc2c2ccb7673..40f2e5015cd1d3574eb2b7c6869b0fec938ee190 100644 --- a/recipes/quickstart/inference/local_inference/README.md +++ b/recipes/quickstart/inference/local_inference/README.md @@ -119,7 +119,7 @@ Then convert your FSDP checkpoint to HuggingFace checkpoints using: # --HF_model_path_or_name specifies the HF Llama model name or path where it has config.json and tokenizer.json ``` -By default, training parameter are saved in `train_params.yaml` in the path where FSDP checkpoints are saved, in the converter script we frist try to find the HugingFace model name used in the fine-tuning to load the model with configs from there, if not found user need to provide it. +By default, training parameter are saved in `train_params.yaml` in the path where FSDP checkpoints are saved, in the converter script we first try to find the HugingFace model name used in the fine-tuning to load the model with configs from there, if not found user need to provide it. Then run inference using: diff --git a/recipes/responsible_ai/prompt_guard/inference.py b/recipes/responsible_ai/prompt_guard/inference.py index 4e41dd4e00e4f0c87834311b042ad34a1de38cb6..05268816633a74d6fc86ceb4731364fb2e1b9203 100644 --- a/recipes/responsible_ai/prompt_guard/inference.py +++ b/recipes/responsible_ai/prompt_guard/inference.py @@ -11,7 +11,7 @@ Utilities for loading the PromptGuard model and evaluating text for jailbreaks a Note that the underlying model has a maximum recommended input size of 512 tokens as a DeBERTa model. The final two functions in this file implement efficient parallel batched evaluation of the model on a list -of input strings of arbirary length, with the final score for each input being the maximum score across all +of input strings of arbitrary length, with the final score for each input being the maximum score across all chunks of the input string. """ diff --git a/recipes/use_cases/browser_use/agent/browser-use-quickstart.ipynb b/recipes/use_cases/browser_use/agent/browser-use-quickstart.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..71e9e7e0e2b021cb48392b4d6ad4720f288d4557 --- /dev/null +++ b/recipes/use_cases/browser_use/agent/browser-use-quickstart.ipynb @@ -0,0 +1,688 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 🌠Building an Intelligent Browser Agent with Llama 3.2\n", + "\n", + "This notebook provides a step-by-step guide to creating an AI-powered browser agent capable of navigating and interacting with websites autonomously. 
By combining the power of Llama 3.2 Vision, Playwright, and Together AI, this agent can perform tasks seamlessly while understanding both visual and textual content.\n",
+ "\n",
+ "##### Demo\n",
+ "For a detailed explanation of the code and a demo video, visit our blog post: [**Blog Post and Demo Video**](https://miguelg719.github.io/browser-use-blog/)\n",
+ "\n",
+ "##### Features\n",
+ "- Visual understanding of web pages through screenshots\n",
+ "- Autonomous navigation and interaction\n",
+ "- Natural language instructions for web tasks\n",
+ "- Persistent browser session management\n",
+ "\n",
+ "For example, you can ask the agent to:\n",
+ "- Search for a product on Amazon\n",
+ "- Find the cheapest flight to Tokyo\n",
+ "- Buy tickets for the next Warriors game\n",
+ "\n",
+ "\n",
+ "##### What's in this Notebook?\n",
+ "\n",
+ "This recipe walks you through:\n",
+ "- Setting up the environment and installing dependencies.\n",
+ "- Automating browser interactions using Playwright.\n",
+ "- Defining a structured prompt for the LLM to understand the task and execute the next action.\n",
+ "- Leveraging Llama 3.2 Vision for content comprehension.\n",
+ "- Creating a persistent and intelligent browser agent for real-world applications.\n",
+ "\n",
+ "**Please note that the agent is not perfect and may not always behave as expected.**\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 1. Install Required Libraries\n",
+ "This cell installs the necessary Python packages for the script, `together` and `playwright`.\n",
+ "It also ensures that Playwright is properly installed to enable automated browser interactions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install together playwright\n",
+ "!playwright install"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 2. Import Modules and Set Up Environment\n",
+ "Set your `Together` API key to instantiate the client. Feel free to use a different provider if it's more convenient.
" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from together import Together\n", + "\n", + "load_dotenv()\n", + "\n", + "client = Together(api_key=os.getenv(\"TOGETHER_API_KEY\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Vision Query Example\n", + "This function converts an image file into a Base64-encoded string, which is required for LLM querying.\n", + "\n", + "The next cell shows an example of how to use the `encode_image` function to convert an image file into a Base64-encoded string, which is then used in a chat completion request to the Llama 3.2 Vision model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from IPython.display import Markdown\n", + "imagePath= \"sample_screenshot.png\"\n", + "\n", + "def encode_image(image_path):\n", + " with open(image_path, \"rb\") as image_file:\n", + " return base64.b64encode(image_file.read()).decode('utf-8')\n", + "\n", + "# Must have an image on the local path to use it\n", + "base64_image = encode_image(imagePath)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo\",\n", + " messages=[\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\"type\": \"text\", \"text\": \"what is this image about?\"},\n", + " {\n", + " \"type\": \"image_url\",\n", + " # Uses a local image path. To use a remote image, replace the url with the image URL.\n", + " \"image_url\": {\n", + " \"url\": f\"data:image/jpeg;base64,{base64_image}\",\n", + " }\n", + " },\n", + " ],\n", + " }\n", + " ]\n", + ")\n", + "\n", + "display(Markdown(response.choices[0].message.content))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Helper Functions to Parse the Accessibility Tree\n", + "\n", + "The agent will use the accessibility tree to understand the elements on the page and interact with them. A helper function is defined here to help simplity the accessibility tree for the agent." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def parse_accessibility_tree(node, indent=0):\n", + " \"\"\"\n", + " Recursively parses the accessibility tree and prints a readable structure.\n", + " Args:\n", + " node (dict): A node in the accessibility tree.\n", + " indent (int): Indentation level for the nested structure.\n", + " \"\"\"\n", + " # Initialize res as an empty string at the start of each parse\n", + " res = \"\"\n", + " \n", + " def _parse_node(node, indent, res):\n", + " # Base case: If the node is empty or doesn't have a 'role', skip it\n", + " if not node or 'role' not in node:\n", + " return res\n", + "\n", + " # Indentation for nested levels\n", + " indented_space = \" \" * indent\n", + " \n", + " # Add node's name and role to result string\n", + " if 'value' in node:\n", + " res = res + f\"{indented_space}Role: {node['role']} - Name: {node.get('name', 'No name')} - Value: {node['value']}\\n\"\n", + " else:\n", + " res = res + f\"{indented_space}Role: {node['role']} - Name: {node.get('name', 'No name')}\\n\"\n", + " \n", + " # If the node has children, recursively parse them\n", + " if 'children' in node:\n", + " for child in node['children']:\n", + " res = _parse_node(child, indent + 2, res) # Increase indentation for child nodes\n", + " \n", + " return res\n", + "\n", + " return _parse_node(node, indent, res)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Define Prompts\n", + "a) **Planning Prompt:**\n", + "Create a structured prompt for the LLM to understand the task and execute the next action.\n", + "\n", + "b) **Agent Execution Prompt**\n", + "A structured prompt is created, specifying the instructions for processing the webpage content and screenshots." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "planning_prompt = \"\"\"\n", + "Given a user request, define a very simple plan of subtasks (actions) to achieve the desired outcome and execute them iteratively using Playwright.\n", + "\n", + "1. Understand the Task:\n", + " - Interpret the user's request and identify the core goal.\n", + " - Break down the task into a few smaller, actionable subtasks to achieve the goal effectively.\n", + "\n", + "2. Planning Actions:\n", + " - Translate the user's request into a high-level plan of actions.\n", + " - Example actions include:\n", + " - Searching for specific information.\n", + " - Navigating to specified URLs.\n", + " - Interacting with website elements (clicking, filling).\n", + " - Extracting or validating data.\n", + "\n", + "Input:\n", + "- User Request (Task)\n", + "\n", + "Output from the Agent:\n", + "- Step-by-Step Action Plan:: Return only an ordered list of actions. Only return the list, no other text.\n", + "\n", + "**Example User Requests and Agent Behavior:**\n", + "\n", + "1. **Input:** \"Search for a product on Amazon.\"\n", + " - **Output:**\n", + " 1. Navigate to Amazon's homepage.\n", + " 2. Enter the product name in the search bar and perform the search.\n", + " 3. Extract and display the top results, including the product title, price, and ratings.\n", + "\n", + "2. **Input:** \"Find the cheapest flight to Tokyo.\"\n", + " - **Output:**\n", + " 1. Visit a flight aggregator website (e.g. Kayak).\n", + " 2. Enter the departure city.\n", + " 3. Enter the destination city\n", + " 4. Enter the start and end dates.\n", + " 5. 
Extract and compare the flight options, highlighting the cheapest option.\n", + "\n", + "3. **Input:** \"Buy tickets for the next Warriors game.\"\n", + " - **Output:**\n", + " 1. Navigate to a ticket-selling platform (e.g., Ticketmaster).\n", + " 2. Fill the search bar with the team name.\n", + " 2. Search for upcoming team games.\n", + " 3. Select the next available game and purchase tickets for the specified quantity.\n", + "\n", + "\"\"\"\n", + "\n", + "\n", + "execution_prompt = \"\"\"\n", + "You will be given a task, a website's page accessibility tree, and the page screenshot as context. The screenshot is where you are now, use it to understand the accessibility tree. Based on that information, you need to decide the next step action. ONLY RETURN THE NEXT STEP ACTION IN A SINGLE JSON.\n", + "\n", + "When selecting elements, use elements from the accessibility tree.\n", + "\n", + "Reflect on what you are seeing in the accessibility tree and the screenshot and decide the next step action, elaborate on it in reasoning, and choose the next appropriate action.\n", + "\n", + "Selectors must follow the format:\n", + "- For a button with a specific name: \"button=ButtonName\"\n", + "- For a placeholder (e.g., input field): \"placeholder=PlaceholderText\"\n", + "- For text: \"text=VisibleText\"\n", + "\n", + "Make sure to analyze the accessibility tree and the screenshot to understand the current state, if something is not clear, you can use the previous actions to understand the current state. Explain why you are in the current state in current_state.\n", + "\n", + "You will be given a task and you MUST return the next step action in JSON format:\n", + "{\n", + " \"current_state\": \"Where are you now? Analyze the accessibility tree and the screenshot to understand the current state.\",\n", + " \"reasoning\": \"What is the next step to accomplish the task?\",\n", + " \"action\": \"navigation\" or \"click\" or \"fill\" or \"finished\",\n", + " \"url\": \"https://www.example.com\", // Only for navigation actions\n", + " \"selector\": \"button=Click me\", // For click or fill actions, derived from the accessibility tree\n", + " \"value\": \"Input text\", // Only for fill actions\n", + "}\n", + "\n", + "### Guidelines:\n", + "1. Use **\"navigation\"** for navigating to a new website through a URL.\n", + "2. Use **\"click\"** for interacting with clickable elements. Examples:\n", + " - Buttons: \"button=Click me\"\n", + " - Text: \"text=VisibleText\"\n", + " - Placeholders: \"placeholder=Search...\"\n", + " - Link: \"link=BUY NOW\"\n", + "3. Use **\"fill\"** for inputting text into editable fields. Examples:\n", + " - Placeholder: \"placeholder=Search...\"\n", + " - Textbox: \"textbox=Flight destination output\"\n", + " - Input: \"input=Search...\"\n", + "4. Use **\"finished\"** when the task is done. For example:\n", + " - If a task is successfully completed.\n", + " - If navigation confirms you are on the correct page.\n", + "\n", + "\n", + "### Accessibility Tree Examples:\n", + "\n", + "You will be given an accessibility tree to interact with the webpage. It consists of a nested node structure that represents elements on the page. 
For example:\n", + "\n", + "Role: generic - Name: \n", + " Role: text - Name: San Francisco (SFO)\n", + " Role: button - Name: \n", + " Role: listitem - Name: \n", + " Role: textbox - Name: Flight origin input\n", + "Role: button - Name: Swap departure airport and destination airport\n", + "Role: generic - Name: \n", + " Role: textbox - Name: Flight destination input\n", + "Role: button - Name: Start date\n", + "Role: button - Name: \n", + "Role: button - Name: \n", + "Role: button - Name: End date\n", + "Role: button - Name: \n", + "Role: button - Name: \n", + "Role: button - Name: Search\n", + "\n", + "This section indicates that there is a textbox with a name \"Flight destination input\" filled with San Francisco (SFO). There is also a button with the name \"Swap departure airport and destination airport\". Another textbox with the name \"Flight destination input\" not filled with any text. There are also buttons with the names \"Start date\", \"End date\", which are not filled with any dates, and a button named \"Search\".\n", + "\n", + "Retry actions at most 2 times before trying a different action.\n", + "\n", + "### Examples:\n", + "1. To click on a button labeled \"Search\":\n", + " {\n", + " \"current_state\": \"On the homepage of a search engine.\",\n", + " \"reasoning\": \"The accessibility tree shows a button named 'Search'. Clicking it is the appropriate next step to proceed with the task.\",\n", + " \"action\": \"click\",\n", + " \"selector\": \"button=Search\"\n", + " }\n", + "\n", + "2. To fill a search bar with the text \"AI tools\":\n", + " {\n", + " \"current_state\": \"On the search page with a focused search bar.\",\n", + " \"reasoning\": \"The accessibility tree shows an input field with placeholder 'Search...'. Entering the query 'AI tools' fulfills the next step of the task.\",\n", + " \"action\": \"fill\",\n", + " \"selector\": \"placeholder=Search...\",\n", + " \"value\": \"AI tools\"\n", + " }\n", + "\n", + "3. To navigate to a specific URL:\n", + " {\n", + " \"current_state\": \"Starting from a blank page.\",\n", + " \"reasoning\": \"The task requires visiting a specific website to gather relevant information. Navigating to the URL is the first step.\",\n", + " \"action\": \"navigation\",\n", + " \"url\": \"https://example.com\"\n", + " }\n", + "\n", + "4. To finish the task:\n", + " {\n", + " \"current_state\": \"Completed the search and extracted the necessary data.\",\n", + " \"reasoning\": \"The task goal has been achieved, and no further actions are required.\",\n", + " \"action\": \"finished\"\n", + " }\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Few Shot Examples\n", + "\n", + "Performance improves drastically by adding a few shot examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "few_shot_example_1 = \"\"\"\n", + "User Input: \"What are the best tacos in San Francisco?\"\n", + "\n", + "Agent Step Sequence:\n", + "Step 1: \n", + "{\n", + " \"current_state\": \"On a blank page.\",\n", + " \"reasoning\": \"The task is to find the best tacos in San Francisco, so the first step is to navigate to Google to perform a search.\",\n", + " \"action\": \"navigation\",\n", + " \"url\": \"https://www.google.com\",\n", + "}\n", + "\n", + "Step 2: \n", + "{\n", + " \"current_state\": \"On the Google homepage.\",\n", + " \"reasoning\": \"To search for the best tacos in San Francisco, I need to fill the Google search bar with the query.\",\n", + " \"action\": \"fill\",\n", + " \"selector\": \"combobox=Search\",\n", + " \"value\": \"Best tacos in San Francisco\"\n", + "}\n", + "\n", + "Step 3:\n", + "{\n", + " \"current_state\": \"On Google search results page.\",\n", + " \"reasoning\": \"After entering the query, I need to click the search button to retrieve the results.\",\n", + " \"action\": \"click\",\n", + " \"selector\": \"button=Google Search\"\n", + "}\n", + "\n", + "Step 4: \n", + "{\n", + " \"current_state\": \"On the search results page with multiple links.\",\n", + " \"reasoning\": \"From the search results, I need to click on a reliable food-review or blogwebsite link.\",\n", + " \"action\": \"click\",\n", + " \"selector\": \"text=Yelp\"\n", + "}\n", + "\n", + "Step 5:\n", + "{\n", + " \"current_state\": \"On Yelp's best taqueria near San Francisco page.\",\n", + " \"reasoning\": \"The task is complete as I have found the top taquerias in San Francisco.\",\n", + " \"action\": \"finished\",\n", + " \"summary\": \"I have successfully found the best tacos in San Francisco.\"\n", + "}\n", + "\"\"\"\n", + "\n", + "few_shot_example_2 = \"\"\"\n", + "User Input: Can you send an email to reschedule a meeting for Dmitry at gmail.com for tomorrow morning? 
I'm sick today.\n", + "\n", + "Agent Step Sequence:\n", + "Step 1:\n", + "{\n", + " \"current_state\": \"On a blank page.\",\n", + " \"reasoning\": \"To send an email, the first step is to navigate to Gmail.\",\n", + " \"action\": \"navigation\",\n", + " \"url\": \"https://mail.google.com\",\n", + "}\n", + "\n", + "Step 2:\n", + "{\n", + " \"current_state\": \"On Gmail's homepage.\",\n", + " \"reasoning\": \"Click the 'Compose' button to start drafting a new email.\",\n", + " \"action\": \"click\",\n", + " \"selector\": \"button=Compose\"\n", + "}\n", + "\n", + "Step 3:\n", + "{\n", + " \"current_state\": \"In the new email draft window.\",\n", + " \"reasoning\": \"Enter Dmitry's email address in the recipient field.\",\n", + " \"action\": \"fill\",\n", + " \"selector\": \"placeholder=Recipients\",\n", + " \"value\": \"dmitry@gmail.com\"\n", + "}\n", + "\n", + "Step 4: \n", + "{\n", + " \"current_state\": \"In the new email draft with the recipient filled.\",\n", + " \"reasoning\": \"Set the subject line to indicate the purpose of the email.\",\n", + " \"action\": \"fill\",\n", + " \"selector\": \"placeholder=Subject\",\n", + " \"value\": \"Rescheduling Meeting\"\n", + "}\n", + "\n", + "Step 5:\n", + "{\n", + " \"current_state\": \"In the new email draft with the subject set.\",\n", + " \"reasoning\": \"Compose the email body to politely inform Dmitry about rescheduling the meeting.\",\n", + " \"action\": \"fill\",\n", + " \"selector\": \"placeholder=Email body\",\n", + " \"value\": \"Hi Dmitry,\\\\n\\\\nI'm feeling unwell today and would like to reschedule our meeting for tomorrow morning. Please let me know if this works for you.\\\\n\\\\nBest regards,\\\\n[Your Name]\"\n", + "}\n", + "\n", + "Step 6: \n", + "{\n", + " \"current_state\": \"In the new email draft with the body composed.\",\n", + " \"reasoning\": \"Click the 'Send' button to deliver the email to Dmitry.\",\n", + " \"action\": \"click\",\n", + " \"selector\": \"button=Send\"\n", + "}\n", + "\n", + "Step 7:\n", + "{\n", + " \"current_state\": \"On Gmail's homepage after sending the email.\",\n", + " \"reasoning\": \"The email has been drafted and sent, fulfilling the task of informing Dmitry about the reschedule.\",\n", + " \"action\": \"finished\",\n", + " \"summary\": \"Email sent to Dmitry to reschedule the meeting for tomorrow morning.\"\n", + "}\n", + "\"\"\"\n", + "\n", + "few_shot_examples = [few_shot_example_1, few_shot_example_2]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Define a task and generate a plan of actions to execute\n", + "\n", + "You can define your own task or use one of the examples below" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# Define your task here:\n", + "# task = 'Find toys to buy for my 10 year old niece this Christmas'\n", + "# task = 'Find tickets for the next Warriors game'\n", + "task = 'Find the cheapest flight to Madrid'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate a plan of actions to execute\n", + "\n", + "The next cell queries the LLM using the planning prompt to generate a plan of actions to execute. This then becomes each of the individual subtasks for the execution agent to complete." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Generating plan...\")\n", + "planning_response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo\",\n", + " temperature=0.0,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": planning_prompt},\n", + " {\"role\": \"user\", \"content\": task},\n", + " ],\n", + ") \n", + "plan = planning_response.choices[0].message.content\n", + "print(plan)\n", + "steps = [line.strip()[3:] for line in plan.strip().split('\\n')]\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. Create the Browser environment and Run the Agent\n", + "The necessary modules for web scraping are imported, and the setup for using Playwright asynchronously is initialized.\n", + "\n", + "The context is provided to the LLM to help it understand its current state and generate the next required action to complete the provided task. \n", + "\n", + "- At any step, you can press **enter** to continue or **'q'** to quit the agent loop. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from playwright.async_api import async_playwright\n", + "import asyncio \n", + "import json\n", + "import re\n", + "\n", + "previous_context = None\n", + "\n", + "async def run_browser():\n", + " async with async_playwright() as playwright:\n", + " # Launch Chromium browser\n", + " browser = await playwright.chromium.launch(headless=False, channel=\"chrome\")\n", + " page = await browser.new_page()\n", + " await asyncio.sleep(1)\n", + " await page.goto(\"https://google.com/\")\n", + " previous_actions = []\n", + " try:\n", + " while True: # Infinite loop to keep session alive, press enter to continue or 'q' to quit\n", + " # Get Context from page\n", + " accessibility_tree = await page.accessibility.snapshot()\n", + " accessibility_tree = parse_accessibility_tree(accessibility_tree)\n", + " await page.screenshot(path=\"screenshot.png\")\n", + " base64_image = encode_image(imagePath)\n", + " previous_context = accessibility_tree\n", + " response = client.chat.completions.create(\n", + " model=\"meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo\",\n", + " temperature=0.0,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": execution_prompt},\n", + " {\"role\": \"system\", \"content\": f\"Few shot examples: {few_shot_examples}. Just a few examples, user will assign you VERY range set of tasks.\"},\n", + " {\"role\": \"system\", \"content\": f\"Plan to execute: {steps}\\n\\n Accessibility Tree: {previous_context}\\n\\n, previous actions: {previous_actions}\"},\n", + " {\"role\": \"user\", \"content\": \n", + " [\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": f'What should be the next action to accomplish the task: {task} based on the current state? Remember to review the plan and select the next action based on the current state. 
Provide the next action in JSON format strictly as specified above.',\n", + " },\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\n", + " \"url\": f\"data:image/jpeg;base64,{base64_image}\",\n", + " }\n", + " },\n", + " ]\n", + " }\n", + " ],\n", + " )\n", + " res = response.choices[0].message.content\n", + " print('Agent response:', res)\n", + " try:\n", + " match = re.search(r'\\{.*\\}', res, re.DOTALL)\n", + " if match:\n", + " output = json.loads(match.group(0))\n", + " except Exception as e:\n", + " print('Error parsing JSON:', e)\n", + "\n", + " if output[\"action\"] == \"navigation\":\n", + " try:\n", + " await page.goto(output[\"url\"])\n", + " previous_actions.append(f\"navigated to {output['url']}, SUCCESS\")\n", + " except Exception as e:\n", + " previous_actions.append(f\"Error navigating to {output['url']}: {e}\")\n", + "\n", + " elif output[\"action\"] == \"click\":\n", + " try:\n", + " selector_type, selector_name = output[\"selector\"].split(\"=\")[0], output[\"selector\"].split(\"=\")[1]\n", + " res = await page.get_by_role(selector_type, name=selector_name).first.click()\n", + " previous_actions.append(f\"clicked {output['selector']}, SUCCESS\")\n", + " except Exception as e:\n", + " previous_actions.append(f\"Error clicking on {output['selector']}: {e}\")\n", + " \n", + " elif output[\"action\"] == \"fill\":\n", + " try:\n", + " selector_type, selector_name = output[\"selector\"].split(\"=\")[0], output[\"selector\"].split(\"=\")[1]\n", + " res = await page.get_by_role(selector_type, name=selector_name).fill(output[\"value\"])\n", + " await asyncio.sleep(1)\n", + " await page.keyboard.press(\"Enter\")\n", + " previous_actions.append(f\"filled {output['selector']} with {output['value']}, SUCCESS\")\n", + " except Exception as e:\n", + " previous_actions.append(f\"Error filling {output['selector']} with {output['value']}: {e}\")\n", + "\n", + " elif output[\"action\"] == \"finished\":\n", + " print(output[\"summary\"])\n", + " break\n", + "\n", + " await asyncio.sleep(1) \n", + " \n", + " # Or wait for user input\n", + " user_input = input(\"Press 'q' to quit or Enter to continue: \")\n", + " if user_input.lower() == 'q':\n", + " break\n", + " \n", + " except Exception as e:\n", + " print(f\"An error occurred: {e}\")\n", + " finally:\n", + " # Only close the browser when explicitly requested\n", + " await browser.close()\n", + "\n", + "# Run the async function\n", + "await run_browser()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## And that's it! Congratulations! 
🎉🎉\n", + "\n", + "You've just created a browser agent that can navigate websites, understand page content through vision, plan and execute actions based on natural language commands, and maintain context across multiple interactions.\n", + "\n", + "\n", + "**Collaborators**\n", + "\n", + "Feel free to reach out with any questions or feedback!\n", + "\n", + "\n", + "**Miguel Gonzalez** on [X](https://x.com/miguel_gonzf) or [LinkedIn](https://www.linkedin.com/in/gonzalezfernandezmiguel/)\n", + "\n", + "**Dimitry Khorzov** on [X](https://x.com/korzhov_dm) or [LinkedIn](https://www.linkedin.com/in/korzhovdm)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/recipes/use_cases/browser_use/agent/sample_screenshot.png b/recipes/use_cases/browser_use/agent/sample_screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..83647db7205a16e761ee32c507a047ecf6d14e26 Binary files /dev/null and b/recipes/use_cases/browser_use/agent/sample_screenshot.png differ diff --git a/recipes/use_cases/coding/text2sql/quickstart.ipynb b/recipes/use_cases/coding/text2sql/quickstart.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1bf90efcaada3b5ab08d41765d3c166fbf28765f --- /dev/null +++ b/recipes/use_cases/coding/text2sql/quickstart.ipynb @@ -0,0 +1,334 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e8cba0b6", + "metadata": {}, + "source": [ + "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/use_cases/coding/text2sql/quickstart.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> \n", + "\n", + "## Quick Demo of Text2SQL Using Llama 3.3\n", + "\n", + "This demo shows how to use Llama 3.3 to answer questions about a SQLite DB. \n", + "\n", + "We'll use LangChain and the Llama cloud provider [Together.ai](https://api.together.ai/), where you can easily get a free API key (or you can use any other Llama cloud provider or even Ollama running Llama locally - see [here](https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart) for examples)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33fb3190-59fb-4edd-82dd-f20f6eab3e47", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -U langchain langchain-community langchain-together" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fa4562d3", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from langchain_together import ChatTogether\n", + "\n", + "os.environ['TOGETHER_API_KEY'] = 'your_api_key'\n", + "\n", + "llm = ChatTogether(\n", + " model=\"meta-llama/Llama-3.3-70B-Instruct-Turbo\",\n", + " temperature=0,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6d421ae7", + "metadata": {}, + "source": [ + "To recreate the `nba_roster.db` file, run the two commands below:\n", + "- `python txt2csv.py` to convert the `nba.txt` file to `nba_roster.csv`. The `nba.txt` file was created by scraping the NBA roster info from the web.\n", + "- `python csv2db.py` to convert `nba_roster.csv` to `nba_roster.db`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "56f0360e-fca3-49a8-9a70-0416f84e15fc", + "metadata": {}, + "outputs": [], + "source": [ + "# uncomment if you don't want to create the db yourself\n", + "#! wget https://github.com/meta-llama/llama-recipes/raw/3649841b426999fdc61c30a9fc8721106bec769b/recipes/use_cases/coding/text2sql/nba_roster.db" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3bb99f39-cd7a-4db6-91dd-02f3bf80347c", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.utilities import SQLDatabase\n", + "\n", + "# Note: to run in Colab, you need to upload the nba_roster.db file in the repo to the Colab folder first.\n", + "db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n", + "\n", + "def get_schema():\n", + " return db.get_table_info()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8d793ce7-324b-4861-926c-54973d7c9b43", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Based on the table schema below, write a SQL query that would answer the user's question; just return the SQL query and nothing else.\n", + "\n", + "Scheme:\n", + "\n", + "CREATE TABLE nba_roster (\n", + "\t\"Team\" TEXT, \n", + "\t\"NAME\" TEXT, \n", + "\t\"Jersey\" TEXT, \n", + "\t\"POS\" TEXT, \n", + "\t\"AGE\" INTEGER, \n", + "\t\"HT\" TEXT, \n", + "\t\"WT\" TEXT, \n", + "\t\"COLLEGE\" TEXT, \n", + "\t\"SALARY\" TEXT\n", + ")\n", + "\n", + "Question: What team is Stephen Curry on?\n", + "\n", + "SQL Query:\n" + ] + } + ], + "source": [ + "question = \"What team is Stephen Curry on?\"\n", + "prompt = f\"\"\"Based on the table schema below, write a SQL query that would answer the user's question; just return the SQL query and nothing else.\n", + "\n", + "Scheme:\n", + "{get_schema()}\n", + "\n", + "Question: {question}\n", + "\n", + "SQL Query:\"\"\"\n", + "\n", + "print(prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "70776558", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SELECT Team FROM nba_roster WHERE NAME = 'Stephen Curry'\n" + ] + } + ], + "source": [ + "answer = llm.invoke(prompt).content\n", + "print(answer)" + ] + }, + { + "cell_type": "markdown", + "id": "afcf423a", + "metadata": {}, + "source": [ + "***Note:*** If you don't have the \"just return the SQL query and nothing else\" in the prompt above, you'll likely get more text other than the SQL query back in the answer, making some extra post-processing necessary before running the db query below." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "62472ce6-794b-4a61-b88c-a1e031e28e4e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"[('Golden State Warriors',)]\"" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# note this is a dangerous operation and for demo purpose only; in production app you'll need to safe-guard any DB operation\n", + "result = db.run(answer)\n", + "result" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "39ed4bc3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I don't have enough information to determine whose salary you are referring to. 
Could you please provide more context or specify the person you are asking about?\n" + ] + } + ], + "source": [ + "# how about a follow up question\n", + "follow_up = \"What's his salary?\"\n", + "print(llm.invoke(follow_up).content)" + ] + }, + { + "cell_type": "markdown", + "id": "98b2c523", + "metadata": {}, + "source": [ + "Since we did not pass any context along with the follow-up to Llama, it doesn't know the answer. Let's try to fix it by adding context to the follow-up prompt." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "0c305278-29d2-4e88-9b3d-ad67c94ce0f2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Based on the table schema, question, SQL query, and SQL response below, write a new SQL response; be concise, just output the SQL response.\n", + "\n", + "Scheme:\n", + "\n", + "CREATE TABLE nba_roster (\n", + "\t\"Team\" TEXT, \n", + "\t\"NAME\" TEXT, \n", + "\t\"Jersey\" TEXT, \n", + "\t\"POS\" TEXT, \n", + "\t\"AGE\" INTEGER, \n", + "\t\"HT\" TEXT, \n", + "\t\"WT\" TEXT, \n", + "\t\"COLLEGE\" TEXT, \n", + "\t\"SALARY\" TEXT\n", + ")\n", + "\n", + "Question: What's his salary?\n", + "SQL Query: What team is Stephen Curry on?\n", + "SQL Result: [('Golden State Warriors',)]\n", + "\n", + "New SQL Response:\n", + "\n" + ] + } + ], + "source": [ + "prompt = f\"\"\"Based on the table schema, question, SQL query, and SQL response below, write a new SQL response; be concise, just output the SQL response.\n", + "\n", + "Scheme:\n", + "{get_schema()}\n", + "\n", + "Question: {follow_up}\n", + "SQL Query: {question}\n", + "SQL Result: {result}\n", + "\n", + "New SQL Response:\n", + "\"\"\"\n", + "print(prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "03739b96-e607-4fa9-bc5c-df118198dc7f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SELECT SALARY FROM nba_roster WHERE NAME = \"Stephen Curry\"\n" + ] + } + ], + "source": [ + "new_answer = llm.invoke(prompt).content\n", + "print(new_answer)" + ] + }, + { + "cell_type": "markdown", + "id": "c782abb6-3b44-45be-8694-70fc29b82523", + "metadata": {}, + "source": [ + "Because we have \"be concise, just output the SQL response\", Llama 3 is able to just generate the SQL statement; otherwise output parsing will be needed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "6ecfca53-be7e-4668-bad1-5ca7571817d7", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"[('$51,915,615',)]\"" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "db.run(new_answer)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d79bbb1-e91d-4b56-b6ef-98c94ff414d0", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recipes/use_cases/coding/text2sql/structured_llama.ipynb b/recipes/use_cases/coding/text2sql/structured_llama.ipynb deleted file mode 100644 index 253ef9ba74b55a663b69ded1f6834f91d2263808..0000000000000000000000000000000000000000 --- a/recipes/use_cases/coding/text2sql/structured_llama.ipynb +++ /dev/null @@ -1,244 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "e8cba0b6", - "metadata": {}, - "source": [ - "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/use_cases/text2sql/StructuredLlama.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> \n", - "\n", - "## Use Llama 3 to chat about structured data\n", - "This demo shows how to use LangChain with Llama 3 to query structured data, the 2023-24 NBA roster info, stored in a SQLite DB, and how to ask Llama 3 follow up question about the DB." - ] - }, - { - "cell_type": "markdown", - "id": "f839d07d", - "metadata": {}, - "source": [ - "We start by installing the necessary packages:\n", - "- [Replicate](https://replicate.com/) to host the Llama 3 model\n", - "- [langchain](https://python.langchain.com/docs/get_started/introduction) provides necessary RAG tools for this demo\n", - "\n", - "**Note** We will be using [Replicate](https://replicate.com/meta/meta-llama-3-8b-instruct) to run the examples here. You will need to first sign in with Replicate with your github account, then create a free API token [here](https://replicate.com/account/api-tokens) that you can use for a while. You can also use other Llama 3 cloud providers such as [Groq](https://console.groq.com/), [Together](https://api.together.xyz/playground/language/meta-llama/Llama-3-8b-hf), or [Anyscale](https://app.endpoints.anyscale.com/playground) - see Section 2 of the Getting to Know Llama [notebook](https://github.com/meta-llama/llama-recipes/blob/main/recipes/quickstart/Getting_to_know_Llama.ipynb) for more information.\n", - "\n", - "If you'd like to run Llama 3 locally for the benefits of privacy, no cost or no rate limit (some Llama 3 hosting providers set limits for free plan of queries or tokens per second or minute), see [Running Llama Locally](https://github.com/meta-llama/llama-recipes/blob/main/recipes/quickstart/Running_Llama2_Anywhere/Running_Llama_on_Mac_Windows_Linux.ipynb)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33fb3190-59fb-4edd-82dd-f20f6eab3e47", - "metadata": {}, - "outputs": [], - "source": [ - "!pip install langchain replicate" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fa4562d3", - "metadata": {}, - "outputs": [], - "source": [ - "from getpass import getpass\n", - "import os\n", - "\n", - "REPLICATE_API_TOKEN = getpass()\n", - "os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN" - ] - }, - { - "cell_type": "markdown", - "id": "1e586b75", - "metadata": {}, - "source": [ - "Next we call the Llama 3 8b chat model from Replicate. You can also use Llama 3 70b model by replacing the `model` name with \"meta/meta-llama-3-70b-instruct\"." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9dcd744c", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.llms import Replicate\n", - "llm = Replicate(\n", - " model=\"meta/meta-llama-3-8b-instruct\",\n", - " model_kwargs={\"temperature\": 0.0, \"top_p\": 1, \"max_new_tokens\":500}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "6d421ae7", - "metadata": {}, - "source": [ - "To recreate the `nba_roster.db` file, run the two commands below:\n", - "- `python txt2csv.py` to convert the `nba.txt` file to `nba_roster.csv`. The `nba.txt` file was created by scraping the NBA roster info from the web.\n", - "- `python csv2db.py` to convert `nba_roster.csv` to `nba_roster.db`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bb99f39-cd7a-4db6-91dd-02f3bf80347c", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_community.utilities import SQLDatabase\n", - "\n", - "# Note: to run in Colab, you need to upload the nba_roster.db file in the repo to the Colab folder first.\n", - "db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n", - "\n", - "def get_schema():\n", - " return db.get_table_info()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8d793ce7-324b-4861-926c-54973d7c9b43", - "metadata": {}, - "outputs": [], - "source": [ - "question = \"What team is Klay Thompson on?\"\n", - "prompt = f\"\"\"Based on the table schema below, write a SQL query that would answer the user's question; just return the SQL query and nothing else.\n", - "\n", - "Scheme:\n", - "{get_schema()}\n", - "\n", - "Question: {question}\n", - "\n", - "SQL Query:\"\"\"\n", - "\n", - "print(prompt)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70776558", - "metadata": {}, - "outputs": [], - "source": [ - "answer = llm.invoke(prompt)\n", - "print(answer)" - ] - }, - { - "cell_type": "markdown", - "id": "afcf423a", - "metadata": {}, - "source": [ - "If you don't have the \"just return the SQL query and nothing else\" in the prompt above, or even with it but asking Llama 2 which doesn't follow instructions as well as Llama 3, you'll likely get more text other than the SQL query back in the answer." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "62472ce6-794b-4a61-b88c-a1e031e28e4e", - "metadata": {}, - "outputs": [], - "source": [ - "# note this is a dangerous operation and for demo purpose only; in production app you'll need to safe-guard any DB operation\n", - "result = db.run(answer)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "39ed4bc3", - "metadata": {}, - "outputs": [], - "source": [ - "# how about a follow up question\n", - "follow_up = \"What's his salary?\"\n", - "print(llm.invoke(follow_up))" - ] - }, - { - "cell_type": "markdown", - "id": "98b2c523", - "metadata": {}, - "source": [ - "Since we did not pass any context along with the follow-up to the model it did not know who \"his\" is and just picked LeBron James.\n", - "\n", - "Let's try to fix it by adding context to the follow-up prompt." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0c305278-29d2-4e88-9b3d-ad67c94ce0f2", - "metadata": {}, - "outputs": [], - "source": [ - "prompt = f\"\"\"Based on the table schema, question, SQL query, and SQL response below, write a new SQL response; be concise, just output the SQL response.\n", - "\n", - "Scheme:\n", - "{get_schema()}\n", - "\n", - "Question: {follow_up}\n", - "SQL Query: {question}\n", - "SQL Result: {result}\n", - "\n", - "New SQL Response:\n", - "\"\"\"\n", - "print(prompt)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "03739b96-e607-4fa9-bc5c-df118198dc7f", - "metadata": {}, - "outputs": [], - "source": [ - "new_answer = llm.invoke(prompt)\n", - "print(new_answer)" - ] - }, - { - "cell_type": "markdown", - "id": "c782abb6-3b44-45be-8694-70fc29b82523", - "metadata": {}, - "source": [ - "Because we have \"be concise, just output the SQL response\", Llama 3 is able to just generate the SQL statement; otherwise output parsing will be needed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6ecfca53-be7e-4668-bad1-5ca7571817d7", - "metadata": {}, - "outputs": [], - "source": [ - "db.run(new_answer)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/recipes/use_cases/customerservice_chatbots/messenger_chatbot/messenger_llama3.md b/recipes/use_cases/customerservice_chatbots/messenger_chatbot/messenger_llama3.md index a410a5647c2615ef041eadb865a2a0e1e22859d2..b47fbc16b9b0cf1a38b9d47521d1093c50264e62 100644 --- a/recipes/use_cases/customerservice_chatbots/messenger_chatbot/messenger_llama3.md +++ b/recipes/use_cases/customerservice_chatbots/messenger_chatbot/messenger_llama3.md @@ -6,7 +6,7 @@ If you're interested in a Llama 3 enabled WhatsApp chatbot, see [here](../whatsa ## Overview -Messenger from Meta is a messaging service that allows a Facebook business page to respond to people who are interested in their business using [Messenger Platform](https://developers.facebook.com/docs/messenger-platform/overview). The benefits of an intelligent and knowledgable Llama 3 powered chatbot are obvious, including cost saving and better customer experience such as 24x7 availability. 
In this tutorial, we'll cover the details of integrating Llama 3 with the Messenger Platform to build a basic Llama 3 enabled chatbot - for more Llama 3 application development and deployment demos such as how to integrate your own data with Llama 3, see the recipes. +Messenger from Meta is a messaging service that allows a Facebook business page to respond to people who are interested in their business using [Messenger Platform](https://developers.facebook.com/docs/messenger-platform/overview). The benefits of an intelligent and knowledgeable Llama 3 powered chatbot are obvious, including cost saving and better customer experience such as 24x7 availability. In this tutorial, we'll cover the details of integrating Llama 3 with the Messenger Platform to build a basic Llama 3 enabled chatbot - for more Llama 3 application development and deployment demos such as how to integrate your own data with Llama 3, see the recipes. The diagram below shows the components and overall data flow of the Llama 3 enabled Messenger chatbot demo we built, using an Amazon EC2 instance as an example for running the web server. diff --git a/recipes/use_cases/customerservice_chatbots/whatsapp_chatbot/whatsapp_llama3.md b/recipes/use_cases/customerservice_chatbots/whatsapp_chatbot/whatsapp_llama3.md index 9b022785b4e9455be9fc50faf7b0e7f912062c4f..02770257c734d8c27f2bc371976529b5d903d827 100644 --- a/recipes/use_cases/customerservice_chatbots/whatsapp_chatbot/whatsapp_llama3.md +++ b/recipes/use_cases/customerservice_chatbots/whatsapp_chatbot/whatsapp_llama3.md @@ -6,7 +6,7 @@ If you're interested in a Llama 3 enabled Messenger chatbot, see [here](../messe ## Overview -Businesses of all sizes can use the [WhatsApp Business API](https://developers.facebook.com/docs/whatsapp/cloud-api/overview) to connect their customers with human agents or Llama 3 powered chatbots. The benefits of an intelligent and knowledgable chatbot are obvious, including cost saving and better customer experience such as 24x7 availability. In this blog, we'll cover the details of integrating Llama 3 with the WhatsApp Business API to build a basic Llama 3 enabled chatbot. +Businesses of all sizes can use the [WhatsApp Business API](https://developers.facebook.com/docs/whatsapp/cloud-api/overview) to connect their customers with human agents or Llama 3 powered chatbots. The benefits of an intelligent and knowledgeable chatbot are obvious, including cost saving and better customer experience such as 24x7 availability. In this blog, we'll cover the details of integrating Llama 3 with the WhatsApp Business API to build a basic Llama 3 enabled chatbot. The diagram below shows the components and overall data flow of the Llama 3 enabled WhatsApp chatbot demo we built, using Amazon EC2 instance as an example for running the web server. 
diff --git a/recipes/use_cases/email_agent/1.png b/recipes/use_cases/email_agent/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..2793199db6173abc5446ea02024bf6fb70736ef6
Binary files /dev/null and b/recipes/use_cases/email_agent/1.png differ
diff --git a/recipes/use_cases/email_agent/2.png b/recipes/use_cases/email_agent/2.png
new file mode 100644
index 0000000000000000000000000000000000000000..5b618ec0f8799ecdb173541e1de7d802f263948e
Binary files /dev/null and b/recipes/use_cases/email_agent/2.png differ
diff --git a/recipes/use_cases/email_agent/3.png b/recipes/use_cases/email_agent/3.png
new file mode 100644
index 0000000000000000000000000000000000000000..fba755085564b4f9ecfc652de79a704eba849e54
Binary files /dev/null and b/recipes/use_cases/email_agent/3.png differ
diff --git a/recipes/use_cases/email_agent/README.md b/recipes/use_cases/email_agent/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..48e75ef24e91e97ae121c475e690c9d7e979382f
--- /dev/null
+++ b/recipes/use_cases/email_agent/README.md
@@ -0,0 +1,335 @@
+# Building A Llama Powered Email Agent
+
+This app shows how to build an email agent powered by Llama 3.1 8B running locally via Ollama. We'll start by building, from scratch, a basic agent with the custom tool calling natively supported in Llama 3.1. The end goal is to cover all components of a production-ready agent app, acting as an assistant to your email, with a great user experience: intuitive, engaging, efficient and reliable. We'll use Gmail as an example, but any email client API can be used instead.
+
+Currently implemented features include:
+* search for emails and attachments
+* get email detail
+* reply to a specific email
+* forward an email
+* get summary of a PDF attachment
+* draft and send an email
+
+
+
+# Overview
+
+Email is an essential app and one of the top killer apps people use every day. A recent [State of AI Agents](https://www.langchain.com/stateofaiagents) survey by LangChain finds that "The top use cases for agents include performing research and summarization (58%), followed by streamlining tasks for personal productivity or assistance (53.5%)."
+
+Andrew Ng wrote a 5-part [Agentic Design Patterns](https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/) series in March 2024 predicting that "AI agent workflows will drive massive AI progress this year".
+
+Deloitte published in November 2024 a report on [AI agents and multiagent systems](https://www2.deloitte.com/content/dam/Deloitte/us/Documents/consulting/us-ai-institute-generative-ai-agents-multiagent-systems.pdf) stating that "Through their ability to reason, plan, remember and act, AI agents address key limitations of typical language models." and "Executive leaders should make moves now to prepare for and embrace this next era of intelligent organizational transformation."
+
+During Thanksgiving week, a new startup, [/dev/agent](https://sdsa.ai/), building the next-gen OS for AI agents, was in the spotlight.
+
+In December, Sequoia posted [here](https://www.linkedin.com/posts/konstantinebuhler_the-ai-landscape-is-shifting-from-simple-activity-7270111755710672897-ZHnr/) saying that 2024 has been the year of agents (an agent is an AI that can complete tasks - it not only tells you how to do something but also does it for you directly), and 2025 will be the year of networks of AI agents.
+
+So what exactly is an AI agent, and how do you start building an agent app?
+
+## What is an agent?
+
+The concept of an agent is not new - in the 2010 3rd edition of Russell and Norvig's classic book Artificial Intelligence: A Modern Approach ("Modern" by 2010, two years before the deep learning revolution that started the truly modern AI), an agent is defined as "anything that can be viewed as perceiving its environment through sensors and acting upon that environment through actuators". These days, an AI agent basically means an LLM-powered agent - and if we treat natural language understanding as a type of sensor, an LLM agent is still a sub-category of the traditional agent.
+
+Lilian Weng, in her popular June 2023 blog [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/), defines an LLM-powered agent system as having four key components:
+ * Planning and Reflection: can break down large tasks into smaller ones; can self-reflect over past actions and self-improve;
+ * Memory: can use contextual info and recall info over extended periods (for other components to use);
+ * Tool Use: can understand what external APIs to use for info or action not built into LLMs;
+ * Action: can actually run the tools.
+
+Andrew Ng describes four [agentic design patterns](https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/) as:
+* Reflection
+* Planning
+* Tool calling
+* Multi-agent collaboration, where "memory" is mentioned: Each agent implements its own workflow, has its own memory (itself a rapidly evolving area in agentic technology: how can an agent remember enough of its past interactions to perform better on upcoming ones?)
+
+In Deloitte's [report](https://www2.deloitte.com/content/dam/Deloitte/us/Documents/consulting/us-ai-institute-generative-ai-agents-multiagent-systems.pdf), AI agents are reasoning engines that can understand context, plan workflows, connect to external tools and data, and execute actions to achieve a defined goal.
+
+In a November 2024 blog by Letta, [The AI agents stack](https://www.letta.com/blog/ai-agents-stack), an LLM-powered agent is described as the combination of tool use, autonomous execution, and memory.
+
+In addition, Harrison Chase defines an agent in the blog [What is an AI agent](https://blog.langchain.dev/what-is-an-agent/) as "a system that uses an LLM to decide the control flow of an application."
+
+Yet another simple [summary](https://www.felicis.com/insight/the-agentic-web) by Felicis of what an agent does is that an agent expands LLMs to go from chat to act: an agent can pair LLMs with external data, multi-step reasoning and planning, and act on the user's behalf.
+
+All in all (see [Resources](#resources) for even more info), agents are systems that take a high-level task, use an LLM as a reasoning and planning engine, with the help of contextual info and long-term memory if needed, decide what actions to take, reflect on and improve those actions, and eventually execute them to accomplish the task.
+
+It's time to see an agent app in action and enjoy some coding. Below is a preview of the questions or requests one may ask the agent:
+
+# Example Asks to Email Agent
+
+* do i have any emails with attachments?
+* what's the content of the email from LangSmith
+* do i have emails with attachment larger than 1mb?
+* what kind of attachments for the email with subject papers to read?
+* give me a summary of the pdf thinking_llm.pdf
+* Draft an email to xxx@gmail.com saying working on it and will keep you updated. thanks for your patience.
+* send the draft
+* do i have any emails with attachment larger than 10mb?
+* how about 5mb
+* reply to the email saying thanks for sharing!
+* forward the email to xxx@gmail.com
+* how many emails do i have from xxx@gmail.com?
+* how about from yyy@gmail.com?
+
+Below are screenshots of some example interactions with the agent:
+
+
+
+
+# Setup and Installation
+
+If you feel intimidated by the steps in the following Enable Gmail API section, you may want to check the example asks again (to see what you can ask the agent) and the example log (to see the whole conversation with the agent) - the devil's in the details, and all the glorious descriptions of a powerful, trendy agent may not mention the little details one has to deal with to build it.
+
+## Enable Gmail API
+1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
+2. Create a new project by clicking the dropdown on the top left then click NEW PROJECT.
+3. Enter a Project name then click CREATE.
+4. Under "APIs & Services" > "Enabled APIs & services", search for "gmail" and then Enable the "Gmail API" for your project.
+5. Under "APIs & Services" > "OAuth consent screen", click "GO TO NEW EXPERIENCE", then click "GET STARTED", enter App name, select your gmail as User support email, choose External under Audience, enter your gmail again as Contact Information, and finally check the I agree to the Google API Services under Finish and click Continue - Create.
+6. Again under "APIs & Services", go to Credentials. Click on + CREATE CREDENTIALS, then choose OAuth client ID (NOT API key).
+Select Desktop App (NOT Web application, because you're assumed to want to start your Gmail agent locally first) as the application type and name it. Click Create to generate your client ID and client secret.
+7. Click Download JSON and rename the downloaded file as credentials.json. This file will be used in your Python script for authentication.
+
+## Install Ollama with Llama 3.1 8B
+
+Download Ollama (available for macOS, Linux, and Windows) [here](https://ollama.com/). Then download and test run the Llama 3.1 8B model by running on a Terminal:
+```
+ollama run llama3.1
+```
+
+This will download a quantized version of Llama 3.1 that is about 4.7GB in size.
+
+## Install required packages
+First, create a Conda or virtual env:
+
+```
+conda create -n emailagent python=3.10
+conda activate emailagent
+```
+or
+```
+python -m venv emailagent
+source emailagent/bin/activate  # on Linux, macOS
+emailagent\Scripts\activate     # on Windows
+```
+
+Then install the required Python libraries:
+```
+git clone https://github.com/meta-llama/llama-recipes
+cd llama-recipes/recipes/use_cases/email_agent
+pip install -r requirements.txt
+```
+
+# Run Email Agent
+
+To run the agent, you need to first copy the `credentials.json` file downloaded and renamed above in Step 7 of Enable Gmail API to the email_agent folder, then run:
+```
+python main.py --email <your_gmail_address>
+```
+
+The first time you run it, you'll get a prompt like this:
+```
+Please visit this URL to authorize this application: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=xxxx
+Enter the authorization code:
+```
+
+You need to copy the URL above and open it in a browser - if you Sign in with Google using the same Gmail you enabled for the Gmail API, then you'll see "You’ve been given access to an app that’s currently being tested. 
You should only continue if you know the developer that invited you.", otherwise if you sign in with another Gmail, you'll see "Gmail Agent App has not completed the Google verification process. The app is currently being tested, and can only be accessed by developer-approved testers. If you think you should have access, contact the developer."
+
+In the latter case, go to APIs & Services > OAuth consent screen > Test users, click the + ADD USERS button, and you'll see this message: While publishing status is set to "Testing", only test users are able to access the app. Allowed user cap prior to app verification is 100, and is counted over the entire lifetime of the app.
+
+After clicking Continue, check the Select all checkbox to enable both settings required for running the agent:
+```
+View your email messages and settings.
+Manage drafts and send emails.
+```
+
+Finally, copy the Authorization code and paste it into the Terminal, hit Enter, and you'll see the agent's initial greeting (which will likely differ because the default temperature value 0.8 is used here - see [Ollama's model file](https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values) for details) such as:
+```
+Hello! I'm Email Agent, here to help you manage your email account with ease.
+
+What would you like to do today? Do you want me to:
+
+Check and respond to new emails
+Compose a new email
+Organize your inbox with filters or labels
+Delete unwanted emails
+Something else?
+
+Let me know how I can assist you!
+
+Your ask:
+```
+
+If you cancel here and run the command `python main.py --email <your_gmail_address>` again, you should see the agent greeting right away without the need to enter an authorization code, unless you enter a different Gmail address for the first time - in fact, for each authorized (added as a test user) Gmail address, a file `token_xxxx@gmail.com.pickle` will be created which contains the authorized token.
+
+See the example asks and interaction log above for the types of asks you may enter.
+
+# Implementation Notes
+Notes here mainly cover how custom functions are defined, how Gmail API based functions are implemented, and how an Agent class is defined to handle memory for contextual chat and perform pre- and post-processing on the tool calling.
+
+## Available Custom Functions (Tools) Definition
+`functions_prompt.py` defines the following six custom functions, as part of the system prompt (along with examples for each function call spec that Llama should return):
+
+* list_emails_function
+* get_email_function
+* send_email_function (new, reply, forward)
+* get_pdf_summary_function
+* create_draft_function
+* send_draft_function
+
+Below is an example function call spec in JSON format, for user asks such as "do i have emails with attachments larger than 5mb", "any attachments larger than 5mb" or "let me know if i have large attachments over 5mb":
+```
+{"name": "list_emails", "parameters": {"query": "has:attachment larger:5mb"}}
+```
+
+Before LLMs, it'd be a REAL pain to cover ALL the possible natural language user inputs that can be, and should be, translated into the same semantic representation (if you've done Amazon Alexa Skill or Google Assistant development or any pre-LLM NLU work before, you'd know that the JSON format above is the same as the intent-slots representation). Now LLMs such as Llama do most of the heavy lifting in translating an open natural language input into its semantic representation. 
+
+But still, if you look at how the `list_emails_function` (which is used to search for emails based on a user query) is defined below, you'd see that a lot of work is needed to convert the user's asks to the filter values the Gmail API can accept:
+
+```
+list_emails_function = """
+{
+    "type": "function",
+    "function": {
+        "name": "list_emails",
+        "description": "Return a list of emails matching an optionally specified query.",
+        "parameters": {
+            "type": "dict",
+            "properties": [
+                {
+                    "maxResults": {
+                        "type": "integer",
+                        "description": "The default maximum number of emails to return is 100; the maximum allowed value for this field is 500."
+                    }
+                },
+                {
+                    "query": {
+                        "type": "string",
+                        "description": "One or more keywords in the email subject and body, or one or more filters. There can be 6 types of filters: 1) Field-specific Filters: from, to, cc, bcc, subject; 2) Date Filters: before, after, older than, newer than; 3) Status Filters: read, unread, starred, important; 4) Attachment Filters: has, filename or type; 5) Size Filters: larger, smaller; 6) logical operators (or, and, not)."
+                    }
+                }
+            ],
+            "required": []
+        }
+    }
+}
+"""
+```
+In fact, even though many hours of pre-processing work have been done to cover some test examples, not all of the examples in `functions_prompt.py` have been covered and tested.
+
+## Actual Function Call Implementation
+
+For each defined custom function call, its implementation using the Gmail API is present in `email_agent.py`. For example, `list_emails` is defined as follows:
+
+```
+def list_emails(query='', max_results=100):
+    emails = []
+    next_page_token = None
+
+    while True:
+        response = service.users().messages().list(
+            userId=user_id,
+            maxResults=max_results,
+            pageToken=next_page_token,
+            q=query
+        ).execute()
+
+        if 'messages' in response:
+            for msg in response['messages']:
+                sender, subject, received_time = get_email_info(msg['id'])
+                emails.append(
+                    {
+                        "message_id": msg['id'],
+                        "sender": sender,
+                        "subject": subject,
+                        "received_time": received_time
+                    }
+                )
+
+        next_page_token = response.get('nextPageToken')
+
+        if not next_page_token:
+            break
+
+    return emails
+```
+
+The function will be called by our agent after a user ask such as "do i have emails with attachments larger than 5mb" gets Llama's response below:
+```
+{"name": "list_emails", "parameters": {"query": "has:attachment larger:5mb"}}
+```
+
+## The Agent class
+Also implemented in `email_agent.py`, the Agent class uses 3 instance members to allow for context-aware asks to the agent, giving it short-term memory (a minimal sketch of the class structure follows the list below):
+1. `messages`: this list holds all the previous user asks and the function call results based on Llama's response to the user asks, making Llama able to answer follow-up questions such as "how about 5mb" (after the initial ask "attachments larger than 10mb") or "how about from yyy@gmail.com" (after the ask "any emails from xxx@gmail.com").
+2. `emails`: this list holds the emails that match the user query, so follow-up questions such as "what kind of attachments for the email with subject xxx" can be answered.
+3. `draft_id`: this is used to handle the ask "send the draft" after an initial ask such as "draft an email to xxx".
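+
+To make the structure concrete, below is a minimal sketch of such an Agent class - a simplified illustration, not the actual implementation in `email_agent.py`; the `call_llama` helper here is a hypothetical stand-in that sends the chat history to the local Llama 3.1 model via Ollama:
+
+```
+import json
+import ollama
+
+def call_llama(messages):
+    # hypothetical helper: send the chat history to the local Llama 3.1 model via Ollama
+    return ollama.chat(model="llama3.1", messages=messages)["message"]["content"]
+
+class Agent:
+    def __init__(self, system_prompt=""):
+        self.messages = [{"role": "system", "content": system_prompt}]  # short-term memory for follow-up asks
+        self.emails = []      # emails matching the last search query
+        self.draft_id = None  # draft created earlier, so "send the draft" knows which draft to send
+
+    def __call__(self, user_ask):
+        self.messages.append({"role": "user", "content": user_ask})
+        result = call_llama(self.messages)
+        try:
+            # a tool call is returned as a JSON spec, possibly after a <|python_tag|> marker
+            res = json.loads(result.split("<|python_tag|>")[-1])
+            return {"function_name": res["name"], "parameters": res["parameters"]}
+        except (json.JSONDecodeError, KeyError, TypeError):
+            return result  # no tool call spec found; return Llama's answer directly
+```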
+ +The `__call__` method of `Agent` includes the call to Llama with the `messages` and parses the Llama response if it's a tool calling spec JSON result, or if Llama doesn't return a tool calling spec, it means it doesn't find a custom tool for the user ask so the Llama response is returned directly: +``` + try: + res = json.loads(result.split("<|python_tag|>")[-1]) + function_name = res['name'] + parameters = res['parameters'] + return {"function_name": function_name, + "parameters": parameters} + except: + return result +``` + +Also implemented there are both pre-processing logic, mainly to convert some parameter values from Llama's responses to what Gmail APIs can accept to make the API calls happy, and post-processing logic to convert function call results to user-friendly natural language. + +``` +function_name = result["function_name"] +func = globals()[function_name] +parameters = result["parameters"] +... <pre-processing> +result = func(**parameters) +... <post-processing> +``` + +When you try out the app, you'll likely find that further pre- and post-processing still needed to make it production ready. In a great video on [Vertical LLM Agents](https://www.youtube.com/watch?v=eBVi_sLaYsc), Jake Heller said "after passes frankly even like 100 tests the odds that it will do on any random distribution of user inputs of the next 100,000, 100% accurately is very high" and "by the time you've dealt with like all the edge cases... there might be dozens of things you build into your application to actually make it work well and then you get to the prompting piece and writing out tests and very specific prompts and the strategy for how you break down a big problem into step by step by step thinking and how you feed in the information how you format that information the right way". That's what all the business logic is about. We'll cover decomposing a complicated ask and multi-step reasoning in a future version of the app, and continue to explore the best possible way to streamline the pre- and post-processing. + +## Debugging output + +When running the app, the detailed Llama returns, pre-processed tool call specs and the actual tool calling results are inside the `-------------------------` block, e.g.: + +------------------------- +Calling Llama... + +Llama returned: {'function_name': 'list_emails', 'parameters': {'query': 'subject:papers to read has:attachment'}}. + +Calling tool to access Gmail API: list_emails, {'query': 'subject:papers to read has:attachment'}... + +Tool calling returned: [{'message_id': '1936ef72ad3f30e8', 'sender': 'xxx@gmail.com', 'subject': 'Fwd: papers to read', 'received_time': '2024-11-27 10:51:51 PST'}, {'message_id': '1936b819706a4923', 'sender': 'Jeff Tang <xxx@gmail.com>', 'subject': 'papers to read', 'received_time': '2024-11-26 18:44:19 PST'}] + +------------------------- + + +# TODOs + +1. Port the app to using [Llama Stack](https://github.com/meta-llama/llama-stack) Agents API. +2. Improve the search, reply, forward, create email draft, and query about types of attachments. +3. Improve the fallback and error handling mechanism when the user asks don't lead to a correct function calling spec or the function calling fails. +4. Improve the user experience by showing progress when some Gmail search API calls take long (minutes) to complete. +5. Implement the async behavior of the agent - schedule an email to be sent later. +6. Implement the agent planning - decomposing a complicated ask into sub-tasks, using ReAct and other methods. +7. 
Implement the agent long-term memory - longer context and memory across sessions (consider using Llama Stack/MemGPT/Letta) +8. Implement reflection - on the tool calling spec and results. +9. Introduce multiple-agent collaboration. +10. Implement the agent observability. +11. Compare different agent frameworks using the app as the case study. +12. Add and implement a test plan and productionize the app. + + +# Resources +1. Lilian Weng's blog [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) +2. Andrew Ng's posts [Agentic Design Patterns](https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/) with basic [implementations from scratch](https://github.com/neural-maze/agentic_patterns). +3. LangChain's survey [State of AI Agents](https://www.langchain.com/stateofaiagents) +4. Deloitte's report [AI agents and multiagent systems](https://www2.deloitte.com/content/dam/Deloitte/us/Documents/consulting/us-ai-institute-generative-ai-agents-multiagent-systems.pdf) +5. Letta's blog [The AI agents stack](https://www.letta.com/blog/ai-agents-stack) +6. Microsoft's multi-agent system [Magentic-One](https://www.microsoft.com/en-us/research/articles/magentic-one-a-generalist-multi-agent-system-for-solving-complex-tasks) +7. Amazon's [Multi-Agent Orchestrator framework](https://awslabs.github.io/multi-agent-orchestrator/) +8. Deeplearning.ai's [agent related courses](https://www.deeplearning.ai/courses/?courses_date_desc%5Bquery%5D=agents) (Meta, AWS, Microsoft, LangChain, LlamaIndex, crewAI, AutoGen, Letta) and some [lessons ported to using Llama](https://github.com/meta-llama/llama-recipes/tree/main/recipes/quickstart/agents/DeepLearningai_Course_Notebooks). +9. Felicis's [The Agentic Web](https://www.felicis.com/insight/the-agentic-web) +10. A pretty complete [list of AI agents](https://github.com/e2b-dev/awesome-ai-agents), not including [/dev/agents](https://sdsa.ai/), a very new startup building the next-gen OS for AI agents, though. +11. Sequoia's [post](https://www.linkedin.com/posts/konstantinebuhler_the-ai-landscape-is-shifting-from-simple-activity-7270111755710672897-ZHnr/) on 2024 being the year of AI agents and 2025 networks of AI agents. 
diff --git a/recipes/use_cases/email_agent/email_agent.png b/recipes/use_cases/email_agent/email_agent.png new file mode 100644 index 0000000000000000000000000000000000000000..76956d696aa9dd8051cc642b4e6c9deffe900b75 Binary files /dev/null and b/recipes/use_cases/email_agent/email_agent.png differ diff --git a/recipes/use_cases/email_agent/email_agent.py b/recipes/use_cases/email_agent/email_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..8936ea21782f2bcf3b954a7467341ba9089588c0 --- /dev/null +++ b/recipes/use_cases/email_agent/email_agent.py @@ -0,0 +1,624 @@ +from google.auth.transport.requests import Request +from google_auth_oauthlib.flow import InstalledAppFlow +from googleapiclient.discovery import build +from email.mime.text import MIMEText +from email.mime.multipart import MIMEMultipart +from email.mime.base import MIMEBase +from email import encoders + +from bs4 import BeautifulSoup +import os +import pytz +import base64 +import pickle +from datetime import datetime, timezone +import json +import ollama +from pypdf import PdfReader +from pathlib import Path + +SCOPES = ['https://www.googleapis.com/auth/gmail.readonly', 'https://www.googleapis.com/auth/gmail.compose'] + +def authenticate_gmail(user_email): + creds = None + token_file = f'token_{user_email}.pickle' # Unique token file for each user + + # Load the user's token if it exists + if os.path.exists(token_file): + with open(token_file, 'rb') as token: + creds = pickle.load(token) + + # If no valid credentials, prompt the user to log in + if not creds or not creds.valid: + if creds and creds.expired and creds.refresh_token: + creds.refresh(Request()) + else: + flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES) + creds = flow.run_console() + + # Save the new credentials to a user-specific token file + with open(token_file, 'wb') as token: + pickle.dump(creds, token) + + # Build the Gmail API service + service = build('gmail', 'v1', credentials=creds) + return service + + +def num_of_emails(query=''): + response = service.users().messages().list( + userId='me', + q=query, + maxResults=1).execute() + return response.get('resultSizeEstimate', 0) + + +def list_emails(query='', max_results=100): + emails = [] + next_page_token = None + + while True: + response = service.users().messages().list( + userId=user_id, + maxResults=max_results, + pageToken=next_page_token, + q=query + ).execute() + + if 'messages' in response: + for msg in response['messages']: + sender, subject, received_time = get_email_info(msg['id']) + emails.append( + { + "message_id": msg['id'], + "sender": sender, + "subject": subject, + "received_time": received_time + } + ) + + next_page_token = response.get('nextPageToken') + + if not next_page_token: + break + + return emails + +def get_email_detail(detail, which=''): + if detail == 'body': + return get_email_body(which) + elif detail == 'attachment': + return get_email_attachments(which) + + +def get_email_body(message_id): + try: + message = service.users().messages().get( + userId=user_id, + id=message_id, + format='full').execute() + + # Recursive function to extract the parts + def extract_parts(payload): + text_body = "" + if 'parts' in payload: + for part in payload['parts']: + return extract_parts(part) + else: + mime_type = payload.get('mimeType') + body = payload.get('body', {}).get('data') + if mime_type == 'text/html': + decoded_body = base64.urlsafe_b64decode(body).decode('utf-8') + soup = BeautifulSoup(decoded_body, 'html.parser') + 
text_body = soup.get_text().strip() + elif mime_type == 'text/plain': + decoded_body = base64.urlsafe_b64decode(body).decode('utf-8') + text_body = decoded_body + + return text_body + + return extract_parts(message['payload']) + + except Exception as e: + print(f"An error occurred: {e}") + return None + + +def parse_message(message): + payload = message['payload'] + headers = payload.get("headers") + + subject = None + sender = None + for header in headers: + if header['name'] == 'Subject': + subject = header['value'] + elif header['name'] == 'From': + sender = header['value'] + + internal_date = message.get('internalDate') + utc_time = datetime.fromtimestamp(int(internal_date) / 1000, tz=timezone.utc) + + # Convert UTC to the specified timezone + local_timezone = pytz.timezone("America/Los_Angeles") + local_time = utc_time.astimezone(local_timezone) + + # Format the local time as a string + received_time = local_time.strftime('%Y-%m-%d %H:%M:%S %Z') + + # Check if the email is plain text or multipart + if 'parts' in payload: + # Multipart message - find the text/plain or text/html part + for part in payload['parts']: + if part['mimeType'] == 'text/plain' or part['mimeType'] == 'text/html': # You can also look for 'text/html' + data = part['body']['data'] + body = base64.urlsafe_b64decode(data).decode('utf-8') + return sender, subject, received_time, body + elif part['mimeType'] in ['multipart/related', 'multipart/mixed', 'multipart/alternative']: + return sender, subject, received_time, get_email_body(message.get('id')) + else: + # Single part message + data = payload['body']['data'] + body = base64.urlsafe_b64decode(data).decode('utf-8') + return sender, subject, received_time, body + + +def get_email_info(msg_id): + message = service.users().messages().get( + userId=user_id, + id=msg_id, + format='full').execute() + + sender, subject, received_time, body = parse_message(message) + + return sender, subject, received_time + + +def reply_email(message_id, reply_text): + # Fetch the original message + original_message = service.users().messages().get( + userId=user_id, + id=message_id, + format='full').execute() + + # Get headers + headers = original_message['payload']['headers'] + subject = None + to = None + for header in headers: + if header['name'] == 'Subject': + subject = header['value'] + if header['name'] == 'From': + to = header['value'] + + # Create the reply subject + if not subject.startswith("Re: "): + subject = "Re: " + subject + + # Compose the reply message + reply_message = MIMEText(reply_text) + reply_message['to'] = to + reply_message['from'] = user_id + reply_message['subject'] = subject + reply_message['In-Reply-To'] = message_id + + # Encode and send the message + raw_message = base64.urlsafe_b64encode(reply_message.as_bytes()).decode("utf-8") + body = {'raw': raw_message, + 'threadId': original_message['threadId']} + sent_message = service.users().messages().send( + userId=user_id, + body=body).execute() + print("Reply sent. Message ID:", sent_message['id']) + + +def forward_email(message_id, forward_to, email_body=None): + """ + Forwards an email, preserving the original MIME type, including multipart/related. 
+ """ + # Get the original message in 'full' format + original_message = service.users().messages().get( + userId=user_id, + id=message_id, + format='full').execute() + + # Extract the payload and headers + payload = original_message.get('payload', {}) + headers = payload.get('headers', []) + parts = payload.get('parts', []) + # Get the Subject + subject = next((header['value'] for header in headers if header['name'].lower() == 'subject'), 'No Subject') + + # Create a new MIME message for forwarding + mime_message = MIMEMultipart(payload.get('mimeType', 'mixed').split('/')[-1]) + mime_message['To'] = forward_to + mime_message['Subject'] = f"Fwd: {subject}" + + # Add the optional custom email body + if email_body: + mime_message.attach(MIMEText(email_body, 'plain')) + + # Function to fetch attachment data by attachmentId + def fetch_attachment_data(attachment_id, message_id): + attachment = service.users().messages().attachments().get( + userId=user_id, messageId=message_id, id=attachment_id + ).execute() + return base64.urlsafe_b64decode(attachment['data']) + + # Rebuild MIME structure + def rebuild_parts(parts): + """ + Recursively rebuild MIME parts. + """ + if not parts: + return None + + for part in parts: + part_mime_type = part.get('mimeType', 'text/plain') + part_body = part.get('body', {}) + part_data = part_body.get('data', '') + part_parts = part.get('parts', []) # Sub-parts for multipart types + filename = part.get('filename') + attachment_id = part_body.get('attachmentId') + + if part_mime_type.startswith('multipart/'): + # Rebuild nested multipart + sub_multipart = MIMEMultipart(part_mime_type.split('/')[-1]) + sub_parts = rebuild_parts(part_parts) + if sub_parts: + for sub_part in sub_parts: + sub_multipart.attach(sub_part) + yield sub_multipart + elif filename and attachment_id: + # Handle attachments + decoded_data = fetch_attachment_data(attachment_id, message_id) + attachment = MIMEBase(*part_mime_type.split('/')) + attachment.set_payload(decoded_data) + encoders.encode_base64(attachment) + attachment.add_header('Content-Disposition', f'attachment; filename="{filename}"') + yield attachment + else: + if part_data: + # Decode and attach non-multipart parts + decoded_data = base64.urlsafe_b64decode(part_data) + + if part_mime_type == 'text/plain': + yield MIMEText(decoded_data.decode('utf-8'), 'plain') + elif part_mime_type == 'text/html': + yield MIMEText(decoded_data.decode('utf-8'), 'html') + + # Rebuild the main MIME structure + rebuilt_parts = rebuild_parts(parts) + if rebuilt_parts: + for rebuilt_part in rebuilt_parts: + mime_message.attach(rebuilt_part) + + # Encode the MIME message to base64 + raw = base64.urlsafe_b64encode(mime_message.as_bytes()).decode('utf-8') + + # Send the email + forward_body = {'raw': raw} + sent_message = service.users().messages().send(userId=user_id, body=forward_body).execute() + + print(f"Message forwarded successfully! 
Message ID: {sent_message['id']}") + + +def send_email(action, to, subject, body="", email_id=""): + if action == "compose": + message = MIMEText(body) + message['to'] = to + message['from'] = user_id + message['subject'] = subject + + # Encode and send the message + raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8") + body = {'raw': raw_message} + sent_message = service.users().messages().send( + userId=user_id, + body=body).execute() + return sent_message['id'] + elif action == "reply": # reply or forward; a message id is needed + reply_email(email_id, body) + elif action == "forward": + forward_email(email_id, to, body) + + +def create_draft(action, to, subject, body="", email_id=""): + if action == "new": + message = MIMEText(body) + message['to'] = to + message['from'] = user_id + message['subject'] = subject + + encoded_message = base64.urlsafe_b64encode(message.as_bytes()).decode() + draft_body = {'message': {'raw': encoded_message}} + draft = service.users().drafts().create( + userId=user_id, + body=draft_body).execute() + print(f"Draft created with ID: {draft['id']}") + return draft['id'] + elif action == "reply": + return create_reply_draft(email_id, body) + elif action == "forward": + return create_forward_draft(email_id, to, body) + else: + return + + + +def create_reply_draft(message_id, reply_text): + # Fetch the original message + original_message = service.users().messages().get( + userId=user_id, + id=message_id, + format='full').execute() + + # Get headers + headers = original_message['payload']['headers'] + subject = None + to = None + for header in headers: + if header['name'] == 'Subject': + subject = header['value'] + if header['name'] == 'From': + to = header['value'] + + # Create the reply subject + if not subject.startswith("Re: "): + subject = "Re: " + subject + + # Compose the reply message + reply_message = MIMEText(reply_text) + reply_message['to'] = to + reply_message['from'] = user_id + reply_message['subject'] = subject + reply_message['In-Reply-To'] = message_id + + encoded_message = base64.urlsafe_b64encode(reply_message.as_bytes()).decode() + draft_body = {'message': {'raw': encoded_message, 'threadId': original_message['threadId']}} + draft = service.users().drafts().create(userId=user_id, body=draft_body).execute() + return draft['id'] + + +def create_forward_draft(message_id, recipient_email, custom_message=None): + # Get the original message + original_message = service.users().messages().get( + userId=user_id, + id=message_id, + format='raw').execute() + + # Decode the raw message + raw_message = base64.urlsafe_b64decode(original_message['raw'].encode('utf-8')) + + # Prepare the forward header and optional custom message + forward_header = f"----- Forwarded message -----\nFrom: {recipient_email}\n\n" + if custom_message: + forward_header += f"{custom_message}\n\n" + + # Combine the forward header with the original message + new_message = forward_header + raw_message.decode('utf-8') + + # Encode the combined message into base64 format + encoded_message = base64.urlsafe_b64encode(new_message.encode('utf-8')).decode('utf-8') + + draft_body = {'message': {'raw': encoded_message, 'threadId': original_message['threadId']}} + draft = service.users().drafts().create(userId=user_id, body=draft_body).execute() + print(f"Forward draft created with ID: {draft['id']}") + return draft['id'] + + +def send_draft(id): + sent_message = service.users().drafts().send( + userId=user_id, + body={'id': id} + ).execute() + return f"Draft sent with 
email ID: {sent_message['id']}" + + +def get_pdf_summary(file_name): + text = pdf2text(file_name) + print("Calling Llama to generate a summary...") + response = llama31(text, "Generate a summary of the input text in 5 sentences.") + return response + + +def get_email_attachments(message_id, mime_type='application/pdf'): + attachments = [] + + # Helper function to process email parts + def process_parts(parts): + for part in parts: + if part['mimeType'] in ['multipart/related', 'multipart/mixed', 'multipart/alternative']: + # Recursively process nested parts + if 'parts' in part: + process_parts(part['parts']) + elif 'filename' in part and part['filename']: + if part['mimeType'] == mime_type: # Check for the desired MIME type + attachment_id = part['body'].get('attachmentId') + if attachment_id: + # Get the attachment data + attachment = service.users().messages().attachments().get( + userId=user_id, + messageId=message_id, + id=attachment_id + ).execute() + + # Decode the attachment content + file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8')) + + with open(part['filename'], "wb") as f: + f.write(file_data) + + # Save the attachment information + attachments.append( + {'filename': part['filename'], + 'data': file_data, + 'size': attachment.get('size', 0) + }) + + # Retrieve the email message + message = service.users().messages().get( + userId=user_id, + id=message_id, + format='full').execute() + payload = message['payload'] + + # Start processing the parts + if 'parts' in payload: + process_parts(payload['parts']) + + rslt = "" + for a in attachments: + rslt += f"{a['filename']} - {a['size']} bytes\n" + return rslt #attachments + + +def pdf2text(file): + text = '' + try: + with Path(file).open("rb") as f: + reader = PdfReader(f) + text = "\n\n".join([page.extract_text() for page in reader.pages]) + except Exception as e: + raise f"Error reading the PDF file: {str(e)}" + + print(f"\nPDF text length: {len(text)}\n") + + return text + + +user_email = None +service = None +user_id = 'me' + +def set_email_service(gmail): + global user_email + global service + + user_email = gmail + service = authenticate_gmail(user_email) + +class Agent: + def __init__(self, system_prompt=""): + self.system_prompt = system_prompt + self.messages = [] + + # agent-specific short term memory, used to answer follow up questions AFTER a list of emails is found matching user's query + self.emails = [] + self.draft_id = None + + if self.system_prompt: + self.messages.append({"role": "system", "content": system_prompt}) + + def __call__(self, user_prompt_or_tool_result, is_tool_call=False): + # if it's tool call result, use "ipython" instead of "user" for the role + self.messages.append({"role": ("ipython" if is_tool_call else "user"), "content": user_prompt_or_tool_result}) + result = self.llama() + print(f"\nLlama returned: {result}.") + if type(result) == dict: # result is a dict only if it's a tool call spec + function_name = result["function_name"] + func = globals()[function_name] + parameters = result["parameters"] + if function_name == "get_email_detail": + # TODO: parse which - valid values are first, second, + # third, fourth, last, from xxx + if 'id' in parameters.keys(): + parameters['which'] = parameters['id'] + del parameters['id'] # per the function spec + elif 'which' in parameters.keys(): + if 'from ' in parameters['which']: + sender = parameters['which'].split('from ')[-1] + for email in self.emails: + if email['sender'].find(sender) != -1: + parameters['which'] = 
email['message_id'] + break + if 'subject ' in parameters['which']: + subject = parameters['which'].split('subject ')[-1] + # exact match beats substring + for email in self.emails: + if email['subject'].upper() == subject.upper(): + parameters['which'] = email['message_id'] + break + elif email['subject'].upper().find(subject.upper()) != -1: + parameters['which'] = email['message_id'] + + elif 'id_' in parameters['which']: + parameters['which'] = parameters['which'].split('id_')[-1] + else: + parameters['which'] = self.emails[-1]['message_id'] + elif function_name == "send_draft": + parameters['id'] = self.draft_id + + print(f"\nCalling tool to access Gmail API: {function_name}, {parameters}...") + result = func(**parameters) + print(f"\nTool calling returned: {result}") + + # convert function calling result to concise summary, offering interactive follow ups, + # for smooth and user friendly experience + if function_name == 'list_emails': + self.emails = result + num = len(result) + if num == 0: + output = "I couldn't find any such emails. What else would you like to do?" + elif num <= 5: + output = f"I found {num} email{'s' if num > 1 else ''} matching your query:\n" + for i, email in enumerate(result, start=1): + output += f"{i}. From: {email['sender']}, Subject: {email['subject']}, Received on: {email['received_time']}\n" + else: + output = f"I found {num} emails matching your query. Here are the first 5 emails:\n" + for i in range(1, 6): + output += f"{i}. From: {result[i-1]['sender']}, Subject: {result[i-1]['subject']}, Received on: {result[i-1]['received_time']}\n" + elif function_name == "get_email_detail": + output = result + elif function_name == "get_pdf_summary": + output = result + elif function_name == "send_email": + output = "Email sent." + elif function_name == "create_draft": + output = "Draft created." + self.draft_id = result + elif function_name == "send_draft": + output = result + + print(f"\n-------------------------\n\nAgent: {output}\n") + else: + output = result # direct text, not JSON, response by Llama + + # adding this may cause Llama to hallucinate when answering + # follow up questions. e.g. "do i have emails with attachments + # larger than 20mb" got right tool calling response, then + # follow up "larger than 10mb" got hallucinated response. 
+        # self.messages.append({"role": "assistant", "content": output})
+
+        # this mitigates the hallucination
+        self.messages.append({"role": "assistant", "content": str(result)})
+
+        return output
+
+    def llama(self):
+        response = ollama.chat(model='llama3.1',
+                        messages = self.messages,
+                        options = {
+                            "temperature": 0.0
+                        }
+            )
+        result = response['message']['content']
+
+        try:
+            res = json.loads(result.split("<|python_tag|>")[-1])
+            function_name = res['name']
+            parameters = res['parameters']
+            return {"function_name": function_name,
+                    "parameters": parameters}
+        except:
+            return result
+
+
+def llama31(user_prompt: str, system_prompt = ""):
+    response = ollama.chat(model='llama3.1',
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt},
+        ],
+    )
+    return response['message']['content']
diff --git a/recipes/use_cases/email_agent/functions_prompt.py b/recipes/use_cases/email_agent/functions_prompt.py
new file mode 100644
index 0000000000000000000000000000000000000000..acaf9b63f1b0d19ede6b92d8fbdb765881bf58c5
--- /dev/null
+++ b/recipes/use_cases/email_agent/functions_prompt.py
@@ -0,0 +1,241 @@
+list_emails_function = """
+{
+    "type": "function",
+    "function": {
+        "name": "list_emails",
+        "description": "Return a list of emails matching an optionally specified query.",
+        "parameters": {
+            "type": "dict",
+            "properties": [
+                {
+                    "maxResults": {
+                        "type": "integer",
+                        "description": "The default maximum number of emails to return is 100; the maximum allowed value for this field is 500."
+                    }
+                },
+                {
+                    "query": {
+                        "type": "string",
+                        "description": "One or more keywords in the email subject and body, or one or more filters. There can be 6 types of filters: 1) Field-specific Filters: from, to, cc, bcc, subject; 2) Date Filters: before, after, older than, newer than; 3) Status Filters: read, unread, starred, important; 4) Attachment Filters: has, filename or type; 5) Size Filters: larger, smaller; 6) logical operators (or, and, not)."
+ } + } + ], + "required": [] + } + } +} +""" + +get_email_function = """ +{ + "type": "function", + "function": { + "name": "get_email_detail", + "description": "Get detailed info about a specific email", + "parameters": { + "type": "dict", + "properties": [ + { + "detail": { + "type": "string", + "description": "what detail the user wants to know about - two possible values: body or attachment" + } + }, + { + "which": { + "type": "string", + "description": "which email to get detail about - possible values include: 'first', 'second', ..., 'last', 'from ...', and 'subject ...'" + } + }, + ], + "required": ["detail", "which"] + } + } +} +""" + +send_email_function = """ +{ + "type": "function", + "function": { + "name": "send_email", + "description": "Compose, reply, or forward email", + "parameters": { + "type": "dict", + "properties": [ + { + "action": { + "type": "string", + "description": "Whether to compose, reply, or forward an email" + } + }, + { + "to": { + "type": "string", + "description": "The recipient of the email" + } + }, + { + "subject": { + "type": "string", + "description": "The email subject" + } + }, + { + "body": { + "type": "string", + "description": "The email content" + } + }, + { + "email_id": { + "type": "string", + "description": "the email id to reply or forward to" + } + } + ], + "required": ["action", "to", "subject", "body"] + } + } +} +""" + +get_pdf_summary_function = """ +{ + "type": "function", + "function": { + "name": "get_pdf_summary", + "description": "get a summary of a PDF attachment", + "parameters": { + "type": "dict", + "properties": [ + { + "file_name": { + "type": "string", + "description": "The name of the PDF file" + } + }, + ], + "required": ["file_name"] + } + } +} +""" + +create_draft_function = """ +{ + "type": "function", + "function": { + "name": "create_draft", + "description": "Create a new, reply, or forward email draft", + "parameters": { + "type": "dict", + "properties": [ + { + "action": { + "type": "string", + "description": "Whether to draft a new, reply, or forward an email" + } + }, + { + "to": { + "type": "string", + "description": "The recipient of the email" + } + }, + { + "subject": { + "type": "string", + "description": "The email subject" + } + }, + { + "body": { + "type": "string", + "description": "The email content" + } + }, + { + "email_id": { + "type": "string", + "description": "the email id to reply or forward to, or empty if draft a new email." + } + } + ], + "required": ["action", "to", "subject", "body", "email_id"] + } + } +} +""" + +# for now, only allow for one draft email to be saved in a session +# to support for multiple drafts, cf how get_email_detail after list_emails is implemented. 
+send_draft_function = """ +{ + "type": "function", + "function": { + "name": "send_draft", + "description": "Send a draft email", + "parameters": { + "type": "dict", + "properties": [ + { + "id": { + "type": "string", + "description": "draft id" + } + }, + ], + "required": ["id"] + } + } +} +""" + +examples = """ +{"name": "list_emails", "parameters": {"query": "has:attachment larger:5mb"}} +{"name": "list_emails", "parameters": {"query": "has:attachment"}} +{"name": "list_emails", "parameters": {"query": "newer_than:1d"}} +{"name": "list_emails", "parameters": {"query": "older_than:1d"}} +{"name": "list_emails", "parameters": {"query": "is:unread"}} +{"name": "list_emails", "parameters": {"query": "<query> is:unread"}} +{"name": "list_emails", "parameters": {"query": "<query> is:read"}} +{"name": "get_email_detail", "parameters": {"detail": "body", "which": "first"}} +{"name": "get_email_detail", "parameters": {"detail": "body", "which": "last"}} +{"name": "get_email_detail", "parameters": {"detail": "body", "which": "second"}} +{"name": "get_email_detail", "parameters": {"detail": "body", "which": "subject <subject info>"}} +{"name": "get_email_detail", "parameters": {"detail": "attachment", "which": "from <sender info>"}} +{"name": "get_email_detail", "parameters": {"detail": "attachment", "which": "first"}} +{"name": "get_email_detail", "parameters": {"detail": "attachment", "which": "last"}} +{"name": "get_email_detail", "parameters": {"detail": "attachment", "which": "<email id>"}} +{"name": "send_email", "parameters": {"action": "compose", "to": "jeffxtang@meta.com", "subject": "xxxxx", "body": "xxxxx"}} +{"name": "send_email", "parameters": {"action": "reply", "to": "", "subject": "xxxxx", "body": "xxxxx", "email_id": "xxxxx"}} +{"name": "send_email", "parameters": {"action": "forward", "to": "jeffxtang@meta.com", "subject": "xxxxx", "body": "xxxxx", "email_id": "xxxxx"}} +{"name": "create_draft", "parameters": {"action": "new", "to": "jeffxtang@meta.com", "subject": "xxxxx", "body": "xxxxx", "email_id": ""}} +{"name": "create_draft", "parameters": {"action": "reply", "to": "", "subject": "xxxxx", "body": "xxxxx", "email_id": "xxxxx"}} +{"name": "create_draft", "parameters": {"action": "forward", "to": "jeffxtang@meta.com", "subject": "xxxxx", "body": "xxxxx", "email_id": "xxxxx"}} +{"name": "send_draft", "parameters": {"id": "..."}} +{"name": "get_pdf_summary", "parameters": {"file_name": "..."}} +""" + +system_prompt = f""" +Environment: ipython +Cutting Knowledge Date: December 2023 +Today Date: 1 December 2024 + +Your name is Email Agent, an assistant that can perform all email related tasks for your user. +Respond to the user's ask by making use of the following functions if needed. +If no available functions can be used, just say "I don't know" and don't make up facts. 
+Here is a list of available functions in JSON format: + +{list_emails_function} +{get_email_function} +{send_email_function} +{get_pdf_summary_function} +{create_draft_function} +{send_draft_function} + +Example responses: +{examples} + +""" diff --git a/recipes/use_cases/email_agent/main.py b/recipes/use_cases/email_agent/main.py new file mode 100644 index 0000000000000000000000000000000000000000..49c563fd8f24fbbcee21be07dfa6ff3f88825fad --- /dev/null +++ b/recipes/use_cases/email_agent/main.py @@ -0,0 +1,32 @@ +import argparse +import email_agent +from email_agent import * +from functions_prompt import system_prompt + + +def main(): + parser = argparse.ArgumentParser(description="Set email address") + parser.add_argument("--email", type=str, required=True, help="Your Gmail address") + args = parser.parse_args() + + email_agent.set_email_service(args.email) + + greeting = llama31("hello", "Your name is Email Agent, an assistant that can perform all email related tasks for your user.") + agent_response = f"{greeting}\n\nYour ask: " + agent = Agent(system_prompt) + + while True: + ask = input(agent_response) + if ask == "bye": + print(llama31("bye")) + break + print("\n-------------------------\nCalling Llama...") + agent(ask) + agent_response = "Your ask: " + + +if __name__ == "__main__": + main() + + + diff --git a/recipes/use_cases/email_agent/requirements.txt b/recipes/use_cases/email_agent/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d400c92523b39765e7744e3bf4b529e8dba67251 --- /dev/null +++ b/recipes/use_cases/email_agent/requirements.txt @@ -0,0 +1,9 @@ + +google-auth==2.27.0 +google-auth-oauthlib==0.4.6 +google-auth-httplib2==0.1.0 +google-api-python-client==2.34.0 +pytz +beautifulsoup4 +ollama +pypdf \ No newline at end of file diff --git a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval.py b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval.py index 59dd649a62c57305e9a1c01fb592ca7edf521bc4..73c1361db7dc9feacc0b903cc4464c822802cd9c 100644 --- a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval.py +++ b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval.py @@ -82,7 +82,7 @@ def generate_answers_with_RAG(model_name, question_list,api_config,retriever,api ) all_tasks = [] for q in question_list: - # retrive the top K documents + # retrieve the top K documents retrieved_docs = retriever.invoke(q) # format the documents into a string documents = format_docs_raft(retrieved_docs) @@ -200,7 +200,7 @@ def main(api_config): questions.append(item["question"]) groud_truth.append(item["answer"]) generated_answers = {} - # build retriver + # build retriever retriever = build_retriever(api_config,"sentence-transformers/multi-qa-mpnet-base-cos-v1",api_config["rag_topk"]) # Generate answers for 8B models model_name = api_config["model_name"] @@ -312,7 +312,7 @@ def parse_arguments(): "-r", "--rag_topk", default=5, type=int, - help="set the number of top k documents the RAG needs to retrive." + help="set the number of top k documents the RAG needs to retrieve." 
) parser.add_argument("--chunk_size", type=int, default=1000, help="The character size of each chunk used in RAG") return parser.parse_args() diff --git a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval_config.yaml b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval_config.yaml index 9cd5baa765c09e0310faa8694b38fc489e2e80b7..612b54f8e029794152531ec1e004501379e6c299 100644 --- a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval_config.yaml +++ b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_eval_config.yaml @@ -9,8 +9,8 @@ judge_prompt_template: > <|begin_of_text|><|start_header_id|>system<|end_header_id|>You have been provided with a question, a teacher's answer and a student's answer below. Given that question, you need to score the how good the student answer is compare to the teacher's answer. If the student's answer is correct based on the teacher's answer, then return YES, else return NO. - Here are the grade criterias to follow: - 1. Review it carefully to make sure that the keywords and numerical vaules are exactly the same. + Here are the grade criteria to follow: + 1. Review it carefully to make sure that the keywords and numerical values are exactly the same. 2. Ensure that the student answer does not contain any conflicting statements. 3. It is OK if the student answer contains more information than the ground truth answer, as long as it is factually accurate relative to the ground truth answer. YES means that the student's answer meets all of the criteria. diff --git a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_utils.py b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_utils.py index 73ae187b70f45bd6aab7199b1ad40d71eea7b6b9..ed3528e0d55d87d2883c505177b122d7e7924003 100644 --- a/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_utils.py +++ b/recipes/use_cases/end2end-recipes/RAFT-Chatbot/raft_utils.py @@ -112,7 +112,7 @@ def generate_questions(api_config): if len(documents) == 0: logging.info(f"Error reading files, document_text is {len(documents)}") document_batches = get_chunks(documents,api_config["chunk_size"],api_config) - # use OpenAI API protocol to hanlde the chat request, including local VLLM openai compatible server + # use OpenAI API protocol to handle the chat request, including local VLLM openai compatible server llm = ChatOpenAI( openai_api_key=key, openai_api_base=api_url, @@ -132,7 +132,7 @@ def generate_questions(api_config): queries = [strip_str(q) for q in queries] queries = [q for q in queries if any(c.isalpha() for c in q)] if len(queries) > int(api_config['questions_per_chunk']): - # As the model may have unrelated question at the begining of the result + # As the model may have unrelated question at the beginning of the result # if queries is more than questions_per_chunk, then we need to truncate it and only keep last questions_per_chunk lines queries = queries[-int(api_config['questions_per_chunk']):] final_result.append(queries) @@ -152,7 +152,7 @@ def generate_COT(chunk_questions_zip,api_config) -> dict: prompt = api_config['COT_prompt_template'].format(question=question,context=str(document_content)) all_tasks.append(prompt) chunk_questions.append((document_content,question)) - # use OpenAI API protocol to hanlde the chat request, including local VLLM openai compatible server + # use OpenAI API protocol to handle the chat request, including local VLLM openai compatible server llm = ChatOpenAI( openai_api_key=api_config["api_key"], openai_api_base=api_config["endpoint_url"], diff --git 
a/src/llama_recipes/inference/prompt_format_utils.py b/src/llama_recipes/inference/prompt_format_utils.py index f664f126e037f45d1640632644fb8a43dadfbe97..a791c89d61be9e00691bc9b84b8bafe4630747af 100644 --- a/src/llama_recipes/inference/prompt_format_utils.py +++ b/src/llama_recipes/inference/prompt_format_utils.py @@ -218,7 +218,7 @@ LLAMA_GUARD_3_CATEGORY = [ ), SafetyCategory( "Indiscriminate Weapons. ", - """I models should not create content that enables the creation of weapons of mass destruction. Examples of such weapons include, but are not limited to: + """AI models should not create content that enables the creation of weapons of mass destruction. Examples of such weapons include, but are not limited to: - Chemical weapons (ex: nerve gas) - Biological weapons (ex: anthrax) - Radiological weapons (ex: salted bombs) diff --git a/src/llama_recipes/inference/safety_utils.py b/src/llama_recipes/inference/safety_utils.py index f81a05a3acbb8314dc8172f7bc93434602fcc868..fd1f23a67f970ede89c00165b781d3153fe81cee 100644 --- a/src/llama_recipes/inference/safety_utils.py +++ b/src/llama_recipes/inference/safety_utils.py @@ -152,7 +152,7 @@ class AzureSaftyChecker(object): report = "|" + "|".join(f"{c.name:^10}" for c in categories) + "|\n" report += "|" + "|".join(f"{levels[s]:^10}" for s in severities) + "|\n" - return "Azure Content Saftey API", is_safe, report + return "Azure Content Safety API", is_safe, report class LlamaGuardSafetyChecker(object): diff --git a/src/llama_recipes/policies/anyprecision_optimizer.py b/src/llama_recipes/policies/anyprecision_optimizer.py index 22b0ca00173bd8b40c8982c615a3a04a697d6484..f110ed6fc320ee174ea39d58bb21c28e2c457350 100644 --- a/src/llama_recipes/policies/anyprecision_optimizer.py +++ b/src/llama_recipes/policies/anyprecision_optimizer.py @@ -7,7 +7,7 @@ # buffer dtypes. # Optional Kahan summation is used to offset precision reduction for # the weight updates. This allows full training in BFloat16 (equal or -# better than FP32 results in many cases) due to high precision weight upates. +# better than FP32 results in many cases) due to high precision weight updates. import torch from torch.optim.optimizer import Optimizer diff --git a/src/llama_recipes/utils/train_utils.py b/src/llama_recipes/utils/train_utils.py index c594b6a1e6555bba31524d972b4adf41995e7bb3..6c95a983fa9a0bec6e4fd1547f1eed322bb5eda3 100644 --- a/src/llama_recipes/utils/train_utils.py +++ b/src/llama_recipes/utils/train_utils.py @@ -81,7 +81,7 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche local_rank: The rank of the current node in a distributed setting train_config: The training configuration eval_dataloader: The dataloader containing the eval data - tokenizer: tokenizer used in the eval for decoding the predicitons + tokenizer: tokenizer used in the eval for decoding the predictions Returns: results dictionary containing average training and validation perplexity and loss """ @@ -579,7 +579,7 @@ def save_train_params(train_config, fsdp_config, rank): fsdp_config_dict = {k: str(v) for k, v in vars(fsdp_config).items() if not k.startswith('__')} # Merge the two dictionaries into one train_params_dict = {**train_config_dict, **fsdp_config_dict} - # Construct the folder name (follwoing FSDP checkpointing style) using properties of the train_config object + # Construct the folder name (following FSDP checkpointing style) using properties of the train_config object folder_name = ( train_config.dist_checkpoint_root_folder + "/"