diff --git a/.github/scripts/spellcheck_conf/wordlist.txt b/.github/scripts/spellcheck_conf/wordlist.txt
index 9332a4389e347b28b1aecc0bd3f092d74229561b..4e30058b4b0e46d2e5621b78031dcc3dee38a347 100644
--- a/.github/scripts/spellcheck_conf/wordlist.txt
+++ b/.github/scripts/spellcheck_conf/wordlist.txt
@@ -1351,6 +1351,11 @@ Weaviate
 MediaGen
 SDXL
 SVD
+Agentic
+AutoGen
+DeepLearning
+Deeplearning
+Llamaindex
 KV
 KVs
 XSUM
diff --git a/recipes/quickstart/agents/dlai/AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb b/recipes/quickstart/agents/dlai/AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..bfb533e039a13002eda5bf76098a5654bc6e533c
--- /dev/null
+++ b/recipes/quickstart/agents/dlai/AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb
@@ -0,0 +1,314 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7a4b75bb-d60a-41e3-abca-1ca0f0bf1201",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/dlai/AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "51581f90-911f-46ef-82dd-f3ca9c1d4b96",
+   "metadata": {},
+   "source": [
+    "This notebook ports the DeepLearning.AI short course [AI Agentic Design Patterns with AutoGen Lesson 4 Tool Use and Conversational Chess](https://learn.deeplearning.ai/courses/ai-agentic-design-patterns-with-autogen/lesson/5/tool-use-and-conversational-chess) to using Llama 3. \n",
+    "\n",
+    "You should take the course before or after going through this notebook to have a deeper understanding."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f9824ea5-3791-4638-a09d-43eb2c906d79",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "!pip install chess\n",
+    "!pip install pyautogen"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a082a6dc-ceb1-4a3e-b3ae-afcb835de6da",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import chess\n",
+    "import chess.svg\n",
+    "from typing_extensions import Annotated"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fbcdd9ea-f589-463d-a306-3fb3fcde770c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "board = chess.Board()\n",
+    "\n",
+    "made_move = False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9d27858c-4a0b-40f6-bd58-01b19c33ab38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_legal_moves(\n",
+    "    \n",
+    ") -> Annotated[str, \"A list of legal moves in UCI format\"]:\n",
+    "    return \"Possible moves are: \" + \",\".join(\n",
+    "        [str(move) for move in board.legal_moves]\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "67742daa-9d9a-46b3-9466-beb96d535334",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from IPython.display import SVG\n",
+    "\n",
+    "def make_move(\n",
+    "    move: Annotated[str, \"A move in UCI format.\"]\n",
+    ") -> Annotated[str, \"Result of the move.\"]:\n",
+    "    move = chess.Move.from_uci(move)\n",
+    "    board.push_uci(str(move))\n",
+    "    global made_move\n",
+    "    made_move = True\n",
+    "    \n",
+    "    svg_str = chess.svg.board(\n",
+    "            board,\n",
+    "            arrows=[(move.from_square, move.to_square)],\n",
+    "            fill={move.from_square: \"gray\"},\n",
+    "            size=200\n",
+    "        )\n",
+    "    display(\n",
+    "        SVG(data=svg_str)\n",
+    "    )\n",
+    "    \n",
+    "    # Get the piece name.\n",
+    "    piece = board.piece_at(move.to_square)\n",
+    "    piece_symbol = piece.unicode_symbol()\n",
+    "    piece_name = (\n",
+    "        chess.piece_name(piece.piece_type).capitalize()\n",
+    "        if piece_symbol.isupper()\n",
+    "        else chess.piece_name(piece.piece_type)\n",
+    "    )\n",
+    "    return f\"Moved {piece_name} ({piece_symbol}) from \"\\\n",
+    "    f\"{chess.SQUARE_NAMES[move.from_square]} to \"\\\n",
+    "    f\"{chess.SQUARE_NAMES[move.to_square]}.\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e84508c0-0465-4be8-a97b-2e702265bcfb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# base url from https://console.groq.com/docs/openai\n",
+    "config_list = [\n",
+    "    {\n",
+    "        \"model\": \"llama3-70b-8192\",\n",
+    "        \"base_url\": \"https://api.groq.com/openai/v1\",\n",
+    "        'api_key': 'your_groq_api_key', # get a free key at https://console.groq.com/keys\n",
+    "    },\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "86dbb782-61f0-4b61-aab5-41fd12c26f51",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen import ConversableAgent\n",
+    "\n",
+    "# Player white agent\n",
+    "player_white = ConversableAgent(\n",
+    "    name=\"Player White\",\n",
+    "    system_message=\"You are a chess player and you play as white. \"\n",
+    "    \"First call get_legal_moves(), to get a list of legal moves in UCI format. \"\n",
+    "    \"Then call make_move(move) to make a move. Finally, tell the proxy what you have moved and ask the black to move\", # added \"Finally...\" to make the agents work\n",
+    "    llm_config={\"config_list\": config_list,\n",
+    "                \"temperature\": 0,\n",
+    "               },\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1c57411c-183a-44ea-95ab-33c0e97feb74",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Player black agent\n",
+    "player_black = ConversableAgent(\n",
+    "    name=\"Player Black\",\n",
+    "    system_message=\"You are a chess player and you play as black. \"\n",
+    "    \"First call get_legal_moves(), to get a list of legal moves in UCI format. \"\n",
+    "    \"Then call make_move(move) to make a move. Finally, tell the proxy what you have moved and ask the white to move\", # added \"Finally...\" to make the agents work\n",
+    "    llm_config={\"config_list\": config_list,\n",
+    "                \"temperature\": 0,\n",
+    "               },)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "60e5cb2d-4273-45a9-af40-0ffb1ada0009",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def check_made_move(msg):\n",
+    "    global made_move\n",
+    "    if made_move:\n",
+    "        made_move = False\n",
+    "        return True\n",
+    "    else:\n",
+    "        return False\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "be4c7b55-9d50-4aa8-ae4b-3b959ffbb298",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "board_proxy = ConversableAgent(\n",
+    "    name=\"Board Proxy\",\n",
+    "    llm_config=False,\n",
+    "    is_termination_msg=check_made_move,\n",
+    "    default_auto_reply=\"Please make a move.\",\n",
+    "    human_input_mode=\"NEVER\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e122875c-8bff-4212-8a1b-5f91d253fdd7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen import register_function"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20edcb8c-5b7b-438e-b476-1cb16d14ef62",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for caller in [player_white, player_black]:\n",
+    "    register_function(\n",
+    "        get_legal_moves,\n",
+    "        caller=caller,\n",
+    "        executor=board_proxy,\n",
+    "        name=\"get_legal_moves\",\n",
+    "        description=\"Call this tool to get all legal moves in UCI format.\",\n",
+    "    )\n",
+    "    \n",
+    "    register_function(\n",
+    "        make_move,\n",
+    "        caller=caller,\n",
+    "        executor=board_proxy,\n",
+    "        name=\"make_move\",\n",
+    "        description=\"Call this tool to make a move.\",\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b254ea02-0a81-4e9f-91fa-788dead9ffb8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "player_black.llm_config[\"tools\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3715f56c-8ab8-4563-8f00-233beb3959b9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "player_white.register_nested_chats(\n",
+    "    trigger=player_black,\n",
+    "    chat_queue=[\n",
+    "        {\n",
+    "            \"sender\": board_proxy,\n",
+    "            \"recipient\": player_white,\n",
+    "            \"summary_method\": \"last_msg\",\n",
+    "        }\n",
+    "    ],\n",
+    ")\n",
+    "\n",
+    "player_black.register_nested_chats(\n",
+    "    trigger=player_white,\n",
+    "    chat_queue=[\n",
+    "        {\n",
+    "            \"sender\": board_proxy,\n",
+    "            \"recipient\": player_black,\n",
+    "            \"summary_method\": \"last_msg\",\n",
+    "        }\n",
+    "    ],\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "eda4f544-ab4c-4e9e-bceb-f93ad57c4026",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "board = chess.Board()\n",
+    "\n",
+    "chat_result = player_black.initiate_chat(\n",
+    "    player_white,\n",
+    "    message=\"Let's play chess! Your move.\",\n",
+    "    max_turns=3,\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/recipes/quickstart/agents/dlai/AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb b/recipes/quickstart/agents/dlai/AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..6f29e4e73df12db1368886acad70a33bf1f5246a
--- /dev/null
+++ b/recipes/quickstart/agents/dlai/AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb
@@ -0,0 +1,784 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "de56ee05-3b71-43c9-8cbf-6ad9b3233f38",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/dlai/AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "16ba1896-6867-4c68-9951-b0aadb819782",
+   "metadata": {},
+   "source": [
+    "This notebook ports the DeepLearning.AI short course [AI Agents in LangGraph Lesson 1 Build an Agent from Scratch](https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/lesson/2/build-an-agent-from-scratch) to using Llama 3, with a bonus section that ports the agent from scratch code to using LangGraph, introduced in [Lesson 2 LangGraph Components](https://learn.deeplearning.ai/courses/ai-agents-in-langgraph/lesson/3/langgraph-components) of the course. \n",
+    "\n",
+    "You should take the course, especially the first two lessons, before or after going through this notebook, to have a deeper understanding."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9b168b57-6ff8-41d1-8f8f-a0c4a5ff108e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install groq"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2c067d5f-c58c-47c0-8ccd-9a8710711bf7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os \n",
+    "from groq import Groq\n",
+    "\n",
+    "os.environ['GROQ_API_KEY'] = 'your_groq_api_key' # get a free key at https://console.groq.com/keys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5f7d8d95-36fb-4b14-bd28-99d305c0fded",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a quick sanity test of calling Llama 3 70b on Groq \n",
+    "# see https://console.groq.com/docs/text-chat for more info\n",
+    "client = Groq()\n",
+    "chat_completion = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=[{\"role\": \"user\", \"content\": \"what are the words Charlotte wrote for the pig?\"}]\n",
+    ")\n",
+    "print(chat_completion.choices[0].message.content)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f758c771-5afe-4008-9d7f-92a6f526778b",
+   "metadata": {},
+   "source": [
+    "### ReAct Agent from Scratch"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c00c0479-0913-4a92-8991-fe5a9a936bdb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "client = Groq()\n",
+    "model = \"llama3-8b-8192\" # this model works with the prompt below only for the first simpler example; you'll see how to modify the prompt to make it work for a more complicated question\n",
+    "#model = \"llama3-70b-8192\" # this model works with the prompt below for both example questions \n",
+    "\n",
+    "class Agent:\n",
+    "    def __init__(self, system=\"\"):\n",
+    "        self.system = system\n",
+    "        self.messages = []\n",
+    "        if self.system:\n",
+    "            self.messages.append({\"role\": \"system\", \"content\": system})\n",
+    "\n",
+    "    def __call__(self, message):\n",
+    "        self.messages.append({\"role\": \"user\", \"content\": message})\n",
+    "        result = self.execute()\n",
+    "        self.messages.append({\"role\": \"assistant\", \"content\": result})\n",
+    "        return result\n",
+    "\n",
+    "    def execute(self):\n",
+    "        completion = client.chat.completions.create(\n",
+    "                        model=model,\n",
+    "                        temperature=0,\n",
+    "                        messages=self.messages)\n",
+    "        return completion.choices[0].message.content\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f766fb44-e8c2-43db-af83-8b9053a334ef",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prompt = \"\"\"\n",
+    "You run in a loop of Thought, Action, PAUSE, Observation.\n",
+    "At the end of the loop you output an Answer\n",
+    "Use Thought to describe your thoughts about the question you have been asked.\n",
+    "Use Action to run one of the actions available to you - then return PAUSE.\n",
+    "Observation will be the result of running those actions.\n",
+    "\n",
+    "Your available actions are:\n",
+    "\n",
+    "calculate:\n",
+    "e.g. calculate: 4 * 7 / 3\n",
+    "Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary\n",
+    "\n",
+    "average_dog_weight:\n",
+    "e.g. average_dog_weight: Collie\n",
+    "returns average weight of a dog when given the breed\n",
+    "\n",
+    "Example session:\n",
+    "\n",
+    "Question: How much does a Bulldog weigh?\n",
+    "Thought: I should look the dogs weight using average_dog_weight\n",
+    "Action: average_dog_weight: Bulldog\n",
+    "PAUSE\n",
+    "\n",
+    "You will be called again with this:\n",
+    "\n",
+    "Observation: A Bulldog weights 51 lbs\n",
+    "\n",
+    "You then output:\n",
+    "\n",
+    "Answer: A bulldog weights 51 lbs\n",
+    "\"\"\".strip()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "93ab1290-625b-4b69-be4d-210fca43a513",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def calculate(what):\n",
+    "    return eval(what)\n",
+    "\n",
+    "def average_dog_weight(name):\n",
+    "    if name in \"Scottish Terrier\": \n",
+    "        return(\"Scottish Terriers average 20 lbs\")\n",
+    "    elif name in \"Border Collie\":\n",
+    "        return(\"a Border Collies average weight is 37 lbs\")\n",
+    "    elif name in \"Toy Poodle\":\n",
+    "        return(\"a toy poodles average weight is 7 lbs\")\n",
+    "    else:\n",
+    "        return(\"An average dog weights 50 lbs\")\n",
+    "\n",
+    "known_actions = {\n",
+    "    \"calculate\": calculate,\n",
+    "    \"average_dog_weight\": average_dog_weight\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "52f900d9-15f0-4f48-9bf3-6165c70e4b42",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot = Agent(prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f1b612c9-2a7d-4325-b36f-182899252538",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result = abot(\"How much does a toy poodle weigh?\")\n",
+    "print(result)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e27dda33-c76d-4a19-8aef-02ba5389e7a6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b6e85ca1-85af-43e3-a5ea-c5faf0935361",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# manually call the external func (tool) for now\n",
+    "result = average_dog_weight(\"Toy Poodle\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a9263ac7-fa81-4c95-91c8-a6c0741ab7f8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cb309710-0693-422f-a739-38ca9455e497",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "next_prompt = \"Observation: {}\".format(result)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bd567e42-b5a9-4e4e-8807-38bb1d6c80a4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "255bf148-bf85-40c5-b33e-d849a42c127b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c286a6d5-b5b3-473b-bad6-aa6f1468e603",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot = Agent(prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f5e13b6e-e68e-45c2-b688-a257b531e482",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "question = \"\"\"I have 2 dogs, a border collie and a scottish terrier. \\\n",
+    "What is their combined weight\"\"\"\n",
+    "abot(question)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "049202f1-585f-42c3-8511-08eca7e5ed0e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f086f19a-30fe-40ca-aafb-f1ce7c28982d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "next_prompt = \"Observation: {}\".format(average_dog_weight(\"Border Collie\"))\n",
+    "print(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1747c78d-642d-4f57-81a0-27218eab3958",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "85809d8f-cd95-4e0a-acb7-9705817bea70",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e77591fa-4e04-4eb6-8a40-ca26a71765f9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "next_prompt = \"Observation: {}\".format(average_dog_weight(\"Scottish Terrier\"))\n",
+    "print(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1f72b638-de07-4972-bbdb-8c8602f3d143",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "eb5bf29d-22f9-4c0d-aea6-7e9c99e71835",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a67add73-b3c3-42be-9c54-f8a6ac828869",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "next_prompt = \"Observation: {}\".format(eval(\"37 + 20\"))\n",
+    "print(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "801fda04-9756-4ae4-9990-559216d38be8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot(next_prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56f7b9f4-289f-498d-8bc8-da9bb7365d52",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot.messages"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "155ee9b3-a4f9-43dd-b23e-0f268f72f198",
+   "metadata": {},
+   "source": [
+    "### Automate the ReAct action execution"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5b2196f8-88e6-4eb4-82b0-cf251a07e313",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import re\n",
+    "\n",
+    "# automate the action execution above to make the whole ReAct (Thought - Action - Observation) process fully automated\n",
+    "action_re = re.compile('^Action: (\\w+): (.*)$')   # python regular expression to select an action"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ea5710d6-5d9a-46ff-a275-46311257d9fd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def query(question, max_turns=5):\n",
+    "    i = 0\n",
+    "    bot = Agent(prompt) # set system prompt\n",
+    "    next_prompt = question\n",
+    "    while i < max_turns:\n",
+    "        i += 1\n",
+    "        result = bot(next_prompt)\n",
+    "        print(result)\n",
+    "        actions = [\n",
+    "            action_re.match(a)\n",
+    "            for a in result.split('\\n')\n",
+    "            if action_re.match(a)\n",
+    "        ]\n",
+    "        if actions:\n",
+    "            # There is an action to run\n",
+    "            action, action_input = actions[0].groups()\n",
+    "            if action not in known_actions:\n",
+    "                raise Exception(\"Unknown action: {}: {}\".format(action, action_input))\n",
+    "            print(\" -- running {} {}\".format(action, action_input))\n",
+    "\n",
+    "            # key to make the agent process fully automated:\n",
+    "            # programmatically call the external func with arguments, with the info returned by LLM\n",
+    "            observation = known_actions[action](action_input) \n",
+    "\n",
+    "            print(\"Observation:\", observation)\n",
+    "            next_prompt = \"Observation: {}\".format(observation)\n",
+    "        else:\n",
+    "            return"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "232d0818-7580-424b-9538-1e2b1c15360b",
+   "metadata": {},
+   "source": [
+    "#### Using model \"llama3-8b-8192\", the code below will cause an invalid syntax error because the Action returned is calculate: (average_dog_weight: Border Collie) + (average_dog_weight: Scottish Terrier), instead of the expected \"Action: average_dog_weight: Border Collie\"."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bb0095f3-b3f1-48cf-b3fb-36049b6b91f0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "question = \"\"\"I have 2 dogs, a border collie and a scottish terrier. \\\n",
+    "What is their combined weight\"\"\"\n",
+    "query(question)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "952ffac8-5ec2-48f3-8049-d03c130dad0d",
+   "metadata": {},
+   "source": [
+    "#### Prompt engineering in action:\n",
+    "REPLACE \"Use Thought to describe your thoughts about the question you have been asked. Use Action to run one of the actions available to you - then return PAUSE.\" with \n",
+    "\"First, use Thought to describe your thoughts about the question you have been asked, and generate Action to run one of the actions available to you, then return PAUSE.\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ec791ad6-b39a-4f46-b149-704c23d6c506",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prompt = \"\"\"\n",
+    "You run in a loop of Thought, Action, PAUSE, Observation.\n",
+    "At the end of the loop you output an Answer.\n",
+    "First, use Thought to describe your thoughts about the question you have been asked, and generate Action to run one of the actions available to you, then return PAUSE.\n",
+    "Observation will be the result of running those actions.\n",
+    "\n",
+    "Your available actions are:\n",
+    "\n",
+    "calculate:\n",
+    "e.g. calculate: 4 * 7 / 3\n",
+    "Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary\n",
+    "\n",
+    "average_dog_weight:\n",
+    "e.g. average_dog_weight: Collie\n",
+    "returns average weight of a dog when given the breed\n",
+    "\n",
+    "Example session:\n",
+    "\n",
+    "Question: How much does a Bulldog weigh?\n",
+    "Thought: I should look the dogs weight using average_dog_weight\n",
+    "Action: average_dog_weight: Bulldog\n",
+    "PAUSE\n",
+    "\n",
+    "You will be called again with this:\n",
+    "\n",
+    "Observation: A Bulldog weights 51 lbs\n",
+    "\n",
+    "You then output:\n",
+    "\n",
+    "Answer: A bulldog weights 51 lbs\n",
+    "\"\"\".strip()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "90bcf731-4d89-473b-98e1-53826da149f9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "question = \"\"\"I have 2 dogs, a border collie and a scottish terrier. \\\n",
+    "What is their combined weight\"\"\"\n",
+    "query(question)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "09d30a8f-3783-4ee5-a48e-7d89e22a508a",
+   "metadata": {},
+   "source": [
+    "### Bonus: Port the Agent Implementation to LangGraph"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6b5ed82e-2d70-45ac-b026-904da211f81a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install langchain\n",
+    "!pip install langgraph\n",
+    "!pip install langchain_openai\n",
+    "!pip install langchain_community\n",
+    "!pip install httpx\n",
+    "!pip install langchain-groq"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8ed3b90-688e-4aa2-8e43-e951af29a57f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langgraph.graph import StateGraph, END\n",
+    "from typing import TypedDict, Annotated\n",
+    "import operator\n",
+    "from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage\n",
+    "from langchain_openai import ChatOpenAI\n",
+    "from langchain_community.tools.tavily_search import TavilySearchResults"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c555f945-7db0-4dc9-9ea5-5632bf941fe4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_groq import ChatGroq\n",
+    "\n",
+    "model = ChatGroq(temperature=0, model_name=\"llama3-8b-8192\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7755a055-fa1f-474f-b558-230cc5a67a33",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_core.tools import tool\n",
+    "from langgraph.prebuilt import ToolNode\n",
+    "\n",
+    "@tool\n",
+    "def calculate(what):\n",
+    "    \"\"\"Runs a calculation and returns the number.\"\"\"\n",
+    "    return eval(what)\n",
+    "\n",
+    "@tool\n",
+    "def average_dog_weight(name):\n",
+    "    \"\"\"Returns the average weight of a dog.\"\"\"\n",
+    "    if name in \"Scottish Terrier\":\n",
+    "        return(\"Scottish Terriers average 20 lbs\")\n",
+    "    elif name in \"Border Collie\":\n",
+    "        return(\"a Border Collies average weight is 37 lbs\")\n",
+    "    elif name in \"Toy Poodle\":\n",
+    "        return(\"a toy poodles average weight is 7 lbs\")\n",
+    "    else:\n",
+    "        return(\"An average dog weights 50 lbs\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a003862-8fd2-45b1-8fe4-78d7cd5888d9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prompt = \"\"\"\n",
+    "You run in a loop of Thought, Action, Observation.\n",
+    "At the end of the loop you output an Answer\n",
+    "Use Thought to describe your thoughts about the question you have been asked.\n",
+    "Use Action to run one of the actions available to you.\n",
+    "Observation will be the result of running those actions.\n",
+    "\n",
+    "Your available actions are:\n",
+    "\n",
+    "calculate:\n",
+    "e.g. calculate: 4 * 7 / 3\n",
+    "Runs a calculation and returns the number - uses Python so be sure to use floating point syntax if necessary\n",
+    "\n",
+    "average_dog_weight:\n",
+    "e.g. average_dog_weight: Collie\n",
+    "returns average weight of a dog when given the breed\n",
+    "\n",
+    "Example session:\n",
+    "\n",
+    "Question: How much does a Bulldog weigh?\n",
+    "Thought: I should look the dogs weight using average_dog_weight\n",
+    "Action: average_dog_weight: Bulldog\n",
+    "\n",
+    "You will be called again with this:\n",
+    "\n",
+    "Observation: A Bulldog weights 51 lbs\n",
+    "\n",
+    "You then output:\n",
+    "\n",
+    "Answer: A bulldog weights 51 lbs\n",
+    "\"\"\".strip()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "471c0aa7-547f-4d5f-9e99-73ef47101d41",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class AgentState(TypedDict):\n",
+    "    messages: Annotated[list[AnyMessage], operator.add]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "530e8a60-085a-4485-af03-bafc6b2c1d88",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class Agent:\n",
+    "\n",
+    "    def __init__(self, model, tools, system=\"\"):\n",
+    "        self.system = system\n",
+    "        graph = StateGraph(AgentState)\n",
+    "        graph.add_node(\"llm\", self.call_llm)\n",
+    "        graph.add_node(\"action\", self.take_action)\n",
+    "        graph.add_conditional_edges(\n",
+    "            \"llm\",\n",
+    "            self.exists_action,\n",
+    "            {True: \"action\", False: END}\n",
+    "        )\n",
+    "        graph.add_edge(\"action\", \"llm\")\n",
+    "        graph.set_entry_point(\"llm\")\n",
+    "        self.graph = graph.compile()\n",
+    "        self.tools = {t.name: t for t in tools}\n",
+    "        self.model = model.bind_tools(tools)\n",
+    "\n",
+    "    def exists_action(self, state: AgentState):\n",
+    "        result = state['messages'][-1]\n",
+    "        return len(result.tool_calls) > 0\n",
+    "\n",
+    "    def call_llm(self, state: AgentState):\n",
+    "        messages = state['messages']\n",
+    "        if self.system:\n",
+    "            messages = [SystemMessage(content=self.system)] + messages\n",
+    "        message = self.model.invoke(messages)\n",
+    "        return {'messages': [message]}\n",
+    "\n",
+    "    def take_action(self, state: AgentState):\n",
+    "        tool_calls = state['messages'][-1].tool_calls\n",
+    "        results = []\n",
+    "        for t in tool_calls:\n",
+    "            print(f\"Calling: {t}\")\n",
+    "            result = self.tools[t['name']].invoke(t['args'])\n",
+    "            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))\n",
+    "        print(\"Back to the model!\")\n",
+    "        return {'messages': results}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3db8dcea-d4eb-46df-bd90-55acd4c5520a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "abot = Agent(model, [calculate, average_dog_weight], system=prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "72c62e36-9321-40d2-86d8-b3c9caf3020f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [HumanMessage(content=\"How much does a Toy Poodle weigh?\")]\n",
+    "result = abot.graph.invoke({\"messages\": messages})\n",
+    "result['messages'], result['messages'][-1].content\n",
+    "\n",
+    "# the code above will cause an error because Llama 3 8B incorrectly returns an extra \"calculate\" tool call"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56b4c622-b306-4aa3-84e6-4ccd6d6f272f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# using the Llama 3 70B will fix the error\n",
+    "model = ChatGroq(temperature=0, model_name=\"llama3-70b-8192\")\n",
+    "abot = Agent(model, [calculate, average_dog_weight], system=prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "629ca375-979a-45d7-bad8-7240ae9ad844",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Toy Poodle case sensitive here - can be fixed easily by modifying def average_dog_weight\n",
+    "messages = [HumanMessage(content=\"How much does a Toy Poodle weigh?\")]\n",
+    "result = abot.graph.invoke({\"messages\": messages})\n",
+    "result['messages'], result['messages'][-1].content"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "30e253ae-e742-4df8-92e6-fadfc3826003",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [HumanMessage(content=\"I have 2 dogs, a border collie and a scottish terrier. What are their average weights? Total weight?\")]\n",
+    "result = abot.graph.invoke({\"messages\": messages})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "238ec75c-4ff6-4561-bb0a-895530a61e47",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result['messages'], result['messages'][-1].content"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/recipes/quickstart/agents/dlai/Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb b/recipes/quickstart/agents/dlai/Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..989344445689febd8a97cd4312ad90a0afbad3aa
--- /dev/null
+++ b/recipes/quickstart/agents/dlai/Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb
@@ -0,0 +1,355 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/dlai/Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This notebook ports the DeepLearning.AI short course [Building Agentic RAG with Llamaindex Lesson 1 Router Engine](https://learn.deeplearning.ai/courses/building-agentic-rag-with-llamaindex/lesson/2/router-query-engine) to using Llama 3. \n",
+    "\n",
+    "You should take the course before or after going through this notebook to have a deeper understanding."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "eiJsOa29ej7G",
+    "outputId": "094c60c2-d782-4baf-bfc3-913f53ac1ff3"
+   },
+   "outputs": [],
+   "source": [
+    "!pip install llama-index\n",
+    "!pip install llama-index-embeddings-huggingface\n",
+    "!pip install llama-index-llms-groq"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "Wv_-jm5XGoWo",
+    "outputId": "d0dd37f4-ca33-4704-d221-6ea80ee09eb5"
+   },
+   "outputs": [],
+   "source": [
+    "import os \n",
+    "os.environ['GROQ_API_KEY'] = 'your_groq_api_key' # get a free key at https://console.groq.com/keys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "aUi629qVUsG5",
+    "outputId": "015a6a1a-a9e1-4c4f-dcaf-7d341b1a0b0e"
+   },
+   "outputs": [],
+   "source": [
+    "!wget \"https://openreview.net/pdf?id=VtmBAGCN7o\" -O metagpt.pdf"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "auZQalH5J7CU"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.core import SimpleDirectoryReader\n",
+    "\n",
+    "documents = SimpleDirectoryReader(input_files=[\"metagpt.pdf\"]).load_data()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "GFfUjJypJ7Eq"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.core.node_parser import SentenceSplitter\n",
+    "\n",
+    "splitter = SentenceSplitter(chunk_size=1024)\n",
+    "nodes = splitter.get_nodes_from_documents(documents)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "bguUp2D5LhST"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.groq import Groq\n",
+    "\n",
+    "from llama_index.core import Settings, VectorStoreIndex\n",
+    "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
+    "\n",
+    "llm = Groq(model=\"llama3-8b-8192\") #, api_key=GROQ_API_TOKEN)\n",
+    "Settings.llm = llm\n",
+    "#llm.complete(\"Who wrote the book godfather\").text\n",
+    "\n",
+    "Settings.embed_model = HuggingFaceEmbedding(\n",
+    "    model_name=\"BAAI/bge-small-en-v1.5\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "pkmnSHsMJ7Hg"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.core import SummaryIndex, VectorStoreIndex\n",
+    "\n",
+    "summary_index = SummaryIndex(nodes)\n",
+    "vector_index = VectorStoreIndex(nodes)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "BnMq-qO9ezcE"
+   },
+   "outputs": [],
+   "source": [
+    "summary_query_engine = summary_index.as_query_engine(\n",
+    "    response_mode=\"tree_summarize\",\n",
+    "    use_async=True,\n",
+    ")\n",
+    "vector_query_engine = vector_index.as_query_engine()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "vnK7YXlVLojh"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.core.tools import QueryEngineTool\n",
+    "\n",
+    "summary_tool = QueryEngineTool.from_defaults(\n",
+    "    query_engine=summary_query_engine,\n",
+    "    description=(\n",
+    "        \"Useful for summarization questions related to MetaGPT\"\n",
+    "    ),\n",
+    ")\n",
+    "\n",
+    "vector_tool = QueryEngineTool.from_defaults(\n",
+    "    query_engine=vector_query_engine,\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific context from the MetaGPT paper.\"\n",
+    "    ),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "EvLJilU4LomU"
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.core.query_engine.router_query_engine import RouterQueryEngine\n",
+    "from llama_index.core.selectors import LLMSingleSelector\n",
+    "\n",
+    "query_engine = RouterQueryEngine(\n",
+    "    selector=LLMSingleSelector.from_defaults(),\n",
+    "    query_engine_tools=[\n",
+    "        summary_tool,\n",
+    "        vector_tool,\n",
+    "    ],\n",
+    "    verbose=True\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "O_HfeD6TMCJf"
+   },
+   "outputs": [],
+   "source": [
+    "import nest_asyncio\n",
+    "\n",
+    "nest_asyncio.apply()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "KAjQW6M1LopK",
+    "outputId": "fd85f083-599f-40b0-8fed-c3c96b09c5c9"
+   },
+   "outputs": [],
+   "source": [
+    "response = query_engine.query(\"What is the summary of the document?\")\n",
+    "print(str(response))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "ySNeKv3PLosR",
+    "outputId": "9568d416-9ec1-4f50-dbbf-61a208205113"
+   },
+   "outputs": [],
+   "source": [
+    "print(len(response.source_nodes))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "KOP52yxpLou-",
+    "outputId": "168a6ff3-2a2a-4588-dc2f-978ae8c0bbc1"
+   },
+   "outputs": [],
+   "source": [
+    "response = query_engine.query(\n",
+    "    \"How do agents share information with other agents? This is not a summarization question.\"\n",
+    ")\n",
+    "print(str(response))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "luGziGzfMHbD"
+   },
+   "outputs": [],
+   "source": [
+    "def get_router_query_engine(file_path: str):\n",
+    "    \"\"\"Get router query engine.\"\"\"\n",
+    "\n",
+    "    documents = SimpleDirectoryReader(input_files=[file_path]).load_data()\n",
+    "\n",
+    "    splitter = SentenceSplitter(chunk_size=1024)\n",
+    "    nodes = splitter.get_nodes_from_documents(documents)\n",
+    "\n",
+    "    summary_index = SummaryIndex(nodes)\n",
+    "    vector_index = VectorStoreIndex(nodes)\n",
+    "\n",
+    "    summary_query_engine = summary_index.as_query_engine(\n",
+    "        response_mode=\"tree_summarize\",\n",
+    "        use_async=True,\n",
+    "    )\n",
+    "    vector_query_engine = vector_index.as_query_engine()\n",
+    "\n",
+    "    summary_tool = QueryEngineTool.from_defaults(\n",
+    "        query_engine=summary_query_engine,\n",
+    "        description=(\n",
+    "            \"Useful for summarization questions related to MetaGPT\"\n",
+    "        ),\n",
+    "    )\n",
+    "\n",
+    "    vector_tool = QueryEngineTool.from_defaults(\n",
+    "        query_engine=vector_query_engine,\n",
+    "        description=(\n",
+    "            \"Useful for retrieving specific context from the MetaGPT paper.\"\n",
+    "        ),\n",
+    "    )\n",
+    "\n",
+    "    query_engine = RouterQueryEngine(\n",
+    "        selector=LLMSingleSelector.from_defaults(),\n",
+    "        query_engine_tools=[\n",
+    "            summary_tool,\n",
+    "            vector_tool,\n",
+    "        ],\n",
+    "        verbose=True\n",
+    "    )\n",
+    "    return query_engine\n",
+    "\n",
+    "query_engine = get_router_query_engine(\"metagpt.pdf\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "SHqbTBxQNryl",
+    "outputId": "ca2f16e9-2274-42e6-fdd5-3218d8f0c3f8"
+   },
+   "outputs": [],
+   "source": [
+    "response = query_engine.query(\"Tell me about the ablation study results?\")\n",
+    "print(str(response))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "5r1MHbLOPT8Y"
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/recipes/quickstart/agents/dlai/Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb b/recipes/quickstart/agents/dlai/Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..755b503ad3c415d36d1d0bb5e5e25773d33b2b25
--- /dev/null
+++ b/recipes/quickstart/agents/dlai/Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb
@@ -0,0 +1,581 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "2ba1b4ef-3b96-4e7e-b5d0-155b839db73c",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/meta-llama/llama-recipes/blob/main/recipes/quickstart/agents/dlai/Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f91905c8-21ca-4d81-9614-b9c7344d08c3",
+   "metadata": {},
+   "source": [
+    "This notebook ports the DeepLearning.AI short course [Functions, Tools and Agents with LangChain Lesson 1 OpenAI Function Calling](https://learn.deeplearning.ai/courses/functions-tools-agents-langchain/lesson/2/openai-function-calling) to using Llama 3. \n",
+    "\n",
+    "You should take the course before or after going through this notebook to have a deeper understanding."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31bfe801-e24d-459b-8b3f-e91a34024368",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install groq"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "88659373-0deb-45eb-8934-0b02d70bd047",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "\n",
+    "# Example dummy function hard coded to return the same weather\n",
+    "# In production, this could be your backend API or an external API\n",
+    "def get_current_weather(location, unit=\"fahrenheit\"):\n",
+    "    \"\"\"Get the current weather in a given location\"\"\"\n",
+    "    weather_info = {\n",
+    "        \"location\": location,\n",
+    "        \"temperature\": \"72\",\n",
+    "        \"unit\": unit,\n",
+    "        \"forecast\": [\"sunny\", \"windy\"],\n",
+    "    }\n",
+    "    return json.dumps(weather_info)\n",
+    "\n",
+    "known_functions = {\n",
+    "    \"get_current_weather\": get_current_weather\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "359a584a-5b26-4497-afb4-72b63027edb9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# https://console.groq.com/docs/tool-use#models\n",
+    "# Groq API endpoints support tool use for programmatic execution of specified operations through requests with explicitly defined \n",
+    "# operations. With tool use, Groq API model endpoints deliver structured JSON output that can be used to directly invoke functions.\n",
+    "\n",
+    "from groq import Groq\n",
+    "import os\n",
+    "import json\n",
+    "\n",
+    "client = Groq(api_key = 'your_groq_api_key') # get a free key at https://console.groq.com/keys"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5cc17dc9-2827-4d39-a13d-a4ed5f53c8e6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "functions = [\n",
+    "    {\n",
+    "        \"name\": \"get_current_weather\",\n",
+    "        \"description\": \"Get the current weather in a given location\",\n",
+    "        \"parameters\": {\n",
+    "            \"type\": \"object\",\n",
+    "            \"properties\": {\n",
+    "                \"location\": {\n",
+    "                    \"type\": \"string\",\n",
+    "                    \"description\": \"The city and state, e.g. San Francisco, CA\",\n",
+    "                },\n",
+    "                \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n",
+    "            },\n",
+    "            \"required\": [\"location\"],\n",
+    "        },\n",
+    "    }\n",
+    "]\n",
+    "\n",
+    "tools = [\n",
+    "    {\n",
+    "        \"type\": \"function\",\n",
+    "        \"function\": {\n",
+    "            \"name\": \"get_current_weather\",\n",
+    "            \"description\": \"Get the current weather in a given location\",\n",
+    "            \"parameters\": {\n",
+    "                \"type\": \"object\",\n",
+    "                \"properties\": {\n",
+    "                    \"location\": {\n",
+    "                        \"type\": \"string\",\n",
+    "                        \"description\": \"The city and state, e.g. San Francisco, CA\",\n",
+    "                    },\n",
+    "                    \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n",
+    "                },\n",
+    "                \"required\": [\"location\"],\n",
+    "            },\n",
+    "        }\n",
+    "    }\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5a64d28e-b169-4855-b3c2-d6722c56394c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"What's the weather like in Boston?\"\n",
+    "    }\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a733c1e1-c7f2-4707-b1be-02179df0abc6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    #tools=tools, # you can also replace functions with tools, as specified in https://console.groq.com/docs/tool-use \n",
+    "    max_tokens=4096, \n",
+    "    temperature=0\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9058073d-cf91-4747-9860-7e2a1d774acf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ffd4ed64-0436-499e-a7e5-4224833b72f3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response_message = response.choices[0].message\n",
+    "response_message"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5458444a-a448-4c5b-b06c-47ab6cd25626",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response_message.content"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c669a048-1a3e-43e9-b98f-d0b6a3a0f4c0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response_message.function_call"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "27f3de5d-5110-486e-8b07-5086939d364d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "json.loads(response_message.function_call.arguments)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b69e6497-9e68-47d4-99ae-d45db6c1a8db",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "args = json.loads(response_message.function_call.arguments)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f41a7162-9ce8-4353-827b-f6f3bb278218",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "get_current_weather(**args)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bb0546f2-de55-417a-9b38-66787b673fb7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "function_call = response.choices[0].message.function_call\n",
+    "function_call"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0dd1fcf0-7105-4cad-82b5-22ce3b24fc07",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "function_call.name, function_call.arguments"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e6d58efe-0ada-48a2-b12b-6bff948a2983",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# by defining and using known_functions, we can programmatically call the function\n",
+    "function_response = known_functions[function_call.name](**json.loads(function_call.arguments))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cee6ca19-6924-4a7b-ba7f-7b1a33344ca0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "function_response"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8480be29-3326-4d95-8742-dff976a7ab2e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# add the message returned by tool and query LLM again to get final answer\n",
+    "messages.append(\n",
+    "{\n",
+    "    \"role\": \"function\",\n",
+    "    \"name\": function_call.name,\n",
+    "    \"content\": function_response,\n",
+    "}\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a42e35f-c601-4c14-8de5-bdbba01dc622",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f9a2d1ee-9e41-480a-a5cc-62c273d3a179",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    temperature=0\n",
+    ")\n",
+    "\n",
+    "response.choices[0].message.content"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "54019c56-11cf-465a-a440-296081adee93",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"hi!\",\n",
+    "    }\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "922724ec-1744-4ccf-9a86-5f1823dce0e0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call=\"none\", # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "04c3152a-f51b-45cb-a27c-0672337520b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "582fac7c-0de7-420c-8150-038e74be4b9a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response_message = response.choices[0].message\n",
+    "response_message"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e3d62357-04c9-459c-b36a-89e58444ea63",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"hi!\",\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call=\"auto\", # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "632df69d-85bc-4e44-814c-7c1d2fe97228",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"hi!\",\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call=\"none\", # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c773ab17-a620-44eb-877f-9e0bc23fb00b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"What's the weather in Boston?\",\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call=\"none\", # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c4a8ee80-83ae-4189-837c-54bb9c93c315",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"hi!\",\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call={\"name\": \"get_current_weather\"}, # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "daa5801a-2e71-4630-a8cd-7e84d1214f51",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": \"What's the weather like in Boston!\",\n",
+    "    }\n",
+    "]\n",
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    "    functions=functions,\n",
+    "    function_call={\"name\": \"get_current_weather\"}, # default is auto (let LLM decide whether to use function call or not); can also be \"none\", or a dict {\"name\": \"func_name\"}\n",
+    "    temperature=0\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "de5924d4-4225-48d1-a390-e44f3167d547",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "function_call = response.choices[0].message.function_call\n",
+    "function_call.name, function_call.arguments"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fb9f3340-b905-47f3-a478-cf3d786faa1f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "args = json.loads(response.choices[0].message.function_call.arguments)\n",
+    "observation = known_functions[function_call.name](**args)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3c31e9b5-99ed-46f3-8849-133c71ea87d4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "observation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b73c550e-5aa2-49de-8422-0c3e706f1df4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages.append(\n",
+    "        {\n",
+    "            \"role\": \"function\",\n",
+    "            \"name\": function_call.name,\n",
+    "            \"content\": observation,\n",
+    "        }\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c60302f1-07e2-4f22-bd60-b54e1ea2e3db",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a35f7f3d-4e39-4744-b5e3-2065e67eea28",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = client.chat.completions.create(\n",
+    "    model=\"llama3-70b-8192\",\n",
+    "    messages=messages,\n",
+    ")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7d4745e1-0477-4b6b-84de-9c82e0bc2452",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response.choices[0].message.content"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/recipes/quickstart/agents/dlai/README.md b/recipes/quickstart/agents/dlai/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..25a4029a1196d30d6c4309624cdced2bc8f3efd0
--- /dev/null
+++ b/recipes/quickstart/agents/dlai/README.md
@@ -0,0 +1,11 @@
+# Quickstart Llama 3 Powered Agent Examples Ported from DeepLearning.ai Short Courses
+
+The notebooks in this folder are ported from the 4 recent agent short courses on [Deeplearning.ai](https://www.deeplearning.ai) to use Llama 3 to build agent apps from scratch or with open source frameworks (LangChain, LlamaIndex, AutoGen).
+
+1. [Functions Tools and Agents with LangChain L1 Function Calling](Functions_Tools_and_Agents_with_LangChain_L1_Function_Calling.ipynb)
+
+2. [AI Agents in LangGraph L1 Build an Agent from Scratch](AI_Agents_in_LangGraph_L1_Build_an_Agent_from_Scratch.ipynb)
+
+3. [Building Agentic RAG with Llamaindex L1 Router Engine](Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb)
+
+4. [AI Agentic Design Patterns with AutoGen L4 Tool Use and Conversational Chess](AI_Agentic_Design_Patterns_with_AutoGen_L4_Tool_Use_and_Conversational_Chess.ipynb)