diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb
index d8a6a256fccf358f9ad01a38c9c73aced7c7bdb0..b2dc170fb7b1f4de95163e85de977622163c9482 100644
--- a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb
+++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb
@@ -8,12 +8,14 @@
     "\n",
     "This notebook ports the DeepLearning.AI short course [Building Agentic RAG with Llamaindex Lesson 2 Tool Calling](https://learn.deeplearning.ai/courses/building-agentic-rag-with-llamaindex/lesson/3/tool-calling) to using Llama 3. \n",
     "\n",
-    "You should take the course before or after going through this notebook to have a deeper understanding."
+    "You should take the course before or after going through this notebook to have a deeper understanding.\n",
+    "\n",
+    "Note: Unlike Lesson 1 where we use Llama 3 70b on [Groq](https://groq.com/), this lesson uses Llama 3 on [Fireworks.ai](https://fireworks.ai/) to overcome the rate limit issue with Groq on some summary tool calling."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -21,226 +23,16 @@
     "id": "eiJsOa29ej7G",
     "outputId": "edc5d39c-f379-4410-db9f-998db9c099be"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Requirement already satisfied: llama-index in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (0.10.44)\n",
-      "Requirement already satisfied: llama-index-agent-openai<0.3.0,>=0.1.4 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.2.7)\n",
-      "Requirement already satisfied: llama-index-cli<0.2.0,>=0.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.12)\n",
-      "Requirement already satisfied: llama-index-core==0.10.44 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.10.44)\n",
-      "Requirement already satisfied: llama-index-embeddings-openai<0.2.0,>=0.1.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.10)\n",
-      "Requirement already satisfied: llama-index-indices-managed-llama-cloud<0.2.0,>=0.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.6)\n",
-      "Requirement already satisfied: llama-index-legacy<0.10.0,>=0.9.48 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.9.48)\n",
-      "Requirement already satisfied: llama-index-llms-openai<0.2.0,>=0.1.13 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.22)\n",
-      "Requirement already satisfied: llama-index-multi-modal-llms-openai<0.2.0,>=0.1.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.6)\n",
-      "Requirement already satisfied: llama-index-program-openai<0.2.0,>=0.1.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.6)\n",
-      "Requirement already satisfied: llama-index-question-gen-openai<0.2.0,>=0.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.3)\n",
-      "Requirement already satisfied: llama-index-readers-file<0.2.0,>=0.1.4 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.23)\n",
-      "Requirement already satisfied: llama-index-readers-llama-parse<0.2.0,>=0.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index) (0.1.4)\n",
-      "Requirement already satisfied: PyYAML>=6.0.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (6.0.1)\n",
-      "Requirement already satisfied: SQLAlchemy>=1.4.49 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core==0.10.44->llama-index) (2.0.30)\n",
-      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (3.9.5)\n",
-      "Requirement already satisfied: dataclasses-json in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (0.6.7)\n",
-      "Requirement already satisfied: deprecated>=1.2.9.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.2.14)\n",
-      "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.0.8)\n",
-      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (2024.6.0)\n",
-      "Requirement already satisfied: httpx in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (0.27.0)\n",
-      "Requirement already satisfied: llamaindex-py-client<0.2.0,>=0.1.18 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (0.1.19)\n",
-      "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.6.0)\n",
-      "Requirement already satisfied: networkx>=3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (3.3)\n",
-      "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (3.8.1)\n",
-      "Requirement already satisfied: numpy in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.26.4)\n",
-      "Requirement already satisfied: openai>=1.1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.33.0)\n",
-      "Requirement already satisfied: pandas in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (2.2.2)\n",
-      "Requirement already satisfied: pillow>=9.0.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (10.3.0)\n",
-      "Requirement already satisfied: requests>=2.31.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (2.32.3)\n",
-      "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (8.3.0)\n",
-      "Requirement already satisfied: tiktoken>=0.3.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (0.7.0)\n",
-      "Requirement already satisfied: tqdm<5.0.0,>=4.66.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (4.66.4)\n",
-      "Requirement already satisfied: typing-extensions>=4.5.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (4.12.2)\n",
-      "Requirement already satisfied: typing-inspect>=0.8.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (0.9.0)\n",
-      "Requirement already satisfied: wrapt in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core==0.10.44->llama-index) (1.16.0)\n",
-      "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.12.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (4.12.3)\n",
-      "Requirement already satisfied: pypdf<5.0.0,>=4.0.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (4.2.0)\n",
-      "Requirement already satisfied: striprtf<0.0.27,>=0.0.26 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (0.0.26)\n",
-      "Requirement already satisfied: llama-parse<0.5.0,>=0.4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-readers-llama-parse<0.2.0,>=0.1.2->llama-index) (0.4.4)\n",
-      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (1.3.1)\n",
-      "Requirement already satisfied: attrs>=17.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (23.2.0)\n",
-      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (1.4.1)\n",
-      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (6.0.5)\n",
-      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (1.9.4)\n",
-      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.44->llama-index) (4.0.3)\n",
-      "Requirement already satisfied: soupsieve>1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from beautifulsoup4<5.0.0,>=4.12.3->llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (2.5)\n",
-      "Requirement already satisfied: pydantic>=1.10 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.44->llama-index) (2.7.3)\n",
-      "Requirement already satisfied: anyio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core==0.10.44->llama-index) (4.4.0)\n",
-      "Requirement already satisfied: certifi in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core==0.10.44->llama-index) (2024.6.2)\n",
-      "Requirement already satisfied: httpcore==1.* in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core==0.10.44->llama-index) (1.0.5)\n",
-      "Requirement already satisfied: idna in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core==0.10.44->llama-index) (3.7)\n",
-      "Requirement already satisfied: sniffio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core==0.10.44->llama-index) (1.3.1)\n",
-      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpcore==1.*->httpx->llama-index-core==0.10.44->llama-index) (0.14.0)\n",
-      "Requirement already satisfied: click in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.44->llama-index) (8.1.7)\n",
-      "Requirement already satisfied: joblib in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.44->llama-index) (1.4.2)\n",
-      "Requirement already satisfied: regex>=2021.8.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.44->llama-index) (2024.5.15)\n",
-      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from openai>=1.1.0->llama-index-core==0.10.44->llama-index) (1.9.0)\n",
-      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core==0.10.44->llama-index) (3.3.2)\n",
-      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core==0.10.44->llama-index) (2.2.1)\n",
-      "Requirement already satisfied: greenlet!=0.4.17 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core==0.10.44->llama-index) (3.0.3)\n",
-      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from typing-inspect>=0.8.0->llama-index-core==0.10.44->llama-index) (1.0.0)\n",
-      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from dataclasses-json->llama-index-core==0.10.44->llama-index) (3.21.3)\n",
-      "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core==0.10.44->llama-index) (2.9.0.post0)\n",
-      "Requirement already satisfied: pytz>=2020.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core==0.10.44->llama-index) (2024.1)\n",
-      "Requirement already satisfied: tzdata>=2022.7 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core==0.10.44->llama-index) (2024.1)\n",
-      "Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from anyio->httpx->llama-index-core==0.10.44->llama-index) (1.2.1)\n",
-      "Requirement already satisfied: packaging>=17.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from marshmallow<4.0.0,>=3.18.0->dataclasses-json->llama-index-core==0.10.44->llama-index) (23.2)\n",
-      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.44->llama-index) (0.7.0)\n",
-      "Requirement already satisfied: pydantic-core==2.18.4 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.44->llama-index) (2.18.4)\n",
-      "Requirement already satisfied: six>=1.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas->llama-index-core==0.10.44->llama-index) (1.16.0)\n",
-      "Requirement already satisfied: llama-index-embeddings-huggingface in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (0.2.1)\n",
-      "Requirement already satisfied: huggingface-hub>=0.19.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (0.23.3)\n",
-      "Requirement already satisfied: llama-index-core<0.11.0,>=0.10.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-embeddings-huggingface) (0.10.44)\n",
-      "Requirement already satisfied: sentence-transformers<3.0.0,>=2.6.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-embeddings-huggingface) (2.7.0)\n",
-      "Requirement already satisfied: filelock in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.14.0)\n",
-      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2024.6.0)\n",
-      "Requirement already satisfied: packaging>=20.9 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (23.2)\n",
-      "Requirement already satisfied: pyyaml>=5.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (6.0.1)\n",
-      "Requirement already satisfied: requests in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2.32.3)\n",
-      "Requirement already satisfied: tqdm>=4.42.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.66.4)\n",
-      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.12.2)\n",
-      "Requirement already satisfied: aiohttp in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.9.5)\n",
-      "Requirement already satisfied: minijinja>=1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2.0.1)\n",
-      "Requirement already satisfied: SQLAlchemy>=1.4.49 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.0.30)\n",
-      "Requirement already satisfied: dataclasses-json in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.6.7)\n",
-      "Requirement already satisfied: deprecated>=1.2.9.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.2.14)\n",
-      "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.8)\n",
-      "Requirement already satisfied: httpx in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.27.0)\n",
-      "Requirement already satisfied: llamaindex-py-client<0.2.0,>=0.1.18 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.1.19)\n",
-      "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.6.0)\n",
-      "Requirement already satisfied: networkx>=3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.3)\n",
-      "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.8.1)\n",
-      "Requirement already satisfied: numpy in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.26.4)\n",
-      "Requirement already satisfied: openai>=1.1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.33.0)\n",
-      "Requirement already satisfied: pandas in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.2.2)\n",
-      "Requirement already satisfied: pillow>=9.0.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (10.3.0)\n",
-      "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (8.3.0)\n",
-      "Requirement already satisfied: tiktoken>=0.3.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.7.0)\n",
-      "Requirement already satisfied: typing-inspect>=0.8.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.9.0)\n",
-      "Requirement already satisfied: wrapt in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.16.0)\n",
-      "Requirement already satisfied: transformers<5.0.0,>=4.34.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (4.41.2)\n",
-      "Requirement already satisfied: torch>=1.11.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (2.3.1)\n",
-      "Requirement already satisfied: scikit-learn in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.5.0)\n",
-      "Requirement already satisfied: scipy in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.13.1)\n",
-      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.3.1)\n",
-      "Requirement already satisfied: attrs>=17.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (23.2.0)\n",
-      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.4.1)\n",
-      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (6.0.5)\n",
-      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.9.4)\n",
-      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.0.3)\n",
-      "Requirement already satisfied: pydantic>=1.10 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.7.3)\n",
-      "Requirement already satisfied: anyio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (4.4.0)\n",
-      "Requirement already satisfied: certifi in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.6.2)\n",
-      "Requirement already satisfied: httpcore==1.* in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.5)\n",
-      "Requirement already satisfied: idna in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.7)\n",
-      "Requirement already satisfied: sniffio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.3.1)\n",
-      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpcore==1.*->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.14.0)\n",
-      "Requirement already satisfied: click in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (8.1.7)\n",
-      "Requirement already satisfied: joblib in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.4.2)\n",
-      "Requirement already satisfied: regex>=2021.8.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.5.15)\n",
-      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from openai>=1.1.0->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.9.0)\n",
-      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests->huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.3.2)\n",
-      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests->huggingface-hub>=0.19.0->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2.2.1)\n",
-      "Requirement already satisfied: greenlet!=0.4.17 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.0.3)\n",
-      "Requirement already satisfied: sympy in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.12.1)\n",
-      "Requirement already satisfied: jinja2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (3.1.4)\n",
-      "Requirement already satisfied: tokenizers<0.20,>=0.19 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.34.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (0.19.1)\n",
-      "Requirement already satisfied: safetensors>=0.4.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.34.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (0.4.3)\n",
-      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from typing-inspect>=0.8.0->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.0)\n",
-      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from dataclasses-json->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.21.3)\n",
-      "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.9.0.post0)\n",
-      "Requirement already satisfied: pytz>=2020.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.1)\n",
-      "Requirement already satisfied: tzdata>=2022.7 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.1)\n",
-      "Requirement already satisfied: threadpoolctl>=3.1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from scikit-learn->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (3.5.0)\n",
-      "Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from anyio->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.2.1)\n",
-      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.7.0)\n",
-      "Requirement already satisfied: pydantic-core==2.18.4 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.18.4)\n",
-      "Requirement already satisfied: six>=1.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.16.0)\n",
-      "Requirement already satisfied: MarkupSafe>=2.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from jinja2->torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (2.1.5)\n",
-      "Requirement already satisfied: mpmath<1.4.0,>=1.1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from sympy->torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.3.0)\n",
-      "Requirement already satisfied: llama-index-llms-groq in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (0.1.4)\n",
-      "Requirement already satisfied: llama-index-core<0.11.0,>=0.10.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-llms-groq) (0.10.44)\n",
-      "Requirement already satisfied: llama-index-llms-openai-like<0.2.0,>=0.1.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-llms-groq) (0.1.3)\n",
-      "Requirement already satisfied: PyYAML>=6.0.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (6.0.1)\n",
-      "Requirement already satisfied: SQLAlchemy>=1.4.49 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.0.30)\n",
-      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.9.5)\n",
-      "Requirement already satisfied: dataclasses-json in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.6.7)\n",
-      "Requirement already satisfied: deprecated>=1.2.9.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.2.14)\n",
-      "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.8)\n",
-      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.6.0)\n",
-      "Requirement already satisfied: httpx in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.27.0)\n",
-      "Requirement already satisfied: llamaindex-py-client<0.2.0,>=0.1.18 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.1.19)\n",
-      "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.6.0)\n",
-      "Requirement already satisfied: networkx>=3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.3)\n",
-      "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.8.1)\n",
-      "Requirement already satisfied: numpy in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.26.4)\n",
-      "Requirement already satisfied: openai>=1.1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.33.0)\n",
-      "Requirement already satisfied: pandas in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.2.2)\n",
-      "Requirement already satisfied: pillow>=9.0.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (10.3.0)\n",
-      "Requirement already satisfied: requests>=2.31.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.32.3)\n",
-      "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (8.3.0)\n",
-      "Requirement already satisfied: tiktoken>=0.3.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.7.0)\n",
-      "Requirement already satisfied: tqdm<5.0.0,>=4.66.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.66.4)\n",
-      "Requirement already satisfied: typing-extensions>=4.5.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.12.2)\n",
-      "Requirement already satisfied: typing-inspect>=0.8.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.9.0)\n",
-      "Requirement already satisfied: wrapt in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.16.0)\n",
-      "Requirement already satisfied: llama-index-llms-openai<0.2.0,>=0.1.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.1.22)\n",
-      "Requirement already satisfied: transformers<5.0.0,>=4.37.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (4.41.2)\n",
-      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.3.1)\n",
-      "Requirement already satisfied: attrs>=17.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (23.2.0)\n",
-      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.4.1)\n",
-      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (6.0.5)\n",
-      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.9.4)\n",
-      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.0.3)\n",
-      "Requirement already satisfied: pydantic>=1.10 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.7.3)\n",
-      "Requirement already satisfied: anyio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.4.0)\n",
-      "Requirement already satisfied: certifi in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.6.2)\n",
-      "Requirement already satisfied: httpcore==1.* in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.5)\n",
-      "Requirement already satisfied: idna in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.7)\n",
-      "Requirement already satisfied: sniffio in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.3.1)\n",
-      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from httpcore==1.*->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.14.0)\n",
-      "Requirement already satisfied: click in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (8.1.7)\n",
-      "Requirement already satisfied: joblib in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.4.2)\n",
-      "Requirement already satisfied: regex>=2021.8.3 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.5.15)\n",
-      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from openai>=1.1.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.9.0)\n",
-      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.3.2)\n",
-      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.2.1)\n",
-      "Requirement already satisfied: greenlet!=0.4.17 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.0.3)\n",
-      "Requirement already satisfied: filelock in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (3.14.0)\n",
-      "Requirement already satisfied: huggingface-hub<1.0,>=0.23.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.23.3)\n",
-      "Requirement already satisfied: packaging>=20.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (23.2)\n",
-      "Requirement already satisfied: tokenizers<0.20,>=0.19 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.19.1)\n",
-      "Requirement already satisfied: safetensors>=0.4.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.4.3)\n",
-      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from typing-inspect>=0.8.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.0)\n",
-      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from dataclasses-json->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.21.3)\n",
-      "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.9.0.post0)\n",
-      "Requirement already satisfied: pytz>=2020.1 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.1)\n",
-      "Requirement already satisfied: tzdata>=2022.7 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.1)\n",
-      "Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from anyio->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.2.1)\n",
-      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.7.0)\n",
-      "Requirement already satisfied: pydantic-core==2.18.4 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.18.4)\n",
-      "Requirement already satisfied: six>=1.5 in /Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.16.0)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!pip install llama-index\n",
     "!pip install llama-index-embeddings-huggingface\n",
-    "!pip install llama-index-llms-groq"
+    "!pip install llama-index-llms-fireworks"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {
     "id": "NZ9l6k_3WncE"
    },
@@ -253,7 +45,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {
     "id": "QkaALpnIQ01b"
    },
@@ -276,17 +68,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "import os \n",
-    "os.environ['GROQ_API_KEY'] = 'xxx' # get a free key at https://console.groq.com/keys"
+    "\n",
+    "os.environ['FIREWORKS_API_KEY'] = 'xxx' # get a free key at https://fireworks.ai/api-keys"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -294,23 +87,17 @@
     "id": "mA3AG6CFQ3fj",
     "outputId": "b872d91f-3a16-4d40-cacb-59c8ba5b5bde"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "=== Calling Function ===\n",
-      "Calling function: mystery with args: {\"x\": 2, \"y\": 9}\n",
-      "=== Function Output ===\n",
-      "121\n",
-      "121\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "from llama_index.llms.groq import Groq\n",
+    "from llama_index.llms.fireworks import Fireworks\n",
+    "\n",
+    "# Llama 3 8b on Fireworks.ai also works in some cases, but 70b works better overall\n",
+    "#llm = Fireworks(model=\"accounts/fireworks/models/llama-v3-8b-instruct\", temperature=0)\n",
+    "llm = Fireworks(model=\"accounts/fireworks/models/llama-v3-70b-instruct\", temperature=0)\n",
+    "\n",
+    "# a quick sanity test\n",
+    "#llm.complete(\"Who wrote the  book godfather? \").text\n",
     "\n",
-    "llm = Groq(model=\"llama3-70b-8192\", temperature=0)\n",
     "response = llm.predict_and_call(\n",
     "    [add_tool, mystery_tool],\n",
     "    \"Tell me the output of the mystery function on 2 and 9\",\n",
@@ -321,7 +108,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -329,32 +116,14 @@
     "id": "8TTkd6vuUMmh",
     "outputId": "c2c419af-e9d1-48bb-aa51-c785dcdee3a0"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "--2024-07-03 16:17:18--  https://openreview.net/pdf?id=VtmBAGCN7o\n",
-      "Resolving openreview.net (openreview.net)... 35.184.86.251\n",
-      "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n",
-      "HTTP request sent, awaiting response... 200 OK\n",
-      "Length: 16911937 (16M) [application/pdf]\n",
-      "Saving to: ‘metagpt.pdf’\n",
-      "\n",
-      "metagpt.pdf         100%[===================>]  16.13M  5.75MB/s    in 2.8s    \n",
-      "\n",
-      "2024-07-03 16:17:21 (5.75 MB/s) - ‘metagpt.pdf’ saved [16911937/16911937]\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!wget \"https://openreview.net/pdf?id=VtmBAGCN7o\" -O metagpt.pdf"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {
     "id": "auZQalH5J7CU"
    },
@@ -368,7 +137,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {
     "id": "GFfUjJypJ7Eq"
    },
@@ -381,7 +150,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -389,99 +158,23 @@
     "id": "FrqorjH3VHmT",
     "outputId": "b4888caf-0623-4d64-dba1-4d74c12e64f3"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "page_label: 1\n",
-      "file_name: metagpt.pdf\n",
-      "file_path: metagpt.pdf\n",
-      "file_type: application/pdf\n",
-      "file_size: 16911937\n",
-      "creation_date: 2024-07-03\n",
-      "last_modified_date: 2024-07-03\n",
-      "\n",
-      "Preprint\n",
-      "METAGPT: M ETA PROGRAMMING FOR A\n",
-      "MULTI -AGENT COLLABORATIVE FRAMEWORK\n",
-      "Sirui Hong1∗, Mingchen Zhuge2∗, Jonathan Chen1, Xiawu Zheng3, Yuheng Cheng4,\n",
-      "Ceyao Zhang4,Jinlin Wang1,Zili Wang ,Steven Ka Shing Yau5,Zijuan Lin4,\n",
-      "Liyang Zhou6,Chenyu Ran1,Lingfeng Xiao1,7,Chenglin Wu1†,J¨urgen Schmidhuber2,8\n",
-      "1DeepWisdom,2AI Initiative, King Abdullah University of Science and Technology,\n",
-      "3Xiamen University,4The Chinese University of Hong Kong, Shenzhen,\n",
-      "5Nanjing University,6University of Pennsylvania,\n",
-      "7University of California, Berkeley,8The Swiss AI Lab IDSIA/USI/SUPSI\n",
-      "ABSTRACT\n",
-      "Remarkable progress has been made on automated problem solving through so-\n",
-      "cieties of agents based on large language models (LLMs). Existing LLM-based\n",
-      "multi-agent systems can already solve simple dialogue tasks. Solutions to more\n",
-      "complex tasks, however, are complicated through logic inconsistencies due to\n",
-      "cascading hallucinations caused by naively chaining LLMs. Here we introduce\n",
-      "MetaGPT, an innovative meta-programming framework incorporating efficient\n",
-      "human workflows into LLM-based multi-agent collaborations. MetaGPT en-\n",
-      "codes Standardized Operating Procedures (SOPs) into prompt sequences for more\n",
-      "streamlined workflows, thus allowing agents with human-like domain expertise\n",
-      "to verify intermediate results and reduce errors. MetaGPT utilizes an assembly\n",
-      "line paradigm to assign diverse roles to various agents, efficiently breaking down\n",
-      "complex tasks into subtasks involving many agents working together. On col-\n",
-      "laborative software engineering benchmarks, MetaGPT generates more coherent\n",
-      "solutions than previous chat-based multi-agent systems. Our project can be found\n",
-      "at https://github.com/geekan/MetaGPT.\n",
-      "1 I NTRODUCTION\n",
-      "Autonomous agents utilizing Large Language Models (LLMs) offer promising opportunities to en-\n",
-      "hance and replicate human workflows. In real-world applications, however, existing systems (Park\n",
-      "et al., 2023; Zhuge et al., 2023; Cai et al., 2023; Wang et al., 2023c; Li et al., 2023; Du et al., 2023;\n",
-      "Liang et al., 2023; Hao et al., 2023) tend to oversimplify the complexities. They struggle to achieve\n",
-      "effective, coherent, and accurate problem-solving processes, particularly when there is a need for\n",
-      "meaningful collaborative interaction (Chen et al., 2024; Zhang et al., 2023; Dong et al., 2023; Zhou\n",
-      "et al., 2023; Qian et al., 2023).\n",
-      "Through extensive collaborative practice, humans have developed widely accepted Standardized\n",
-      "Operating Procedures (SOPs) across various domains (Belbin, 2012; Manifesto, 2001; DeMarco &\n",
-      "Lister, 2013). These SOPs play a critical role in supporting task decomposition and effective coor-\n",
-      "dination. Furthermore, SOPs outline the responsibilities of each team member, while establishing\n",
-      "standards for intermediate outputs. Well-defined SOPs improve the consistent and accurate exe-\n",
-      "cution of tasks that align with defined roles and quality standards (Belbin, 2012; Manifesto, 2001;\n",
-      "DeMarco & Lister, 2013; Wooldridge & Jennings, 1998). For instance, in a software company,\n",
-      "Product Managers analyze competition and user needs to create Product Requirements Documents\n",
-      "(PRDs) using a standardized structure, to guide the developmental process.\n",
-      "Inspired by such ideas, we design a promising GPT -based Meta -Programming framework called\n",
-      "MetaGPT that significantly benefits from SOPs. Unlike other works (Li et al., 2023; Qian et al.,\n",
-      "2023), MetaGPT requires agents to generate structured outputs, such as high-quality requirements\n",
-      "∗These authors contributed equally to this work.\n",
-      "†Chenglin Wu (alexanderwu@fuzhi.ai) is the corresponding author, affiliated with DeepWisdom.\n",
-      "1\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(nodes[0].get_content(metadata_mode=\"all\"))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "metadata": {
     "id": "9WisqWK4VPCZ"
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/Users/jeffxtang/anaconda3/envs/PR_TEMP/lib/python3.10/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
-      "  warnings.warn(\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "from llama_index.llms.groq import Groq\n",
-    "\n",
     "from llama_index.core import Settings, VectorStoreIndex\n",
     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
     "\n",
     "Settings.llm = llm\n",
-    "#llm.complete(\"Who wrote the book godfather\").text\n",
     "\n",
     "Settings.embed_model = HuggingFaceEmbedding(\n",
     "    model_name=\"BAAI/bge-small-en-v1.5\"\n",
@@ -490,7 +183,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
    "metadata": {
     "id": "YS4e0mzsVKsl"
    },
@@ -506,7 +199,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "metadata": {
     "id": "S7tz2Z28VKv1"
    },
@@ -530,7 +223,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -538,22 +231,14 @@
     "id": "CttWxW8aVKyk",
     "outputId": "4b64a64f-a989-4ee0-f08e-a6d6f5db42b6"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "MetaGPT achieves a new state-of-the-art (SoTA) with 85.9% and 87.7% in Pass@1, and a 100% task completion rate, demonstrating the robustness and efficiency of its design.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(str(response))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -561,15 +246,7 @@
     "id": "ZvQGoUR0VK1I",
     "outputId": "9033f46f-baba-4345-bd6c-29ce4db3ea39"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'page_label': '2', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-07-03', 'last_modified_date': '2024-07-03'}\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "for n in response.source_nodes:\n",
     "    print(n.metadata)"
@@ -577,7 +254,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
    "metadata": {
     "id": "5r1MHbLOPT8Y"
    },
@@ -622,7 +299,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -630,18 +307,7 @@
     "id": "2jMB3iS6VjFg",
     "outputId": "4ecd2d26-e159-4765-9aa5-818c5308ae02"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "=== Calling Function ===\n",
-      "Calling function: vector_tool with args: {\"query\": \"MetaGPT\", \"page_numbers\": []}\n",
-      "=== Function Output ===\n",
-      "MetaGPT is a system that alleviates or solves deep-seated challenges in developing complex systems, including using context efficiently, reducing hallucinations, and addressing information overload. It employs a unique design that includes a global message pool and a subscription mechanism to streamline communication and filter out irrelevant contexts.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "response = llm.predict_and_call(\n",
     "    [vector_query_tool],\n",
@@ -652,7 +318,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -660,16 +326,7 @@
     "id": "so2p09VNVm9I",
     "outputId": "8fd7027b-e356-492c-decf-36340ad90978"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'page_label': '26', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-07-03', 'last_modified_date': '2024-07-03'}\n",
-      "{'page_label': '23', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-07-03', 'last_modified_date': '2024-07-03'}\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "for n in response.source_nodes:\n",
     "    print(n.metadata)"
@@ -677,7 +334,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": null,
    "metadata": {
     "id": "AuxhlFxHV4MV"
    },
@@ -702,7 +359,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -710,46 +367,7 @@
     "id": "dSqh6iLkV61F",
     "outputId": "9dc5e545-9f9d-4a7d-8332-beb158574b8e"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "=== Calling Function ===\n",
-      "Calling function: summary_tool with args: {\"input\": \"MetaGPT comparisons with ChatDev\"}\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Retrying llama_index.llms.openai.base.OpenAI._chat in 0.7729974179267081 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24572, Requested 872. Please try again in 3m14.442999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._chat in 0.8118681920875381 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24486, Requested 872. Please try again in 3m13.585s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8048217954293545 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24379, Requested 4059. Please try again in 3m44.384s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.2284912426549598 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24379, Requested 3827. Please try again in 3m42.063s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.36108768097936184 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24378, Requested 3311. Please try again in 3m36.899s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.49291998486341715 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24379, Requested 3997. Please try again in 3m43.762s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8259759161981742 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24378, Requested 3530. Please try again in 3m39.085999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.2728628462532011 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24378, Requested 3464. Please try again in 3m38.425s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.38569455296015964 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24378, Requested 413. Please try again in 3m7.915s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.9688055640406157 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24351, Requested 3827. Please try again in 3m41.784s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 1.8499550236003799 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24345, Requested 3464. Please try again in 3m38.098s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 1.2462884758029988 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24338, Requested 3311. Please try again in 3m36.494s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.5612671070659787 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24335, Requested 413. Please try again in 3m7.481s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 0.3393996207225132 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24324, Requested 3997. Please try again in 3m43.213s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 1.6827062094362717 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24291, Requested 3530. Please try again in 3m38.215999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "Retrying llama_index.llms.openai.base.OpenAI._achat in 1.138545067954334 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24291, Requested 4059. Please try again in 3m43.507s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "=== Function Output ===\n",
-      "Encountered error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 24285, Requested 3997. Please try again in 3m42.827s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "response = llm.predict_and_call(\n",
     "    [vector_query_tool, summary_tool],\n",
@@ -758,26 +376,6 @@
     ")"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "Groq(callback_manager=<llama_index.core.callbacks.base.CallbackManager object at 0x178f50a90>, system_prompt=None, messages_to_prompt=<function messages_to_prompt at 0x17fa13400>, completion_to_prompt=<function default_completion_to_prompt at 0x17fa9f400>, output_parser=None, pydantic_program_mode=<PydanticProgramMode.DEFAULT: 'default'>, query_wrapper_prompt=None, model='llama3-70b-8192', temperature=0.0, max_tokens=None, logprobs=None, top_logprobs=0, additional_kwargs={}, max_retries=3, timeout=60.0, default_headers=None, reuse_client=True, api_key='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', api_base='https://api.groq.com/openai/v1', api_version='', context_window=3900, is_chat_model=True, is_function_calling_model=True, tokenizer=None)"
-      ]
-     },
-     "execution_count": 22,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "llm"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -788,15 +386,7 @@
     "id": "OsPn62A2V8R7",
     "outputId": "cf74caa0-5f59-4f3c-f806-f996b317df8c"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'page_label': '8', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-05-11', 'last_modified_date': '2024-05-11'}\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "for n in response.source_nodes:\n",
     "    print(n.metadata)"
@@ -812,54 +402,13 @@
     "id": "ugdDXz1EV96J",
     "outputId": "fdc93775-04a0-4632-f190-0ef8d3800651"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
-      "Action: summary_tool\n",
-      "Action Input: {'input': 'Please provide the paper text or a brief description of the paper'}\n",
-      "\u001b[0m"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.15510587781355234 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2886, Requested ~3837. Please try again in 1m14.456s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8139104116113804 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2803, Requested ~4062. Please try again in 1m17.29s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.2273722542576545 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2786, Requested ~4011. Please try again in 1m15.935s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8338470386016188 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2542, Requested ~3837. Please try again in 1m7.573s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.5597229378294226 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2447, Requested ~4011. Please try again in 1m9.146999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8236594000712218 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2434, Requested ~4062. Please try again in 1m9.901s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 3.792455535339309 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2147, Requested ~3837. Please try again in 59.667s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 2.752465317045643 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2098, Requested ~4062. Please try again in 1m3.19s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
-      "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 3.2152173100995927 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2072, Requested ~4011. Please try again in 1m1.654s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[1;3;34mObservation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 1406, Requested ~3453. Please try again in 37.162s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n",
-      "\u001b[0m"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "response = llm.predict_and_call(\n",
     "    [vector_query_tool, summary_tool],\n",
     "    \"What is a summary of the paper?\",\n",
     "    verbose=True\n",
-    ")\n",
-    "\n",
-    "# got the error \"Rate limit reached for model `llama3-70b-8192`\"\n",
-    "# Observation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 1406, Requested ~3453. Please try again in 37.162s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n",
-    "# https://console.groq.com/settings/limits\n",
-    "# ID\tREQUESTS PER MINUTE |\tREQUESTS PER DAY | TOKENS PER MINUTE\n",
-    "# llama3-70b-8192\t30 | 14,400\t| 6,000\n",
-    "# llama3-8b-8192\t30 | 14,400\t| 30,000"
+    ")"
    ]
   },
   {