diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1416d52cdc29cd299d881c3090fbf05d88fef391 --- /dev/null +++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb @@ -0,0 +1,996 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eiJsOa29ej7G", + "outputId": "edc5d39c-f379-4410-db9f-998db9c099be" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [
+ "Successfully installed dataclasses-json-0.6.6 deprecated-1.2.14 dirtyjson-1.0.8 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 llama-index-0.10.36 llama-index-agent-openai-0.2.4 llama-index-cli-0.1.12 llama-index-core-0.10.36 llama-index-embeddings-openai-0.1.9 llama-index-indices-managed-llama-cloud-0.1.6 llama-index-legacy-0.9.48 llama-index-llms-openai-0.1.18 llama-index-multi-modal-llms-openai-0.1.5 llama-index-program-openai-0.1.6 llama-index-question-gen-openai-0.1.3 llama-index-readers-file-0.1.22 llama-index-readers-llama-parse-0.1.4 llama-parse-0.4.2 llamaindex-py-client-0.1.19 marshmallow-3.21.2 mypy-extensions-1.0.0 openai-1.28.0 pypdf-4.2.0 striprtf-0.0.26 tiktoken-0.6.0 typing-inspect-0.9.0\n",
+ "Successfully installed llama-index-embeddings-huggingface-0.2.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.19.3 nvidia-nvjitlink-cu12-12.4.127 nvidia-nvtx-cu12-12.1.105 sentence-transformers-2.7.0\n",
+ "Successfully installed llama-index-llms-groq-0.1.3 llama-index-llms-openai-like-0.1.3\n" + ] + } + ],
+ "source": [ + "!pip install llama-index\n", + "!pip install llama-index-embeddings-huggingface\n", + "!pip install llama-index-llms-groq" + ] + },
+ { + "cell_type": "code", + "source": [ + "from getpass import getpass\n", + "import os\n", + "\n", + "GROQ_API_TOKEN = getpass()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Wv_-jm5XGoWo", + "outputId": "e7f2ca02-8ab3-4838-e1bf-3dbd3fb48e64" + }, + "execution_count": null, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "··········\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()" + ], + "metadata": { + "id": "NZ9l6k_3WncE" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.core.tools import FunctionTool\n", + "\n", + "def add(x: int, y: int) -> int:\n", + " \"\"\"Adds two integers together.\"\"\"\n", + " return x + y\n", + "\n", + "def mystery(x: int, y: int) -> int:\n", + " \"\"\"Mystery function that operates on top of two numbers.\"\"\"\n", + " return (x + y) * (x + y)\n", + "\n", + "\n", + "add_tool = FunctionTool.from_defaults(fn=add)\n", + "mystery_tool = FunctionTool.from_defaults(fn=mystery)" + ], + "metadata": { + "id": "QkaALpnIQ01b" + }, + "execution_count": null, + "outputs": [] + },
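 + { + "cell_type": "code", + "source": [ + "# Optional sanity check (not part of the original lesson): FunctionTool infers the\n", + "# tool name and description from the function's signature and docstring, and the\n", + "# wrapped function can be invoked directly via .call(), which returns a ToolOutput.\n", + "print(add_tool.metadata.name)\n", + "print(add_tool.metadata.description)\n", + "print(add_tool.call(x=1, y=2))" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "from 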
llama_index.llms.groq import Groq\n", + "\n", + "llm = Groq(model=\"llama3-70b-8192\", api_key=GROQ_API_TOKEN)\n", + "response = llm.predict_and_call(\n", + " [add_tool, mystery_tool],\n", + " \"Tell me the output of the mystery function on 2 and 9\",\n", + " verbose=True\n", + ")\n", + "print(str(response))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "mA3AG6CFQ3fj", + "outputId": "b872d91f-3a16-4d40-cacb-59c8ba5b5bde" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: mystery\n", + "Action Input: {'x': 2, 'y': 9}\n", + "\u001b[0m\u001b[1;3;34mObservation: 121\n", + "\u001b[0m121\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "!wget \"https://openreview.net/pdf?id=VtmBAGCN7o\" -O metagpt.pdf" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8TTkd6vuUMmh", + "outputId": "c2c419af-e9d1-48bb-aa51-c785dcdee3a0" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2024-05-11 00:58:11-- https://openreview.net/pdf?id=VtmBAGCN7o\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16911937 (16M) [application/pdf]\n", + "Saving to: ‘metagpt.pdf’\n", + "\n", + "metagpt.pdf 100%[===================>] 16.13M 46.7MB/s in 0.3s \n", + "\n", + "2024-05-11 00:58:11 (46.7 MB/s) - ‘metagpt.pdf’ saved [16911937/16911937]\n", + "\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.core import SimpleDirectoryReader\n", + "\n", + "# Load the MetaGPT paper downloaded above as metagpt.pdf (also available at https://arxiv.org/pdf/2308.00352)\n", + "documents = SimpleDirectoryReader(input_files=[\"metagpt.pdf\"]).load_data()" + ], + "metadata": { + "id": "auZQalH5J7CU" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.core.node_parser import SentenceSplitter\n", + "splitter = SentenceSplitter(chunk_size=1024)\n", + "nodes = splitter.get_nodes_from_documents(documents)" + ], + "metadata": { + "id": "GFfUjJypJ7Eq" + }, + "execution_count": null, + "outputs": [] + },
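 + { + "cell_type": "code", + "source": [ + "# Optional sanity check (not part of the original lesson): SimpleDirectoryReader\n", + "# yields one Document per PDF page, and SentenceSplitter re-chunks them into\n", + "# ~1024-token nodes.\n", + "print(len(documents), \"documents loaded\")\n", + "print(len(nodes), \"nodes after splitting\")" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "print(nodes[0].get_content(metadata_mode=\"all\"))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "FrqorjH3VHmT", + "outputId": "b4888caf-0623-4d64-dba1-4d74c12e64f3" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "page_label: 1\n", + "file_name: metagpt.pdf\n", + "file_path: metagpt.pdf\n", + "file_type: application/pdf\n", + "file_size: 16911937\n", + "creation_date: 2024-05-11\n", + "last_modified_date: 2024-05-11\n", + "\n", + "Preprint\n", + "METAGPT: M ETA PROGRAMMING FOR A\n", + "MULTI -AGENT COLLABORATIVE FRAMEWORK\n", + "Sirui Hong1∗, Mingchen Zhuge2∗, Jonathan Chen1, Xiawu Zheng3, Yuheng Cheng4,\n", + "Ceyao Zhang4,Jinlin Wang1,Zili Wang ,Steven Ka Shing Yau5,Zijuan Lin4,\n", + "Liyang Zhou6,Chenyu Ran1,Lingfeng Xiao1,7,Chenglin Wu1†,J¨urgen Schmidhuber2,8\n", + "1DeepWisdom,2AI Initiative, King Abdullah University of Science and Technology,\n", + "3Xiamen University,4The Chinese University of Hong Kong, Shenzhen,\n", + "5Nanjing 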
University,6University of Pennsylvania,\n", + "7University of California, Berkeley,8The Swiss AI Lab IDSIA/USI/SUPSI\n", + "ABSTRACT\n", + "Remarkable progress has been made on automated problem solving through so-\n", + "cieties of agents based on large language models (LLMs). Existing LLM-based\n", + "multi-agent systems can already solve simple dialogue tasks. Solutions to more\n", + "complex tasks, however, are complicated through logic inconsistencies due to\n", + "cascading hallucinations caused by naively chaining LLMs. Here we introduce\n", + "MetaGPT, an innovative meta-programming framework incorporating efficient\n", + "human workflows into LLM-based multi-agent collaborations. MetaGPT en-\n", + "codes Standardized Operating Procedures (SOPs) into prompt sequences for more\n", + "streamlined workflows, thus allowing agents with human-like domain expertise\n", + "to verify intermediate results and reduce errors. MetaGPT utilizes an assembly\n", + "line paradigm to assign diverse roles to various agents, efficiently breaking down\n", + "complex tasks into subtasks involving many agents working together. On col-\n", + "laborative software engineering benchmarks, MetaGPT generates more coherent\n", + "solutions than previous chat-based multi-agent systems. Our project can be found\n", + "at https://github.com/geekan/MetaGPT.\n", + "1 I NTRODUCTION\n", + "Autonomous agents utilizing Large Language Models (LLMs) offer promising opportunities to en-\n", + "hance and replicate human workflows. In real-world applications, however, existing systems (Park\n", + "et al., 2023; Zhuge et al., 2023; Cai et al., 2023; Wang et al., 2023c; Li et al., 2023; Du et al., 2023;\n", + "Liang et al., 2023; Hao et al., 2023) tend to oversimplify the complexities. They struggle to achieve\n", + "effective, coherent, and accurate problem-solving processes, particularly when there is a need for\n", + "meaningful collaborative interaction (Chen et al., 2024; Zhang et al., 2023; Dong et al., 2023; Zhou\n", + "et al., 2023; Qian et al., 2023).\n", + "Through extensive collaborative practice, humans have developed widely accepted Standardized\n", + "Operating Procedures (SOPs) across various domains (Belbin, 2012; Manifesto, 2001; DeMarco &\n", + "Lister, 2013). These SOPs play a critical role in supporting task decomposition and effective coor-\n", + "dination. Furthermore, SOPs outline the responsibilities of each team member, while establishing\n", + "standards for intermediate outputs. Well-defined SOPs improve the consistent and accurate exe-\n", + "cution of tasks that align with defined roles and quality standards (Belbin, 2012; Manifesto, 2001;\n", + "DeMarco & Lister, 2013; Wooldridge & Jennings, 1998). For instance, in a software company,\n", + "Product Managers analyze competition and user needs to create Product Requirements Documents\n", + "(PRDs) using a standardized structure, to guide the developmental process.\n", + "Inspired by such ideas, we design a promising GPT -based Meta -Programming framework called\n", + "MetaGPT that significantly benefits from SOPs. 
Unlike other works (Li et al., 2023; Qian et al.,\n", + "2023), MetaGPT requires agents to generate structured outputs, such as high-quality requirements\n", + "∗These authors contributed equally to this work.\n", + "†Chenglin Wu (alexanderwu@fuzhi.ai) is the corresponding author, affiliated with DeepWisdom.\n", + "1\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.llms.groq import Groq\n", + "\n", + "from llama_index.core import Settings, VectorStoreIndex\n", + "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", + "\n", + "Settings.llm = llm\n", + "\n", + "Settings.embed_model = HuggingFaceEmbedding(\n", + " model_name=\"BAAI/bge-small-en-v1.5\"\n", + ")" + ], + "metadata": { + "id": "9WisqWK4VPCZ" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "# Settings.llm and Settings.embed_model are global defaults: VectorStoreIndex(nodes)\n", + "# uses Settings.embed_model to embed the nodes, while as_query_engine() uses\n", + "# Settings.embed_model for the query and Settings.llm for response synthesis.\n", + "\n", + "from llama_index.core import VectorStoreIndex\n", + "\n", + "vector_index = VectorStoreIndex(nodes)\n", + "query_engine = vector_index.as_query_engine(similarity_top_k=2)" + ], + "metadata": { + "id": "YS4e0mzsVKsl" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.core.vector_stores import MetadataFilters\n", + "\n", + "query_engine = vector_index.as_query_engine(\n", + " similarity_top_k=2,\n", + " filters=MetadataFilters.from_dicts(\n", + " [\n", + " {\"key\": \"page_label\", \"value\": \"2\"}\n", + " ]\n", + " )\n", + ")\n", + "\n", + "response = query_engine.query(\n", + " \"What are some high-level results of MetaGPT?\",\n", + ")" + ], + "metadata": { + "id": "S7tz2Z28VKv1" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "print(str(response))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "CttWxW8aVKyk", + "outputId": "4b64a64f-a989-4ee0-f08e-a6d6f5db42b6" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "MetaGPT achieves a new state-of-the-art (SoTA) with 85.9% and 87.7% in Pass@1 in code generation benchmarks. Additionally, it achieves a 100% task completion rate, demonstrating the robustness and efficiency of its design.\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "for n in response.source_nodes:\n", + " print(n.metadata)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ZvQGoUR0VK1I", + "outputId": "9033f46f-baba-4345-bd6c-29ce4db3ea39" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'page_label': '2', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-05-11', 'last_modified_date': '2024-05-11'}\n" + ] + } + ] + },
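 + { + "cell_type": "code", + "source": [ + "# Hypothetical aside (not in the original lesson): the same page filter can also be\n", + "# applied at the retriever level, returning the raw nodes instead of a synthesized\n", + "# answer; this sketch assumes the vector_index built above.\n", + "retriever = vector_index.as_retriever(\n", + " similarity_top_k=2,\n", + " filters=MetadataFilters.from_dicts(\n", + " [{\"key\": \"page_label\", \"value\": \"2\"}]\n", + " ),\n", + ")\n", + "for node_with_score in retriever.retrieve(\"What are some high-level results of MetaGPT?\"):\n", + " print(node_with_score.metadata[\"page_label\"], node_with_score.score)" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "from typing import List\n", + "from llama_index.core.vector_stores import FilterCondition\n", + "\n", + "\n", + "def vector_query(\n", + " query: str,\n", + " page_numbers: List[str]\n", + ") -> str:\n", + " \"\"\"Perform a vector search over an index.\n", + "\n", + " query (str): the string query to be embedded.\n", + " page_numbers (List[str]): Filter by set of pages. Leave BLANK if we want to perform a vector search\n", + " over all pages. 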
Otherwise, filter by the set of specified pages.\n", + "\n", + " \"\"\"\n", + "\n", + " metadata_dicts = [\n", + " {\"key\": \"page_label\", \"value\": p} for p in page_numbers\n", + " ]\n", + "\n", + " query_engine = vector_index.as_query_engine(\n", + " similarity_top_k=2,\n", + " filters=MetadataFilters.from_dicts(\n", + " metadata_dicts,\n", + " condition=FilterCondition.OR\n", + " )\n", + " )\n", + " response = query_engine.query(query)\n", + " # NOTE: the Response object is returned as-is (despite the -> str annotation) so\n", + " # that predict_and_call can surface response.source_nodes on its result.\n", + " return response\n", + "\n", + "\n", + "vector_query_tool = FunctionTool.from_defaults(\n", + " name=\"vector_tool\",\n", + " fn=vector_query\n", + ")" + ], + "metadata": { + "id": "5r1MHbLOPT8Y" + }, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "response = llm.predict_and_call(\n", + " [vector_query_tool],\n", + " \"What are the high-level results of MetaGPT as described on page 2?\",\n", + " verbose=True\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "2jMB3iS6VjFg", + "outputId": "4ecd2d26-e159-4765-9aa5-818c5308ae02" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: vector_tool\n", + "Action Input: {'query': 'MetaGPT results', 'page_numbers': ['2']}\n", + "\u001b[0m\u001b[1;3;34mObservation: MetaGPT achieves a new state-of-the-art (SoTA) with 85.9% and 87.7% in Pass@1, and a 100% task completion rate, demonstrating the robustness and efficiency of its design.\n", + "\u001b[0m" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "for n in response.source_nodes:\n", + " print(n.metadata)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "so2p09VNVm9I", + "outputId": "8fd7027b-e356-492c-decf-36340ad90978" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'page_label': '2', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-05-11', 'last_modified_date': '2024-05-11'}\n" + ] + } + ] + },
+ { + "cell_type": "code", + "source": [ + "from llama_index.core import SummaryIndex\n", + "from llama_index.core.tools import QueryEngineTool\n", + "\n", + "summary_index = SummaryIndex(nodes)\n", + "summary_query_engine = summary_index.as_query_engine(\n", + " response_mode=\"tree_summarize\",\n", + " use_async=True,\n", + ")\n", + "summary_tool = QueryEngineTool.from_defaults(\n", + " name=\"summary_tool\",\n", + " query_engine=summary_query_engine,\n", + " description=(\n", + " \"Useful for summarization questions related to the MetaGPT paper\"\n", + " ),\n", + ")" + ], + "metadata": { + "id": "AuxhlFxHV4MV" + }, + "execution_count": null, + "outputs": [] + },
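 + { + "cell_type": "code", + "source": [ + "# Optional check (not part of the original lesson): predict_and_call chooses between\n", + "# tools based on the metadata each tool advertises, so it is worth inspecting the\n", + "# generated names and descriptions.\n", + "print(vector_query_tool.metadata.name)\n", + "print(vector_query_tool.metadata.description)\n", + "print(summary_tool.metadata.name)\n", + "print(summary_tool.metadata.description)" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + },
+ { + "cell_type": "code", + "source": [ + "response = llm.predict_and_call(\n", + " [vector_query_tool, summary_tool],\n", + " \"What are the MetaGPT comparisons with ChatDev described on page 8?\",\n", + " verbose=True\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "dSqh6iLkV61F", + "outputId": "9dc5e545-9f9d-4a7d-8332-beb158574b8e" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. 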
+ { + "cell_type": "code", + "source": [ + "response = llm.predict_and_call(\n", + " [vector_query_tool, summary_tool],\n", + " \"What are the MetaGPT comparisons with ChatDev described on page 8?\",\n", + " verbose=True\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "dSqh6iLkV61F", + "outputId": "9dc5e545-9f9d-4a7d-8332-beb158574b8e" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: vector_tool\n", + "Action Input: {'query': 'MetaGPT comparisons with ChatDev', 'page_numbers': ['8']}\n", + "\u001b[0m\u001b[1;3;34mObservation: MetaGPT outperforms ChatDev on the SoftwareDev dataset in nearly all metrics. Specifically, MetaGPT achieves a score of 3.75 in executability, which is very close to 4 (flawless), whereas ChatDev scores 2.25. Additionally, MetaGPT takes less time (503 seconds) compared to ChatDev (762 seconds). In terms of code statistics and human revision cost, MetaGPT also significantly outperforms ChatDev.\n", + "\u001b[0m" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "for n in response.source_nodes:\n", + " print(n.metadata)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "OsPn62A2V8R7", + "outputId": "cf74caa0-5f59-4f3c-f806-f996b317df8c" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'page_label': '8', 'file_name': 'metagpt.pdf', 'file_path': 'metagpt.pdf', 'file_type': 'application/pdf', 'file_size': 16911937, 'creation_date': '2024-05-11', 'last_modified_date': '2024-05-11'}\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "response = llm.predict_and_call(\n", + " [vector_query_tool, summary_tool],\n", + " \"What is a summary of the paper?\",\n", + " verbose=True\n", + ")\n", + "\n", + "# got the error \"Rate limit reached for model `llama3-70b-8192`\"\n", + "# Observation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 1406, Requested ~3453. Please try again in 37.162s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n", + "# https://console.groq.com/settings/limits\n", + "# MODEL ID        | REQUESTS PER MINUTE | REQUESTS PER DAY | TOKENS PER MINUTE\n", + "# llama3-70b-8192 | 30                  | 14,400           | 6,000\n", + "# llama3-8b-8192  | 30                  | 14,400           | 30,000\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ugdDXz1EV96J", + "outputId": "fdc93775-04a0-4632-f190-0ef8d3800651" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: summary_tool\n", + "Action Input: {'input': 'Please provide the paper text or a brief description of the paper'}\n", + "\u001b[0m" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.15510587781355234 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2886, Requested ~3837. Please try again in 1m14.456s. 
Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8139104116113804 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2803, Requested ~4062. Please try again in 1m17.29s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.2273722542576545 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2786, Requested ~4011. Please try again in 1m15.935s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8338470386016188 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2542, Requested ~3837. Please try again in 1m7.573s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.5597229378294226 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2447, Requested ~4011. Please try again in 1m9.146999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8236594000712218 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2434, Requested ~4062. Please try again in 1m9.901s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 3.792455535339309 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2147, Requested ~3837. Please try again in 59.667s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 2.752465317045643 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2098, Requested ~4062. Please try again in 1m3.19s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 3.2152173100995927 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 2072, Requested ~4011. Please try again in 1m1.654s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;34mObservation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 1406, Requested ~3453. Please try again in 37.162s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n", + "\u001b[0m" + ] + } + ] + },
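+ { + "cell_type": "markdown", + "source": [ + "Aside (added sketch): the 429s above are Groq's per-minute token (TPM) limits. One hedged workaround is a small exponential-backoff wrapper; `max_attempts`, `base_delay`, and the string check on the exception are illustrative choices, not part of the original notebook. Note that `predict_and_call` can also swallow a tool error into its Observation instead of raising, so a wrapper like this is most useful around direct engine or LLM calls.\n" + ], + "metadata": {} + }, + { + "cell_type": "code", + "source": [ + "import time\n", + "\n", + "\n", + "def call_with_backoff(fn, *args, max_attempts=5, base_delay=2.0, **kwargs):\n", + " \"\"\"Retry `fn` with exponential backoff while it raises 429 rate-limit errors.\"\"\"\n", + " for attempt in range(max_attempts):\n", + " try:\n", + " return fn(*args, **kwargs)\n", + " except Exception as exc: # ideally catch the client's RateLimitError\n", + " if \"429\" not in str(exc) or attempt == max_attempts - 1:\n", + " raise\n", + " time.sleep(base_delay * 2 ** attempt)\n", + "\n", + "\n", + "# e.g. call_with_backoff(summary_query_engine.query, \"What is a summary of the paper?\")" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + },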
+ { + "cell_type": "code", + "source": [ + "llm" + ], + "metadata": { + "id": "uTmlDhjMV_MB", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "b0b3dfab-26b3-4356-b47e-e3128a6828a0" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Groq(callback_manager=<llama_index.core.callbacks.base.CallbackManager object at 0x7d4a1f4934c0>, system_prompt=None, messages_to_prompt=<function messages_to_prompt at 0x7d4aeef60ca0>, completion_to_prompt=<function default_completion_to_prompt at 0x7d4aeee052d0>, output_parser=None, pydantic_program_mode=<PydanticProgramMode.DEFAULT: 'default'>, query_wrapper_prompt=None, model='llama3-70b-8192', temperature=0.1, max_tokens=None, logprobs=None, top_logprobs=0, additional_kwargs={}, max_retries=3, timeout=60.0, default_headers=None, reuse_client=True, api_key='gsk_<REDACTED>', api_base='https://api.groq.com/openai/v1', api_version='', context_window=3900, is_chat_model=True, is_function_calling_model=False, tokenizer=None)" + ] + }, + "metadata": {}, + "execution_count": 35 + } + ] + }, + { + "cell_type": "code", + "source": [ + "# fall back to the 8B model, which has a much higher TPM limit (see the table above)\n", + "llm8b = Groq(model=\"llama3-8b-8192\", api_key=GROQ_API_TOKEN)\n" + ], + "metadata": { + "id": "X1EW9rjmOIL-" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "response = llm8b.predict_and_call(\n", + " [vector_query_tool, summary_tool],\n", + " \"What is a summary of the paper?\",\n", + " verbose=True\n", + ")\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wglA1CLlOPrL", + "outputId": "55106a1a-f051-4139-8faa-134b90940900" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user 
is: English. I need to use a tool to help me answer the question.\n", + "Action: summary_tool\n", + "Action Input: {'input': 'What is a summary of the paper?'}\n", + "\u001b[0m" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.96433431094118 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 0, Requested ~3311. Please try again in 6.22s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.8234959300023535 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 0, Requested ~3530. Please try again in 10.6s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 0.6157227133555229 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 0, Requested ~3464. Please try again in 9.28s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._achat in 1.53992389401012 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 0, Requested ~3311. Please try again in 6.22s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;34mObservation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 3000, Used 0, Requested ~3826. Please try again in 16.52s. 
Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n", + "\u001b[0m" + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "fx0RBeX0OS3n" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L3_Building_an_Agent_Reasoning_Loop.ipynb b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L3_Building_an_Agent_Reasoning_Loop.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..5af999d207d10cf3f862d41167483db4af213961 --- /dev/null +++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L3_Building_an_Agent_Reasoning_Loop.ipynb @@ -0,0 +1,5073 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "c26422ab46584828b1e56737bfb5efbd": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_fc3fbc542ff94d46b283e5b989cddf54", + "IPY_MODEL_5890fca1cf53425287fc51703a162abf", + "IPY_MODEL_6ffe78e8c9d141b987a93840a76a0a9d" + ], + "layout": "IPY_MODEL_6b4fff8215b946a291f1b2264a05826a" + } + }, + "fc3fbc542ff94d46b283e5b989cddf54": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_87e2db5bc221448db9213c9b5efd91ef", + "placeholder": "", + "style": "IPY_MODEL_70e7c637ab724049810fd5c912fcbdf7", + "value": "modules.json: 100%" + } + }, + "5890fca1cf53425287fc51703a162abf": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_628a3a6eb163488f89f5f19f2d956cad", + "max": 349, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_113c2247163c458d806e1fb93b0f0d43", + "value": 349 + } + }, + "6ffe78e8c9d141b987a93840a76a0a9d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": 
null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_996a99f0cdb140afa802a25e5774dd24", + "placeholder": "", + "style": "IPY_MODEL_d8c87b8639ae4151a20f1c53352ea6b3", + "value": " 349/349 [00:00<00:00, 5.66kB/s]" + } + }, + "6b4fff8215b946a291f1b2264a05826a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "87e2db5bc221448db9213c9b5efd91ef": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "70e7c637ab724049810fd5c912fcbdf7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "628a3a6eb163488f89f5f19f2d956cad": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": 
"1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "113c2247163c458d806e1fb93b0f0d43": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "996a99f0cdb140afa802a25e5774dd24": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d8c87b8639ae4151a20f1c53352ea6b3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "240cd69316e6461da0e62ca1af562692": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_34b039cd03954432b32c8f991703f103", + "IPY_MODEL_e25bae0193d94c14ad34d76b07ae3ef3", + "IPY_MODEL_41e775a95eb34ed6acdb7efa0dc9dcc3" + ], + "layout": "IPY_MODEL_7e3bc961c28446cc91bc4a9f305c9939" + } + }, + "34b039cd03954432b32c8f991703f103": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_5b7b4ec04eef441ea1d2e0328c69a6c8", + "placeholder": "", + "style": "IPY_MODEL_e791599c91b94565a6ab40bccbac8946", + "value": "config_sentence_transformers.json: 100%" + } + }, + "e25bae0193d94c14ad34d76b07ae3ef3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_096c28485c874776ad4255477b8fb059", + "max": 124, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_5cbde93ee45f4c23886ded1cd6993014", + "value": 124 + } + }, + "41e775a95eb34ed6acdb7efa0dc9dcc3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a714cf6ea3f1454aa6720c6f6b016e7d", + "placeholder": "", + "style": "IPY_MODEL_add3052606dc4cddbc81c600bec0aaab", + "value": " 124/124 [00:00<00:00, 2.02kB/s]" + } + }, + "7e3bc961c28446cc91bc4a9f305c9939": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + 
"overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5b7b4ec04eef441ea1d2e0328c69a6c8": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e791599c91b94565a6ab40bccbac8946": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "096c28485c874776ad4255477b8fb059": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5cbde93ee45f4c23886ded1cd6993014": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, 
+ "a714cf6ea3f1454aa6720c6f6b016e7d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "add3052606dc4cddbc81c600bec0aaab": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "dac8de62f29f44d7b0954b99c116c319": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_00263e0751c44ebebe1dc472a64d483d", + "IPY_MODEL_7643b88058ad41718a81da6e4165cc2f", + "IPY_MODEL_e79b09ba1f794c3592bb46d1a5b6ea3a" + ], + "layout": "IPY_MODEL_d65ff865d8764d9a8389ad38c31a3255" + } + }, + "00263e0751c44ebebe1dc472a64d483d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_0bd5ecf320ea4a9a9db0d9f40d9f2835", + "placeholder": "", + "style": "IPY_MODEL_955211921e694baa848bc3906a6c6a4b", + "value": "README.md: 100%" + } + }, + "7643b88058ad41718a81da6e4165cc2f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", 
+ "description_tooltip": null, + "layout": "IPY_MODEL_c7f6b63883264ccab41ad8a3e009d3af", + "max": 94783, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_c74d02fce14a4e6dbdfaa86f4ad9cc1b", + "value": 94783 + } + }, + "e79b09ba1f794c3592bb46d1a5b6ea3a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7f00ed3acdb54246a1b37a85326e2efa", + "placeholder": "", + "style": "IPY_MODEL_8760ad31040e477dbbf22f2ef55c0b1d", + "value": " 94.8k/94.8k [00:00<00:00, 1.09MB/s]" + } + }, + "d65ff865d8764d9a8389ad38c31a3255": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0bd5ecf320ea4a9a9db0d9f40d9f2835": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "955211921e694baa848bc3906a6c6a4b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": 
"1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c7f6b63883264ccab41ad8a3e009d3af": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c74d02fce14a4e6dbdfaa86f4ad9cc1b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "7f00ed3acdb54246a1b37a85326e2efa": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8760ad31040e477dbbf22f2ef55c0b1d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + 
"_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "94e4ad5fac5e4da9ab5e4be8474b201e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5ed2612e54fc4864a62d57fc79fe8499", + "IPY_MODEL_538634dcebf6409fb886068f8b107d81", + "IPY_MODEL_e8555b9ad64546959630669407e755c7" + ], + "layout": "IPY_MODEL_613b45a23a534d009cb0c1cc3f79ff47" + } + }, + "5ed2612e54fc4864a62d57fc79fe8499": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_527bd695d191480a9e701bea0b8bb47d", + "placeholder": "", + "style": "IPY_MODEL_0595d14613734c3cb658aac398eb0ae8", + "value": "sentence_bert_config.json: 100%" + } + }, + "538634dcebf6409fb886068f8b107d81": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f648cb3c78d043fe9e666849ead6854b", + "max": 52, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_acac5be270d3413ca2b0232b473673b2", + "value": 52 + } + }, + "e8555b9ad64546959630669407e755c7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d1a4da08fc8b4825b851d57c829160d4", + "placeholder": "", + "style": "IPY_MODEL_0164f6d7b2484beaa254d0c00c3f61ee", + "value": " 52.0/52.0 [00:00<00:00, 732B/s]" + } + }, + "613b45a23a534d009cb0c1cc3f79ff47": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + 
"grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "527bd695d191480a9e701bea0b8bb47d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0595d14613734c3cb658aac398eb0ae8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f648cb3c78d043fe9e666849ead6854b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + 
"top": null, + "visibility": null, + "width": null + } + }, + "acac5be270d3413ca2b0232b473673b2": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d1a4da08fc8b4825b851d57c829160d4": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0164f6d7b2484beaa254d0c00c3f61ee": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "85b68d1754b9413cbd774c7435b4e53e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_78ee39a2d64844939e326e2ae065c34b", + "IPY_MODEL_fd44359a567c4cb98e0c33b35ebd90bb", + "IPY_MODEL_f9ad85e78dde4342843311cc94084f38" + ], + "layout": "IPY_MODEL_467c9f03076c42b483f2db2e1bdb788c" + } + }, + "78ee39a2d64844939e326e2ae065c34b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_0cea7b2321424395b0441a8849ee3a62", + "placeholder": "", + "style": 
"IPY_MODEL_6bcaee1c2d3d498d9aa25333a0c0e9d4", + "value": "config.json: 100%" + } + }, + "fd44359a567c4cb98e0c33b35ebd90bb": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6a8f6289e8e24bf29b699a3f08e528b4", + "max": 743, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_d9fc7e95604140368a11fbf78d2ab78f", + "value": 743 + } + }, + "f9ad85e78dde4342843311cc94084f38": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_297c46666c244ea98d54fc6e3fab94d2", + "placeholder": "", + "style": "IPY_MODEL_1f07e25b78dc4c47be24299c425ef7a2", + "value": " 743/743 [00:00<00:00, 11.4kB/s]" + } + }, + "467c9f03076c42b483f2db2e1bdb788c": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0cea7b2321424395b0441a8849ee3a62": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": 
null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6bcaee1c2d3d498d9aa25333a0c0e9d4": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "6a8f6289e8e24bf29b699a3f08e528b4": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d9fc7e95604140368a11fbf78d2ab78f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "297c46666c244ea98d54fc6e3fab94d2": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + 
"max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "1f07e25b78dc4c47be24299c425ef7a2": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "5e5d2dd4d8774861ab8f9a3f57b57f12": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_6dd9c5007f1d438d8b280d0e025bf62f", + "IPY_MODEL_6d4da17710c74841a5d33efa344d7180", + "IPY_MODEL_a54dea1d3d3e48389d9301fb2d83b8df" + ], + "layout": "IPY_MODEL_bdd8a62fefaf4df8a4b688a918fed96a" + } + }, + "6dd9c5007f1d438d8b280d0e025bf62f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ff861eba4be248fea1d09f5d0b216b4f", + "placeholder": "", + "style": "IPY_MODEL_40810a4d473d4fd0af61a9a70d2fc416", + "value": "model.safetensors: 100%" + } + }, + "6d4da17710c74841a5d33efa344d7180": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f32b687330fd4e5eaf547d42bdd8b5ef", + "max": 133466304, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_c95c399f24e94e62a57c244d72715d8f", + "value": 133466304 + } + }, + "a54dea1d3d3e48389d9301fb2d83b8df": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f34031fa96004c5baf279935fd071efa", + "placeholder": "", + "style": "IPY_MODEL_f00575c770074c6791096d1f7125c5f6", + "value": " 133M/133M [00:02<00:00, 60.4MB/s]" + } + }, + 
"bdd8a62fefaf4df8a4b688a918fed96a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ff861eba4be248fea1d09f5d0b216b4f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "40810a4d473d4fd0af61a9a70d2fc416": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f32b687330fd4e5eaf547d42bdd8b5ef": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + 
"grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c95c399f24e94e62a57c244d72715d8f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "f34031fa96004c5baf279935fd071efa": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f00575c770074c6791096d1f7125c5f6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d894b0cb613a4f22ae7c958863e5b340": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_8428fc1563e44fd9a0690e2125bcb2a8", + "IPY_MODEL_d4769437a5764b90a5c2c332f0df5386", + "IPY_MODEL_775d39014f464413ad7bfa22487b69e3" + ], + "layout": "IPY_MODEL_c7ea50e54a1a4166b4e691cf61a47728" + } + }, + 
"8428fc1563e44fd9a0690e2125bcb2a8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_cad4d390039d485c991e6db3cb950474", + "placeholder": "", + "style": "IPY_MODEL_82a1a312336f4733abe09c766bff3c4b", + "value": "tokenizer_config.json: 100%" + } + }, + "d4769437a5764b90a5c2c332f0df5386": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_92cbda5df47741f39c99d210962b5aea", + "max": 366, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_be749e423bc34e87b3a3c82587901fa8", + "value": 366 + } + }, + "775d39014f464413ad7bfa22487b69e3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c24e7857fc2740e78df50818d70b1b8c", + "placeholder": "", + "style": "IPY_MODEL_d5b25bb18b804966890e50dd9f340223", + "value": " 366/366 [00:00<00:00, 5.13kB/s]" + } + }, + "c7ea50e54a1a4166b4e691cf61a47728": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "cad4d390039d485c991e6db3cb950474": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": 
"1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "82a1a312336f4733abe09c766bff3c4b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "92cbda5df47741f39c99d210962b5aea": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "be749e423bc34e87b3a3c82587901fa8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "c24e7857fc2740e78df50818d70b1b8c": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + 
"_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d5b25bb18b804966890e50dd9f340223": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "6232e5f6d6fe423286b1649cd8faf34a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_ce2ed96df931441da3decd38d8f13a31", + "IPY_MODEL_96958d88556a4f5db63a5f43347e5956", + "IPY_MODEL_fafbdcbca7e74aa48c61e5e48bf131ed" + ], + "layout": "IPY_MODEL_f6661758a8d0435b8a4025b8197550ab" + } + }, + "ce2ed96df931441da3decd38d8f13a31": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_7ab10b3032964477b03ea91240fd9c1e", + "placeholder": "", + "style": "IPY_MODEL_2d50cec7d1234fdb9a34449af864e777", + "value": "vocab.txt: 100%" + } + }, + "96958d88556a4f5db63a5f43347e5956": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d194f5b4c9194241845c692c5bcabc5e", + "max": 231508, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_a177b6782e8444d1a1f8b1ac5a1ad475", + "value": 231508 + } + }, + "fafbdcbca7e74aa48c61e5e48bf131ed": { + "model_module": "@jupyter-widgets/controls", + "model_name": 
"HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_4ac47f47e7704d68a5ad763aebb6153e", + "placeholder": "", + "style": "IPY_MODEL_8c2f1cfbef1d4ec4a7708ec938b41312", + "value": " 232k/232k [00:00<00:00, 2.68MB/s]" + } + }, + "f6661758a8d0435b8a4025b8197550ab": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7ab10b3032964477b03ea91240fd9c1e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2d50cec7d1234fdb9a34449af864e777": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + 
"d194f5b4c9194241845c692c5bcabc5e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a177b6782e8444d1a1f8b1ac5a1ad475": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "4ac47f47e7704d68a5ad763aebb6153e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8c2f1cfbef1d4ec4a7708ec938b41312": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "cf3792e56eb7419daabcbbd678c21fea": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + 
"model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5729df06ec794b75a932acae3dab9676", + "IPY_MODEL_4fd71aed976441c087f0d411efdba5d1", + "IPY_MODEL_96dfe297cb144181937eee07ab3b07e0" + ], + "layout": "IPY_MODEL_8f371641cf73413a907eafac3b7162ba" + } + }, + "5729df06ec794b75a932acae3dab9676": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8a417c96b9904aef9227dac62f1f7d2e", + "placeholder": "", + "style": "IPY_MODEL_271cba061d244e7091390f190ed1c218", + "value": "tokenizer.json: 100%" + } + }, + "4fd71aed976441c087f0d411efdba5d1": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_51793d5f94ea49c08c57bf3554489d59", + "max": 711396, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_767e6495dd904c4ea25646ac15fbcf9d", + "value": 711396 + } + }, + "96dfe297cb144181937eee07ab3b07e0": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_03310809a84243d6acf5ccaf3a37baaf", + "placeholder": "", + "style": "IPY_MODEL_d976cb6f959942d794a1d3d9aa1f0a2f", + "value": " 711k/711k [00:00<00:00, 5.69MB/s]" + } + }, + "8f371641cf73413a907eafac3b7162ba": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + 
"margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8a417c96b9904aef9227dac62f1f7d2e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "271cba061d244e7091390f190ed1c218": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "51793d5f94ea49c08c57bf3554489d59": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "767e6495dd904c4ea25646ac15fbcf9d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + 
"_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "03310809a84243d6acf5ccaf3a37baaf": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d976cb6f959942d794a1d3d9aa1f0a2f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "712b7454fbe143e1ac17294e5ff4be49": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_ecb04a87ca0f4e989a5e6014d42c3519", + "IPY_MODEL_2b75235b072b48cc9b57beefd37558ce", + "IPY_MODEL_1a876366cf9841c0822bbd9b62c3712b" + ], + "layout": "IPY_MODEL_930c64c04c3745fea1b734c451a2ef97" + } + }, + "ecb04a87ca0f4e989a5e6014d42c3519": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a9cf9d86b27945c5914f46a6c72c6f40", + "placeholder": "", + "style": "IPY_MODEL_7526c6b322ee487dbfc8f66e2f8e4aeb", + "value": "special_tokens_map.json: 100%" + } + }, + "2b75235b072b48cc9b57beefd37558ce": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + 
"_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_272338085bf04a5591fb22e8aaa22ff0", + "max": 125, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_ab5e7589aa3a475d97f05658fc52ef14", + "value": 125 + } + }, + "1a876366cf9841c0822bbd9b62c3712b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_472f9b34c3a840808108da64184c41de", + "placeholder": "", + "style": "IPY_MODEL_4564e0fd6a8f493ca600494ca65780ca", + "value": " 125/125 [00:00<00:00, 2.33kB/s]" + } + }, + "930c64c04c3745fea1b734c451a2ef97": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a9cf9d86b27945c5914f46a6c72c6f40": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + 
"padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7526c6b322ee487dbfc8f66e2f8e4aeb": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "272338085bf04a5591fb22e8aaa22ff0": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ab5e7589aa3a475d97f05658fc52ef14": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "472f9b34c3a840808108da64184c41de": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + 
"4564e0fd6a8f493ca600494ca65780ca": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "e76ec2dae1fb4ed19c12b306a8a79303": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_f31fd86f61104ef2aa59eee21d05c7df", + "IPY_MODEL_b34277cc9400402e8744181e8ea6d985", + "IPY_MODEL_a8629205cfcd412ca98e5e678afa5b9d" + ], + "layout": "IPY_MODEL_fc676733182d48f89861f8439f621df1" + } + }, + "f31fd86f61104ef2aa59eee21d05c7df": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e5ba5976600b4f84b19f79e074e0ed2a", + "placeholder": "", + "style": "IPY_MODEL_358dce24d99f4787a0470650c219ad6e", + "value": "1_Pooling/config.json: 100%" + } + }, + "b34277cc9400402e8744181e8ea6d985": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76a4793a4d6f4dc384521246722a6eda", + "max": 190, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_8f162e0b3b8d463b9141f343b7e39814", + "value": 190 + } + }, + "a8629205cfcd412ca98e5e678afa5b9d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_dfbf90ae3c1b424eb9cb802644ebfa7d", + "placeholder": "", + "style": "IPY_MODEL_57ddd696b13645e099d4ab41b12f0ab9", + "value": " 190/190 [00:00<00:00, 1.97kB/s]" + } + }, + "fc676733182d48f89861f8439f621df1": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": 
"@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e5ba5976600b4f84b19f79e074e0ed2a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "358dce24d99f4787a0470650c219ad6e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "76a4793a4d6f4dc384521246722a6eda": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + 
"margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8f162e0b3b8d463b9141f343b7e39814": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "dfbf90ae3c1b424eb9cb802644ebfa7d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "57ddd696b13645e099d4ab41b12f0ab9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + } + } + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eiJsOa29ej7G", + "outputId": "71cbfc5e-4073-4bb7-e7e0-8af67bc76fb7" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Collecting llama-index\n", + " Downloading llama_index-0.10.42-py3-none-any.whl (6.8 kB)\n", + "Collecting llama-index-agent-openai<0.3.0,>=0.1.4 (from llama-index)\n", + " Downloading llama_index_agent_openai-0.2.6-py3-none-any.whl (12 kB)\n", + "Collecting llama-index-cli<0.2.0,>=0.1.2 (from llama-index)\n", + " Downloading llama_index_cli-0.1.12-py3-none-any.whl (26 kB)\n", + "Collecting llama-index-core==0.10.42 (from llama-index)\n", + " Downloading llama_index_core-0.10.42-py3-none-any.whl (15.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m22.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting 
llama-index-embeddings-openai<0.2.0,>=0.1.5 (from llama-index)\n",
+            "  Downloading llama_index_embeddings_openai-0.1.10-py3-none-any.whl (6.2 kB)\n",
+            "...\n",
+            "Successfully installed dataclasses-json-0.6.6 deprecated-1.2.14 dirtyjson-1.0.8 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 llama-index-0.10.42 llama-index-agent-openai-0.2.6 llama-index-cli-0.1.12 llama-index-core-0.10.42 llama-index-embeddings-openai-0.1.10 llama-index-indices-managed-llama-cloud-0.1.6 llama-index-legacy-0.9.48 llama-index-llms-openai-0.1.21 llama-index-multi-modal-llms-openai-0.1.6 llama-index-program-openai-0.1.6 llama-index-question-gen-openai-0.1.3 llama-index-readers-file-0.1.23 llama-index-readers-llama-parse-0.1.4 llama-parse-0.4.4 llamaindex-py-client-0.1.19 marshmallow-3.21.2 mypy-extensions-1.0.0 openai-1.30.5 pypdf-4.2.0 striprtf-0.0.26 tiktoken-0.7.0 typing-inspect-0.9.0\n",
+            "Collecting llama-index-embeddings-huggingface\n",
+            "  Downloading llama_index_embeddings_huggingface-0.2.1-py3-none-any.whl (7.1 kB)\n",
+            "...\n",
+            "Successfully installed llama-index-embeddings-huggingface-0.2.1 minijinja-2.0.1 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.20.5 nvidia-nvjitlink-cu12-12.5.40 nvidia-nvtx-cu12-12.1.105 sentence-transformers-2.7.0\n",
+            "Collecting llama-index-llms-groq\n",
+            "  Downloading llama_index_llms_groq-0.1.4-py3-none-any.whl (2.9 kB)\n",
+            "...\n",
+            "Successfully installed llama-index-llms-groq-0.1.4 llama-index-llms-openai-like-0.1.3\n"
+          ]
+        }
+      ],
+      "source": [
+        "!pip install llama-index\n",
+        "!pip install llama-index-embeddings-huggingface\n",
+        "!pip install llama-index-llms-groq"
+      ]
+    },
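+    {
+      "cell_type": "markdown",
+      "source": [
+        "The (truncated) pip output above installs the three pieces this recipe relies on: `llama-index` itself, `llama-index-embeddings-huggingface` for running a local `BAAI/bge-small-en-v1.5` embedding model, and `llama-index-llms-groq` for calling Llama 3 through Groq's API."
+      ],
+      "metadata": {}
+    },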
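+    {
+      "cell_type": "markdown",
+      "source": [
+        "Notebook runtimes such as Colab already have an asyncio event loop running. `nest_asyncio.apply()` patches that loop so the async code paths used later (for example, the summary query engine built with `use_async=True`) can be scheduled from inside the notebook instead of failing because the loop is already running."
+      ],
+      "metadata": {}
+    },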
+    {
+      "cell_type": "code",
+      "source": [
+        "import nest_asyncio\n",
+        "\n",
+        "nest_asyncio.apply()"
+      ],
+      "metadata": {
+        "id": "sG25Zlzsfg3K"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
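+    {
+      "cell_type": "markdown",
+      "source": [
+        "Before building the document tools, it helps to see the primitive they rest on: `FunctionTool` wraps a plain Python function and exposes its name, signature, and docstring to the LLM, which is how the model decides when to call the tool and with what arguments. The next cell is a minimal, self-contained sketch (the `add` function and `add_tool` name are purely illustrative); the same mechanism powers the `vector_query` tool defined right after it."
+      ],
+      "metadata": {}
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from llama_index.core.tools import FunctionTool\n",
+        "\n",
+        "# Toy function, purely to illustrate the mechanism.\n",
+        "def add(x: int, y: int) -> int:\n",
+        "    \"\"\"Add two integers and return the result.\"\"\"\n",
+        "    return x + y\n",
+        "\n",
+        "# FunctionTool derives the tool's name and description from the\n",
+        "# function name, signature, and docstring -- this is what the LLM\n",
+        "# sees when it chooses a tool and fills in its arguments.\n",
+        "add_tool = FunctionTool.from_defaults(fn=add)\n",
+        "print(add_tool.metadata.name)\n",
+        "print(add_tool.metadata.description)"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },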
+    {
+      "cell_type": "code",
+      "source": [
+        "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, SummaryIndex\n",
+        "from llama_index.core.node_parser import SentenceSplitter\n",
+        "from llama_index.core.tools import FunctionTool, QueryEngineTool\n",
+        "from llama_index.core.vector_stores import MetadataFilters, FilterCondition\n",
+        "from typing import List, Optional, Tuple\n",
+        "\n",
+        "def get_doc_tools(\n",
+        "    file_path: str,\n",
+        "    name: str,\n",
+        ") -> Tuple[FunctionTool, QueryEngineTool]:\n",
+        "    \"\"\"Get vector query and summary query tools from a document.\"\"\"\n",
+        "\n",
+        "    # load the document, split it into chunks, and index the chunks\n",
+        "    documents = SimpleDirectoryReader(input_files=[file_path]).load_data()\n",
+        "    splitter = SentenceSplitter(chunk_size=1024)\n",
+        "    nodes = splitter.get_nodes_from_documents(documents)\n",
+        "    vector_index = VectorStoreIndex(nodes)\n",
+        "\n",
+        "    def vector_query(\n",
+        "        query: str,\n",
+        "        page_numbers: Optional[List[str]] = None\n",
+        "    ) -> str:\n",
+        "        \"\"\"Use to answer questions over the MetaGPT paper.\n",
+        "\n",
+        "        Useful if you have specific questions over the MetaGPT paper.\n",
+        "        Always leave page_numbers as None UNLESS there is a specific page you want to search for.\n",
+        "\n",
+        "        Args:\n",
+        "            query (str): the string query to be embedded.\n",
+        "            page_numbers (Optional[List[str]]): Filter by a set of pages. Leave as None\n",
+        "                to perform a vector search over all pages; otherwise, the search is\n",
+        "                restricted to the specified pages.\n",
+        "\n",
+        "        \"\"\"\n",
+        "\n",
+        "        page_numbers = page_numbers or []\n",
+        "        metadata_dicts = [\n",
+        "            {\"key\": \"page_label\", \"value\": p} for p in page_numbers\n",
+        "        ]\n",
+        "\n",
+        "        query_engine = vector_index.as_query_engine(\n",
+        "            similarity_top_k=2,\n",
+        "            filters=MetadataFilters.from_dicts(\n",
+        "                metadata_dicts,\n",
+        "                condition=FilterCondition.OR\n",
+        "            )\n",
+        "        )\n",
+        "        response = query_engine.query(query)\n",
+        "        return response\n",
+        "\n",
+        "    vector_query_tool = FunctionTool.from_defaults(\n",
+        "        name=f\"vector_tool_{name}\",\n",
+        "        fn=vector_query\n",
+        "    )\n",
+        "\n",
+        "    summary_index = SummaryIndex(nodes)\n",
+        "    summary_query_engine = summary_index.as_query_engine(\n",
+        "        response_mode=\"tree_summarize\",\n",
+        "        use_async=True,\n",
+        "    )\n",
+        "    summary_tool = QueryEngineTool.from_defaults(\n",
+        "        name=f\"summary_tool_{name}\",\n",
+        "        query_engine=summary_query_engine,\n",
+        "        description=(\n",
+        "            \"Use ONLY IF you want to get a holistic summary of MetaGPT. 
\"\n", + " \"Do NOT use if you have specific questions over MetaGPT.\"\n", + " ),\n", + " )\n", + "\n", + " return vector_query_tool, summary_tool" + ], + "metadata": { + "id": "NnZdKJKVdOFs" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "!wget \"https://openreview.net/pdf?id=VtmBAGCN7o\" -O metagpt.pdf" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "CrQQkzV8dOJg", + "outputId": "5ef72923-d717-424b-9088-a1d35952967b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2024-06-01 22:40:13-- https://openreview.net/pdf?id=VtmBAGCN7o\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16911937 (16M) [application/pdf]\n", + "Saving to: ‘metagpt.pdf’\n", + "\n", + "metagpt.pdf 100%[===================>] 16.13M 38.5MB/s in 0.4s \n", + "\n", + "2024-06-01 22:40:13 (38.5 MB/s) - ‘metagpt.pdf’ saved [16911937/16911937]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from getpass import getpass\n", + "import os\n", + "\n", + "GROQ_API_TOKEN = getpass()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "PvJcLvQxdfx6", + "outputId": "869951b6-2b30-4f37-a7ee-f11eb73ba54c" + }, + "execution_count": null, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "··········\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from llama_index.llms.groq import Groq\n", + "\n", + "from llama_index.core import Settings, VectorStoreIndex\n", + "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", + "\n", + "#llm = Groq(model=\"llama3-8b-8192\", api_key=GROQ_API_TOKEN)\n", + "llm = Groq(model=\"llama3-70b-8192\", api_key=GROQ_API_TOKEN)\n", + "Settings.llm = llm\n", + "\n", + "Settings.embed_model = HuggingFaceEmbedding(\n", + " model_name=\"BAAI/bge-small-en-v1.5\"\n", + ")" + ], + "metadata": { + "id": "oUZMHOoWdf1K", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 528, + "referenced_widgets": [ + "c26422ab46584828b1e56737bfb5efbd", + "fc3fbc542ff94d46b283e5b989cddf54", + "5890fca1cf53425287fc51703a162abf", + "6ffe78e8c9d141b987a93840a76a0a9d", + "6b4fff8215b946a291f1b2264a05826a", + "87e2db5bc221448db9213c9b5efd91ef", + "70e7c637ab724049810fd5c912fcbdf7", + "628a3a6eb163488f89f5f19f2d956cad", + "113c2247163c458d806e1fb93b0f0d43", + "996a99f0cdb140afa802a25e5774dd24", + "d8c87b8639ae4151a20f1c53352ea6b3", + "240cd69316e6461da0e62ca1af562692", + "34b039cd03954432b32c8f991703f103", + "e25bae0193d94c14ad34d76b07ae3ef3", + "41e775a95eb34ed6acdb7efa0dc9dcc3", + "7e3bc961c28446cc91bc4a9f305c9939", + "5b7b4ec04eef441ea1d2e0328c69a6c8", + "e791599c91b94565a6ab40bccbac8946", + "096c28485c874776ad4255477b8fb059", + "5cbde93ee45f4c23886ded1cd6993014", + "a714cf6ea3f1454aa6720c6f6b016e7d", + "add3052606dc4cddbc81c600bec0aaab", + "dac8de62f29f44d7b0954b99c116c319", + "00263e0751c44ebebe1dc472a64d483d", + "7643b88058ad41718a81da6e4165cc2f", + "e79b09ba1f794c3592bb46d1a5b6ea3a", + "d65ff865d8764d9a8389ad38c31a3255", + "0bd5ecf320ea4a9a9db0d9f40d9f2835", + "955211921e694baa848bc3906a6c6a4b", + "c7f6b63883264ccab41ad8a3e009d3af", + "c74d02fce14a4e6dbdfaa86f4ad9cc1b", + "7f00ed3acdb54246a1b37a85326e2efa", + "8760ad31040e477dbbf22f2ef55c0b1d", + "94e4ad5fac5e4da9ab5e4be8474b201e", 
+ "5ed2612e54fc4864a62d57fc79fe8499", + "538634dcebf6409fb886068f8b107d81", + "e8555b9ad64546959630669407e755c7", + "613b45a23a534d009cb0c1cc3f79ff47", + "527bd695d191480a9e701bea0b8bb47d", + "0595d14613734c3cb658aac398eb0ae8", + "f648cb3c78d043fe9e666849ead6854b", + "acac5be270d3413ca2b0232b473673b2", + "d1a4da08fc8b4825b851d57c829160d4", + "0164f6d7b2484beaa254d0c00c3f61ee", + "85b68d1754b9413cbd774c7435b4e53e", + "78ee39a2d64844939e326e2ae065c34b", + "fd44359a567c4cb98e0c33b35ebd90bb", + "f9ad85e78dde4342843311cc94084f38", + "467c9f03076c42b483f2db2e1bdb788c", + "0cea7b2321424395b0441a8849ee3a62", + "6bcaee1c2d3d498d9aa25333a0c0e9d4", + "6a8f6289e8e24bf29b699a3f08e528b4", + "d9fc7e95604140368a11fbf78d2ab78f", + "297c46666c244ea98d54fc6e3fab94d2", + "1f07e25b78dc4c47be24299c425ef7a2", + "5e5d2dd4d8774861ab8f9a3f57b57f12", + "6dd9c5007f1d438d8b280d0e025bf62f", + "6d4da17710c74841a5d33efa344d7180", + "a54dea1d3d3e48389d9301fb2d83b8df", + "bdd8a62fefaf4df8a4b688a918fed96a", + "ff861eba4be248fea1d09f5d0b216b4f", + "40810a4d473d4fd0af61a9a70d2fc416", + "f32b687330fd4e5eaf547d42bdd8b5ef", + "c95c399f24e94e62a57c244d72715d8f", + "f34031fa96004c5baf279935fd071efa", + "f00575c770074c6791096d1f7125c5f6", + "d894b0cb613a4f22ae7c958863e5b340", + "8428fc1563e44fd9a0690e2125bcb2a8", + "d4769437a5764b90a5c2c332f0df5386", + "775d39014f464413ad7bfa22487b69e3", + "c7ea50e54a1a4166b4e691cf61a47728", + "cad4d390039d485c991e6db3cb950474", + "82a1a312336f4733abe09c766bff3c4b", + "92cbda5df47741f39c99d210962b5aea", + "be749e423bc34e87b3a3c82587901fa8", + "c24e7857fc2740e78df50818d70b1b8c", + "d5b25bb18b804966890e50dd9f340223", + "6232e5f6d6fe423286b1649cd8faf34a", + "ce2ed96df931441da3decd38d8f13a31", + "96958d88556a4f5db63a5f43347e5956", + "fafbdcbca7e74aa48c61e5e48bf131ed", + "f6661758a8d0435b8a4025b8197550ab", + "7ab10b3032964477b03ea91240fd9c1e", + "2d50cec7d1234fdb9a34449af864e777", + "d194f5b4c9194241845c692c5bcabc5e", + "a177b6782e8444d1a1f8b1ac5a1ad475", + "4ac47f47e7704d68a5ad763aebb6153e", + "8c2f1cfbef1d4ec4a7708ec938b41312", + "cf3792e56eb7419daabcbbd678c21fea", + "5729df06ec794b75a932acae3dab9676", + "4fd71aed976441c087f0d411efdba5d1", + "96dfe297cb144181937eee07ab3b07e0", + "8f371641cf73413a907eafac3b7162ba", + "8a417c96b9904aef9227dac62f1f7d2e", + "271cba061d244e7091390f190ed1c218", + "51793d5f94ea49c08c57bf3554489d59", + "767e6495dd904c4ea25646ac15fbcf9d", + "03310809a84243d6acf5ccaf3a37baaf", + "d976cb6f959942d794a1d3d9aa1f0a2f", + "712b7454fbe143e1ac17294e5ff4be49", + "ecb04a87ca0f4e989a5e6014d42c3519", + "2b75235b072b48cc9b57beefd37558ce", + "1a876366cf9841c0822bbd9b62c3712b", + "930c64c04c3745fea1b734c451a2ef97", + "a9cf9d86b27945c5914f46a6c72c6f40", + "7526c6b322ee487dbfc8f66e2f8e4aeb", + "272338085bf04a5591fb22e8aaa22ff0", + "ab5e7589aa3a475d97f05658fc52ef14", + "472f9b34c3a840808108da64184c41de", + "4564e0fd6a8f493ca600494ca65780ca", + "e76ec2dae1fb4ed19c12b306a8a79303", + "f31fd86f61104ef2aa59eee21d05c7df", + "b34277cc9400402e8744181e8ea6d985", + "a8629205cfcd412ca98e5e678afa5b9d", + "fc676733182d48f89861f8439f621df1", + "e5ba5976600b4f84b19f79e074e0ed2a", + "358dce24d99f4787a0470650c219ad6e", + "76a4793a4d6f4dc384521246722a6eda", + "8f162e0b3b8d463b9141f343b7e39814", + "dfbf90ae3c1b424eb9cb802644ebfa7d", + "57ddd696b13645e099d4ab41b12f0ab9" + ] + }, + "outputId": "f2f06d34-573a-49bd-b614-a02aa05246a0" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + 
"/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:89: UserWarning: \n", + "The secret `HF_TOKEN` does not exist in your Colab secrets.\n", + "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", + "You will be able to reuse this secret in all of your notebooks.\n", + "Please note that authentication is recommended but still optional to access public models or datasets.\n", + " warnings.warn(\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "modules.json: 0%| | 0.00/349 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "c26422ab46584828b1e56737bfb5efbd" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "config_sentence_transformers.json: 0%| | 0.00/124 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "240cd69316e6461da0e62ca1af562692" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "README.md: 0%| | 0.00/94.8k [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "dac8de62f29f44d7b0954b99c116c319" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "sentence_bert_config.json: 0%| | 0.00/52.0 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "94e4ad5fac5e4da9ab5e4be8474b201e" + } + }, + "metadata": {} + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.\n",
+            "  warnings.warn(\n"
+          ]
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "config.json: 0%| | 0.00/743 [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "85b68d1754b9413cbd774c7435b4e53e"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "model.safetensors: 0%| | 0.00/133M [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "5e5d2dd4d8774861ab8f9a3f57b57f12"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "tokenizer_config.json: 0%| | 0.00/366 [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "d894b0cb613a4f22ae7c958863e5b340"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "6232e5f6d6fe423286b1649cd8faf34a"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "tokenizer.json: 0%| | 0.00/711k [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "cf3792e56eb7419daabcbbd678c21fea"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "special_tokens_map.json: 0%| | 0.00/125 [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "712b7454fbe143e1ac17294e5ff4be49"
+            }
+          },
+          "metadata": {}
+        },
+        {
+          "output_type": "display_data",
+          "data": {
+            "text/plain": [
+              "1_Pooling/config.json: 0%| | 0.00/190 [00:00<?, ?B/s]"
+            ],
+            "application/vnd.jupyter.widget-view+json": {
+              "version_major": 2,
+              "version_minor": 0,
+              "model_id": "e76ec2dae1fb4ed19c12b306a8a79303"
+            }
+          },
+          "metadata": {}
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "vector_tool, summary_tool = get_doc_tools(\"metagpt.pdf\", \"metagpt\")"
+      ],
+      "metadata": {
+        "id": "6zi9eRc2dOMQ"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from llama_index.core.agent import ReActAgent\n",
+        "\n",
+        "query_engine_tools = [vector_tool, summary_tool]\n",
+        "\n",
+        "agent = ReActAgent.from_tools(\n",
+        "    query_engine_tools,\n",
+        "    llm=llm,\n",
+        "    verbose=True,\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "QkaALpnIQ01b"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
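+    {
+      "cell_type": "markdown",
+      "source": [
+        "`ReActAgent` drives the tool-calling loop you will see in the trace below: at each step the LLM writes a `Thought` about what to do next, issues an `Action` naming one of the tools together with an `Action Input` of JSON arguments matching that tool's signature, then reads the resulting `Observation` before deciding whether to call another tool or emit the final `Answer`. Passing `verbose=True` prints this full trace."
+      ],
+      "metadata": {}
+    },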
+    {
+      "cell_type": "code",
+      "source": [
+        "response = agent.query(\n",
+        "    \"Tell me about the agent roles in MetaGPT, and how they communicate with each other.\"\n",
+        ")\n",
+        "print(str(response))"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "d9wVsIt586s8",
+        "outputId": "aed1974d-5724-4543-d010-ca9997f95973"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
+            "Action: vector_tool_metagpt\n",
+            "Action Input: {'query': 'agent roles in MetaGPT', 'page_numbers': None}\n",
+            "\u001b[0m\u001b[1;3;34mObservation: In MetaGPT, there are five defined agent roles: Product Manager, Architect, Project Manager, Engineer, and QA Engineer.\n",
+            "\u001b[0m\u001b[1;3;38;5;200mThought: I need more information about the communication between these agent roles.\n",
+            "Action: vector_tool_metagpt\n",
+            "Action Input: {'query': 'communication between agent roles in MetaGPT'}\n",
+            "\u001b[0m\u001b[1;3;34mObservation: In MetaGPT, communication between agent roles is facilitated through a shared message pool, where agents publish structured messages and can subscribe to relevant messages based on their role profiles. This enables agents to obtain directional information from other roles and public information from the environment, enhancing communication efficiency.\n",
+            "\u001b[0m\u001b[1;3;38;5;200mThought: I have enough information to answer the question without using any more tools.\n",
+            "Answer: In MetaGPT, there are five defined agent roles: Product Manager, Architect, Project Manager, Engineer, and QA Engineer. These agents communicate with each other through a shared message pool, where they publish structured messages and can subscribe to relevant messages based on their role profiles, enabling them to obtain directional information from other roles and public information from the environment, enhancing communication efficiency.\n",
+            "\u001b[0mIn MetaGPT, there are five defined agent roles: Product Manager, Architect, Project Manager, Engineer, and QA Engineer. These agents communicate with each other through a shared message pool, where they publish structured messages and can subscribe to relevant messages based on their role profiles, enabling them to obtain directional information from other roles and public information from the environment, enhancing communication efficiency.\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "print(response.source_nodes[0].get_content(metadata_mode=\"all\"))"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "_vrBO-S1-HjF",
+        "outputId": "b8e8d35a-25f0-48d9-f992-3e5c3f573efe"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "page_label: 4\n",
+            "file_name: metagpt.pdf\n",
+            "file_path: metagpt.pdf\n",
+            "file_type: application/pdf\n",
+            "file_size: 16911937\n",
+            "creation_date: 2024-06-01\n",
+            "last_modified_date: 2024-06-01\n",
+            "\n",
+            "Preprint\n",
+            "Figure 2: An example of the communication protocol (left) and iterative programming with exe-\n",
+            "cutable feedback (right). Left: Agents use a shared message pool to publish structured messages.\n",
+            "They can also subscribe to relevant messages based on their profiles. Right : After generating the\n",
+            "initial code, the Engineer agent runs and checks for errors. If errors occur, the agent checks past\n",
+            "messages stored in memory and compares them with the PRD, system design, and code files.\n",
+            "3 M ETAGPT: A M ETA-PROGRAMMING FRAMEWORK\n",
+            "MetaGPT is a meta-programming framework for LLM-based multi-agent systems. Sec. 3.1 pro-\n",
+            "vides an explanation of role specialization, workflow and structured communication in this frame-\n",
+            "work, and illustrates how to organize a multi-agent system within the context of SOPs. Sec. 
3.2\n", + "presents a communication protocol that enhances role communication efficiency. We also imple-\n", + "ment structured communication interfaces and an effective publish-subscribe mechanism. These\n", + "methods enable agents to obtain directional information from other roles and public information\n", + "from the environment. Finally, we introduce executable feedback—a self-correction mechanism for\n", + "further enhancing code generation quality during run-time in Sec. 3.3.\n", + "3.1 A GENTS IN STANDARD OPERATING PROCEDURES\n", + "Specialization of Roles Unambiguous role specialization enables the breakdown of complex work\n", + "into smaller and more specific tasks. Solving complex tasks or problems often requires the collab-\n", + "oration of agents with diverse skills and expertise, each contributing specialized outputs tailored to\n", + "specific issues.\n", + "In a software company, a Product Manager typically conducts business-oriented analysis and derives\n", + "insights, while a software engineer is responsible for programming. We define five roles in our\n", + "software company: Product Manager, Architect, Project Manager, Engineer, and QA Engineer, as\n", + "shown in Figure 1. In MetaGPT, we specify the agent’s profile, which includes their name, profile,\n", + "goal, and constraints for each role. We also initialize the specific context and skills for each role.\n", + "For instance, a Product Manager can use web search tools, while an Engineer can execute code, as\n", + "shown in Figure 2. All agents adhere to the React-style behavior as described in Yao et al. (2022).\n", + "Every agent monitors the environment ( i.e., the message pool in MetaGPT) to spot important ob-\n", + "servations ( e.g.,, messages from other agents). These messages can either directly trigger actions or\n", + "assist in finishing the job.\n", + "Workflow across Agents By defining the agents’ roles and operational skills, we can establish\n", + "basic workflows. In our work, we follow SOP in software development, which enables all agents to\n", + "work in a sequential manner.\n", + "4\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "response = agent.query(\n", + " \"Tell me about the evaluation datasets used.\"\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "HQ0dzdD1AZn7", + "outputId": "632142ae-891b-4dfc-cae8-5153b8b58bc6" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. 
I need to use a tool to help me answer the question.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'evaluation datasets', 'page_numbers': None}\n", + "\u001b[0m\u001b[1;3;34mObservation: The SoftwareDev dataset is mentioned, which includes 70 diverse software development tasks.\n", + "\u001b[0m\u001b[1;3;38;5;200mThought: I need more information about the evaluation datasets used in MetaGPT.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'evaluation datasets'}\n", + "\u001b[0m\u001b[1;3;34mObservation: The SoftwareDev dataset is mentioned, which includes 70 diverse software development tasks.\n", + "\u001b[0m\u001b[1;3;38;5;200mThought: I need more information about the evaluation datasets used in MetaGPT.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'evaluation datasets besides SoftwareDev'}\n", + "\u001b[0m\u001b[1;3;34mObservation: HumanEval and MBPP.\n", + "\u001b[0m\u001b[1;3;38;5;200mThought: I have enough information to answer the question without using any more tools.\n", + "Answer: The evaluation datasets used in MetaGPT include SoftwareDev, HumanEval, and MBPP.\n", + "\u001b[0m" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "print(response.source_nodes[0].get_content(metadata_mode=\"all\"))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "7gg99LMhA_vq", + "outputId": "44c5844f-8aec-4ad7-e38c-f62ac0298b35" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "page_label: 23\n", + "file_name: metagpt.pdf\n", + "file_path: metagpt.pdf\n", + "file_type: application/pdf\n", + "file_size: 16911937\n", + "creation_date: 2024-06-01\n", + "last_modified_date: 2024-06-01\n", + "\n", + "Preprint\n", + "Figure 10: The “Drawing App” generated by MetaGPT.\n", + "C E XPERIMENTS\n", + "C.1 D ETAILS OF THE SOFTWARE DEVDATASET\n", + "The SoftwareDev dataset includes 70 diverse software development tasks. Table 8 displays the\n", + "names and detailed prompts of 11 tasks within the dataset. Note that the first seven tasks listed are\n", + "used in the main experiments of this paper.\n", + "C.2 A DDITIONAL RESULTS\n", + "Quantitative results of MetaGPT As shown in Table 4, MetaGPT achieves an average score\n", + "of 3.9, surpassing ChatDev’s score of 2.1 Zhao et al. (2023), which is based on the Chat chain.\n", + "Compare the scores of general intelligent algorithms, including AutoGPT Torantulino et al. (2023),\n", + "which all score 1.0, failing to generate executable code. We observe that the generated code is often\n", + "short, lacks comprehensive logic, and tends to fail to handle cross-file dependencies correctly.\n", + "While models such as AutoGPT (Torantulino et al., 2023), Langchain (Chase, 2022), and Agent-\n", + "Verse (Chen et al., 2023) display robust general problem-solving capabilities, they lack an essential\n", + "element for developing complex systems: systematically deconstructing requirements. Conversely,\n", + "MetaGPT simplifies the process of transforming abstract requirements into detailed class and func-\n", + "tion designs through a specialized division of labor and SOPs workflow. 
When compared to Chat-\n", + "Dev (Zhao et al., 2023), MetaGPT’s structured messaging and feedback mechanisms not only reduce\n", + "loss of communication information but also improve the execution of code.\n", + "Quantitative results of MetaGPT w/o executable feedback Table 9 presents the performance of\n", + "MetaGPT with GPT-4 32K on 11 tasks within the SoftwareDev dataset. It also shows the average\n", + "performance across all 70 tasks (in the last line). Note that the version of MetaGPT used here is the\n", + "basic version without the executable feedback mechanism.\n", + "Quantitative results of MetaGPT with different LLMs To verify the performance of MetaGPT\n", + "on different LLMs, we randomly selected 5 SoftwareDev tasks and conducted experiments using\n", + "GPT-3.5 and Deepseek Coder 33B5as backends. As shown in Table 5, the results indicate that\n", + "although MetaGPT can complete tasks with these LLMs, using GPT-4 as the backend yields superior\n", + "performance.\n", + "5https://deepseekcoder.github.io\n", + "23\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# use agent.chat instead of agent.query to pass conversational history automatically to answer follow up questions\n", + "response = agent.chat(\"Tell me the results over one of the above datasets.\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "9FplhNMmBO7o", + "outputId": "1eab921a-4670-48a1-c2a0-0c3f4142ed33" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'What are the results on the SoftwareDev dataset?', 'page_numbers': None}\n", + "\u001b[0m\u001b[1;3;34mObservation: The results of MetaGPT on the SoftwareDev dataset show an average score of 3.9, surpassing ChatDev's score of 2.1.\n", + "\u001b[0m\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer\n", + "Answer: The results of MetaGPT on the SoftwareDev dataset show an average score of 3.9, surpassing ChatDev's score of 2.1.\n", + "\u001b[0m" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# use agent.chat instead of agent.query to pass conversational history automatically to answer follow up questions\n", + "response = agent.chat(\"Tell me more.\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "38YcfpmdB2_L", + "outputId": "cb71adc1-876f-4c41-def6-b2c638ec66f1" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'What are the results of MetaGPT on the SoftwareDev dataset?', 'page_numbers': None}\n", + "\u001b[0m\u001b[1;3;34mObservation: The results of MetaGPT on the SoftwareDev dataset show that it achieves an average score of 3.9, surpassing ChatDev's score of 2.1. Additionally, MetaGPT outperforms ChatDev on nearly all metrics, including executability, code statistics, and human revision cost.\n", + "\u001b[0m\u001b[1;3;38;5;200mThought: I can answer without using any more tools. 
I'll use the user's language to answer\n",
+            "Answer: The results of MetaGPT on the SoftwareDev dataset show that it achieves an average score of 3.9, surpassing ChatDev's score of 2.1. Additionally, MetaGPT outperforms ChatDev on nearly all metrics, including executability, code statistics, and human revision cost.\n",
+            "\u001b[0m"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# re-create the agent so the step-wise task demo starts from a fresh memory\n",
+        "agent = ReActAgent.from_tools(\n",
+        "    query_engine_tools,\n",
+        "    llm=llm,\n",
+        "    verbose=True,\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "C-FsTWo0CnRE"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# lower-level task API: create a task whose steps we can run, inspect, and steer one at a time\n",
+        "task = agent.create_task(\n",
+        "    \"Tell me about the agent roles in MetaGPT, \"\n",
+        "    \"and then how they communicate with each other.\"\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "rCBVOv5MFnhY"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# execute just the first reasoning step of the task\n",
+        "step_output = agent.run_step(task.task_id)"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "ovokdaUpFoUx",
+        "outputId": "f9cb5cc4-a2cf-4800-8bf2-b12f595cef4a"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
+            "Action: vector_tool_metagpt\n",
+            "Action Input: {'query': 'agent roles in MetaGPT', 'page_numbers': None}\n",
+            "\u001b[0m\u001b[1;3;34mObservation: In MetaGPT, there are five defined roles: Product Manager, Architect, Project Manager, Engineer, and QA Engineer.\n",
+            "\u001b[0m"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "completed_steps = agent.get_completed_steps(task.task_id)\n",
+        "print(f\"Num completed for task {task.task_id}: {len(completed_steps)}\")\n",
+        "print(completed_steps[0].output.sources[0].raw_output)"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "iNQCyED2FqeP",
+        "outputId": "bcd1a12c-38f3-49ea-f1d6-821696be644c"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Num completed for task 42d9e8c7-1260-4bf1-97dd-09168e8fc483: 1\n",
+            "In MetaGPT, there are five defined roles: Product Manager, Architect, Project Manager, Engineer, and QA Engineer.\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "upcoming_steps = agent.get_upcoming_steps(task.task_id)\n",
+        "print(f\"Num upcoming steps for task {task.task_id}: {len(upcoming_steps)}\")\n",
+        "upcoming_steps[0]"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "DwnKrC63FuKw",
+        "outputId": "71151f4a-f540-4f3f-98a9-62d858e5dcb9"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Num upcoming steps for task 42d9e8c7-1260-4bf1-97dd-09168e8fc483: 1\n"
+          ]
+        },
+        {
+          "output_type": "execute_result",
+          "data": {
+            "text/plain": [
+              "TaskStep(task_id='42d9e8c7-1260-4bf1-97dd-09168e8fc483', step_id='f85c3c09-fa65-4bca-ba1d-1d147dd84601', input=None, step_state={'is_first': False}, next_steps={}, prev_steps={}, is_ready=True)"
+            ]
+          },
+          "metadata": {},
+          "execution_count": 34
+        }
+      ]
+    },
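+    {
+      "cell_type": "code",
+      "source": [
+        "# (Illustrative sketch, not part of the original notebook.) If you did not\n",
+        "# want to steer the task manually, you could drive it to completion by\n",
+        "# looping run_step until is_last and then finalizing, e.g.:\n",
+        "#\n",
+        "#     while True:\n",
+        "#         step_output = agent.run_step(task.task_id)\n",
+        "#         if step_output.is_last:\n",
+        "#             break\n",
+        "#     response = agent.finalize_response(task.task_id)\n",
+        "#\n",
+        "# Below we instead inject extra user input into the next step."
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "step_output = agent.run_step(\n",
+        "    task.task_id, input=\"What about how agents share information?\"\n",
+        ")"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "JXlVID-NFwL6",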
+ "outputId": "25e723c5-f4e3-4645-b45f-30046a1d95df" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Added user message to memory: What about how agents share information?\n", + "\u001b[1;3;38;5;200mThought: I need to use a tool to help me answer the question about how agents share information in MetaGPT.\n", + "Action: vector_tool_metagpt\n", + "Action Input: {'query': 'how agents share information in MetaGPT'}\n", + "\u001b[0m\u001b[1;3;34mObservation: In MetaGPT, agents share information through a global message pool, where they publish and access messages directly. This allows agents to exchange messages transparently, eliminating the need to inquire about other agents and await their responses. Additionally, a subscription mechanism is used to filter out irrelevant information, enabling agents to receive only task-related information and avoid distractions.\n", + "\u001b[0m" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "step_output = agent.run_step(task.task_id)\n", + "print(step_output.is_last)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "m8cM1jA7GZHX", + "outputId": "f5e75a19-0fb6-4334-c1cd-0be31f6bc82b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer.\n", + "Answer: In MetaGPT, agents share information through a global message pool, where they publish and access messages directly. This allows agents to exchange messages transparently, eliminating the need to inquire about other agents and await their responses. Additionally, a subscription mechanism is used to filter out irrelevant information, enabling agents to receive only task-related information and avoid distractions.\n", + "\u001b[0mTrue\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "response = agent.finalize_response(task.task_id)" + ], + "metadata": { + "id": "DH3b_t9mGeP8" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "print(str(response))" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zgaq4yqZGhRm", + "outputId": "c1434fcc-003b-4e26-ff47-58f477127a52" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "In MetaGPT, agents share information through a global message pool, where they publish and access messages directly. This allows agents to exchange messages transparently, eliminating the need to inquire about other agents and await their responses. 
Additionally, a subscription mechanism is used to filter out irrelevant information, enabling agents to receive only task-related information and avoid distractions.\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [],
+      "metadata": {
+        "id": "iyhvuua7GjVD"
+      },
+      "execution_count": null,
+      "outputs": []
+    }
+  ]
+}
\ No newline at end of file
diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L4_Building_a_Multi-Document_Agent.ipynb b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L4_Building_a_Multi-Document_Agent.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..ef5e07524298608e61b5c05aa6f9d99f9434732a
--- /dev/null
+++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/Building_Agentic_RAG_with_Llamaindex_L4_Building_a_Multi-Document_Agent.ipynb
@@ -0,0 +1,5325 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "provenance": []
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    },
+    "widgets": {
+      "application/vnd.jupyter.widget-state+json": {
[... auto-generated Jupyter widget-state metadata (HBox/HTML/FloatProgress layout and style models for the HuggingFace download progress bars) trimmed ...]
"IPY_MODEL_0268ddd6729242dea9a69880a0b3c3ec", + "value": " 133M/133M [00:01<00:00, 122MB/s]" + } + }, + "01da664ae3a44ea4b9605c5494b91e96": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b1ba78323bf24188809657cb458db379": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "71b61cfa2fb249c1b600323bed4084f9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f836ae7999c3449ea5806e6754d3436a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + 
"bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "491354e8c4ce4180ab7170121bf0f5b7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "78bf4ab890cd41a89594cab7c38db8a9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "0268ddd6729242dea9a69880a0b3c3ec": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9967d289a95b4cafa8345023143c4f60": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_835a2e6b6a45482a8147d66e4a31b10e", + "IPY_MODEL_cfde79061ba9422f9f9fb295ca806e4c", + "IPY_MODEL_d3811aea2bf444629e70e8d72212381c" + 
], + "layout": "IPY_MODEL_9d67ec85304b49bdb497ed10d14f33e7" + } + }, + "835a2e6b6a45482a8147d66e4a31b10e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_360ad4a30e2544f2a61630865a6fb6b0", + "placeholder": "", + "style": "IPY_MODEL_ed79435fbc0a43feb5aca86e77806bed", + "value": "tokenizer_config.json: 100%" + } + }, + "cfde79061ba9422f9f9fb295ca806e4c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_265b2fc2c67a45598bc420d2a1245ce7", + "max": 366, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_39ecde5a49e9427d8f367fd387c11c77", + "value": 366 + } + }, + "d3811aea2bf444629e70e8d72212381c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1c4f6f6c000843dfa7760952df1a525d", + "placeholder": "", + "style": "IPY_MODEL_5b259ff2eeb94058886e880f459c0904", + "value": " 366/366 [00:00<00:00, 17.3kB/s]" + } + }, + "9d67ec85304b49bdb497ed10d14f33e7": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "360ad4a30e2544f2a61630865a6fb6b0": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + 
"_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ed79435fbc0a43feb5aca86e77806bed": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "265b2fc2c67a45598bc420d2a1245ce7": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "39ecde5a49e9427d8f367fd387c11c77": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "1c4f6f6c000843dfa7760952df1a525d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + 
"_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "5b259ff2eeb94058886e880f459c0904": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "7317d38d1bed429b92d59c581fdf35d9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_996499071c244d3abd4877cf2120a3a0", + "IPY_MODEL_8142da4b1f71460d8be423a4c468e2cf", + "IPY_MODEL_627c75e12be241ba8eb6c8b635858016" + ], + "layout": "IPY_MODEL_ce3bee3ed9ca439587fefc8c3470ea3c" + } + }, + "996499071c244d3abd4877cf2120a3a0": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_41909b7281e24ebea0d75bb4fb33122e", + "placeholder": "", + "style": "IPY_MODEL_063a0977a87942c4ad26fd3c4ec7f990", + "value": "vocab.txt: 100%" + } + }, + "8142da4b1f71460d8be423a4c468e2cf": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_0580fba5bcde49e1ab4a9426db5a9c2b", + "max": 231508, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_fe9b90b5aa93433f86d0b15a4b942f5a", + "value": 231508 + } + }, + "627c75e12be241ba8eb6c8b635858016": { + 
"model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_071f1ba196f247a6acd6cdf7650fdd0a", + "placeholder": "", + "style": "IPY_MODEL_19a8d171526e43a6832104b2a89819b3", + "value": " 232k/232k [00:00<00:00, 3.87MB/s]" + } + }, + "ce3bee3ed9ca439587fefc8c3470ea3c": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "41909b7281e24ebea0d75bb4fb33122e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "063a0977a87942c4ad26fd3c4ec7f990": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": 
"StyleView", + "description_width": "" + } + }, + "0580fba5bcde49e1ab4a9426db5a9c2b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "fe9b90b5aa93433f86d0b15a4b942f5a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "071f1ba196f247a6acd6cdf7650fdd0a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "19a8d171526e43a6832104b2a89819b3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9aa6ad4a993f4dea85746bab675fdf65": { + "model_module": 
"@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c9df65629477412f8f9ab49059d22067", + "IPY_MODEL_11379c2e71db4212b3aa0c0bdfc92479", + "IPY_MODEL_f1f648ad7b8d4087baea1e1074baefa1" + ], + "layout": "IPY_MODEL_e73fcdce6b2b45a8824acee86023929d" + } + }, + "c9df65629477412f8f9ab49059d22067": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ea435a91897e4b75b082607b40ba7695", + "placeholder": "", + "style": "IPY_MODEL_63ce7f8041974841b6beca100fc1ff90", + "value": "tokenizer.json: 100%" + } + }, + "11379c2e71db4212b3aa0c0bdfc92479": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_e4cf4c2c7848419e8621be0aa33d2b34", + "max": 711396, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_7a30c116bef74095b1efdf273aad6dcf", + "value": 711396 + } + }, + "f1f648ad7b8d4087baea1e1074baefa1": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_ed448e04b1d2495787c0ee2806a7178a", + "placeholder": "", + "style": "IPY_MODEL_74b1f3a2a9b24f639b536066bd5ec01a", + "value": " 711k/711k [00:00<00:00, 17.2MB/s]" + } + }, + "e73fcdce6b2b45a8824acee86023929d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + 
"justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ea435a91897e4b75b082607b40ba7695": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "63ce7f8041974841b6beca100fc1ff90": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "e4cf4c2c7848419e8621be0aa33d2b34": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7a30c116bef74095b1efdf273aad6dcf": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": 
"@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "ed448e04b1d2495787c0ee2806a7178a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "74b1f3a2a9b24f639b536066bd5ec01a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "e793c09809ee476caf8c8517f9cc1115": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_1e1f8b0e6e044cfb9120c8ab57fbb0e6", + "IPY_MODEL_db9147aaf68d4858a346b7e6891e230d", + "IPY_MODEL_12de797682e241a4828eb22f664c8b91" + ], + "layout": "IPY_MODEL_d1f7a6fa7a154fcf83232a813089f677" + } + }, + "1e1f8b0e6e044cfb9120c8ab57fbb0e6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_234b2c7600dd4508be2ba8341cf92ae4", + "placeholder": "", + "style": "IPY_MODEL_63e79f779fe34fa18f8ce55c339ed071", + "value": "special_tokens_map.json: 100%" + } + }, + "db9147aaf68d4858a346b7e6891e230d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + 
"_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_fe36574e45034b6d9eb70a5be64f2e98", + "max": 125, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_20af4d2562ef4d169b7d1fd60cd3b38f", + "value": 125 + } + }, + "12de797682e241a4828eb22f664c8b91": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_669c14fc1e8e4834bf4b0cf5e5d88ca6", + "placeholder": "", + "style": "IPY_MODEL_65b1fa77787141e7b3b02a4793c10346", + "value": " 125/125 [00:00<00:00, 7.61kB/s]" + } + }, + "d1f7a6fa7a154fcf83232a813089f677": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "234b2c7600dd4508be2ba8341cf92ae4": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, 
+ "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "63e79f779fe34fa18f8ce55c339ed071": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "fe36574e45034b6d9eb70a5be64f2e98": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "20af4d2562ef4d169b7d1fd60cd3b38f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "669c14fc1e8e4834bf4b0cf5e5d88ca6": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, 
+ "width": null + } + }, + "65b1fa77787141e7b3b02a4793c10346": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "4558b35ea0194d209d94c317302cff90": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_4086395b295a4bff8db2d58fa83ac61c", + "IPY_MODEL_e46be04cc8844cd08a482dd94ca59a6c", + "IPY_MODEL_cf37c5378564462f9e65ee4211e75347" + ], + "layout": "IPY_MODEL_363e54358828421ea6766abb27423160" + } + }, + "4086395b295a4bff8db2d58fa83ac61c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_58fc3a788ad949e5ab52731e7e422aae", + "placeholder": "", + "style": "IPY_MODEL_d625152e571e46fb8576827bfff17f72", + "value": "1_Pooling/config.json: 100%" + } + }, + "e46be04cc8844cd08a482dd94ca59a6c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_3cde34eb2ad84c068acc3dea1e05b166", + "max": 190, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_92591354468047ffaf312805f23f2fc2", + "value": 190 + } + }, + "cf37c5378564462f9e65ee4211e75347": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_11b7fa7a4e9d4837b4e6ccd880eff962", + "placeholder": "", + "style": "IPY_MODEL_98b1c78cce564f038c7b817d150f8362", + "value": " 190/190 [00:00<00:00, 9.87kB/s]" + } + }, + "363e54358828421ea6766abb27423160": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "58fc3a788ad949e5ab52731e7e422aae": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d625152e571e46fb8576827bfff17f72": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "3cde34eb2ad84c068acc3dea1e05b166": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + 
"left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "92591354468047ffaf312805f23f2fc2": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "11b7fa7a4e9d4837b4e6ccd880eff962": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "98b1c78cce564f038c7b817d150f8362": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + } + } + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eiJsOa29ej7G", + "outputId": "4f524ff3-a0c1-49a2-8adb-15b96d5a7b63" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Collecting llama-index\n", + " Downloading llama_index-0.10.42-py3-none-any.whl (6.8 kB)\n", + "Collecting llama-index-agent-openai<0.3.0,>=0.1.4 (from llama-index)\n", + " Downloading llama_index_agent_openai-0.2.6-py3-none-any.whl (12 kB)\n", + "Collecting llama-index-cli<0.2.0,>=0.1.2 (from llama-index)\n", + " Downloading llama_index_cli-0.1.12-py3-none-any.whl (26 kB)\n", + "Collecting llama-index-core==0.10.42 (from llama-index)\n", + " Downloading llama_index_core-0.10.42-py3-none-any.whl (15.4 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m17.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + 
"\u001b[?25hCollecting llama-index-embeddings-openai<0.2.0,>=0.1.5 (from llama-index)\n", + " Downloading llama_index_embeddings_openai-0.1.10-py3-none-any.whl (6.2 kB)\n", + "Collecting llama-index-indices-managed-llama-cloud<0.2.0,>=0.1.2 (from llama-index)\n", + " Downloading llama_index_indices_managed_llama_cloud-0.1.6-py3-none-any.whl (6.7 kB)\n", + "Collecting llama-index-legacy<0.10.0,>=0.9.48 (from llama-index)\n", + " Downloading llama_index_legacy-0.9.48-py3-none-any.whl (2.0 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m14.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting llama-index-llms-openai<0.2.0,>=0.1.13 (from llama-index)\n", + " Downloading llama_index_llms_openai-0.1.21-py3-none-any.whl (11 kB)\n", + "Collecting llama-index-multi-modal-llms-openai<0.2.0,>=0.1.3 (from llama-index)\n", + " Downloading llama_index_multi_modal_llms_openai-0.1.6-py3-none-any.whl (5.8 kB)\n", + "Collecting llama-index-program-openai<0.2.0,>=0.1.3 (from llama-index)\n", + " Downloading llama_index_program_openai-0.1.6-py3-none-any.whl (5.2 kB)\n", + "Collecting llama-index-question-gen-openai<0.2.0,>=0.1.2 (from llama-index)\n", + " Downloading llama_index_question_gen_openai-0.1.3-py3-none-any.whl (2.9 kB)\n", + "Collecting llama-index-readers-file<0.2.0,>=0.1.4 (from llama-index)\n", + " Downloading llama_index_readers_file-0.1.23-py3-none-any.whl (36 kB)\n", + "Collecting llama-index-readers-llama-parse<0.2.0,>=0.1.2 (from llama-index)\n", + " Downloading llama_index_readers_llama_parse-0.1.4-py3-none-any.whl (2.5 kB)\n", + "Requirement already satisfied: PyYAML>=6.0.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (6.0.1)\n", + "Requirement already satisfied: SQLAlchemy[asyncio]>=1.4.49 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (2.0.30)\n", + "Requirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (3.9.5)\n", + "Collecting dataclasses-json (from llama-index-core==0.10.42->llama-index)\n", + " Downloading dataclasses_json-0.6.6-py3-none-any.whl (28 kB)\n", + "Collecting deprecated>=1.2.9.3 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading Deprecated-1.2.14-py2.py3-none-any.whl (9.6 kB)\n", + "Collecting dirtyjson<2.0.0,>=1.0.8 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (2023.6.0)\n", + "Collecting httpx (from llama-index-core==0.10.42->llama-index)\n", + " Downloading httpx-0.27.0-py3-none-any.whl (75 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting llamaindex-py-client<0.2.0,>=0.1.18 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading llamaindex_py_client-0.1.19-py3-none-any.whl (141 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m10.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (1.6.0)\n", + 
"Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (3.3)\n", + "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (3.8.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (1.25.2)\n", + "Collecting openai>=1.1.0 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading openai-1.30.5-py3-none-any.whl (320 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m320.7/320.7 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (2.0.3)\n", + "Requirement already satisfied: pillow>=9.0.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (9.4.0)\n", + "Requirement already satisfied: requests>=2.31.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (2.31.0)\n", + "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (8.3.0)\n", + "Collecting tiktoken>=0.3.3 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.1 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m33.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: tqdm<5.0.0,>=4.66.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (4.66.4)\n", + "Requirement already satisfied: typing-extensions>=4.5.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (4.11.0)\n", + "Collecting typing-inspect>=0.8.0 (from llama-index-core==0.10.42->llama-index)\n", + " Downloading typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n", + "Requirement already satisfied: wrapt in /usr/local/lib/python3.10/dist-packages (from llama-index-core==0.10.42->llama-index) (1.14.1)\n", + "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.12.3 in /usr/local/lib/python3.10/dist-packages (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (4.12.3)\n", + "Collecting pypdf<5.0.0,>=4.0.1 (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index)\n", + " Downloading pypdf-4.2.0-py3-none-any.whl (290 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m290.4/290.4 kB\u001b[0m \u001b[31m19.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting striprtf<0.0.27,>=0.0.26 (from llama-index-readers-file<0.2.0,>=0.1.4->llama-index)\n", + " Downloading striprtf-0.0.26-py3-none-any.whl (6.9 kB)\n", + "Collecting llama-parse<0.5.0,>=0.4.0 (from llama-index-readers-llama-parse<0.2.0,>=0.1.2->llama-index)\n", + " Downloading llama_parse-0.4.4-py3-none-any.whl (8.0 kB)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (23.2.0)\n", + "Requirement 
already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (1.9.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core==0.10.42->llama-index) (4.0.3)\n", + "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.10/dist-packages (from beautifulsoup4<5.0.0,>=4.12.3->llama-index-readers-file<0.2.0,>=0.1.4->llama-index) (2.5)\n", + "Requirement already satisfied: pydantic>=1.10 in /usr/local/lib/python3.10/dist-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.42->llama-index) (2.7.1)\n", + "Requirement already satisfied: anyio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core==0.10.42->llama-index) (3.7.1)\n", + "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core==0.10.42->llama-index) (2024.2.2)\n", + "Collecting httpcore==1.* (from httpx->llama-index-core==0.10.42->llama-index)\n", + " Downloading httpcore-1.0.5-py3-none-any.whl (77 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: idna in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core==0.10.42->llama-index) (3.7)\n", + "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core==0.10.42->llama-index) (1.3.1)\n", + "Collecting h11<0.15,>=0.13 (from httpcore==1.*->httpx->llama-index-core==0.10.42->llama-index)\n", + " Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.42->llama-index) (8.1.7)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.42->llama-index) (1.4.2)\n", + "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core==0.10.42->llama-index) (2024.5.15)\n", + "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai>=1.1.0->llama-index-core==0.10.42->llama-index) (1.7.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.31.0->llama-index-core==0.10.42->llama-index) (3.3.2)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.31.0->llama-index-core==0.10.42->llama-index) (2.0.7)\n", + "Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.10/dist-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core==0.10.42->llama-index) (3.0.3)\n", + "Collecting mypy-extensions>=0.3.0 
(from typing-inspect>=0.8.0->llama-index-core==0.10.42->llama-index)\n", + " Downloading mypy_extensions-1.0.0-py3-none-any.whl (4.7 kB)\n", + "Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core==0.10.42->llama-index)\n", + " Downloading marshmallow-3.21.2-py3-none-any.whl (49 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m3.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core==0.10.42->llama-index) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core==0.10.42->llama-index) (2023.4)\n", + "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core==0.10.42->llama-index) (2024.1)\n", + "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio->httpx->llama-index-core==0.10.42->llama-index) (1.2.1)\n", + "Requirement already satisfied: packaging>=17.0 in /usr/local/lib/python3.10/dist-packages (from marshmallow<4.0.0,>=3.18.0->dataclasses-json->llama-index-core==0.10.42->llama-index) (24.0)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.42->llama-index) (0.7.0)\n", + "Requirement already satisfied: pydantic-core==2.18.2 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core==0.10.42->llama-index) (2.18.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas->llama-index-core==0.10.42->llama-index) (1.16.0)\n", + "Installing collected packages: striprtf, dirtyjson, pypdf, mypy-extensions, marshmallow, h11, deprecated, typing-inspect, tiktoken, httpcore, httpx, dataclasses-json, openai, llamaindex-py-client, llama-index-legacy, llama-index-core, llama-parse, llama-index-readers-file, llama-index-llms-openai, llama-index-indices-managed-llama-cloud, llama-index-embeddings-openai, llama-index-readers-llama-parse, llama-index-multi-modal-llms-openai, llama-index-cli, llama-index-agent-openai, llama-index-program-openai, llama-index-question-gen-openai, llama-index\n", + "Successfully installed dataclasses-json-0.6.6 deprecated-1.2.14 dirtyjson-1.0.8 h11-0.14.0 httpcore-1.0.5 httpx-0.27.0 llama-index-0.10.42 llama-index-agent-openai-0.2.6 llama-index-cli-0.1.12 llama-index-core-0.10.42 llama-index-embeddings-openai-0.1.10 llama-index-indices-managed-llama-cloud-0.1.6 llama-index-legacy-0.9.48 llama-index-llms-openai-0.1.21 llama-index-multi-modal-llms-openai-0.1.6 llama-index-program-openai-0.1.6 llama-index-question-gen-openai-0.1.3 llama-index-readers-file-0.1.23 llama-index-readers-llama-parse-0.1.4 llama-parse-0.4.4 llamaindex-py-client-0.1.19 marshmallow-3.21.2 mypy-extensions-1.0.0 openai-1.30.5 pypdf-4.2.0 striprtf-0.0.26 tiktoken-0.7.0 typing-inspect-0.9.0\n", + "Collecting llama-index-embeddings-huggingface\n", + " Downloading llama_index_embeddings_huggingface-0.2.1-py3-none-any.whl (7.1 kB)\n", + "Requirement already satisfied: huggingface-hub[inference]>=0.19.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-embeddings-huggingface) (0.23.1)\n", + "Requirement already satisfied: 
llama-index-core<0.11.0,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-embeddings-huggingface) (0.10.42)\n", + "Collecting sentence-transformers<3.0.0,>=2.6.1 (from llama-index-embeddings-huggingface)\n", + " Downloading sentence_transformers-2.7.0-py3-none-any.whl (171 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m171.5/171.5 kB\u001b[0m \u001b[31m1.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.14.0)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2023.6.0)\n", + "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (24.0)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (6.0.1)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2.31.0)\n", + "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.66.4)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.11.0)\n", + "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.9.5)\n", + "Collecting minijinja>=1.0 (from huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface)\n", + " Downloading minijinja-2.0.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (853 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m853.2/853.2 kB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: SQLAlchemy[asyncio]>=1.4.49 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.0.30)\n", + "Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.6.6)\n", + "Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.2.14)\n", + "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.8)\n", + "Requirement already satisfied: httpx in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.27.0)\n", + "Requirement already satisfied: llamaindex-py-client<0.2.0,>=0.1.18 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.1.19)\n", + "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in 
/usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.6.0)\n", + "Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.3)\n", + "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.8.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.25.2)\n", + "Requirement already satisfied: openai>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.30.5)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.0.3)\n", + "Requirement already satisfied: pillow>=9.0.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (9.4.0)\n", + "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (8.3.0)\n", + "Requirement already satisfied: tiktoken>=0.3.3 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.7.0)\n", + "Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.9.0)\n", + "Requirement already satisfied: wrapt in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.14.1)\n", + "Requirement already satisfied: transformers<5.0.0,>=4.34.0 in /usr/local/lib/python3.10/dist-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (4.41.1)\n", + "Requirement already satisfied: torch>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (2.3.0+cu121)\n", + "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.2.2)\n", + "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.11.4)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (23.2.0)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from 
aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (1.9.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (4.0.3)\n", + "Requirement already satisfied: pydantic>=1.10 in /usr/local/lib/python3.10/dist-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.7.1)\n", + "Requirement already satisfied: anyio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.7.1)\n", + "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.2.2)\n", + "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.5)\n", + "Requirement already satisfied: idna in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.7)\n", + "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.3.1)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.14.0)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (8.1.7)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.4.2)\n", + "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.5.15)\n", + "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai>=1.1.0->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.7.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (3.3.2)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub[inference]>=0.19.0->llama-index-embeddings-huggingface) (2.0.7)\n", + "Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.10/dist-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.0.3)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.12)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (3.1.4)\n", + "Collecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached 
nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n", + "Collecting nvidia-cuda-runtime-cu12==12.1.105 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n", + "Collecting nvidia-cuda-cupti-cu12==12.1.105 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n", + "Collecting nvidia-cudnn-cu12==8.9.2.26 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl (731.7 MB)\n", + "Collecting nvidia-cublas-cu12==12.1.3.1 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n", + "Collecting nvidia-cufft-cu12==11.0.2.54 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n", + "Collecting nvidia-curand-cu12==10.3.2.106 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n", + "Collecting nvidia-cusolver-cu12==11.4.5.107 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n", + "Collecting nvidia-cusparse-cu12==12.1.0.106 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n", + "Collecting nvidia-nccl-cu12==2.20.5 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl (176.2 MB)\n", + "Collecting nvidia-nvtx-cu12==12.1.105 (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Using cached nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n", + "Requirement already satisfied: triton==2.3.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (2.3.0)\n", + "Collecting nvidia-nvjitlink-cu12 (from nvidia-cusolver-cu12==11.4.5.107->torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface)\n", + " Downloading nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl (21.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.3/21.3 MB\u001b[0m \u001b[31m38.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: tokenizers<0.20,>=0.19 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.34.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (0.19.1)\n", + "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.34.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (0.4.3)\n", + "Requirement already satisfied: mypy-extensions>=0.3.0 in 
/usr/local/lib/python3.10/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.0.0)\n", + "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.10/dist-packages (from dataclasses-json->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (3.21.2)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2023.4)\n", + "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2024.1)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (3.5.0)\n", + "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.2.1)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (0.7.0)\n", + "Requirement already satisfied: pydantic-core==2.18.2 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (2.18.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-embeddings-huggingface) (1.16.0)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (2.1.5)\n", + "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.11.0->sentence-transformers<3.0.0,>=2.6.1->llama-index-embeddings-huggingface) (1.3.0)\n", + "Installing collected packages: nvidia-nvtx-cu12, nvidia-nvjitlink-cu12, nvidia-nccl-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, minijinja, nvidia-cusparse-cu12, nvidia-cudnn-cu12, nvidia-cusolver-cu12, sentence-transformers, llama-index-embeddings-huggingface\n", + "Successfully installed llama-index-embeddings-huggingface-0.2.1 minijinja-2.0.1 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-8.9.2.26 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.20.5 nvidia-nvjitlink-cu12-12.5.40 nvidia-nvtx-cu12-12.1.105 sentence-transformers-2.7.0\n", + "Collecting llama-index-llms-groq\n", + " Downloading llama_index_llms_groq-0.1.4-py3-none-any.whl (2.9 kB)\n", + "Requirement already satisfied: llama-index-core<0.11.0,>=0.10.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-llms-groq) (0.10.42)\n", + "Collecting 
llama-index-llms-openai-like<0.2.0,>=0.1.3 (from llama-index-llms-groq)\n", + " Downloading llama_index_llms_openai_like-0.1.3-py3-none-any.whl (3.0 kB)\n", + "Requirement already satisfied: PyYAML>=6.0.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (6.0.1)\n", + "Requirement already satisfied: SQLAlchemy[asyncio]>=1.4.49 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.0.30)\n", + "Requirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.9.5)\n", + "Requirement already satisfied: dataclasses-json in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.6.6)\n", + "Requirement already satisfied: deprecated>=1.2.9.3 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.2.14)\n", + "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.8)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2023.6.0)\n", + "Requirement already satisfied: httpx in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.27.0)\n", + "Requirement already satisfied: llamaindex-py-client<0.2.0,>=0.1.18 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.1.19)\n", + "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.6.0)\n", + "Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.3)\n", + "Requirement already satisfied: nltk<4.0.0,>=3.8.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.8.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.25.2)\n", + "Requirement already satisfied: openai>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.30.5)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.0.3)\n", + "Requirement already satisfied: pillow>=9.0.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (9.4.0)\n", + "Requirement already satisfied: requests>=2.31.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.31.0)\n", + "Requirement already satisfied: tenacity<9.0.0,>=8.2.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (8.3.0)\n", + "Requirement already satisfied: tiktoken>=0.3.3 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.7.0)\n", + "Requirement already satisfied: tqdm<5.0.0,>=4.66.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) 
(4.66.4)\n", + "Requirement already satisfied: typing-extensions>=4.5.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.11.0)\n", + "Requirement already satisfied: typing-inspect>=0.8.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.9.0)\n", + "Requirement already satisfied: wrapt in /usr/local/lib/python3.10/dist-packages (from llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.14.1)\n", + "Requirement already satisfied: llama-index-llms-openai<0.2.0,>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.1.21)\n", + "Requirement already satisfied: transformers<5.0.0,>=4.37.0 in /usr/local/lib/python3.10/dist-packages (from llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (4.41.1)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (23.2.0)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.9.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (4.0.3)\n", + "Requirement already satisfied: pydantic>=1.10 in /usr/local/lib/python3.10/dist-packages (from llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.7.1)\n", + "Requirement already satisfied: anyio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.7.1)\n", + "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.2.2)\n", + "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.5)\n", + "Requirement already satisfied: idna in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.7)\n", + "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.3.1)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore==1.*->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.14.0)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (8.1.7)\n", + "Requirement already satisfied: joblib in 
/usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.4.2)\n", + "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk<4.0.0,>=3.8.1->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.5.15)\n", + "Requirement already satisfied: distro<2,>=1.7.0 in /usr/lib/python3/dist-packages (from openai>=1.1.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.7.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.31.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.3.2)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.31.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.0.7)\n", + "Requirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.10/dist-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.0.3)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (3.14.0)\n", + "Requirement already satisfied: huggingface-hub<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.23.1)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (24.0)\n", + "Requirement already satisfied: tokenizers<0.20,>=0.19 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.19.1)\n", + "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.10/dist-packages (from transformers<5.0.0,>=4.37.0->llama-index-llms-openai-like<0.2.0,>=0.1.3->llama-index-llms-groq) (0.4.3)\n", + "Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.10/dist-packages (from typing-inspect>=0.8.0->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.0.0)\n", + "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /usr/local/lib/python3.10/dist-packages (from dataclasses-json->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (3.21.2)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2023.4)\n", + "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (2024.1)\n", + "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio->httpx->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (1.2.1)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic>=1.10->llamaindex-py-client<0.2.0,>=0.1.18->llama-index-core<0.11.0,>=0.10.1->llama-index-llms-groq) (0.7.0)\n", + "Requirement already satisfied: pydantic-core==2.18.2 in 
"Installing collected packages: llama-index-llms-openai-like, llama-index-llms-groq\n",
+ "Successfully installed llama-index-llms-groq-0.1.4 llama-index-llms-openai-like-0.1.3\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install llama-index\n",
+ "!pip install llama-index-embeddings-huggingface\n",
+ "!pip install llama-index-llms-groq"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import nest_asyncio\n",
+ "\n",
+ "nest_asyncio.apply()"
+ ],
+ "metadata": {
+ "id": "sG25Zlzsfg3K"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, SummaryIndex\n",
+ "from llama_index.core.node_parser import SentenceSplitter\n",
+ "from llama_index.core.tools import FunctionTool, QueryEngineTool\n",
+ "from llama_index.core.vector_stores import MetadataFilters, FilterCondition\n",
+ "from typing import List, Optional, Tuple\n",
+ "\n",
+ "def get_doc_tools(\n",
+ " file_path: str,\n",
+ " name: str,\n",
+ ") -> Tuple[FunctionTool, QueryEngineTool]:\n",
+ " \"\"\"Build a vector query tool and a summary query tool for one document.\"\"\"\n",
+ "\n",
+ " # load the document, split it into ~1024-token chunks, and embed the chunks\n",
+ " documents = SimpleDirectoryReader(input_files=[file_path]).load_data()\n",
+ " splitter = SentenceSplitter(chunk_size=1024)\n",
+ " nodes = splitter.get_nodes_from_documents(documents)\n",
+ " vector_index = VectorStoreIndex(nodes)\n",
+ "\n",
+ " def vector_query(\n",
+ " query: str,\n",
+ " page_numbers: Optional[List[str]] = None\n",
+ " ) -> str:\n",
+ " \"\"\"Use to answer specific questions over the given paper.\n",
+ "\n",
+ " Always leave page_numbers as None UNLESS there is a specific page you want to search for.\n",
+ "\n",
+ " Args:\n",
+ " query (str): the string query to be embedded.\n",
+ " page_numbers (Optional[List[str]]): Filter by set of pages. Leave as None\n",
+ " to perform a vector search over all pages. Otherwise, filter by\n",
+ " the set of specified pages.\n",
+ "\n",
+ " \"\"\"\n",
+ "\n",
+ " page_numbers = page_numbers or []\n",
+ " metadata_dicts = [\n",
+ " {\"key\": \"page_label\", \"value\": p} for p in page_numbers\n",
+ " ]\n",
+ "\n",
+ " query_engine = vector_index.as_query_engine(\n",
+ " similarity_top_k=2,\n",
+ " filters=MetadataFilters.from_dicts(\n",
+ " metadata_dicts,\n",
+ " condition=FilterCondition.OR\n",
+ " )\n",
+ " )\n",
+ " response = query_engine.query(query)\n",
+ " return response\n",
+ "\n",
+ " vector_query_tool = FunctionTool.from_defaults(\n",
+ " name=f\"vector_tool_{name}\",\n",
+ " fn=vector_query\n",
+ " )\n",
+ "\n",
+ " summary_index = SummaryIndex(nodes)\n",
+ " summary_query_engine = summary_index.as_query_engine(\n",
+ " response_mode=\"tree_summarize\",\n",
+ " use_async=True,\n",
+ " )\n",
+ " summary_tool = QueryEngineTool.from_defaults(\n",
+ " name=f\"summary_tool_{name}\",\n",
+ " query_engine=summary_query_engine,\n",
+ " description=(\n",
+ " \"Use ONLY IF you want to get a holistic summary of the paper. \"\n",
+ " \"Do NOT use if you have specific questions over the paper.\"\n",
+ " ),\n",
+ " )\n",
+ "\n",
+ " return vector_query_tool, summary_tool"
+ ],
+ "metadata": {
+ "id": "0z66nY0gKzR6"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
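+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`get_doc_tools` builds two complementary tools per paper: a vector tool for pointed questions (optionally restricted to specific PDF pages via metadata filters) and a summary tool for holistic overviews.\n",
+ "\n",
+ "The next cell is a small optional illustration, not part of the original lesson flow: it shows how the page filter inside `vector_query` is assembled. It only constructs a `MetadataFilters` object locally and makes no API calls; the page values are placeholders."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Optional illustration: how vector_query restricts retrieval to given pages.\n",
+ "# MetadataFilters.from_dicts builds an OR filter over the page_label metadata\n",
+ "# that SimpleDirectoryReader attaches to each PDF page; the page values below\n",
+ "# are placeholders.\n",
+ "from llama_index.core.vector_stores import MetadataFilters, FilterCondition\n",
+ "\n",
+ "example_filters = MetadataFilters.from_dicts(\n",
+ " [{\"key\": \"page_label\", \"value\": p} for p in [\"1\", \"2\"]],\n",
+ " condition=FilterCondition.OR,\n",
+ ")\n",
+ "print(example_filters)"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },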
\"\n", + " \"Do NOT use if you have specific questions over MetaGPT.\"\n", + " ),\n", + " )\n", + "\n", + " return vector_query_tool, summary_tool" + ], + "metadata": { + "id": "0z66nY0gKzR6" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "urls = [\n", + " \"https://openreview.net/pdf?id=VtmBAGCN7o\",\n", + " \"https://openreview.net/pdf?id=6PmJoRfdaK\",\n", + " \"https://openreview.net/pdf?id=hSyW5go0v8\",\n", + "]\n", + "\n", + "papers = [\n", + " \"metagpt.pdf\",\n", + " \"longlora.pdf\",\n", + " \"selfrag.pdf\",\n", + "]" + ], + "metadata": { + "id": "NGVdQc2FHejT" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "for url, paper in zip(urls, papers):\n", + " !wget \"{url}\" -O \"{paper}\"" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Xju9QpOCK-vw", + "outputId": "4d9ad762-cfa4-4a46-c2be-6197e554d822" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2024-06-01 23:40:21-- https://openreview.net/pdf?id=VtmBAGCN7o\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16911937 (16M) [application/pdf]\n", + "Saving to: ‘metagpt.pdf’\n", + "\n", + "metagpt.pdf 100%[===================>] 16.13M 72.3MB/s in 0.2s \n", + "\n", + "2024-06-01 23:40:22 (72.3 MB/s) - ‘metagpt.pdf’ saved [16911937/16911937]\n", + "\n", + "--2024-06-01 23:40:22-- https://openreview.net/pdf?id=6PmJoRfdaK\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 1168720 (1.1M) [application/pdf]\n", + "Saving to: ‘longlora.pdf’\n", + "\n", + "longlora.pdf 100%[===================>] 1.11M --.-KB/s in 0.05s \n", + "\n", + "2024-06-01 23:40:22 (21.7 MB/s) - ‘longlora.pdf’ saved [1168720/1168720]\n", + "\n", + "--2024-06-01 23:40:22-- https://openreview.net/pdf?id=hSyW5go0v8\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 1244749 (1.2M) [application/pdf]\n", + "Saving to: ‘selfrag.pdf’\n", + "\n", + "selfrag.pdf 100%[===================>] 1.19M --.-KB/s in 0.02s \n", + "\n", + "2024-06-01 23:40:22 (73.7 MB/s) - ‘selfrag.pdf’ saved [1244749/1244749]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from getpass import getpass\n", + "import os\n", + "\n", + "GROQ_API_TOKEN = getpass()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "EXLxN6f1LUDu", + "outputId": "67b0801b-a033-44bb-953a-5ca769e91c4d" + }, + "execution_count": null, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "··········\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from llama_index.llms.groq import Groq\n", + "\n", + "from llama_index.core import Settings, VectorStoreIndex\n", + "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", + "\n", + "#llm = Groq(model=\"llama3-8b-8192\", api_key=GROQ_API_TOKEN)\n", + "llm = Groq(model=\"llama3-70b-8192\", api_key=GROQ_API_TOKEN)\n", + "Settings.llm = llm\n", + "\n", + "Settings.embed_model = HuggingFaceEmbedding(\n", + " model_name=\"BAAI/bge-small-en-v1.5\"\n", + ")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 528, + "referenced_widgets": [ + "3e13eb34c16c43b089268b9da16af93f", + "a83cbfd557e54a94897fdf017e7ebc83", + "3c96d7aaed084280b5726a7d2565576e", + "67e5fc395b834a4fa4b08e227e5a7159", + "0883864f208f4bceb4058cdedb434f40", + "243dd74534e14f9f8e9b1a971643739b", + "57e48381bf5447e392655c7a139f33a3", + "7cfe756cf62043d0a997be5ff02ea402", + "53f7b91752b7496693012b2c97d0a95b", + "ef5e8c2088e74c2c929199eddca45de0", + "179e9cbedb6c40378e1fdbd7da83afe7", + "ef9a7d590ca74920ac9dc030fdba6d9b", + "b5f18fba3ac142b2b5421fa297dd20c4", + "033c1e5ded554a91aedd1b69ea63d6c1", + "e4bbbf4b552e485ebaf64b56b00c4565", + "2f35c219bd034c0d96a8e99a2fca289f", + "5b1d19791d934f55bdb60aaacba84df8", + "7e9e641d1ada45588628294c4a9c9672", + "5b4ef9637e334f72b318f8dfac6abe0d", + "7f5604938fba472b8fa6010f0d15227c", + "f2abc923820a4e2abdc29b799a75e206", + "080053dd82cb44e989d8647ff7b3ec30", + "37c33e92bab94b858fbe9e1e67f3c8da", + "8cc5f291932a4ab280dec3aea1432d6e", + "e77e66e342014277b7ae47eaf6611662", + "92de3b8c634f4b519c48b822cfad71d3", + "84772f7879844088880ebd0cb63f69fc", + "c8659cbb7fa64b449b26212749721092", + "3da3375a64ac486388c7eff17f2ab994", + "0a9dc9cc8fcc447085e6d8ec78ca3f2c", + "d504d09e128d4574962e0433ea804933", + "40fd6d6c48e2480abf92915608aa0ec2", + "17bce507a4d843a4ada4b2bb91088453", + "9b1d56d6fa504d3f8bac4eab3963d3e4", + "2490327e0dc24be8a6994e8775bb4e84", + "76e24fab9cab4ebbbacd8e3ef0b2a542", + "064f8e0a650747679bd1eb5786940cfa", + "7aa63ea675bc4ee5a85e20f4984fd6f6", + "851ea97e6fac4ddead5d60dd78ae1893", + "0acd03d6252541d48efdfeeedabc4e41", + "4852807640bb45d4b17f5f35eddf8fad", + "a64abd9074264f77a892bc86bbb1d101", + "ab8fabde10144b7e99211dac7efce2aa", + "a05fc015f79c491ba0e0e4f028b9ad9e", + "f57cc383fbb04339a9e1b162c81fa80f", + "513461983b1143fabd9acba6966d9c1a", + "6a33b0bf37f24b3cb95e95a29751e666", + "5467911545524d54aba88a4168c0bada", + "022df94206c14f0784f87891c6bbc7b1", + "00e72ee855f34c7c9423e32db17794dc", + "266d6356347f46baa06cbcb32b122db1", + "c8453dd91d8a4057b8b0185ea24f7179", + "b15b69aff04744f1bb716ff35a1874cf", + "39aad7b08fca41b9ae79a24348a2be87", + "b29d984a333641af8625ac4507c448a4", + "19a338b2a3864cb08fbc34513060d5e7", + "40daa5594549493baf9f37c396e01286", + "76cffb68a1ec452c864616708b78bbf0", + 
"acd81175704e49dbb9933c6f268e4919", + "01da664ae3a44ea4b9605c5494b91e96", + "b1ba78323bf24188809657cb458db379", + "71b61cfa2fb249c1b600323bed4084f9", + "f836ae7999c3449ea5806e6754d3436a", + "491354e8c4ce4180ab7170121bf0f5b7", + "78bf4ab890cd41a89594cab7c38db8a9", + "0268ddd6729242dea9a69880a0b3c3ec", + "9967d289a95b4cafa8345023143c4f60", + "835a2e6b6a45482a8147d66e4a31b10e", + "cfde79061ba9422f9f9fb295ca806e4c", + "d3811aea2bf444629e70e8d72212381c", + "9d67ec85304b49bdb497ed10d14f33e7", + "360ad4a30e2544f2a61630865a6fb6b0", + "ed79435fbc0a43feb5aca86e77806bed", + "265b2fc2c67a45598bc420d2a1245ce7", + "39ecde5a49e9427d8f367fd387c11c77", + "1c4f6f6c000843dfa7760952df1a525d", + "5b259ff2eeb94058886e880f459c0904", + "7317d38d1bed429b92d59c581fdf35d9", + "996499071c244d3abd4877cf2120a3a0", + "8142da4b1f71460d8be423a4c468e2cf", + "627c75e12be241ba8eb6c8b635858016", + "ce3bee3ed9ca439587fefc8c3470ea3c", + "41909b7281e24ebea0d75bb4fb33122e", + "063a0977a87942c4ad26fd3c4ec7f990", + "0580fba5bcde49e1ab4a9426db5a9c2b", + "fe9b90b5aa93433f86d0b15a4b942f5a", + "071f1ba196f247a6acd6cdf7650fdd0a", + "19a8d171526e43a6832104b2a89819b3", + "9aa6ad4a993f4dea85746bab675fdf65", + "c9df65629477412f8f9ab49059d22067", + "11379c2e71db4212b3aa0c0bdfc92479", + "f1f648ad7b8d4087baea1e1074baefa1", + "e73fcdce6b2b45a8824acee86023929d", + "ea435a91897e4b75b082607b40ba7695", + "63ce7f8041974841b6beca100fc1ff90", + "e4cf4c2c7848419e8621be0aa33d2b34", + "7a30c116bef74095b1efdf273aad6dcf", + "ed448e04b1d2495787c0ee2806a7178a", + "74b1f3a2a9b24f639b536066bd5ec01a", + "e793c09809ee476caf8c8517f9cc1115", + "1e1f8b0e6e044cfb9120c8ab57fbb0e6", + "db9147aaf68d4858a346b7e6891e230d", + "12de797682e241a4828eb22f664c8b91", + "d1f7a6fa7a154fcf83232a813089f677", + "234b2c7600dd4508be2ba8341cf92ae4", + "63e79f779fe34fa18f8ce55c339ed071", + "fe36574e45034b6d9eb70a5be64f2e98", + "20af4d2562ef4d169b7d1fd60cd3b38f", + "669c14fc1e8e4834bf4b0cf5e5d88ca6", + "65b1fa77787141e7b3b02a4793c10346", + "4558b35ea0194d209d94c317302cff90", + "4086395b295a4bff8db2d58fa83ac61c", + "e46be04cc8844cd08a482dd94ca59a6c", + "cf37c5378564462f9e65ee4211e75347", + "363e54358828421ea6766abb27423160", + "58fc3a788ad949e5ab52731e7e422aae", + "d625152e571e46fb8576827bfff17f72", + "3cde34eb2ad84c068acc3dea1e05b166", + "92591354468047ffaf312805f23f2fc2", + "11b7fa7a4e9d4837b4e6ccd880eff962", + "98b1c78cce564f038c7b817d150f8362" + ] + }, + "id": "SHZbbMdQLWZg", + "outputId": "72e5ca72-f19b-4684-b873-7d9387640bf3" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:89: UserWarning: \n", + "The secret `HF_TOKEN` does not exist in your Colab secrets.\n", + "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n", + "You will be able to reuse this secret in all of your notebooks.\n", + "Please note that authentication is recommended but still optional to access public models or datasets.\n", + " warnings.warn(\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "modules.json: 0%| | 0.00/349 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "3e13eb34c16c43b089268b9da16af93f" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "config_sentence_transformers.json: 
0%| | 0.00/124 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "ef9a7d590ca74920ac9dc030fdba6d9b" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "README.md: 0%| | 0.00/94.8k [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "37c33e92bab94b858fbe9e1e67f3c8da" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "sentence_bert_config.json: 0%| | 0.00/52.0 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "9b1d56d6fa504d3f8bac4eab3963d3e4" + } + }, + "metadata": {} + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n", + " warnings.warn(\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "config.json: 0%| | 0.00/743 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "f57cc383fbb04339a9e1b162c81fa80f" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "model.safetensors: 0%| | 0.00/133M [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "19a338b2a3864cb08fbc34513060d5e7" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "tokenizer_config.json: 0%| | 0.00/366 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "9967d289a95b4cafa8345023143c4f60" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "7317d38d1bed429b92d59c581fdf35d9" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "tokenizer.json: 0%| | 0.00/711k [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "9aa6ad4a993f4dea85746bab675fdf65" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "special_tokens_map.json: 0%| | 0.00/125 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "e793c09809ee476caf8c8517f9cc1115" + } + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "1_Pooling/config.json: 0%| | 0.00/190 [00:00<?, ?B/s]" + ], + "application/vnd.jupyter.widget-view+json": { + "version_major": 2, + "version_minor": 0, + "model_id": "4558b35ea0194d209d94c317302cff90" + } + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [ + "from pathlib import Path\n", + "\n", + "paper_to_tools_dict = {}\n", + "for paper in papers:\n", + " print(f\"Getting tools for paper: {paper}\")\n", + " vector_tool, summary_tool = get_doc_tools(paper, Path(paper).stem)\n", + " 
paper_to_tools_dict[paper] = [vector_tool, summary_tool]"
+      ],
+      "metadata": {
+        "id": "NnZdKJKVdOFs",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "bea8307e-927f-44f6-f78c-f602f46a963f"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Getting tools for paper: metagpt.pdf\n",
+            "Getting tools for paper: longlora.pdf\n",
+            "Getting tools for paper: selfrag.pdf\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# flatten the per-paper [vector_tool, summary_tool] pairs into one tool list\n",
+        "initial_tools = [t for paper in papers for t in paper_to_tools_dict[paper]]"
+      ],
+      "metadata": {
+        "id": "FktfixgiLsMm"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from llama_index.core.agent import ReActAgent\n",
+        "\n",
+        "# Hand the agent the vector + summary tools for all three papers;\n",
+        "# the ReAct loop picks the right tool (and arguments) at each step.\n",
+        "agent = ReActAgent.from_tools(\n",
+        "    initial_tools,\n",
+        "    llm=llm,\n",
+        "    verbose=True,\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "QkaALpnIQ01b"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "response = agent.query(\n",
+        "    \"Tell me about the evaluation dataset used in LongLoRA, \"\n",
+        "    \"and then tell me about the evaluation results\"\n",
+        ")"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "d9wVsIt586s8",
+        "outputId": "de9a4d02-605b-4215-c234-2ab47482b48f"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
+            "Action: vector_tool_longlora\n",
+            "Action Input: {'query': 'evaluation dataset used in LongLoRA'}\n",
+            "\u001b[0m\u001b[1;3;34mObservation: The evaluation dataset used in LongLoRA is the proof-pile test set.\n",
+            "\u001b[0m\u001b[1;3;38;5;200mThought: I need more information to answer the question. I'll use a tool to get the evaluation results of LongLoRA.\n",
+            "Action: vector_tool_longlora\n",
+            "Action Input: {'query': 'evaluation results of LongLoRA'}\n",
+            "\u001b[0m\u001b[1;3;34mObservation: The evaluation results of LongLoRA show that it achieves strong empirical results on various tasks on Llama2 models from 7B/13B to 70B. Specifically, it extends Llama2 7B from 4k context to 100k, or Llama2 70B to 32k on a single 8 ×A100 machine. In terms of perplexity, LongLoRA demonstrates promising results, with perplexity decreasing as the context size increases. For example, for the Llama2 7B model, the perplexity improves from 2.72 to 2.50 by -0.22 when increasing the context window size from 8192 to 32768. Additionally, LongLoRA achieves good results on the topic retrieval task introduced in LongChat, with the model fine-tuned with a context length of 18k performing well on long conversations.\n",
+            "\u001b[0m\u001b[1;3;38;5;200mThought: I can answer without using any more tools. I'll use the user's language to answer.\n",
+            "Answer: The evaluation dataset used in LongLoRA is the proof-pile test set. The evaluation results of LongLoRA show that it achieves strong empirical results on various tasks on Llama2 models from 7B/13B to 70B. Specifically, it extends Llama2 7B from 4k context to 100k, or Llama2 70B to 32k on a single 8 ×A100 machine. In terms of perplexity, LongLoRA demonstrates promising results, with perplexity decreasing as the context size increases. For example, for the Llama2 7B model, the perplexity improves from 2.72 to 2.50 by -0.22 when increasing the context window size from 8192 to 32768. Additionally, LongLoRA achieves good results on the topic retrieval task introduced in LongChat, with the model fine-tuned with a context length of 18k performing well on long conversations.\n",
+            "\u001b[0m"
+          ]
+        }
+      ]
+    },
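+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The verbose trace above shows the full ReAct loop: the agent breaks the two-part question into sequential calls to `vector_tool_longlora`, inspects each observation, and only then composes the final answer.\n",
+        "\n",
+        "The free Groq tier used in this notebook enforces a tokens-per-minute (TPM) budget, so back-to-back agent queries can fail with `RateLimitError`, as the output of the Self-RAG query below shows. One workaround is a small backoff wrapper; the sketch below is not part of the original lesson and simply reuses the `agent` defined above."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "import time\n",
+        "\n",
+        "# Hypothetical convenience helper (not from the course): retry an agent\n",
+        "# query after the provider's rate-limit cooldown instead of crashing.\n",
+        "def query_with_backoff(agent, question, retries=3, wait_s=120):\n",
+        "    for attempt in range(retries):\n",
+        "        try:\n",
+        "            return agent.query(question)\n",
+        "        except Exception as exc:  # e.g. a 429 RateLimitError from the Groq endpoint\n",
+        "            if attempt == retries - 1:\n",
+        "                raise\n",
+        "            print(f\"Rate limited ({exc}); retrying in {wait_s}s\")\n",
+        "            time.sleep(wait_s)"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },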
+    {
+      "cell_type": "code",
+      "source": [
+        "# Back-to-back queries like this one can exhaust the free-tier TPM budget;\n",
+        "# the backoff helper above is one way around the resulting RateLimitError.\n",
+        "response = agent.query(\"Give me a summary of both Self-RAG and LongLoRA\")\n",
+        "print(str(response))"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/",
+          "height": 602
+        },
+        "id": "_vrBO-S1-HjF",
+        "outputId": "d7890da1-adc8-46d8-be3a-aeca37f52dca"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "\u001b[1;3;38;5;200mThought: The current language of the user is: English. I need to use a tool to help me answer the question.\n",
+            "Action: summary_tool_selfrag\n",
+            "Action Input: {'input': 'Self-RAG'}\n",
+            "\u001b[0m"
+          ]
+        },
+        {
+          "output_type": "stream",
+          "name": "stderr",
+          "text": [
+            "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.04950782567241929 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15629, Requested ~1296. Please try again in 1m49.25s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+            "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 1.6372255574348211 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15614, Requested ~1296. Please try again in 1m49.107s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+            "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.09035215718071665 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15442, Requested ~1296. Please try again in 1m47.38s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+            "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.3922668022705462 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15424, Requested ~1296. Please try again in 1m47.202999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+            "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 3.8574420271748764 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15375, Requested ~1296. 
Please try again in 1m46.719s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.1250970532178871 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14970, Requested ~1491. Please try again in 1m44.613s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;3;34mObservation: Error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14980, Requested ~1296. Please try again in 1m42.768999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n", + "\u001b[0m" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.06293445905334383 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14948, Requested ~1491. Please try again in 1m44.396s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 3.0979269693241385 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14932, Requested ~1491. Please try again in 1m44.238999999s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 2.107768073985527 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14612, Requested ~1491. Please try again in 1m41.035s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 5.467367118157581 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 14392, Requested ~1491. Please try again in 1m38.83s. 
Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n" + ] + }, + { + "output_type": "error", + "ename": "RateLimitError", + "evalue": "Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 13828, Requested ~1491. Please try again in 1m33.192s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mRateLimitError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-15-3eca833282f9>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0magent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Give me a summary of both Self-RAG and LongLoRA\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/base/base_query_engine.py\u001b[0m in \u001b[0;36mquery\u001b[0;34m(self, str_or_query_bundle)\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0mstr_or_query_bundle\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mQueryBundle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m 
\u001b[0mquery_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_query\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
+            "\u001b[0;31m[... repeated llama_index dispatcher/agent-runner frames, tenacity retry frames, and openai client frames ...]\u001b[0m",
+            "\u001b[0;31mRateLimitError\u001b[0m: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 13828, Requested ~1491. Please try again in 1m33.192s. 
Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "urls = [\n", + " \"https://openreview.net/pdf?id=VtmBAGCN7o\",\n", + " \"https://openreview.net/pdf?id=6PmJoRfdaK\",\n", + " \"https://openreview.net/pdf?id=LzPWWPAdY4\",\n", + " \"https://openreview.net/pdf?id=VTF8yNQM66\",\n", + " \"https://openreview.net/pdf?id=hSyW5go0v8\",\n", + " \"https://openreview.net/pdf?id=9WD9KwssyT\",\n", + " \"https://openreview.net/pdf?id=yV6fD7LYkF\",\n", + " \"https://openreview.net/pdf?id=hnrB5YHoYu\",\n", + " \"https://openreview.net/pdf?id=WbWtOYIzIK\",\n", + " \"https://openreview.net/pdf?id=c5pwL0Soay\",\n", + " \"https://openreview.net/pdf?id=TpD2aG1h0D\"\n", + "]\n", + "\n", + "papers = [\n", + " \"metagpt.pdf\",\n", + " \"longlora.pdf\",\n", + " \"loftq.pdf\",\n", + " \"swebench.pdf\",\n", + " \"selfrag.pdf\",\n", + " \"zipformer.pdf\",\n", + " \"values.pdf\",\n", + " \"finetune_fair_diffusion.pdf\",\n", + " \"knowledge_card.pdf\",\n", + " \"metra.pdf\",\n", + " \"vr_mcl.pdf\"\n", + "]" + ], + "metadata": { + "id": "iyhvuua7GjVD" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "for url, paper in zip(urls, papers):\n", + " !wget \"{url}\" -O \"{paper}\"" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "m-RoimylMmU1", + "outputId": "95f616e4-b479-4fe7-dba0-862fd596dad4" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2024-06-01 23:46:38-- https://openreview.net/pdf?id=VtmBAGCN7o\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 16911937 (16M) [application/pdf]\n", + "Saving to: ‘metagpt.pdf’\n", + "\n", + "metagpt.pdf 100%[===================>] 16.13M 38.4MB/s in 0.4s \n", + "\n", + "2024-06-01 23:46:39 (38.4 MB/s) - ‘metagpt.pdf’ saved [16911937/16911937]\n", + "\n", + "--2024-06-01 23:46:39-- https://openreview.net/pdf?id=6PmJoRfdaK\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 1168720 (1.1M) [application/pdf]\n", + "Saving to: ‘longlora.pdf’\n", + "\n", + "longlora.pdf 100%[===================>] 1.11M --.-KB/s in 0.03s \n", + "\n", + "2024-06-01 23:46:39 (41.2 MB/s) - ‘longlora.pdf’ saved [1168720/1168720]\n", + "\n", + "--2024-06-01 23:46:39-- https://openreview.net/pdf?id=LzPWWPAdY4\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 366134 (358K) [application/pdf]\n", + "Saving to: ‘loftq.pdf’\n", + "\n", + "loftq.pdf 100%[===================>] 357.55K --.-KB/s in 0.01s \n", + "\n", + "2024-06-01 23:46:39 (29.6 MB/s) - ‘loftq.pdf’ saved [366134/366134]\n", + "\n", + "--2024-06-01 23:46:39-- https://openreview.net/pdf?id=VTF8yNQM66\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 2680380 (2.6M) [application/pdf]\n", + "Saving to: ‘swebench.pdf’\n", + "\n", + "swebench.pdf 100%[===================>] 2.56M --.-KB/s in 0.03s \n", + "\n", + "2024-06-01 23:46:39 (80.5 MB/s) - ‘swebench.pdf’ saved [2680380/2680380]\n", + "\n", + "--2024-06-01 23:46:39-- https://openreview.net/pdf?id=hSyW5go0v8\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 1244749 (1.2M) [application/pdf]\n", + "Saving to: ‘selfrag.pdf’\n", + "\n", + "selfrag.pdf 100%[===================>] 1.19M --.-KB/s in 0.02s \n", + "\n", + "2024-06-01 23:46:40 (64.9 MB/s) - ‘selfrag.pdf’ saved [1244749/1244749]\n", + "\n", + "--2024-06-01 23:46:40-- https://openreview.net/pdf?id=9WD9KwssyT\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 511626 (500K) [application/pdf]\n", + "Saving to: ‘zipformer.pdf’\n", + "\n", + "zipformer.pdf 100%[===================>] 499.63K --.-KB/s in 0.01s \n", + "\n", + "2024-06-01 23:46:40 (46.7 MB/s) - ‘zipformer.pdf’ saved [511626/511626]\n", + "\n", + "--2024-06-01 23:46:40-- https://openreview.net/pdf?id=yV6fD7LYkF\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 4171982 (4.0M) [application/pdf]\n", + "Saving to: ‘values.pdf’\n", + "\n", + "values.pdf 100%[===================>] 3.98M --.-KB/s in 0.07s \n", + "\n", + "2024-06-01 23:46:40 (54.5 MB/s) - ‘values.pdf’ saved [4171982/4171982]\n", + "\n", + "--2024-06-01 23:46:40-- https://openreview.net/pdf?id=hnrB5YHoYu\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 34710410 (33M) [application/pdf]\n", + "Saving to: ‘finetune_fair_diffusion.pdf’\n", + "\n", + "finetune_fair_diffu 100%[===================>] 33.10M 52.4MB/s in 0.6s \n", + "\n", + "2024-06-01 23:46:41 (52.4 MB/s) - ‘finetune_fair_diffusion.pdf’ saved [34710410/34710410]\n", + "\n", + "--2024-06-01 23:46:41-- https://openreview.net/pdf?id=WbWtOYIzIK\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 877083 (857K) [application/pdf]\n", + "Saving to: ‘knowledge_card.pdf’\n", + "\n", + "knowledge_card.pdf 100%[===================>] 856.53K --.-KB/s in 0.01s \n", + "\n", + "2024-06-01 23:46:41 (81.9 MB/s) - ‘knowledge_card.pdf’ saved [877083/877083]\n", + "\n", + "--2024-06-01 23:46:41-- https://openreview.net/pdf?id=c5pwL0Soay\n", + "Resolving openreview.net (openreview.net)... 35.184.86.251\n", + "Connecting to openreview.net (openreview.net)|35.184.86.251|:443... connected.\n", + "HTTP request sent, awaiting response... 
+ {
+ "cell_type": "code",
+ "source": [
+ "from pathlib import Path\n",
+ "\n",
+ "# build a vector-search tool and a summary tool for every paper\n",
+ "paper_to_tools_dict = {}\n",
+ "for paper in papers:\n",
+ "    print(f\"Getting tools for paper: {paper}\")\n",
+ "    vector_tool, summary_tool = get_doc_tools(paper, Path(paper).stem)\n",
+ "    paper_to_tools_dict[paper] = [vector_tool, summary_tool]"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "BEfcjCQbMp2i",
+ "outputId": "81c48305-9569-470c-b66f-e1b615423cd2"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Getting tools for paper: metagpt.pdf\n",
+ "Getting tools for paper: longlora.pdf\n",
+ "Getting tools for paper: loftq.pdf\n",
+ "Getting tools for paper: swebench.pdf\n",
+ "Getting tools for paper: selfrag.pdf\n",
+ "Getting tools for paper: zipformer.pdf\n",
+ "Getting tools for paper: values.pdf\n",
+ "Getting tools for paper: finetune_fair_diffusion.pdf\n",
+ "Getting tools for paper: knowledge_card.pdf\n",
+ "Getting tools for paper: metra.pdf\n",
+ "Getting tools for paper: vr_mcl.pdf\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# flatten the per-paper tools into a single list for the object index\n",
+ "all_tools = [t for paper in papers for t in paper_to_tools_dict[paper]]"
+ ],
+ "metadata": {
+ "id": "E2wXRc15Mp5Y"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
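+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With 11 papers we now have 22 tools, and passing all of them to the agent on every call would bloat the prompt. Instead we index the tools themselves: an `ObjectIndex` embeds a text representation of each tool in a `VectorStoreIndex`, and its retriever returns only the tools most relevant to a given query (here the top 3)."
+ ]
+ },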
+ {
+ "cell_type": "code",
+ "source": [
+ "# define an \"object\" index and retriever over these tools\n",
+ "from llama_index.core import VectorStoreIndex\n",
+ "from llama_index.core.objects import ObjectIndex\n",
+ "\n",
+ "obj_index = ObjectIndex.from_objects(\n",
+ "    all_tools,\n",
+ "    index_cls=VectorStoreIndex,\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "PKd8_J1vMp8a"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "obj_retriever = obj_index.as_retriever(similarity_top_k=3)"
+ ],
+ "metadata": {
+ "id": "jys0qlogM069"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# fetch the tools most relevant to this query from the object index\n",
+ "tools = obj_retriever.retrieve(\n",
+ "    \"Tell me about the eval dataset used in MetaGPT and SWE-Bench\"\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "q6iWXlbsM3LX"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# inspect the metadata of one retrieved tool\n",
+ "tools[2].metadata"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "oWIPUar2M4yp",
+ "outputId": "ad06205d-1996-4d6c-8a95-3c13824c75bd"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "ToolMetadata(description='vector_tool_swebench(query: str, page_numbers: Optional[List[str]] = None) -> str\\nUse to answer questions over the swebench paper.\\n\\n    Useful if you have specific questions over the swebench paper.\\n    Always leave page_numbers as None UNLESS there is a specific page you want to search for.\\n\\n    Args:\\n        query (str): the string query to be embedded.\\n        page_numbers (Optional[List[str]]): Filter by set of pages. Leave as NONE\\n            if we want to perform a vector search\\n            over all pages. Otherwise, filter by the set of specified pages.\\n\\n    ', name='vector_tool_swebench', fn_schema=<class 'pydantic.v1.main.vector_tool_swebench'>, return_direct=False)"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 24
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from llama_index.core.agent import FunctionCallingAgentWorker\n",
+ "from llama_index.core.agent import AgentRunner\n",
+ "\n",
+ "agent_worker = FunctionCallingAgentWorker.from_tools(\n",
+ "    tool_retriever=obj_retriever,\n",
+ "    llm=llm,\n",
+ "    system_prompt=\"\"\" \\\n",
+ "You are an agent designed to answer queries over a set of given papers.\n",
+ "Please always use the tools provided to answer a question. Do not rely on prior knowledge.\\\n",
+ "\n",
+ "\"\"\",\n",
+ "    verbose=True\n",
+ ")\n",
+ "agent = AgentRunner(agent_worker)"
+ ],
+ "metadata": {
+ "id": "qsD37iTpM6gb"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
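+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `AgentRunner` decomposes a question into calls against the retrieved tools. Note that each reasoning step sends a few thousand tokens to the LLM, so on a free-tier Groq account (6,000 TPM for `llama3-70b-8192`) the cross-paper query below quickly runs into 429 retries, as its saved output shows. A lighter-weight sanity check is to invoke a single retrieved tool directly; the next cell is an illustrative sketch, assuming the retrieval above returned at least one vector tool."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Illustrative sketch: invoke one retrieved tool directly instead of running\n",
+ "# the full agent loop; a single tool call consumes far fewer tokens per minute.\n",
+ "# Assumes `tools` (retrieved above) contains at least one vector FunctionTool.\n",
+ "vector_tool = next(\n",
+ "    t for t in tools if t.metadata.name.startswith(\"vector_tool\")\n",
+ ")\n",
+ "print(vector_tool(query=\"What evaluation dataset does SWE-Bench use?\"))"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },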
+ {
+ "cell_type": "code",
+ "source": [
+ "response = agent.query(\n",
+ "    \"Tell me about the evaluation dataset used \"\n",
+ "    \"in MetaGPT and compare it against SWE-Bench\"\n",
+ ")\n",
+ "print(str(response))"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "M6gM7iq6M8De",
+ "outputId": "392e2376-1aff-46bb-ab23-1da137fa27ed"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Added user message to memory: Tell me about the evaluation dataset used in MetaGPT and compare it against SWE-Bench\n",
+ "=== Calling Function ===\n",
+ "Calling function: summary_tool_swebench with args: {\"input\": \"Tell me about the evaluation dataset used in MetaGPT and compare it against SWE-Bench\"}\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.6840683729528756 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 15253, Requested ~4583. Please try again in 2m18.368s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+ "[... several dozen near-identical rate-limit retry warnings truncated for readability ...]\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "=== Function Output ===\n",
+ "Encountered error: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 12394, Requested ~4118. Please try again in 1m45.126s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.5109261820972161 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 12361, Requested ~182. Please try again in 1m5.434s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n",
+ "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 1.9397422137660878 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 12300, Requested ~182. Please try again in 1m4.819999999s. 
Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 0.768207041906039 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 12096, Requested ~182. Please try again in 1m2.788s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n", + "WARNING:llama_index.llms.openai.utils:Retrying llama_index.llms.openai.base.OpenAI._chat in 7.22187651320963 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for model `llama3-70b-8192` in organization `org_01hw1v17zqf6csfjsw04c5mxnm` on tokens per minute (TPM): Limit 6000, Used 12008, Requested ~182. Please try again in 1m1.904s. Visit https://console.groq.com/docs/rate-limits for more information.', 'type': 'tokens', 'code': 'rate_limit_exceeded'}}.\n" + ] + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mHTTPStatusError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36m_request\u001b[0;34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[0m\n\u001b[1;32m 998\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 999\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraise_for_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1000\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mhttpx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mHTTPStatusError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# thrown on 4xx and 5xx status code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_models.py\u001b[0m in \u001b[0;36mraise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 760\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merror_type\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0merror_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 761\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mHTTPStatusError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 762\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mHTTPStatusError\u001b[0m: Client error '429 Too Many Requests' for url 'https://api.groq.com/openai/v1/chat/completions'\nFor more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429", + "\nDuring handling of the above exception, another 
exception occurred:\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-26-8138d88ecef4>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m response = agent.query(\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\"Tell me about the evaluation dataset used \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\"in MetaGPT and compare it against SWE-Bench\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m )\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/base/base_query_engine.py\u001b[0m in \u001b[0;36mquery\u001b[0;34m(self, str_or_query_bundle)\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0mstr_or_query_bundle\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mQueryBundle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0mquery_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_query\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 52\u001b[0m dispatcher.event(\n\u001b[1;32m 53\u001b[0m \u001b[0mQueryEndEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr_or_query_bundle\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mresponse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mquery_result\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/callbacks/utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mcallback_manager\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCallbackManager\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallback_manager\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mcallback_manager\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mfunctools\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwraps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# preserve signature, name, etc. of func\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/base/agent/types.py\u001b[0m in \u001b[0;36m_query\u001b[0;34m(self, query_bundle)\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mtrace_method\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"query\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_query\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mquery_bundle\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mQueryBundle\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mRESPONSE_TYPE\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 43\u001b[0;31m agent_response = self.chat(\n\u001b[0m\u001b[1;32m 44\u001b[0m \u001b[0mquery_bundle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mquery_str\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0mchat_history\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m 
\u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/callbacks/utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mcallback_manager\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCallbackManager\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallback_manager\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mcallback_manager\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mfunctools\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwraps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# preserve signature, name, etc. 
of func\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/agent/runner/base.py\u001b[0m in \u001b[0;36mchat\u001b[0;34m(self, message, chat_history, tool_choice)\u001b[0m\n\u001b[1;32m 638\u001b[0m \u001b[0mpayload\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mEventPayload\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMESSAGES\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 639\u001b[0m ) as e:\n\u001b[0;32m--> 640\u001b[0;31m chat_response = self._chat(\n\u001b[0m\u001b[1;32m 641\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mchat_history\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchat_history\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/agent/runner/base.py\u001b[0m in \u001b[0;36m_chat\u001b[0;34m(self, message, chat_history, tool_choice, mode)\u001b[0m\n\u001b[1;32m 570\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 571\u001b[0m \u001b[0;31m# pass step queue in as argument, assume step executor is stateless\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 572\u001b[0;31m cur_step_output = self._run_step(\n\u001b[0m\u001b[1;32m 573\u001b[0m \u001b[0mtask\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtask_id\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtool_choice\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtool_choice\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 574\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, 
instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/agent/runner/base.py\u001b[0m in \u001b[0;36m_run_step\u001b[0;34m(self, task_id, step, input, mode, **kwargs)\u001b[0m\n\u001b[1;32m 409\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 410\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmode\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mChatResponseMode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mWAIT\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 411\u001b[0;31m \u001b[0mcur_step_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0magent_worker\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 412\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mmode\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mChatResponseMode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSTREAM\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[0mcur_step_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0magent_worker\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstream_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/callbacks/utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mcallback_manager\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCallbackManager\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallback_manager\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mwith\u001b[0m 
\u001b[0mcallback_manager\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mfunctools\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwraps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# preserve signature, name, etc. of func\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/agent/function_calling/step.py\u001b[0m in \u001b[0;36mrun_step\u001b[0;34m(self, step, task, **kwargs)\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[0;31m# get response and tool call (if exists)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 289\u001b[0;31m response = self._llm.chat_with_tools(\n\u001b[0m\u001b[1;32m 290\u001b[0m \u001b[0mtools\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtools\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 291\u001b[0m \u001b[0muser_msg\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/llms/openai/base.py\u001b[0m in \u001b[0;36mchat_with_tools\u001b[0;34m(self, tools, user_msg, chat_history, verbose, allow_parallel_tool_calls, tool_choice, **kwargs)\u001b[0m\n\u001b[1;32m 817\u001b[0m \u001b[0mmessages\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muser_msg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 818\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 819\u001b[0;31m response = self.chat(\n\u001b[0m\u001b[1;32m 820\u001b[0m \u001b[0mmessages\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 821\u001b[0m \u001b[0mtools\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtool_specs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/llms/openai_like/base.py\u001b[0m in \u001b[0;36mchat\u001b[0;34m(self, messages, **kwargs)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcompletion_response_to_chat_response\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompletion_response\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 106\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessages\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 107\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m def stream_chat(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/instrumentation/dispatcher.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(func, instance, args, kwargs)\u001b[0m\n\u001b[1;32m 196\u001b[0m )\n\u001b[1;32m 197\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 198\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 199\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSpanDropEvent\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspan_id\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mid_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr_str\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/core/llms/callbacks.py\u001b[0m in \u001b[0;36mwrapped_llm_chat\u001b[0;34m(_self, messages, **kwargs)\u001b[0m\n\u001b[1;32m 156\u001b[0m )\n\u001b[1;32m 157\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 158\u001b[0;31m \u001b[0mf_return_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_self\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessages\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 159\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m 
\u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m callback_manager.on_event_end(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/llms/openai/base.py\u001b[0m in \u001b[0;36mchat\u001b[0;34m(self, messages, **kwargs)\u001b[0m\n\u001b[1;32m 298\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 299\u001b[0m \u001b[0mchat_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompletion_to_chat_decorator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_complete\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 300\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mchat_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessages\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 301\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 302\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mllm_chat_callback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/tenacity/__init__.py\u001b[0m in \u001b[0;36mwrapped_f\u001b[0;34m(*args, **kw)\u001b[0m\n\u001b[1;32m 328\u001b[0m )\n\u001b[1;32m 329\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapped_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 330\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 331\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 332\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mretry_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mWrappedFn\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/tenacity/__init__.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, fn, *args, **kwargs)\u001b[0m\n\u001b[1;32m 465\u001b[0m \u001b[0mretry_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mRetryCallState\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mretry_object\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 466\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 467\u001b[0;31m \u001b[0mdo\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mretry_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mretry_state\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 468\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdo\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDoAttempt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 469\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/tenacity/__init__.py\u001b[0m in \u001b[0;36miter\u001b[0;34m(self, retry_state)\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 367\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0maction\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter_state\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mactions\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 368\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mretry_state\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 369\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 370\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/tenacity/__init__.py\u001b[0m in \u001b[0;36m<lambda>\u001b[0;34m(rs)\u001b[0m\n\u001b[1;32m 388\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_post_retry_check_actions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretry_state\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m\"RetryCallState\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter_state\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_explicit_retry\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miter_state\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mretry_run_result\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 390\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_add_action_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mrs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mrs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutcome\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 391\u001b[0m 
\u001b[0;32mreturn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/lib/python3.10/concurrent/futures/_base.py\u001b[0m in \u001b[0;36mresult\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 449\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mCancelledError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 450\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_state\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mFINISHED\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 451\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__get_result\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 452\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 453\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_condition\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/lib/python3.10/concurrent/futures/_base.py\u001b[0m in \u001b[0;36m__get_result\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 401\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_exception\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 402\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 403\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_exception\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 404\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 405\u001b[0m \u001b[0;31m# Break a reference cycle with the exception in self._exception\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/tenacity/__init__.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, fn, *args, **kwargs)\u001b[0m\n\u001b[1;32m 468\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdo\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDoAttempt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 469\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 470\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 471\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mBaseException\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# noqa: B902\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 472\u001b[0m \u001b[0mretry_state\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_exception\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexc_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: 
ignore[arg-type]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/llms/openai/base.py\u001b[0m in \u001b[0;36m_chat\u001b[0;34m(self, messages, **kwargs)\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 367\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreuse_client\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 368\u001b[0;31m response = client.chat.completions.create(\n\u001b[0m\u001b[1;32m 369\u001b[0m \u001b[0mmessages\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmessage_dicts\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 370\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_utils/_utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 275\u001b[0m \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34mf\"Missing required argument: {quote(missing[0])}\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 276\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 277\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 278\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 279\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/resources/chat/completions.py\u001b[0m in \u001b[0;36mcreate\u001b[0;34m(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[0m\n\u001b[1;32m 588\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mfloat\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0mhttpx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTimeout\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0mNotGiven\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNOT_GIVEN\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 589\u001b[0m ) -> ChatCompletion | Stream[ChatCompletionChunk]:\n\u001b[0;32m--> 590\u001b[0;31m return self._post(\n\u001b[0m\u001b[1;32m 591\u001b[0m \u001b[0;34m\"/chat/completions\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 592\u001b[0m body=maybe_transform(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36mpost\u001b[0;34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[0m\n\u001b[1;32m 1238\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"post\"\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mjson_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfiles\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mto_httpx_files\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1239\u001b[0m )\n\u001b[0;32m-> 1240\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mResponseT\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcast_to\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mopts\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstream\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstream_cls\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstream_cls\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1241\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1242\u001b[0m def patch(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36mrequest\u001b[0;34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0mstream_cls\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0m_StreamT\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 920\u001b[0m ) -> ResponseT | _StreamT:\n\u001b[0;32m--> 921\u001b[0;31m return self._request(\n\u001b[0m\u001b[1;32m 922\u001b[0m \u001b[0mcast_to\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcast_to\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 923\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36m_request\u001b[0;34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[0m\n\u001b[1;32m 1003\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mretries\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_should_retry\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1004\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1005\u001b[0;31m return self._retry_request(\n\u001b[0m\u001b[1;32m 1006\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1007\u001b[0m \u001b[0mcast_to\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36m_retry_request\u001b[0;34m(self, options, cast_to, remaining_retries, 
response_headers, stream, stream_cls)\u001b[0m\n\u001b[1;32m   1049\u001b[0m \u001b[0;31m# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1050\u001b[0m \u001b[0;31m# different thread if necessary.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1051\u001b[0;31m         \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msleep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1052\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1053\u001b[0m         return self._request(\n",
+            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "response = agent.query(\n",
+        "    \"Compare and contrast the LoRA papers (LongLoRA, LoftQ). \"\n",
+        "    \"Analyze the approach in each paper first. \"\n",
+        ")"
+      ],
+      "metadata": {
+        "id": "9SsWMO7qM-NX"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [],
+      "metadata": {
+        "id": "Y5LqYiEqNAdX"
+      },
+      "execution_count": null,
+      "outputs": []
+    }
+  ]
+}
\ No newline at end of file
diff --git a/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b8c4601db8f5e85f10275cbba27824bd1fae6e6
--- /dev/null
+++ b/recipes/3p_integrations/llamaindex/dlai_agentic_rag/README.md
@@ -0,0 +1,22 @@
+# Building Agentic RAG with Llamaindex
+
+This folder contains the Llama 3-ported notebooks of the DLAI short course [Building Agentic RAG with Llamaindex](https://www.deeplearning.ai/short-courses/building-agentic-rag-with-llamaindex/).
+
+1. [Building Agentic RAG with Llamaindex L1 Router Engine](Building_Agentic_RAG_with_Llamaindex_L1_Router_Engine.ipynb)
+
+2. [Building Agentic RAG with Llamaindex L2 Tool Calling](Building_Agentic_RAG_with_Llamaindex_L2_Tool_Calling.ipynb)
+
+3. [Building Agentic RAG with Llamaindex L3 Building an Agent Reasoning Loop](Building_Agentic_RAG_with_Llamaindex_L3_Building_an_Agent_Reasoning_Loop.ipynb)
+
+4. [Building Agentic RAG with Llamaindex L4 Building a Multi-Document Agent](Building_Agentic_RAG_with_Llamaindex_L4_Building_a_Multi-Document_Agent.ipynb)
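+
+As a quick orientation, each lesson builds toward queries like the one below. This is a minimal sketch, assuming `agent` is the tool-calling agent constructed in the L2 notebook over the indexed papers:
+
+```python
+# Ask the agent to reason over two of the indexed papers via its query tools.
+response = agent.query(
+    "Compare and contrast the LoRA papers (LongLoRA, LoftQ). "
+    "Analyze the approach in each paper first. "
+)
+print(str(response))
+```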