diff --git a/CHANGELOG.md b/CHANGELOG.md
index 908c950ccb8bb6666b88741c7e140899aa0d0e9e..7c203cd21639db3963861d0c346f5b6971b54f60 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,10 +2,19 @@
 
 ## Unreleased
 
+### New Features
+
+- added native sync and async client support for the latest `openai` client package (#8712)
+- added support for `AzureOpenAIEmbedding` (#8712)
+
 ### Bug Fixes / Nits
 
 - Fixed errors about "no host supplied" with `download_loader` (#8723)
 
+### Breaking Changes
+
+- `OpenAIEmbedding` no longer supports Azure; Azure support has moved into the `AzureOpenAIEmbedding` class (#8712)
+
 ## [0.8.62.post1] - 2023-11-05
 
 ### Breaking Changes
diff --git a/docs/examples/customization/llms/AzureOpenAI.ipynb b/docs/examples/customization/llms/AzureOpenAI.ipynb
index 7cf5fdb5f4d2e7a5bdf47da896d3c9b627ac4820..f9ddfc665cdb65c34a059512390807bc226fdd99 100644
--- a/docs/examples/customization/llms/AzureOpenAI.ipynb
+++ b/docs/examples/customization/llms/AzureOpenAI.ipynb
@@ -52,11 +52,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import json\n",
-    "import openai\n",
     "from llama_index.llms import AzureOpenAI\n",
-    "from llama_index.embeddings import OpenAIEmbedding\n",
+    "from llama_index.embeddings import AzureOpenAIEmbedding\n",
     "from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n",
     "import logging\n",
     "import sys\n",
@@ -84,31 +81,26 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "api_key = \"<insert api key from azure>\"\n",
-    "api_base = \"<insert api base url from azure>\"\n",
-    "api_type = \"azure\"\n",
-    "api_version = \"2023-05-15\"\n",
+    "api_key = \"<api-key>\"\n",
+    "azure_endpoint = \"https://<your-resource-name>.openai.azure.com/\"\n",
+    "api_version = \"2023-07-01-preview\"\n",
     "\n",
     "llm = AzureOpenAI(\n",
-    "    model=\"model name\",\n",
-    "    engine=\"<insert deployment name from azure>\",\n",
+    "    model=\"gpt-35-turbo-16k\",\n",
+    "    deployment_name=\"my-custom-llm\",\n",
     "    api_key=api_key,\n",
-    "    api_base=api_base,\n",
-    "    api_type=api_type,\n",
+    "    azure_endpoint=azure_endpoint,\n",
     "    api_version=api_version,\n",
     ")\n",
     "\n",
     "# You need to deploy your own embedding model as well as your own chat completion model\n",
-    "embed_model = OpenAIEmbedding(\n",
+    "embed_model = AzureOpenAIEmbedding(\n",
     "    model=\"text-embedding-ada-002\",\n",
-    "    deployment_name=\"<insert EMBEDDING model deployment name from azure>\",\n",
+    "    deployment_name=\"my-custom-embedding\",\n",
     "    api_key=api_key,\n",
-    "    api_base=api_base,\n",
-    "    api_type=api_type,\n",
+    "    azure_endpoint=azure_endpoint,\n",
     "    api_version=api_version,\n",
-    ")\n",
-    "\n",
-    "documents = SimpleDirectoryReader(\"../../data/paul_graham/\").load_data()"
+    ")"
    ]
   },
   {
@@ -138,74 +130,19 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "> Adding chunk: \t\t\n",
-    "\n",
-    "What I Worked On\n",
-    "\n",
-    "February 2021\n",
-    "\n",
-    "Before col...\n",
-    "> Adding chunk: interesting of that type. 
So I'm not surprised ...\n", - "> Adding chunk: to be the study of the ultimate truths, compare...\n", - "> Adding chunk: language called PL/I, and the situation was sim...\n", - "> Adding chunk: or if there even was a specific moment, but dur...\n", - "> Adding chunk: an uneasy alliance between two halves, theory a...\n", - "> Adding chunk: were hundreds of years old.\n", - "\n", - "And moreover this ...\n", - "> Adding chunk: that he'd found such a spectacular way to get o...\n", - "> Adding chunk: the classes that everyone has to take in fundam...\n", - "> Adding chunk: students wouldn't require the faculty to teach ...\n", - "> Adding chunk: or you get merely photographic accuracy, and wh...\n", - "> Adding chunk: But the Accademia wasn't teaching me anything e...\n", - "> Adding chunk: In Florence, after paying my part of the rent, ...\n", - "> Adding chunk: about a new thing called HTML, which was, as he...\n", - "> Adding chunk: were plenty of earnest students too: kids who \"...\n", - "> Adding chunk: Lisp hacking work was very rare, and I didn't w...\n", - "> Adding chunk: had done for the popularity of microcomputers. ...\n", - "> Adding chunk: shopping cart, and I wrote a new site generator...\n", - "> Adding chunk: seed funding from Idelle's husband Julian. In r...\n", - "> Adding chunk: for a month,\" he said, \"and it's still not done...\n", - "> Adding chunk: fun to work on. If all I'd had to do was work o...\n", - "> Adding chunk: the collar than a picture of the whole shirt. T...\n", - "> Adding chunk: partly because that's what startups did during ...\n", - "> Adding chunk: had given us a lot of options when they bought ...\n", - "> Adding chunk: That's what I should have done, just gone off s...\n", - "> Adding chunk: buy. Now I could actually choose what neighborh...\n", - "> Adding chunk: trying to build what it's now clear is about tw...\n", - "> Adding chunk: dream of building a new Lisp, partly because on...\n", - "> Adding chunk: me several years to understand the implications...\n", - "> Adding chunk: seems about as hip.\n", - "\n", - "It's not that unprestigiou...\n", - "> Adding chunk: charge of marketing at a Boston investment bank...\n", - "> Adding chunk: out \"But not me!\" and went on with the talk. Bu...\n", - "> Adding chunk: And neither of them helped founders enough in t...\n", - "> Adding chunk: fake investors, because they would in a similar...\n", - "> Adding chunk: batch was so good. You had to be pretty bold to...\n", - "> Adding chunk: had not originally intended YC to be a full-tim...\n", - "> Adding chunk: internal software in Arc. But while I continued...\n", - "> Adding chunk: double from a kidney stone, he suggested that i...\n", - "> Adding chunk: we agreed to make it a complete changing of the...\n", - "> Adding chunk: of 2014 painting. I'd never been able to work s...\n", - "> Adding chunk: his grad student Steve Russell suggested it. Ru...\n", - "> Adding chunk: defined goal, or it would have been hard to kee...\n", - "> Adding chunk: pools. It felt like I was doing life right. I r...\n", - "> Adding chunk: the more exciting.\n", - "\n", - "[2] Italian words for abstr...\n", - "> Adding chunk: expensive.\n", - "\n", - "[7] Technically the apartment wasn'...\n", - "> Adding chunk: online means you treat the online version as th...\n", - "> Adding chunk: logo had been a white V on a red circle, so I m...\n", - "> Adding chunk: YC was not working with Jessica anymore. 
We'd b...\n", - "> [build_index_from_documents] Total LLM token usage: 0 tokens\n", - "> [build_index_from_documents] Total embedding token usage: 17533 tokens\n" + "INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n" ] } ], "source": [ + "documents = SimpleDirectoryReader(\n", + " input_files=[\"../../data/paul_graham/paul_graham_essay.txt\"]\n", + ").load_data()\n", "index = VectorStoreIndex.from_documents(documents)" ] }, @@ -219,13 +156,21 @@ "name": "stdout", "output_type": "stream", "text": [ - "> [query] Total LLM token usage: 815 tokens\n", - "> [query] Total embedding token usage: 8 tokens\n", - "> Source (Doc id: ad03b507-8953-4201-b545-6195c5cfec49): me several years to understand the implications. It meant there would be a whole new generation o...\n", - "query was: What is most interesting about this essay?\n", - "answer was: \n", + "INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", + "> Source (Doc id: 3e0d1e3f-9099-483f-9abd-8f352c5e730f): A lot of Lisp hackers dream of building a new Lisp, partly because one of the distinctive feature...\n", + "\n", + "> Source (Doc id: 06c1d986-1856-44cd-980d-651252ad1caf): What I Worked On\n", "\n", - "The most interesting thing about this essay is the way the author reflects on the impact of online publishing on their life and career. They discuss how the opening up of the internet to allow for more diverse, and less prestigious, forms of writing allowed them to pursue the kind of writing they were interested in, which was something that had not been possible before. 
Furthermore, the author acknowledges that their work may not be seen as prestigious, such as Latin, but yet still has a great impact. They further reflect on how their life and career have been shaped by working on these types of projects.\n" + "February 2021\n", + "\n", + "Before college the two main things I worked on, outside of schoo...\n", + "query was: What is most interesting about this essay?\n", + "answer was: The most interesting aspect of this essay is the author's exploration of the transformative power of publishing essays online. The author reflects on how the internet has democratized the publishing process, allowing anyone to publish their work and reach a wide audience. This realization led the author to start writing and publishing essays online, which eventually became a significant part of their work. The author also discusses the initial skepticism and lack of prestige associated with online essays, but they find encouragement in the potential for genuine discovery and the absence of the desire to impress others. Overall, the essay highlights the author's personal journey and the impact of online publishing on their career.\n" ] } ], diff --git a/docs/examples/llm/azure_openai.ipynb b/docs/examples/llm/azure_openai.ipynb index 6f58fb8ba6b0b48fade8e3035fc18b8769f4eaf7..c662e1432c63de87feb51af33e39e4e631d928f6 100644 --- a/docs/examples/llm/azure_openai.ipynb +++ b/docs/examples/llm/azure_openai.ipynb @@ -157,10 +157,9 @@ "Using Azure deployment of OpenAI models is very similar to normal OpenAI. \n", "You just need to configure a couple more environment variables.\n", "\n", - "- `OPENAI_API_TYPE`: set this to `azure`\n", - "- `OPENAI_API_VERSION`: set this to `2023-03-15-preview`\n", + "- `OPENAI_API_VERSION`: set this to `2023-07-01-preview`\n", " This may change in the future.\n", - "- `OPENAI_API_BASE`: your endpoint should look like the following\n", + "- `AZURE_OPENAI_ENDPOINT`: your endpoint should look like the following\n", " https://YOUR_RESOURCE_NAME.openai.azure.com/\n", "- `OPENAI_API_KEY`: your API key" ] @@ -176,10 +175,9 @@ "\n", "os.environ[\"OPENAI_API_KEY\"] = \"<your-api-key>\"\n", "os.environ[\n", - " \"OPENAI_API_BASE\"\n", + " \"AZURE_OPENAI_ENDPOINT\"\n", "] = \"https://<your-resource-name>.openai.azure.com/\"\n", - "os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n", - "os.environ[\"OPENAI_API_VERSION\"] = \"2023-03-15-preview\"" + "os.environ[\"OPENAI_API_VERSION\"] = \"2023-07-01-preview\"" ] }, { @@ -195,16 +193,7 @@ "execution_count": null, "id": "fd389e2c-a3d5-4b47-acbe-b22b3da17670", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/suo/miniconda3/envs/llama/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.7) is available. 
It's recommended that you update to the latest version using `pip install -U deeplake`.\n", - " warnings.warn(\n" - ] - } - ], + "outputs": [], "source": [ "from llama_index.llms import AzureOpenAI" ] @@ -248,10 +237,9 @@ " engine=\"my-custom-llm\",\n", " model=\"gpt-35-turbo-16k\",\n", " temperature=0.0,\n", - " api_base=\"https://<your-resource-name>.openai.azure.com/\",\n", + " azure_endpoint=\"https://<your-resource-name>.openai.azure.com/\",\n", " api_key=\"<your-api-key>\",\n", - " api_type=\"azure\",\n", - " api_version=\"2023-03-15-preview\",\n", + " api_version=\"2023-07-01-preview\",\n", ")" ] }, @@ -273,7 +261,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "the sun is shining brightly. Fluffy white clouds float lazily across the sky, creating a picturesque scene. The vibrant blue color of the sky brings a sense of calm and tranquility. It is a perfect day to be outside, enjoying the warmth of the sun and the gentle breeze. The sky seems to stretch endlessly, reminding us of the vastness and beauty of the world around us. It is a reminder to appreciate the simple pleasures in life and to take a moment to pause and admire the natural wonders that surround us.\n" + "the sun is shining brightly. Fluffy white clouds float lazily across the sky, creating a picturesque scene. The vibrant blue color of the sky brings a sense of calm and tranquility. It is a perfect day to be outside, enjoying the warmth of the sun and the gentle breeze. The sky seems to stretch endlessly, reminding us of the vastness and beauty of the world around us. It is a reminder to appreciate the simple pleasures in life and to take a moment to admire the natural wonders that surround us.\n" ] } ], diff --git a/docs/examples/llm/openai.ipynb b/docs/examples/llm/openai.ipynb index 1cf17d9e9147d2c348ed0825740122a450d4a334..782b0388034ff920d02f9e2fdf12ce02b5d74f5e 100644 --- a/docs/examples/llm/openai.ipynb +++ b/docs/examples/llm/openai.ipynb @@ -72,7 +72,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "a computer scientist, entrepreneur, and venture capitalist. He is best known as the co-founder of Y Combinator, a startup accelerator and seed capital firm. Graham has also written several influential essays on startups and entrepreneurship, which have gained a large following in the tech community. He has been involved in the founding and funding of numerous successful startups, including Reddit, Dropbox, and Airbnb. Graham is known for his insightful and often controversial opinions on various topics, including education, inequality, and the future of technology.\n" + "a computer scientist, entrepreneur, and venture capitalist. He is best known as the co-founder of the startup accelerator Y Combinator. Graham has also written several influential essays on startups and entrepreneurship, which have gained a wide following in the tech industry. He has been involved in the founding and funding of numerous successful startups, including Reddit, Dropbox, and Airbnb. Graham is known for his insightful and often contrarian views on technology and business, and he is considered one of the most influential figures in the startup ecosystem.\n" ] } ], @@ -116,7 +116,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "assistant: Ahoy there, matey! The name be Captain Crimsonbeard, the most colorful pirate to sail the seven seas!\n" + "assistant: Ahoy there, matey! 
The name be Captain Crimson, the most colorful pirate to sail the seven seas!\n" ] } ], @@ -163,7 +163,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "a computer scientist, entrepreneur, and venture capitalist. He is best known as the co-founder of the startup accelerator Y Combinator. Graham has also written several influential essays on startups and entrepreneurship, which have gained a large following in the tech community. He has been involved in the founding and funding of numerous successful startups, including Reddit, Dropbox, and Airbnb. Graham is known for his insightful and often controversial opinions on various topics, including education, inequality, and the future of technology." + "a computer scientist, entrepreneur, and venture capitalist. He is best known as the co-founder of Y Combinator, a startup accelerator and seed capital firm. Graham has also written several influential essays on startups and entrepreneurship, which have gained a large following in the tech community. He has been involved in the founding and funding of numerous successful startups, including Dropbox, Airbnb, and Reddit. Graham is considered a thought leader in the startup world and has been recognized for his contributions to the tech industry." ] } ], @@ -187,9 +187,9 @@ "metadata": {}, "outputs": [], "source": [ - "from llama_index.llms import OpenAI\n", + "from llama_index.llms import OpenAI, ChatMessage\n", "\n", - "llm = OpenAI(stream=True)\n", + "llm = OpenAI()\n", "messages = [\n", " ChatMessage(\n", " role=\"system\", content=\"You are a pirate with a colorful personality\"\n", @@ -260,7 +260,7 @@ "text": [ "\n", "\n", - "Paul Graham is an entrepreneur, venture capitalist, and computer scientist. He is best known for his work in the startup world, having co-founded the accelerator Y Combinator and investing in hundreds of startups. He is also a prolific writer, having written several books on topics such as startups, programming, and technology. He is a frequent speaker at conferences and universities, and his essays have been widely read and discussed.\n" + "Paul Graham is an entrepreneur, programmer, investor, and essayist.\n" ] } ], @@ -337,7 +337,15 @@ "execution_count": null, "id": "fdacb943-bab8-442a-a6db-aee935658340", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "FunctionCall(arguments='{\\n \"name\": \"Sunshine\",\\n \"artist\": \"John Smith\"\\n}', name='Song')\n" + ] + } + ], "source": [ "from llama_index.llms import OpenAI\n", "\n", @@ -388,7 +396,7 @@ "text": [ "\n", "\n", - "Paul Graham is an entrepreneur, venture capitalist, and computer scientist. He is best known for his work in the startup world, having co-founded the accelerator Y Combinator and investing in hundreds of startups. He is also a prolific writer, having written several books on topics such as startups, programming, and technology. He is a frequent speaker at conferences and universities, and his essays have been widely read and discussed.\n" + "Paul Graham is an entrepreneur, venture capitalist, and computer scientist. He\n" ] } ], @@ -418,7 +426,7 @@ "text": [ "\n", "\n", - "Paul Graham is an entrepreneur, venture capitalist, and computer scientist. He is best known for his work in the startup world, having co-founded the accelerator Y Combinator and investing in hundreds of startups. He is also a prolific writer, having written several books on topics such as startups, programming, and technology. 
He is a frequent speaker at conferences and universities, and his essays have been widely read and discussed." + "Paul Graham is an entrepreneur, venture capitalist, and computer scientist. He" ] } ], @@ -441,7 +449,15 @@ "execution_count": null, "id": "015c2d39", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "a computer scientist, entrepreneur, and venture capitalist. He is best known as the co-founder of the startup accelerator Y Combinator. Graham has also written several influential essays on startups and entrepreneurship, which have gained a wide following in the tech industry. He has been involved in the founding and funding of numerous successful startups, including Reddit, Dropbox, and Airbnb. Graham is known for his insightful and often controversial opinions on various topics, including education, inequality, and the future of technology.\n" + ] + } + ], "source": [ "from llama_index.llms import OpenAI\n", "\n", diff --git a/experimental/openai_fine_tuning/launch_training.py b/experimental/openai_fine_tuning/launch_training.py index 50286ccbde9a2ee308bfe61fc0f0ae7838e75c53..7fa7a936685a1b4b96e5bebe6e26fd746a21d018 100644 --- a/experimental/openai_fine_tuning/launch_training.py +++ b/experimental/openai_fine_tuning/launch_training.py @@ -3,38 +3,35 @@ import sys import time import openai +from openai import OpenAI from validate_json import validate_json -openai.api_key = os.getenv("OPENAI_API_KEY") +client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) def launch_training(data_path: str) -> None: validate_json(data_path) - file_name = os.path.basename(data_path) + # TODO: figure out how to specify file name in the new API + # file_name = os.path.basename(data_path) # upload file with open(data_path, "rb") as f: - output = openai.File.create( + output = client.files.create( file=f, purpose="fine-tune", - user_provided_filename=file_name, ) print("File uploaded...") # launch training while True: try: - openai.FineTuningJob.create( - training_file=output["id"], model="gpt-3.5-turbo" - ) + client.fine_tunes.create(training_file=output.id, model="gpt-3.5-turbo") break - except openai.error.InvalidRequestError: + except openai.BadRequestError: print("Waiting for file to be ready...") time.sleep(60) - print( - f"Training job {output['id']} launched. You will be emailed when it's complete." - ) + print(f"Training job {output.id} launched. 
You will be emailed when it's complete.") if __name__ == "__main__": diff --git a/llama_index/embeddings/__init__.py b/llama_index/embeddings/__init__.py index 5e132c868deb7cf21eda20cad78ea23a6052d7bb..ce9c91f9b0ae602cb0a5fdedf5ca5412b52f4054 100644 --- a/llama_index/embeddings/__init__.py +++ b/llama_index/embeddings/__init__.py @@ -4,6 +4,7 @@ from llama_index.embeddings.adapter import ( AdapterEmbeddingModel, LinearAdapterEmbeddingModel, ) +from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding from llama_index.embeddings.base import SimilarityMode from llama_index.embeddings.bedrock import BedrockEmbedding from llama_index.embeddings.clarifai import ClarifaiEmbedding @@ -47,6 +48,7 @@ __all__ = [ "LinearAdapterEmbeddingModel", "LLMRailsEmbedding", "OpenAIEmbedding", + "AzureOpenAIEmbedding", "OptimumEmbedding", "Pooling", "SimilarityMode", diff --git a/llama_index/embeddings/azure_openai.py b/llama_index/embeddings/azure_openai.py new file mode 100644 index 0000000000000000000000000000000000000000..ac12b31833c27608243b5f1a7408270c751609b2 --- /dev/null +++ b/llama_index/embeddings/azure_openai.py @@ -0,0 +1,98 @@ +from typing import Any, Dict, Optional, Tuple + +from openai import AsyncAzureOpenAI, AzureOpenAI + +from llama_index.bridge.pydantic import Field, PrivateAttr, root_validator +from llama_index.callbacks.base import CallbackManager +from llama_index.embeddings.base import DEFAULT_EMBED_BATCH_SIZE +from llama_index.embeddings.openai import ( + OpenAIEmbedding, + OpenAIEmbeddingMode, + OpenAIEmbeddingModelType, +) +from llama_index.llms.generic_utils import get_from_param_or_env +from llama_index.llms.openai_utils import resolve_from_aliases + + +class AzureOpenAIEmbedding(OpenAIEmbedding): + azure_endpoint: Optional[str] = Field( + default=None, description="The Azure endpoint to use." + ) + azure_deployment: Optional[str] = Field( + default=None, description="The Azure deployment to use." + ) + + _client: AzureOpenAI = PrivateAttr() + _aclient: AsyncAzureOpenAI = PrivateAttr() + + def __init__( + self, + mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE, + model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002, + embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, + additional_kwargs: Optional[Dict[str, Any]] = None, + api_key: Optional[str] = None, + api_version: Optional[str] = None, + # azure specific + azure_endpoint: Optional[str] = None, + azure_deployment: Optional[str] = None, + deployment_name: Optional[str] = None, + max_retries: int = 10, + callback_manager: Optional[CallbackManager] = None, + **kwargs: Any, + ): + azure_endpoint = get_from_param_or_env( + "azure_endpoint", azure_endpoint, "AZURE_OPENAI_ENDPOINT", "" + ) + + azure_deployment = resolve_from_aliases( + azure_deployment, + deployment_name, + ) + + super().__init__( + mode=mode, + model=model, + embed_batch_size=embed_batch_size, + additional_kwargs=additional_kwargs, + api_key=api_key, + api_version=api_version, + azure_endpoint=azure_endpoint, + azure_deployment=azure_deployment, + max_retries=max_retries, + callback_manager=callback_manager, + **kwargs, + ) + + @root_validator(pre=True) + def validate_env(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Validate necessary credentials are set.""" + if ( + values["api_base"] == "https://api.openai.com/v1" + and values["azure_endpoint"] is None + ): + raise ValueError( + "You must set OPENAI_API_BASE to your Azure endpoint. 
" + "It should look like https://YOUR_RESOURCE_NAME.openai.azure.com/" + ) + if values["api_version"] is None: + raise ValueError("You must set OPENAI_API_VERSION for Azure OpenAI.") + + return values + + def _get_clients(self) -> Tuple[AzureOpenAI, AsyncAzureOpenAI]: + client = AzureOpenAI(**self._get_credential_kwargs()) + aclient = AsyncAzureOpenAI(**self._get_credential_kwargs()) + return client, aclient + + def _get_credential_kwargs(self) -> Dict[str, Any]: + return { + "api_key": self.api_key, + "azure_endpoint": self.azure_endpoint, + "azure_deployment": self.azure_deployment, + "api_version": self.api_version, + } + + @classmethod + def class_name(cls) -> str: + return "AzureOpenAIEmbedding" diff --git a/llama_index/embeddings/openai.py b/llama_index/embeddings/openai.py index 370b6faa5465fc91bc03cc76c8e0302f751199a4..02c902bfce992b42b525281fefe167b09ec67d28 100644 --- a/llama_index/embeddings/openai.py +++ b/llama_index/embeddings/openai.py @@ -3,14 +3,13 @@ from enum import Enum from typing import Any, Dict, List, Optional, Tuple -import openai +from openai import AsyncOpenAI, OpenAI from llama_index.bridge.pydantic import Field, PrivateAttr from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding from llama_index.llms.openai_utils import ( create_retry_decorator, - resolve_from_aliases, resolve_openai_credentials, ) @@ -103,9 +102,7 @@ _TEXT_MODE_MODEL_DICT = { @embedding_retry_decorator -def get_embedding( - text: str, engine: Optional[str] = None, **kwargs: Any -) -> List[float]: +def get_embedding(client: OpenAI, text: str, engine: str, **kwargs: Any) -> List[float]: """Get embedding. NOTE: Copied from OpenAI's embedding utils: @@ -117,14 +114,14 @@ def get_embedding( """ text = text.replace("\n", " ") - return openai.Embedding.create(input=[text], model=engine, **kwargs)["data"][0][ - "embedding" - ] + return ( + client.embeddings.create(input=[text], model=engine, **kwargs).data[0].embedding + ) @embedding_retry_decorator async def aget_embedding( - text: str, engine: Optional[str] = None, **kwargs: Any + aclient: AsyncOpenAI, text: str, engine: str, **kwargs: Any ) -> List[float]: """Asynchronously get embedding. @@ -137,14 +134,16 @@ async def aget_embedding( """ text = text.replace("\n", " ") - return (await openai.Embedding.acreate(input=[text], model=engine, **kwargs))[ - "data" - ][0]["embedding"] + return ( + (await aclient.embeddings.create(input=[text], model=engine, **kwargs)) + .data[0] + .embedding + ) @embedding_retry_decorator def get_embeddings( - list_of_text: List[str], engine: Optional[str] = None, **kwargs: Any + client: OpenAI, list_of_text: List[str], engine: str, **kwargs: Any ) -> List[List[float]]: """Get embeddings. @@ -159,13 +158,16 @@ def get_embeddings( list_of_text = [text.replace("\n", " ") for text in list_of_text] - data = openai.Embedding.create(input=list_of_text, model=engine, **kwargs).data - return [d["embedding"] for d in data] + data = client.embeddings.create(input=list_of_text, model=engine, **kwargs).data + return [d.embedding for d in data] @embedding_retry_decorator async def aget_embeddings( - list_of_text: List[str], engine: Optional[str] = None, **kwargs: Any + aclient: AsyncOpenAI, + list_of_text: List[str], + engine: str, + **kwargs: Any, ) -> List[List[float]]: """Asynchronously get embeddings. 
@@ -181,9 +183,9 @@ async def aget_embeddings( list_of_text = [text.replace("\n", " ") for text in list_of_text] data = ( - await openai.Embedding.acreate(input=list_of_text, model=engine, **kwargs) + await aclient.embeddings.create(input=list_of_text, model=engine, **kwargs) ).data - return [d["embedding"] for d in data] + return [d.embedding for d in data] def get_engine( @@ -218,56 +220,46 @@ class OpenAIEmbedding(BaseEmbedding): - OpenAIEmbeddingModelType.BABBAGE - OpenAIEmbeddingModelType.ADA - OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002 - - deployment_name (Optional[str]): Optional deployment of model. Defaults to None. - If this value is not None, mode and model will be ignored. - Only available for using AzureOpenAI. """ - deployment_name: Optional[str] additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the OpenAI API." ) - api_key: str = Field(default=None, description="The OpenAI API key.") - api_type: str = Field(default=None, description="The OpenAI API type.") + api_key: str = Field(description="The OpenAI API key.") api_base: str = Field(description="The base URL for OpenAI API.") - api_version: str = Field(description="The API version for OpenAI API.") + api_version: str = Field(description="The version for OpenAI API.") + + max_retries: int = Field( + default=10, description="Maximum number of retries.", gte=0 + ) _query_engine: OpenAIEmbeddingModeModel = PrivateAttr() _text_engine: OpenAIEmbeddingModeModel = PrivateAttr() + _client: OpenAI = PrivateAttr() + _aclient: AsyncOpenAI = PrivateAttr() def __init__( self, mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE, model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002, - deployment_name: Optional[str] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, additional_kwargs: Optional[Dict[str, Any]] = None, api_key: Optional[str] = None, - api_type: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, + max_retries: int = 10, callback_manager: Optional[CallbackManager] = None, - # aliases for deployment name - deployment: Optional[str] = None, - deployment_id: Optional[str] = None, - engine: Optional[str] = None, **kwargs: Any, ) -> None: additional_kwargs = additional_kwargs or {} - api_key, api_type, api_base, api_version = resolve_openai_credentials( + api_key, api_base, api_version = resolve_openai_credentials( api_key=api_key, - api_type=api_type, api_base=api_base, api_version=api_version, ) - deployment_name = resolve_from_aliases( - deployment_name, deployment, deployment_id, engine - ) - self._query_engine = get_engine(mode, model, _QUERY_MODE_MODEL_DICT) self._text_engine = get_engine(mode, model, _TEXT_MODE_MODEL_DICT) @@ -275,69 +267,67 @@ class OpenAIEmbedding(BaseEmbedding): embed_batch_size=embed_batch_size, callback_manager=callback_manager, model_name=model, - deployment_name=deployment_name, additional_kwargs=additional_kwargs, api_key=api_key, - api_type=api_type, - api_version=api_version, api_base=api_base, + api_version=api_version, + max_retries=max_retries, **kwargs, ) + # NOTE: init after super to use class attributes + helper function + self._client, self._aclient = self._get_clients() + + def _get_clients(self) -> Tuple[OpenAI, AsyncOpenAI]: + client = OpenAI(**self._get_credential_kwargs()) + aclient = AsyncOpenAI(**self._get_credential_kwargs()) + return client, aclient + @classmethod def class_name(cls) -> str: return "OpenAIEmbedding" - @property - def _credential_kwargs(self) -> Dict[str, Any]: + def 
_get_credential_kwargs(self) -> Dict[str, Any]:
         return {
             "api_key": self.api_key,
-            "api_type": self.api_type,
-            "api_base": self.api_base,
-            "api_version": self.api_version,
-        }
-
-    @property
-    def _all_kwargs(self) -> Dict[str, Any]:
-        return {
-            **self._credential_kwargs,
-            **self.additional_kwargs,
+            "base_url": self.api_base,
+            "max_retries": self.max_retries,
         }
 
     def _get_query_embedding(self, query: str) -> List[float]:
         """Get query embedding."""
         return get_embedding(
+            self._client,
             query,
             engine=self._query_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
 
     async def _aget_query_embedding(self, query: str) -> List[float]:
         """The asynchronous version of _get_query_embedding."""
         return await aget_embedding(
+            self._aclient,
             query,
             engine=self._query_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
 
     def _get_text_embedding(self, text: str) -> List[float]:
         """Get text embedding."""
         return get_embedding(
+            self._client,
             text,
             engine=self._text_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
 
     async def _aget_text_embedding(self, text: str) -> List[float]:
         """Asynchronously get text embedding."""
         return await aget_embedding(
+            self._aclient,
             text,
             engine=self._text_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
 
     def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
@@ -348,17 +338,17 @@
 
         """
         return get_embeddings(
+            self._client,
             texts,
             engine=self._text_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
 
     async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Asynchronously get text embeddings."""
         return await aget_embeddings(
+            self._aclient,
             texts,
             engine=self._text_engine,
-            deployment_id=self.deployment_name,
-            **self._all_kwargs,
+            **self.additional_kwargs,
         )
diff --git a/llama_index/finetuning/openai/base.py b/llama_index/finetuning/openai/base.py
index 1f26e327067c1de1d64324ec633b4c5ebafb2bf9..8d16936041afe80a09acebbe29e299cac1585f7b 100644
--- a/llama_index/finetuning/openai/base.py
+++ b/llama_index/finetuning/openai/base.py
@@ -6,6 +6,8 @@ import time
 from typing import Any, Optional
 
 import openai
+from openai import OpenAI as SyncOpenAI
+from openai.types.fine_tuning import FineTuningJob
 
 from llama_index.callbacks import OpenAIFineTuningHandler
 from llama_index.finetuning.openai.validate_json import validate_json
@@ -33,8 +35,9 @@
         self._verbose = verbose
         self._validate_json = validate_json
         self._start_job: Optional[Any] = None
+        self._client = SyncOpenAI(api_key=os.getenv("OPENAI_API_KEY", None))
         if start_job_id is not None:
-            self._start_job = openai.FineTuningJob.retrieve(start_job_id)
+            self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)
 
     @classmethod
     def from_finetuning_handler(
@@ -58,15 +61,12 @@
         if self._validate_json:
             validate_json(self.data_path)
 
-        file_name = os.path.basename(self.data_path)
+        # TODO: figure out how to specify file name in the new API
+        # file_name = os.path.basename(self.data_path)
 
         # upload file
         with open(self.data_path, "rb") as f:
-            output = openai.File.create(
-                file=f,
-                purpose="fine-tune",
-                user_provided_filename=file_name,
-            )
+            output = self._client.files.create(file=f, purpose="fine-tune")
         logger.info("File 
uploaded...") if self._verbose: print("File uploaded...") @@ -74,23 +74,23 @@ class OpenAIFinetuneEngine(BaseLLMFinetuneEngine): # launch training while True: try: - job_output = openai.FineTuningJob.create( - training_file=output["id"], model=self.base_model + job_output = self._client.fine_tunes.create( + training_file=output.id, model=self.base_model ) self._start_job = job_output break - except openai.error.InvalidRequestError: + except openai.BadRequestError: print("Waiting for file to be ready...") time.sleep(60) info_str = ( - f"Training job {output['id']} launched. " + f"Training job {output.id} launched. " "You will be emailed when it's complete." ) logger.info(info_str) if self._verbose: print(info_str) - def get_current_job(self) -> Any: + def get_current_job(self) -> FineTuningJob: """Get current job.""" # validate that it works if not self._start_job: @@ -98,15 +98,15 @@ class OpenAIFinetuneEngine(BaseLLMFinetuneEngine): # try getting id, make sure that run succeeded job_id = self._start_job["id"] - return openai.FineTuningJob.retrieve(job_id) + return self._client.fine_tuning.jobs.retrieve(job_id) def get_finetuned_model(self, **model_kwargs: Any) -> LLM: """Gets finetuned model.""" current_job = self.get_current_job() - job_id = current_job["id"] - status = current_job["status"] - model_id = current_job["fine_tuned_model"] + job_id = current_job.id + status = current_job.status + model_id = current_job.fine_tuned_model if model_id is None: raise ValueError( diff --git a/llama_index/llms/azure_openai.py b/llama_index/llms/azure_openai.py index 050fb2a0db1b6707b5f3d8026656b4e842f27d09..d7fc55ed10b822ed7bf2136835512c3219f3f077 100644 --- a/llama_index/llms/azure_openai.py +++ b/llama_index/llms/azure_openai.py @@ -1,15 +1,17 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, Tuple + +from openai import AsyncAzureOpenAI +from openai import AzureOpenAI as SyncAzureOpenAI from llama_index.bridge.pydantic import Field, PrivateAttr, root_validator from llama_index.callbacks import CallbackManager +from llama_index.llms.generic_utils import get_from_param_or_env from llama_index.llms.openai import OpenAI from llama_index.llms.openai_utils import ( refresh_openai_azuread_token, resolve_from_aliases, ) -AZURE_OPENAI_API_TYPE = "azure" - class AzureOpenAI(OpenAI): """ @@ -25,23 +27,30 @@ class AzureOpenAI(OpenAI): for your deployment when you deployed a model. You must have the following environment variables set: - - `OPENAI_API_TYPE`: set this to `azure`, `azure_ad`, or `azuread` - `OPENAI_API_VERSION`: set this to `2023-05-15` This may change in the future. - - `OPENAI_API_BASE`: your endpoint should look like the following + - `AZURE_OPENAI_ENDPOINT`: your endpoint should look like the following https://YOUR_RESOURCE_NAME.openai.azure.com/ - - `OPENAI_API_KEY`: your API key if the api type is `azure` + - `AZURE_OPENAI_API_KEY`: your API key if the api type is `azure` More information can be found here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart?tabs=command-line&pivots=programming-language-python """ engine: str = Field(description="The name of the deployed azure engine.") + azure_endpoint: Optional[str] = Field( + default=None, description="The Azure endpoint to use." + ) + azure_deployment: Optional[str] = Field( + default=None, description="The Azure deployment to use." 
+ ) use_azure_ad: bool = Field( description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication" ) _azure_ad_token: Any = PrivateAttr() + _client: SyncAzureOpenAI = PrivateAttr() + _aclient: AsyncAzureOpenAI = PrivateAttr() def __init__( self, @@ -52,9 +61,11 @@ class AzureOpenAI(OpenAI): additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 10, api_key: Optional[str] = None, - api_type: Optional[str] = AZURE_OPENAI_API_TYPE, - api_base: Optional[str] = None, api_version: Optional[str] = None, + # azure specific + azure_endpoint: Optional[str] = None, + azure_deployment: Optional[str] = None, + use_azure_ad: bool = False, callback_manager: Optional[CallbackManager] = None, # aliases for engine deployment_name: Optional[str] = None, @@ -72,55 +83,66 @@ class AzureOpenAI(OpenAI): if engine is None: raise ValueError("You must specify an `engine` parameter.") - use_azure_ad = api_type in ("azuread", "azure_ad") + azure_endpoint = get_from_param_or_env( + "azure_endpoint", azure_endpoint, "AZURE_OPENAI_ENDPOINT", "" + ) super().__init__( engine=engine, - use_azure_ad=use_azure_ad, model=model, temperature=temperature, max_tokens=max_tokens, additional_kwargs=additional_kwargs, max_retries=max_retries, api_key=api_key, - api_base=api_base, - api_type=api_type, + azure_endpoint=azure_endpoint, + azure_deployment=azure_deployment, + use_azure_ad=use_azure_ad, api_version=api_version, callback_manager=callback_manager, **kwargs, ) - @root_validator + @root_validator(pre=True) def validate_env(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate necessary credentials are set.""" - if values["api_base"] == "https://api.openai.com/v1": + if ( + values["api_base"] == "https://api.openai.com/v1" + and values["azure_endpoint"] is None + ): raise ValueError( "You must set OPENAI_API_BASE to your Azure endpoint. " "It should look like https://YOUR_RESOURCE_NAME.openai.azure.com/" ) - if values["api_type"] not in ("azure", "azure_ad", "azuread"): - raise ValueError( - "You must set OPENAI_API_TYPE to one of " - "(`azure`, `azuread`, `azure_ad`) for Azure OpenAI." 
- ) if values["api_version"] is None: raise ValueError("You must set OPENAI_API_VERSION for Azure OpenAI.") return values - @property - def _credential_kwargs(self) -> Dict[str, Any]: + def _get_clients(self, **kwargs: Any) -> Tuple[SyncAzureOpenAI, AsyncAzureOpenAI]: + client = SyncAzureOpenAI( + **self._get_credential_kwargs(), + ) + aclient = AsyncAzureOpenAI( + **self._get_credential_kwargs(), + ) + return client, aclient + + def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]: if self.use_azure_ad: self._azure_ad_token = refresh_openai_azuread_token(self._azure_ad_token) self.api_key = self._azure_ad_token.token - return super()._credential_kwargs + return { + "api_key": self.api_key, + "azure_endpoint": self.azure_endpoint, + "azure_deployment": self.azure_deployment, + "api_version": self.api_version, + } - @property - def _model_kwargs(self) -> Dict[str, Any]: - model_kwargs = super()._model_kwargs - model_kwargs.pop("model") - model_kwargs["engine"] = self.engine + def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]: + model_kwargs = super()._get_model_kwargs(**kwargs) + model_kwargs["model"] = self.engine return model_kwargs @classmethod diff --git a/llama_index/llms/konko_utils.py b/llama_index/llms/konko_utils.py index 0b02f2a9e1b787f45994b63167d8d60955e77528..6859f969908489e0306bc7c28e0b2555330d178a 100644 --- a/llama_index/llms/konko_utils.py +++ b/llama_index/llms/konko_utils.py @@ -1,8 +1,7 @@ import logging -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type import openai -from openai import ChatCompletion, Completion from tenacity import ( before_sleep_log, retry, @@ -44,8 +43,6 @@ Please set KONKO_API_KEY environment variable""" logger = logging.getLogger(__name__) -CompletionClientType = Union[Type[Completion], Type[ChatCompletion]] - def _create_retry_decorator(max_retries: int) -> Callable[[Any], Any]: min_seconds = 4 @@ -57,11 +54,11 @@ def _create_retry_decorator(max_retries: int) -> Callable[[Any], Any]: stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( - retry_if_exception_type(openai.error.Timeout) - | retry_if_exception_type(openai.error.APIError) - | retry_if_exception_type(openai.error.APIConnectionError) - | retry_if_exception_type(openai.error.RateLimitError) - | retry_if_exception_type(openai.error.ServiceUnavailableError) + retry_if_exception_type(openai.APITimeoutError) + | retry_if_exception_type(openai.APIError) + | retry_if_exception_type(openai.APIConnectionError) + | retry_if_exception_type(openai.RateLimitError) + | retry_if_exception_type(openai.APIStatusError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) @@ -123,7 +120,7 @@ def is_function_calling_model(model: str) -> bool: return is_chat_model_ and not is_old -def get_completion_endpoint(is_chat_model: bool) -> CompletionClientType: +def get_completion_endpoint(is_chat_model: bool) -> Any: import konko if is_chat_model: diff --git a/llama_index/llms/litellm_utils.py b/llama_index/llms/litellm_utils.py index a82404bfb1d94ed61faf0403ab9550d0c48fd58e..35e5b70fdef1a0bd9ae682e113894f06d2fb9d43 100644 --- a/llama_index/llms/litellm_utils.py +++ b/llama_index/llms/litellm_utils.py @@ -1,7 +1,7 @@ import logging from typing import Any, Callable, Dict, List, Optional, Sequence, Type -from openai.openai_object import OpenAIObject +from openai.resources import Completions from tenacity import 
( before_sleep_log, retry, @@ -23,7 +23,7 @@ INVALID_API_KEY_ERROR_MESSAGE = """Invalid LLM API key.""" logger = logging.getLogger(__name__) -CompletionClientType = Type[OpenAIObject] +CompletionClientType = Type[Completions] def _create_retry_decorator(max_retries: int) -> Callable[[Any], Any]: diff --git a/llama_index/llms/openai.py b/llama_index/llms/openai.py index 2b2fb04c96757ae728451faddf069ae6604a8249..612fa8198bf5c6fcbe5d183ff937be8304bd0b52 100644 --- a/llama_index/llms/openai.py +++ b/llama_index/llms/openai.py @@ -7,12 +7,17 @@ from typing import ( Optional, Protocol, Sequence, + Tuple, + cast, runtime_checkable, ) import tiktoken +from openai import AsyncOpenAI +from openai import OpenAI as SyncOpenAI +from openai.types.chat.chat_completion_chunk import ChatCompletionChunk -from llama_index.bridge.pydantic import Field +from llama_index.bridge.pydantic import Field, PrivateAttr from llama_index.callbacks import CallbackManager from llama_index.llms.base import ( LLM, @@ -24,6 +29,7 @@ from llama_index.llms.base import ( CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, + MessageRole, llm_chat_callback, llm_completion_callback, ) @@ -38,9 +44,7 @@ from llama_index.llms.generic_utils import ( stream_completion_to_chat_decorator, ) from llama_index.llms.openai_utils import ( - acompletion_with_retry, - completion_with_retry, - from_openai_message_dict, + from_openai_message, is_chat_model, is_function_calling_model, openai_modelname_to_contextsize, @@ -69,10 +73,12 @@ class OpenAI(LLM): max_retries: int = Field(description="The maximum number of API retries.") api_key: str = Field(default=None, description="The OpenAI API key.", exclude=True) - api_type: str = Field(default=None, description="The OpenAI API type.") api_base: str = Field(description="The base URL for OpenAI API.") api_version: str = Field(description="The API version for OpenAI API.") + _client: SyncOpenAI = PrivateAttr() + _aclient: AsyncOpenAI = PrivateAttr() + def __init__( self, model: str = "gpt-3.5-turbo", @@ -81,7 +87,6 @@ class OpenAI(LLM): additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 10, api_key: Optional[str] = None, - api_type: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, @@ -89,9 +94,8 @@ class OpenAI(LLM): ) -> None: additional_kwargs = additional_kwargs or {} - api_key, api_type, api_base, api_version = resolve_openai_credentials( + api_key, api_base, api_version = resolve_openai_credentials( api_key=api_key, - api_type=api_type, api_base=api_base, api_version=api_version, ) @@ -104,12 +108,18 @@ class OpenAI(LLM): max_retries=max_retries, callback_manager=callback_manager, api_key=api_key, - api_type=api_type, api_version=api_version, api_base=api_base, **kwargs, ) + self._client, self._aclient = self._get_clients(**kwargs) + + def _get_clients(self, **kwargs: Any) -> Tuple[SyncOpenAI, AsyncOpenAI]: + client = SyncOpenAI(**self._get_credential_kwargs()) + aclient = AsyncOpenAI(**self._get_credential_kwargs()) + return client, aclient + def _get_model_name(self) -> str: model_name = self.model if "ft-" in model_name: # legacy fine-tuning @@ -177,21 +187,16 @@ class OpenAI(LLM): return kwargs["use_chat_completions"] return self.metadata.is_chat_model - @property - def _credential_kwargs(self) -> Dict[str, Any]: + def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { "api_key": self.api_key, - "api_type": self.api_type, - "api_base": 
self.api_base, - "api_version": self.api_version, + "base_url": self.api_base, + "max_retries": self.max_retries, + **kwargs, } - @property - def _model_kwargs(self) -> Dict[str, Any]: - base_kwargs = { - "model": self.model, - "temperature": self.temperature, - } + def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]: + base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs} if self.max_tokens is not None: # If max_tokens is None, don't include in the payload: # https://platform.openai.com/docs/api-reference/chat @@ -199,21 +204,15 @@ class OpenAI(LLM): base_kwargs["max_tokens"] = self.max_tokens return {**base_kwargs, **self.additional_kwargs} - def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: - """Get all data for the request as a dictionary.""" - return {**self._credential_kwargs, **self._model_kwargs, **kwargs} - def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: message_dicts = to_openai_message_dicts(messages) - response = completion_with_retry( - is_chat_model=True, - max_retries=self.max_retries, + response = self._client.chat.completions.create( messages=message_dicts, stream=False, - **self._get_all_kwargs(**kwargs), + **self._get_model_kwargs(**kwargs), ) - message_dict = response["choices"][0]["message"] - message = from_openai_message_dict(message_dict) + openai_message = response.choices[0].message + message = from_openai_message(openai_message) return ChatResponse( message=message, @@ -229,34 +228,26 @@ class OpenAI(LLM): def gen() -> ChatResponseGen: content = "" function_call: Optional[dict] = None - for response in completion_with_retry( - is_chat_model=True, - max_retries=self.max_retries, + for response in self._client.chat.completions.create( messages=message_dicts, stream=True, - **self._get_all_kwargs(**kwargs), + **self._get_model_kwargs(**kwargs), ): - if len(response["choices"]) == 0 and ( - response.get("prompt_annotations") - or response.get("prompt_filter_results") - ): - # When asking a stream response from the Azure OpenAI API - # you first get an empty message with the content filtering - # results. 
Ignore this message - continue - - if len(response["choices"]) > 0: - delta = response["choices"][0]["delta"] + response = cast(ChatCompletionChunk, response) + if len(response.choices) > 0: + delta = response.choices[0].delta else: delta = {} - role = delta.get("role", "assistant") - content_delta = delta.get("content", "") or "" + role = delta.role or MessageRole.ASSISTANT + content_delta = delta.content or "" content += content_delta - function_call_delta = delta.get("function_call", None) + function_call_delta = delta.function_call if function_call_delta is not None: + function_dict = function_call_delta.dict() + if function_call is None: - function_call = function_call_delta + function_call = function_dict ## ensure we do not add a blank function call if function_call.get("function_name", "") is None: @@ -264,7 +255,7 @@ class OpenAI(LLM): else: function_call["arguments"] = ( function_call.get("arguments", "") - + function_call_delta["arguments"] + + function_dict["arguments"] ) additional_kwargs = {} @@ -285,17 +276,15 @@ class OpenAI(LLM): return gen() def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: - all_kwargs = self._get_all_kwargs(**kwargs) + all_kwargs = self._get_model_kwargs(**kwargs) self._update_max_tokens(all_kwargs, prompt) - response = completion_with_retry( - is_chat_model=False, - max_retries=self.max_retries, + response = self._client.completions.create( prompt=prompt, stream=False, **all_kwargs, ) - text = response["choices"][0]["text"] + text = response.choices[0].text return CompletionResponse( text=text, raw=response, @@ -303,20 +292,18 @@ class OpenAI(LLM): ) def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: - all_kwargs = self._get_all_kwargs(**kwargs) + all_kwargs = self._get_model_kwargs(**kwargs) self._update_max_tokens(all_kwargs, prompt) def gen() -> CompletionResponseGen: text = "" - for response in completion_with_retry( - is_chat_model=False, - max_retries=self.max_retries, + for response in self._client.completions.create( prompt=prompt, stream=True, **all_kwargs, ): - if len(response["choices"]) > 0: - delta = response["choices"][0]["text"] + if len(response.choices) > 0: + delta = response.choices[0].text else: delta = "" text += delta @@ -412,15 +399,11 @@ class OpenAI(LLM): self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: message_dicts = to_openai_message_dicts(messages) - response = await acompletion_with_retry( - is_chat_model=True, - max_retries=self.max_retries, - messages=message_dicts, - stream=False, - **self._get_all_kwargs(**kwargs), + response = await self._aclient.chat.completions.create( + messages=message_dicts, stream=False, **self._get_model_kwargs(**kwargs) ) - message_dict = response["choices"][0]["message"] - message = from_openai_message_dict(message_dict) + message_dict = response.choices[0].message + message = from_openai_message(message_dict) return ChatResponse( message=message, @@ -436,28 +419,26 @@ class OpenAI(LLM): async def gen() -> ChatResponseAsyncGen: content = "" function_call: Optional[dict] = None - async for response in await acompletion_with_retry( - is_chat_model=True, - max_retries=self.max_retries, + async for response in await self._aclient.chat.completions.create( messages=message_dicts, stream=True, - **self._get_all_kwargs(**kwargs), + **self._get_model_kwargs(**kwargs), ): - if len(response["choices"]) == 0 and response.get("prompt_annotations"): - # open ai sends empty response first while streaming ignore it - continue - if 
len(response["choices"]) > 0: - delta = response["choices"][0]["delta"] + response = cast(ChatCompletionChunk, response) + if len(response.choices) > 0: + delta = response.choices[0].delta else: delta = {} - role = delta.get("role", "assistant") - content_delta = delta.get("content", "") or "" + role = delta.role or MessageRole.ASSISTANT + content_delta = delta.content or "" content += content_delta - function_call_delta = delta.get("function_call", None) + function_call_delta = delta.function_call if function_call_delta is not None: + function_dict = function_call_delta.dict() + if function_call is None: - function_call = function_call_delta + function_call = function_dict ## ensure we do not add a blank function call if function_call.get("function_name", "") is None: @@ -465,7 +446,7 @@ class OpenAI(LLM): else: function_call["arguments"] = ( function_call.get("arguments", "") - + function_call_delta["arguments"] + + function_dict["arguments"] ) additional_kwargs = {} @@ -486,17 +467,15 @@ class OpenAI(LLM): return gen() async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse: - all_kwargs = self._get_all_kwargs(**kwargs) + all_kwargs = self._get_model_kwargs(**kwargs) self._update_max_tokens(all_kwargs, prompt) - response = await acompletion_with_retry( - is_chat_model=False, - max_retries=self.max_retries, + response = await self._aclient.completions.create( prompt=prompt, stream=False, **all_kwargs, ) - text = response["choices"][0]["text"] + text = response.choices[0].text return CompletionResponse( text=text, raw=response, @@ -506,20 +485,18 @@ class OpenAI(LLM): async def _astream_complete( self, prompt: str, **kwargs: Any ) -> CompletionResponseAsyncGen: - all_kwargs = self._get_all_kwargs(**kwargs) + all_kwargs = self._get_model_kwargs(**kwargs) self._update_max_tokens(all_kwargs, prompt) async def gen() -> CompletionResponseAsyncGen: text = "" - async for response in await acompletion_with_retry( - is_chat_model=False, - max_retries=self.max_retries, + async for response in await self._aclient.completions.create( prompt=prompt, stream=True, **all_kwargs, ): - if len(response["choices"]) > 0: - delta = response["choices"][0]["text"] + if len(response.choices) > 0: + delta = response.choices[0].text else: delta = "" text += delta diff --git a/llama_index/llms/openai_utils.py b/llama_index/llms/openai_utils.py index 6b29fee4212d56351e105f65de5978f8dcf7e9f3..689477ee99e25f0495c9d85521c626472b42f7c9 100644 --- a/llama_index/llms/openai_utils.py +++ b/llama_index/llms/openai_utils.py @@ -1,10 +1,10 @@ import logging -import os import time -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type import openai -from openai import ChatCompletion, Completion +from openai.types.chat import ChatCompletionMessageParam +from openai.types.chat.chat_completion_message import ChatCompletionMessage from tenacity import ( before_sleep_log, retry, @@ -116,8 +116,6 @@ https://platform.openai.com/account/api-keys logger = logging.getLogger(__name__) -CompletionClientType = Union[Type[Completion], Type[ChatCompletion]] - def create_retry_decorator( max_retries: int, @@ -143,11 +141,11 @@ def create_retry_decorator( retry=( retry_if_exception_type( ( - openai.error.Timeout, - openai.error.APIError, - openai.error.APIConnectionError, - openai.error.RateLimitError, - openai.error.ServiceUnavailableError, + openai.APITimeoutError, + openai.APIError, + openai.APIConnectionError, + 
diff --git a/llama_index/llms/openai_utils.py b/llama_index/llms/openai_utils.py
index 6b29fee4212d56351e105f65de5978f8dcf7e9f3..689477ee99e25f0495c9d85521c626472b42f7c9 100644
--- a/llama_index/llms/openai_utils.py
+++ b/llama_index/llms/openai_utils.py
@@ -1,10 +1,10 @@
 import logging
-import os
 import time
-from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type

 import openai
-from openai import ChatCompletion, Completion
+from openai.types.chat import ChatCompletionMessageParam
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from tenacity import (
     before_sleep_log,
     retry,
@@ -116,8 +116,6 @@ https://platform.openai.com/account/api-keys

 logger = logging.getLogger(__name__)

-CompletionClientType = Union[Type[Completion], Type[ChatCompletion]]
-

 def create_retry_decorator(
     max_retries: int,
@@ -143,11 +141,11 @@
         retry=(
             retry_if_exception_type(
                 (
-                    openai.error.Timeout,
-                    openai.error.APIError,
-                    openai.error.APIConnectionError,
-                    openai.error.RateLimitError,
-                    openai.error.ServiceUnavailableError,
+                    openai.APITimeoutError,
+                    openai.APIError,
+                    openai.APIConnectionError,
+                    openai.RateLimitError,
+                    openai.APIStatusError,
                )
            )
        ),
@@ -155,47 +153,6 @@
    )


-def completion_with_retry(
-    is_chat_model: bool,
-    max_retries: int,
-    min_seconds: float = 4,
-    max_seconds: float = 10,
-    **kwargs: Any,
-) -> Any:
-    """Use tenacity to retry the completion call."""
-    retry_decorator = create_retry_decorator(
-        max_retries=max_retries, min_seconds=min_seconds, max_seconds=max_seconds
-    )
-
-    @retry_decorator
-    def _completion_with_retry(**kwargs: Any) -> Any:
-        client = get_completion_endpoint(is_chat_model)
-        return client.create(**kwargs)
-
-    return _completion_with_retry(**kwargs)
-
-
-async def acompletion_with_retry(
-    is_chat_model: bool,
-    max_retries: int,
-    min_seconds: float = 4,
-    max_seconds: float = 10,
-    **kwargs: Any,
-) -> Any:
-    """Use tenacity to retry the async completion call."""
-    retry_decorator = create_retry_decorator(
-        max_retries=max_retries, min_seconds=min_seconds, max_seconds=max_seconds
-    )
-
-    @retry_decorator
-    async def _completion_with_retry(**kwargs: Any) -> Any:
-        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
-        client = get_completion_endpoint(is_chat_model)
-        return await client.acreate(**kwargs)
-
-    return await _completion_with_retry(**kwargs)
-
-
 def openai_modelname_to_contextsize(modelname: str) -> int:
     """Calculate the maximum number of tokens possible to generate for a model.
@@ -242,14 +199,9 @@ def is_function_calling_model(model: str) -> bool:
     return is_chat_model_ and not is_old


-def get_completion_endpoint(is_chat_model: bool) -> CompletionClientType:
-    if is_chat_model:
-        return openai.ChatCompletion
-    else:
-        return openai.Completion
-
-
-def to_openai_message_dict(message: ChatMessage, drop_none: bool = False) -> dict:
+def to_openai_message_dict(
+    message: ChatMessage, drop_none: bool = False
+) -> ChatCompletionMessageParam:
     """Convert generic message to OpenAI message dict."""
     message_dict = {
         "role": message.role,
@@ -267,18 +219,42 @@ def to_openai_message_dict(message: ChatMessage, drop_none: bool = False) -> dic
         for key in null_keys:
             message_dict.pop(key)

-    return message_dict
+    return message_dict  # type: ignore


 def to_openai_message_dicts(
     messages: Sequence[ChatMessage], drop_none: bool = False
-) -> List[dict]:
+) -> List[ChatCompletionMessageParam]:
     """Convert generic messages to OpenAI message dicts."""
     return [
         to_openai_message_dict(message, drop_none=drop_none) for message in messages
     ]


+def from_openai_message(openai_message: ChatCompletionMessage) -> ChatMessage:
+    """Convert an OpenAI message to a generic message."""
+    role = openai_message.role
+    # NOTE: Azure OpenAI returns function calling messages without a content key
+    content = openai_message.content
+
+    function_call = (
+        openai_message.function_call.dict() if openai_message.function_call else None
+    )
+
+    additional_kwargs = (
+        {"function_call": function_call} if function_call is not None else {}
+    )
+
+    return ChatMessage(role=role, content=content, additional_kwargs=additional_kwargs)
+
+
+def from_openai_messages(
+    openai_messages: Sequence[ChatCompletionMessage],
+) -> List[ChatMessage]:
+    """Convert OpenAI messages to generic messages."""
+    return [from_openai_message(message) for message in openai_messages]
+
+
 def from_openai_message_dict(message_dict: dict) -> ChatMessage:
     """Convert openai message dict to generic message."""
     role = message_dict["role"]
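The converters above bridge generic `ChatMessage` objects and the typed v1 chat API. A hedged usage sketch, assuming `openai>=1.0` and the import paths on this branch of llama_index; the model name is a placeholder:

```python
# Round trip through the converters added above.
from openai import OpenAI

from llama_index.llms.base import ChatMessage, MessageRole
from llama_index.llms.openai_utils import (
    from_openai_message,
    to_openai_message_dicts,
)

client = OpenAI()
chat_history = [ChatMessage(role=MessageRole.USER, content="Hello!")]

# Generic ChatMessage objects -> typed ChatCompletionMessageParam dicts.
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=to_openai_message_dicts(chat_history),
)

# Typed ChatCompletionMessage -> generic ChatMessage; any function_call
# is carried in additional_kwargs, per from_openai_message above.
reply = from_openai_message(response.choices[0].message)
print(reply.role, reply.content)
```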
@@ -309,10 +285,9 @@ def to_openai_function(pydantic_class: Type[BaseModel]) -> Dict[str, Any]:


 def resolve_openai_credentials(
     api_key: Optional[str] = None,
-    api_type: Optional[str] = None,
     api_base: Optional[str] = None,
     api_version: Optional[str] = None,
-) -> Tuple[Optional[str], str, str, str]:
+) -> Tuple[Optional[str], str, str]:
     """Resolve OpenAI credentials.

     The order of precedence is:
@@ -323,22 +298,17 @@ def resolve_openai_credentials(
     """
     # resolve from param or env
     api_key = get_from_param_or_env("api_key", api_key, "OPENAI_API_KEY", "")
-    api_type = get_from_param_or_env("api_type", api_type, "OPENAI_API_TYPE", "")
     api_base = get_from_param_or_env("api_base", api_base, "OPENAI_API_BASE", "")
     api_version = get_from_param_or_env(
         "api_version", api_version, "OPENAI_API_VERSION", ""
     )

     # resolve from openai module or default
-    api_key = api_key or openai.api_key
-    api_type = api_type or openai.api_type or DEFAULT_OPENAI_API_TYPE
-    api_base = api_base or openai.api_base or DEFAULT_OPENAI_API_BASE
-    api_version = api_version or openai.api_version or DEFAULT_OPENAI_API_VERSION
+    final_api_key = api_key or openai.api_key or ""
+    final_api_base = api_base or openai.base_url or DEFAULT_OPENAI_API_BASE
+    final_api_version = api_version or openai.api_version or DEFAULT_OPENAI_API_VERSION

-    if not api_key and api_type not in ("azuread", "azure_ad"):
-        raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
-
-    return api_key, api_type, api_base, api_version
+    return final_api_key, str(final_api_base), final_api_version


 def refresh_openai_azuread_token(
@@ -373,13 +343,6 @@ def refresh_openai_azuread_token(
     return azure_ad_token


-def validate_openai_api_key(api_key: Optional[str] = None) -> None:
-    openai_api_key = api_key or os.environ.get("OPENAI_API_KEY", "") or openai.api_key
-
-    if not openai_api_key:
-        raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
-
-
 def resolve_from_aliases(*args: Optional[str]) -> Optional[str]:
     for arg in args:
         if arg is not None:
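With `api_type` gone, `resolve_openai_credentials` now returns a three-tuple and no longer raises on a missing key. An illustrative sketch of the new contract; the key value below is a placeholder:

```python
# Illustrative only: the key is a placeholder, not a real credential.
import os

from llama_index.llms.openai_utils import resolve_openai_credentials

os.environ["OPENAI_API_KEY"] = "sk-placeholder"

# Precedence, per the docstring above: explicit param, then env var,
# then the openai module attribute, then the library default.
api_key, api_base, api_version = resolve_openai_credentials(
    api_version="2023-07-01-preview"
)

# api_type is no longer part of the tuple; a missing key no longer raises
# here, since validate_openai_api_key was removed along with it.
print(api_key, api_base, api_version)
```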
diff --git a/poetry.lock b/poetry.lock
index 6ad4a8b1b59ebc66ec795b1f4d2319fa1b9f8ca5..220a6e2e82d39b31784a7e4f35f48f02545ae43e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -915,22 +915,20 @@ cron = ["capturer (>=2.4)"]

 [[package]]
 name = "comm"
-version = "0.1.4"
+version = "0.2.0"
 description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc."
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
 files = [
-    {file = "comm-0.1.4-py3-none-any.whl", hash = "sha256:6d52794cba11b36ed9860999cd10fd02d6b2eac177068fdd585e1e2f8a96e67a"},
-    {file = "comm-0.1.4.tar.gz", hash = "sha256:354e40a59c9dd6db50c5cc6b4acc887d82e9603787f83b68c01a80a923984d15"},
+    {file = "comm-0.2.0-py3-none-any.whl", hash = "sha256:2da8d9ebb8dd7bfc247adaff99f24dce705638a8042b85cb995066793e391001"},
+    {file = "comm-0.2.0.tar.gz", hash = "sha256:a517ea2ca28931c7007a7a99c562a0fa5883cfb48963140cf642c41c948498be"},
 ]

 [package.dependencies]
 traitlets = ">=4"

 [package.extras]
-lint = ["black (>=22.6.0)", "mdformat (>0.7)", "mdformat-gfm (>=0.3.5)", "ruff (>=0.0.156)"]
 test = ["pytest"]
-typing = ["mypy (>=0.990)"]

 [[package]]
 name = "confection"
@@ -1193,6 +1191,17 @@ files = [
     {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"},
 ]

+[[package]]
+name = "distro"
+version = "1.8.0"
+description = "Distro - an OS platform information API"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"},
+    {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"},
+]
+
 [[package]]
 name = "dnspython"
 version = "2.4.2"
@@ -1809,39 +1818,40 @@ files = [

 [[package]]
 name = "httpcore"
-version = "0.18.0"
+version = "1.0.1"
 description = "A minimal low-level HTTP client."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "httpcore-0.18.0-py3-none-any.whl", hash = "sha256:adc5398ee0a476567bf87467063ee63584a8bce86078bf748e48754f60202ced"},
-    {file = "httpcore-0.18.0.tar.gz", hash = "sha256:13b5e5cd1dca1a6636a6aaea212b19f4f85cd88c366a2b82304181b769aab3c9"},
+    {file = "httpcore-1.0.1-py3-none-any.whl", hash = "sha256:c5e97ef177dca2023d0b9aad98e49507ef5423e9f1d94ffe2cfe250aa28e63b0"},
+    {file = "httpcore-1.0.1.tar.gz", hash = "sha256:fce1ddf9b606cfb98132ab58865c3728c52c8e4c3c46e2aabb3674464a186e92"},
 ]

 [package.dependencies]
-anyio = ">=3.0,<5.0"
 certifi = "*"
 h11 = ">=0.13,<0.15"
-sniffio = "==1.*"

 [package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
 http2 = ["h2 (>=3,<5)"]
 socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.23.0)"]

 [[package]]
 name = "httpx"
-version = "0.25.0"
+version = "0.25.1"
 description = "The next generation HTTP client."
optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.25.0-py3-none-any.whl", hash = "sha256:181ea7f8ba3a82578be86ef4171554dd45fec26a02556a744db029a0a27b7100"}, - {file = "httpx-0.25.0.tar.gz", hash = "sha256:47ecda285389cb32bb2691cc6e069e3ab0205956f681c5b2ad2325719751d875"}, + {file = "httpx-0.25.1-py3-none-any.whl", hash = "sha256:fec7d6cc5c27c578a391f7e87b9aa7d3d8fbcd034f6399f9f79b45bcc12a866a"}, + {file = "httpx-0.25.1.tar.gz", hash = "sha256:ffd96d5cf901e63863d9f1b4b6807861dbea4d301613415d9e6e57ead15fc5d0"}, ] [package.dependencies] +anyio = "*" certifi = "*" -httpcore = ">=0.18.0,<0.19.0" +httpcore = "*" idna = "*" sniffio = "*" @@ -2064,17 +2074,6 @@ qtconsole = ["qtconsole"] test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] -[[package]] -name = "ipython-genutils" -version = "0.2.0" -description = "Vestigial utilities from IPython" -optional = false -python-versions = "*" -files = [ - {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, - {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, -] - [[package]] name = "ipywidgets" version = "8.1.1" @@ -2322,13 +2321,13 @@ testing = ["coverage", "ipykernel", "jupytext", "matplotlib", "nbdime", "nbforma [[package]] name = "jupyter-client" -version = "8.5.0" +version = "8.6.0" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.5.0-py3-none-any.whl", hash = "sha256:c3877aac7257ec68d79b5c622ce986bd2a992ca42f6ddc9b4dd1da50e89f7028"}, - {file = "jupyter_client-8.5.0.tar.gz", hash = "sha256:e8754066510ce456358df363f97eae64b50860f30dc1fe8c6771440db3be9a63"}, + {file = "jupyter_client-8.6.0-py3-none-any.whl", hash = "sha256:909c474dbe62582ae62b758bca86d6518c85234bdee2d908c778db6d72f39d99"}, + {file = "jupyter_client-8.6.0.tar.gz", hash = "sha256:0642244bb83b4764ae60d07e010e15f0e2d275ec4e918a8f7b80fbbef3ca60c7"}, ] [package.dependencies] @@ -2389,13 +2388,13 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "jupyter-events" -version = "0.8.0" +version = "0.9.0" description = "Jupyter Event System library" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_events-0.8.0-py3-none-any.whl", hash = "sha256:81f07375c7673ff298bfb9302b4a981864ec64edaed75ca0fe6f850b9b045525"}, - {file = "jupyter_events-0.8.0.tar.gz", hash = "sha256:fda08f0defce5e16930542ce60634ba48e010830d50073c3dfd235759cee77bf"}, + {file = "jupyter_events-0.9.0-py3-none-any.whl", hash = "sha256:d853b3c10273ff9bc8bb8b30076d65e2c9685579db736873de6c2232dde148bf"}, + {file = "jupyter_events-0.9.0.tar.gz", hash = "sha256:81ad2e4bc710881ec274d31c6c50669d71bbaa5dd9d01e600b56faa85700d399"}, ] [package.dependencies] @@ -2429,13 +2428,13 @@ jupyter-server = ">=1.1.2" [[package]] name = "jupyter-server" -version = "2.9.1" +version = "2.10.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." 
optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_server-2.9.1-py3-none-any.whl", hash = "sha256:21ad1a3d455d5a79ce4bef5201925cd17510c17898cf9d54e3ccfb6b12734948"}, - {file = "jupyter_server-2.9.1.tar.gz", hash = "sha256:9ba71be4b9c16e479e4c50c929f8ac4b1015baf90237a08681397a98c76c7e5e"}, + {file = "jupyter_server-2.10.0-py3-none-any.whl", hash = "sha256:dde56c9bc3cb52d7b72cc0f696d15d7163603526f1a758eb4a27405b73eab2a5"}, + {file = "jupyter_server-2.10.0.tar.gz", hash = "sha256:47b8f5e63440125cb1bb8957bf12b18453ee5ed9efe42d2f7b2ca66a7019a278"}, ] [package.dependencies] @@ -2484,13 +2483,13 @@ test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", [[package]] name = "jupyterlab" -version = "4.0.7" +version = "4.0.8" description = "JupyterLab computational environment" optional = false python-versions = ">=3.8" files = [ - {file = "jupyterlab-4.0.7-py3-none-any.whl", hash = "sha256:08683045117cc495531fdb39c22ababb9aaac6977a45e67cfad20046564c9c7c"}, - {file = "jupyterlab-4.0.7.tar.gz", hash = "sha256:48792efd9f962b2bcda1f87d72168ff122c288b1d97d32109e4a11b33dc862be"}, + {file = "jupyterlab-4.0.8-py3-none-any.whl", hash = "sha256:2ff5aa2a51eb21df241d6011c236e88bd1ff9a5dbb75bebc54472f9c18bfffa4"}, + {file = "jupyterlab-4.0.8.tar.gz", hash = "sha256:c4fe93f977bcc987bd395d7fae5ab02e0c042bf4e0f7c95196f3e2e578c2fb3a"}, ] [package.dependencies] @@ -2510,7 +2509,7 @@ tornado = ">=6.2.0" traitlets = "*" [package.extras] -dev = ["black[jupyter] (==23.7.0)", "build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.0.286)"] +dev = ["black[jupyter] (==23.10.1)", "build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.0.292)"] docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-tornasync", "sphinx (>=1.8,<7.2.0)", "sphinx-copybutton"] docs-screenshots = ["altair (==5.0.1)", "ipython (==8.14.0)", "ipywidgets (==8.0.6)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.0.post0)", "matplotlib (==3.7.1)", "nbconvert (>=7.0.0)", "pandas (==2.0.2)", "scipy (==1.10.1)", "vega-datasets (==0.9.0)"] test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"] @@ -2565,13 +2564,13 @@ files = [ [[package]] name = "langchain" -version = "0.0.327" +version = "0.0.331" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langchain-0.0.327-py3-none-any.whl", hash = "sha256:21835600e1ab11e2a939d9e473c13ed51402a3b75418ca02689877a5764da398"}, - {file = "langchain-0.0.327.tar.gz", hash = "sha256:2710fba0c0735d1a63327cad83387571adc457fe75075c70335e8ea628f0a8a2"}, + {file = "langchain-0.0.331-py3-none-any.whl", hash = "sha256:64e6e1a57b8deafc1c4e914820b2b8e22a5eed60d49432cadc3b8cca9d613694"}, + {file = "langchain-0.0.331.tar.gz", hash = "sha256:b1ac365faf7fe413d5aa38329f70f23589ed07152c1a1398a5f16319eb32beb6"}, ] [package.dependencies] @@ -2619,13 +2618,13 @@ data = ["language-data (>=1.1,<2.0)"] [[package]] name = "langsmith" -version = "0.0.56" +version = "0.0.58" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "langsmith-0.0.56-py3-none-any.whl", hash = "sha256:5aed1ad2395700442a6511651eca17d60eff56878f18bdd9e1d20b3c6f7e016c"}, - {file = "langsmith-0.0.56.tar.gz", hash = "sha256:98382931f61a984a3d02cad07e4b986a0a7c843f87830172692c987deb8ba554"}, + {file = "langsmith-0.0.58-py3-none-any.whl", hash = "sha256:75a82744da2d2fa647d8d8d66a2b3791edc914a640516fa2f46cd5502b697380"}, + {file = "langsmith-0.0.58.tar.gz", hash = "sha256:b935b37b7ce09d998e572e5e3a25f022c283eee6a8de661d41cbda02ce09c6b5"}, ] [package.dependencies] @@ -3027,13 +3026,13 @@ tests = ["pytest (>=4.6)"] [[package]] name = "msal" -version = "1.24.1" +version = "1.25.0" description = "The Microsoft Authentication Library (MSAL) for Python library" optional = true python-versions = ">=2.7" files = [ - {file = "msal-1.24.1-py2.py3-none-any.whl", hash = "sha256:ce4320688f95c301ee74a4d0e9dbcfe029a63663a8cc61756f40d0d0d36574ad"}, - {file = "msal-1.24.1.tar.gz", hash = "sha256:aa0972884b3c6fdec53d9a0bd15c12e5bd7b71ac1b66d746f54d128709f3f8f8"}, + {file = "msal-1.25.0-py2.py3-none-any.whl", hash = "sha256:386df621becb506bc315a713ec3d4d5b5d6163116955c7dde23622f156b81af6"}, + {file = "msal-1.25.0.tar.gz", hash = "sha256:f44329fdb59f4f044c779164a34474b8a44ad9e4940afbc4c3a3a2bbe90324d9"}, ] [package.dependencies] @@ -3621,34 +3620,34 @@ sympy = "*" [[package]] name = "openai" -version = "0.28.1" -description = "Python client library for the OpenAI API" +version = "1.0.1" +description = "Client library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"}, - {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"}, + {file = "openai-1.0.1-py3-none-any.whl", hash = "sha256:3bf0152da66821a3f539c93d1d3069f7ebc16d730384e836a58de77895829525"}, + {file = "openai-1.0.1.tar.gz", hash = "sha256:fe25079fc1264bf1356e9db80d1617de1c4547bfab4221ccfb9b87d1e7733b48"}, ] [package.dependencies] -aiohttp = "*" -requests = ">=2.20" -tqdm = "*" +anyio = ">=3.5.0,<4" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +tqdm = ">4" +typing-extensions = ">=4.5,<5" [package.extras] -datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] -embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] -wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "optimum" -version = "1.13.2" +version = "1.14.0" description = "Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to integrate third-party libraries from Hardware Partners and interface with their specific functionality." 
optional = true python-versions = ">=3.7.0" files = [ - {file = "optimum-1.13.2.tar.gz", hash = "sha256:a637ce85881019333f40f19ae7ee8ae9e22e2346cdd4f25b9b07be3a95ca4962"}, + {file = "optimum-1.14.0.tar.gz", hash = "sha256:3b15d33b84f1cce483138f2ab202e17f39aa1330dbed1bd63e619d2230931b17"}, ] [package.dependencies] @@ -3678,7 +3677,7 @@ exporters-gpu = ["onnx", "onnxruntime-gpu", "timm"] exporters-tf = ["h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm"] furiosa = ["optimum-furiosa"] graphcore = ["optimum-graphcore"] -habana = ["optimum-habana"] +habana = ["optimum-habana", "transformers (>=4.33.0,<4.35.0)"] intel = ["optimum-intel (>=1.11.0)"] neural-compressor = ["optimum-intel[neural-compressor] (>=1.11.0)"] neuron = ["optimum-neuron[neuron]"] @@ -4084,24 +4083,22 @@ testing = ["google-api-core[grpc] (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.24.4" +version = "4.25.0" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "protobuf-4.24.4-cp310-abi3-win32.whl", hash = "sha256:ec9912d5cb6714a5710e28e592ee1093d68c5ebfeda61983b3f40331da0b1ebb"}, - {file = "protobuf-4.24.4-cp310-abi3-win_amd64.whl", hash = "sha256:1badab72aa8a3a2b812eacfede5020472e16c6b2212d737cefd685884c191085"}, - {file = "protobuf-4.24.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8e61a27f362369c2f33248a0ff6896c20dcd47b5d48239cb9720134bef6082e4"}, - {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:bffa46ad9612e6779d0e51ae586fde768339b791a50610d85eb162daeb23661e"}, - {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:b493cb590960ff863743b9ff1452c413c2ee12b782f48beca77c8da3e2ffe9d9"}, - {file = "protobuf-4.24.4-cp37-cp37m-win32.whl", hash = "sha256:dbbed8a56e56cee8d9d522ce844a1379a72a70f453bde6243e3c86c30c2a3d46"}, - {file = "protobuf-4.24.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6b7d2e1c753715dcfe9d284a25a52d67818dd43c4932574307daf836f0071e37"}, - {file = "protobuf-4.24.4-cp38-cp38-win32.whl", hash = "sha256:02212557a76cd99574775a81fefeba8738d0f668d6abd0c6b1d3adcc75503dbe"}, - {file = "protobuf-4.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:2fa3886dfaae6b4c5ed2730d3bf47c7a38a72b3a1f0acb4d4caf68e6874b947b"}, - {file = "protobuf-4.24.4-cp39-cp39-win32.whl", hash = "sha256:b77272f3e28bb416e2071186cb39efd4abbf696d682cbb5dc731308ad37fa6dd"}, - {file = "protobuf-4.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:9fee5e8aa20ef1b84123bb9232b3f4a5114d9897ed89b4b8142d81924e05d79b"}, - {file = "protobuf-4.24.4-py3-none-any.whl", hash = "sha256:80797ce7424f8c8d2f2547e2d42bfbb6c08230ce5832d6c099a37335c9c90a92"}, - {file = "protobuf-4.24.4.tar.gz", hash = "sha256:5a70731910cd9104762161719c3d883c960151eea077134458503723b60e3667"}, + {file = "protobuf-4.25.0-cp310-abi3-win32.whl", hash = "sha256:5c1203ac9f50e4853b0a0bfffd32c67118ef552a33942982eeab543f5c634395"}, + {file = "protobuf-4.25.0-cp310-abi3-win_amd64.whl", hash = "sha256:c40ff8f00aa737938c5378d461637d15c442a12275a81019cc2fef06d81c9419"}, + {file = "protobuf-4.25.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:cf21faba64cd2c9a3ed92b7a67f226296b10159dbb8fbc5e854fc90657d908e4"}, + {file = "protobuf-4.25.0-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:32ac2100b0e23412413d948c03060184d34a7c50b3e5d7524ee96ac2b10acf51"}, + {file = "protobuf-4.25.0-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:683dc44c61f2620b32ce4927de2108f3ebe8ccf2fd716e1e684e5a50da154054"}, + {file = 
"protobuf-4.25.0-cp38-cp38-win32.whl", hash = "sha256:1a3ba712877e6d37013cdc3476040ea1e313a6c2e1580836a94f76b3c176d575"}, + {file = "protobuf-4.25.0-cp38-cp38-win_amd64.whl", hash = "sha256:b2cf8b5d381f9378afe84618288b239e75665fe58d0f3fd5db400959274296e9"}, + {file = "protobuf-4.25.0-cp39-cp39-win32.whl", hash = "sha256:63714e79b761a37048c9701a37438aa29945cd2417a97076048232c1df07b701"}, + {file = "protobuf-4.25.0-cp39-cp39-win_amd64.whl", hash = "sha256:d94a33db8b7ddbd0af7c467475fb9fde0c705fb315a8433c0e2020942b863a1f"}, + {file = "protobuf-4.25.0-py3-none-any.whl", hash = "sha256:1a53d6f64b00eecf53b65ff4a8c23dc95df1fa1e97bb06b8122e5a64f49fc90a"}, + {file = "protobuf-4.25.0.tar.gz", hash = "sha256:68f7caf0d4f012fd194a301420cf6aa258366144d814f358c5b32558228afa7c"}, ] [[package]] @@ -4233,40 +4230,47 @@ tests = ["pytest"] [[package]] name = "pyarrow" -version = "13.0.0" +version = "14.0.0" description = "Python library for Apache Arrow" optional = true python-versions = ">=3.8" files = [ - {file = "pyarrow-13.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:1afcc2c33f31f6fb25c92d50a86b7a9f076d38acbcb6f9e74349636109550148"}, - {file = "pyarrow-13.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70fa38cdc66b2fc1349a082987f2b499d51d072faaa6b600f71931150de2e0e3"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd57b13a6466822498238877892a9b287b0a58c2e81e4bdb0b596dbb151cbb73"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ce69f7bf01de2e2764e14df45b8404fc6f1a5ed9871e8e08a12169f87b7a26"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:588f0d2da6cf1b1680974d63be09a6530fd1bd825dc87f76e162404779a157dc"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6241afd72b628787b4abea39e238e3ff9f34165273fad306c7acf780dd850956"}, - {file = "pyarrow-13.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:fda7857e35993673fcda603c07d43889fca60a5b254052a462653f8656c64f44"}, - {file = "pyarrow-13.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:aac0ae0146a9bfa5e12d87dda89d9ef7c57a96210b899459fc2f785303dcbb67"}, - {file = "pyarrow-13.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7759994217c86c161c6a8060509cfdf782b952163569606bb373828afdd82e8"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868a073fd0ff6468ae7d869b5fc1f54de5c4255b37f44fb890385eb68b68f95d"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be67e29f3cfcde263a113c28e96aa04362ed8229cb7c6e5f5c719003659d33"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d1b4e7176443d12610874bb84d0060bf080f000ea9ed7c84b2801df851320295"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:69b6f9a089d116a82c3ed819eea8fe67dae6105f0d81eaf0fdd5e60d0c6e0944"}, - {file = "pyarrow-13.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ab1268db81aeb241200e321e220e7cd769762f386f92f61b898352dd27e402ce"}, - {file = "pyarrow-13.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:ee7490f0f3f16a6c38f8c680949551053c8194e68de5046e6c288e396dccee80"}, - {file = "pyarrow-13.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3ad79455c197a36eefbd90ad4aa832bece7f830a64396c15c61a0985e337287"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:68fcd2dc1b7d9310b29a15949cdd0cb9bc34b6de767aff979ebf546020bf0ba0"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc6fd330fd574c51d10638e63c0d00ab456498fc804c9d01f2a61b9264f2c5b2"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e66442e084979a97bb66939e18f7b8709e4ac5f887e636aba29486ffbf373763"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:0f6eff839a9e40e9c5610d3ff8c5bdd2f10303408312caf4c8003285d0b49565"}, - {file = "pyarrow-13.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b30a27f1cddf5c6efcb67e598d7823a1e253d743d92ac32ec1eb4b6a1417867"}, - {file = "pyarrow-13.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:09552dad5cf3de2dc0aba1c7c4b470754c69bd821f5faafc3d774bedc3b04bb7"}, - {file = "pyarrow-13.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3896ae6c205d73ad192d2fc1489cd0edfab9f12867c85b4c277af4d37383c18c"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6647444b21cb5e68b593b970b2a9a07748dd74ea457c7dadaa15fd469c48ada1"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47663efc9c395e31d09c6aacfa860f4473815ad6804311c5433f7085415d62a7"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b9ba6b6d34bd2563345488cf444510588ea42ad5613df3b3509f48eb80250afd"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:d00d374a5625beeb448a7fa23060df79adb596074beb3ddc1838adb647b6ef09"}, - {file = "pyarrow-13.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c51afd87c35c8331b56f796eff954b9c7f8d4b7fef5903daf4e05fcf017d23a8"}, - {file = "pyarrow-13.0.0.tar.gz", hash = "sha256:83333726e83ed44b0ac94d8d7a21bbdee4a05029c3b1e8db58a863eec8fd8a33"}, + {file = "pyarrow-14.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:4fce1db17efbc453080c5b306f021926de7c636456a128328797e574c151f81a"}, + {file = "pyarrow-14.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28de7c05b4d7a71ec660360639cc9b65ceb1175e0e9d4dfccd879a1545bc38f7"}, + {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1541e9209c094e7f4d7b43fdd9de3a8c71d3069cf6fc03b59bf5774042411849"}, + {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c05e6c45d303c80e41ab04996430a0251321f70986ed51213903ea7bc0b7efd"}, + {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:426ffec63ab9b4dff23dec51be2150e3a4a99eb38e66c10a70e2c48779fe9c9d"}, + {file = "pyarrow-14.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:968844f591902160bd3c9ee240ce8822a3b4e7de731e91daea76ad43fe0ff062"}, + {file = "pyarrow-14.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dcedbc0b4ea955c530145acfe99e324875c386419a09db150291a24cb01aeb81"}, + {file = "pyarrow-14.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:97993a12aacc781efad9c92d4545a877e803c4d106d34237ec4ce987bec825a3"}, + {file = "pyarrow-14.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80225768d94024d59a31320374f5e6abf8899866c958dfb4f4ea8e2d9ec91bde"}, + {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61546977a8bd7e3d0c697ede723341ef4737e761af2239aef6e1db447f97727"}, + {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42509e6c93b4a1c8ae8ccd939a43f437097783fe130a1991497a6a1abbba026f"}, + 
{file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3eccce331a1392e46573f2ce849a9ee3c074e0d7008e9be0b44566ac149fd6a1"}, + {file = "pyarrow-14.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ecc463c45f2b6b36431f5f2025842245e8c15afe4d42072230575785f3bb00c6"}, + {file = "pyarrow-14.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:4362ed90def81640addcd521811dd16a13015f0a8255bec324a41262c1524b6c"}, + {file = "pyarrow-14.0.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:2fbb7ab62537782c5ab31aa08db0e1f6de92c2c515fdfc0790128384e919adcb"}, + {file = "pyarrow-14.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad7095f8f0fe0bfa3d3fca1909b8fa15c70e630b0cc1ff8d35e143f5e2704064"}, + {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6602272fce71c0fb64f266e7cdbe51b93b00c22fc1bb57f2b0cb681c4aeedf4"}, + {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2b8f87951b08a3e72265c8963da3fe4f737bb81290269037e047dd172aa591"}, + {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a1c9675966662a042caebbaafa1ae7fc26291287ebc3da06aa63ad74c323ec30"}, + {file = "pyarrow-14.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:771079fddc0b4440c41af541dbdebc711a7062c93d3c4764476a9442606977db"}, + {file = "pyarrow-14.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:c4096136318de1c4937370c0c365f949961c371201c396d8cc94a353f342069d"}, + {file = "pyarrow-14.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:6c94056fb5f0ee0bae2206c3f776881e1db2bd0d133d06805755ae7ac5145349"}, + {file = "pyarrow-14.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:687d0df1e08876b2d24d42abae129742fc655367e3fe6700aa4d79fcf2e3215e"}, + {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4054e5ee6c88ca256a67fc8b27f9c59bcd385216346265831d462a6069033f"}, + {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:768b962e4c042ab2c96576ca0757935472e220d11af855c7d0be3279d7fced5f"}, + {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:77293b1319c7044f68ebfa43db8c929a0a5254ce371f1a0873d343f1460171d0"}, + {file = "pyarrow-14.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d2bc7c53941d85f0133b1bd5a814bca0af213922f50d8a8dc0eed4d9ed477845"}, + {file = "pyarrow-14.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:378955365dd087c285ef4f34ad939d7e551b7715326710e8cd21cfa2ce511bd7"}, + {file = "pyarrow-14.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:f05e81b4c621e6ad4bcd8f785e3aa1d6c49a935818b809ea6e7bf206a5b1a4e8"}, + {file = "pyarrow-14.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6867f6a8057eaef5a7ac6d27fe5518133f67973c5d4295d79a943458350e7c61"}, + {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca54b87c46abdfe027f18f959ca388102bd7326c344838f72244807462d091b2"}, + {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35abf61bd0cc9daca3afc715f6ba74ea83d792fa040025352624204bec66bf6a"}, + {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:65c377523b369f7ef1ba02be814e832443bb3b15065010838f02dae5bdc0f53c"}, + {file = "pyarrow-14.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8a1e470e4b5f7bda7bede0410291daec55ab69f346d77795d34fd6a45b41579"}, + {file = "pyarrow-14.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:466c1a5a7a4b279cfa363ac34dedd0c3c6af388cec9e6a468ffc095a6627849a"}, + {file = "pyarrow-14.0.0.tar.gz", hash = "sha256:45d3324e1c9871a07de6b4d514ebd73225490963a6dd46c64c465c4b6079fe1e"}, ] [package.dependencies] @@ -4436,92 +4440,92 @@ testutils = ["gitpython (>3)"] [[package]] name = "pymongo" -version = "4.5.0" +version = "4.6.0" description = "Python driver for MongoDB <http://www.mongodb.org>" optional = false python-versions = ">=3.7" files = [ - {file = "pymongo-4.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d4fa1b01fa7e5b7bb8d312e3542e211b320eb7a4e3d8dc884327039d93cb9e0"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:dfcd2b9f510411de615ccedd47462dae80e82fdc09fe9ab0f0f32f11cf57eeb5"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:3e33064f1984db412b34d51496f4ea785a9cff621c67de58e09fb28da6468a52"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:33faa786cc907de63f745f587e9879429b46033d7d97a7b84b37f4f8f47b9b32"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:76a262c41c1a7cbb84a3b11976578a7eb8e788c4b7bfbd15c005fb6ca88e6e50"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:0f4b125b46fe377984fbaecf2af40ed48b05a4b7676a2ff98999f2016d66b3ec"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:40d5f6e853ece9bfc01e9129b228df446f49316a4252bb1fbfae5c3c9dedebad"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:152259f0f1a60f560323aacf463a3642a65a25557683f49cfa08c8f1ecb2395a"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d64878d1659d2a5bdfd0f0a4d79bafe68653c573681495e424ab40d7b6d6d41"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1bb3a62395ffe835dbef3a1cbff48fbcce709c78bd1f52e896aee990928432b"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe48f50fb6348511a3268a893bfd4ab5f263f5ac220782449d03cd05964d1ae7"}, - {file = "pymongo-4.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7591a3beea6a9a4fa3080d27d193b41f631130e3ffa76b88c9ccea123f26dc59"}, - {file = "pymongo-4.5.0-cp310-cp310-win32.whl", hash = "sha256:3a7166d57dc74d679caa7743b8ecf7dc3a1235a9fd178654dddb2b2a627ae229"}, - {file = "pymongo-4.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:21b953da14549ff62ea4ae20889c71564328958cbdf880c64a92a48dda4c9c53"}, - {file = "pymongo-4.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ead4f19d0257a756b21ac2e0e85a37a7245ddec36d3b6008d5bfe416525967dc"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9aff6279e405dc953eeb540ab061e72c03cf38119613fce183a8e94f31be608f"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4c8d6aa91d3e35016847cbe8d73106e3d1c9a4e6578d38e2c346bfe8edb3ca"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08819da7864f9b8d4a95729b2bea5fffed08b63d3b9c15b4fea47de655766cf5"}, - {file = "pymongo-4.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a253b765b7cbc4209f1d8ee16c7287c4268d3243070bf72d7eec5aa9dfe2a2c2"}, - {file = 
"pymongo-4.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8027c9063579083746147cf401a7072a9fb6829678076cd3deff28bb0e0f50c8"}, - {file = "pymongo-4.5.0-cp311-cp311-win32.whl", hash = "sha256:9d2346b00af524757576cc2406414562cced1d4349c92166a0ee377a2a483a80"}, - {file = "pymongo-4.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:c3c3525ea8658ee1192cdddf5faf99b07ebe1eeaa61bf32821126df6d1b8072b"}, - {file = "pymongo-4.5.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e5a27f348909235a106a3903fc8e70f573d89b41d723a500869c6569a391cff7"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9a9a39b7cac81dca79fca8c2a6479ef4c7b1aab95fad7544cc0e8fd943595a2"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:496c9cbcb4951183d4503a9d7d2c1e3694aab1304262f831d5e1917e60386036"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23cc6d7eb009c688d70da186b8f362d61d5dd1a2c14a45b890bd1e91e9c451f2"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fff7d17d30b2cd45afd654b3fc117755c5d84506ed25fda386494e4e0a3416e1"}, - {file = "pymongo-4.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6422b6763b016f2ef2beedded0e546d6aa6ba87910f9244d86e0ac7690f75c96"}, - {file = "pymongo-4.5.0-cp312-cp312-win32.whl", hash = "sha256:77cfff95c1fafd09e940b3fdcb7b65f11442662fad611d0e69b4dd5d17a81c60"}, - {file = "pymongo-4.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:e57d859b972c75ee44ea2ef4758f12821243e99de814030f69a3decb2aa86807"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2b0176f9233a5927084c79ff80b51bd70bfd57e4f3d564f50f80238e797f0c8a"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89b3f2da57a27913d15d2a07d58482f33d0a5b28abd20b8e643ab4d625e36257"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:5caee7bd08c3d36ec54617832b44985bd70c4cbd77c5b313de6f7fce0bb34f93"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:1d40ad09d9f5e719bc6f729cc6b17f31c0b055029719406bd31dde2f72fca7e7"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:076afa0a4a96ca9f77fec0e4a0d241200b3b3a1766f8d7be9a905ecf59a7416b"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:3fa3648e4f1e63ddfe53563ee111079ea3ab35c3b09cd25bc22dadc8269a495f"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:44ee985194c426ddf781fa784f31ffa29cb59657b2dba09250a4245431847d73"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33c17d9e694b66d7e96977e9e56df19d662031483efe121a24772a44ccbbc7e"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d79ae3bb1ff041c0db56f138c88ce1dfb0209f3546d8d6e7c3f74944ecd2439"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d67225f05f6ea27c8dc57f3fa6397c96d09c42af69d46629f71e82e66d33fa4f"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41771b22dd2822540f79a877c391283d4e6368125999a5ec8beee1ce566f3f82"}, - {file = 
"pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a1f26bc1f5ce774d99725773901820dfdfd24e875028da4a0252a5b48dcab5c"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3236cf89d69679eaeb9119c840f5c7eb388a2110b57af6bb6baf01a1da387c18"}, - {file = "pymongo-4.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e1f61355c821e870fb4c17cdb318669cfbcf245a291ce5053b41140870c3e5cc"}, - {file = "pymongo-4.5.0-cp37-cp37m-win32.whl", hash = "sha256:49dce6957598975d8b8d506329d2a3a6c4aee911fa4bbcf5e52ffc6897122950"}, - {file = "pymongo-4.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2227a08b091bd41df5aadee0a5037673f691e2aa000e1968b1ea2342afc6880"}, - {file = "pymongo-4.5.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:435228d3c16a375274ac8ab9c4f9aef40c5e57ddb8296e20ecec9e2461da1017"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:8e559116e4128630ad3b7e788e2e5da81cbc2344dee246af44471fa650486a70"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:840eaf30ccac122df260b6005f9dfae4ac287c498ee91e3e90c56781614ca238"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b4fe46b58010115514b842c669a0ed9b6a342017b15905653a5b1724ab80917f"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:a8127437ebc196a6f5e8fddd746bd0903a400dc6b5ae35df672dd1ccc7170a2a"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:2988ef5e6b360b3ff1c6d55c53515499de5f48df31afd9f785d788cdacfbe2d3"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e249190b018d63c901678053b4a43e797ca78b93fb6d17633e3567d4b3ec6107"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:1240edc1a448d4ada4bf1a0e55550b6292420915292408e59159fd8bbdaf8f63"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b6d2a56fc2354bb6378f3634402eec788a8f3facf0b3e7d468db5f2b5a78d763"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a0aade2b11dc0c326ccd429ee4134d2d47459ff68d449c6d7e01e74651bd255"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74c0da07c04d0781490b2915e7514b1adb265ef22af039a947988c331ee7455b"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3754acbd7efc7f1b529039fcffc092a15e1cf045e31f22f6c9c5950c613ec4d"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:631492573a1bef2f74f9ac0f9d84e0ce422c251644cd81207530af4aa2ee1980"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e2654d1278384cff75952682d17c718ecc1ad1d6227bb0068fd826ba47d426a5"}, - {file = "pymongo-4.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:168172ef7856e20ec024fe2a746bfa895c88b32720138e6438fd765ebd2b62dd"}, - {file = "pymongo-4.5.0-cp38-cp38-win32.whl", hash = "sha256:b25f7bea162b3dbec6d33c522097ef81df7c19a9300722fa6853f5b495aecb77"}, - {file = "pymongo-4.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:b520aafc6cb148bac09ccf532f52cbd31d83acf4d3e5070d84efe3c019a1adbf"}, - {file = "pymongo-4.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8543253adfaa0b802bfa88386db1009c6ebb7d5684d093ee4edc725007553d21"}, - {file = 
"pymongo-4.5.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:bc5d8c3647b8ae28e4312f1492b8f29deebd31479cd3abaa989090fb1d66db83"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:505f8519c4c782a61d94a17b0da50be639ec462128fbd10ab0a34889218fdee3"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:53f2dda54d76a98b43a410498bd12f6034b2a14b6844ca08513733b2b20b7ad8"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:9c04b9560872fa9a91251030c488e0a73bce9321a70f991f830c72b3f8115d0d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:58a63a26a1e3dc481dd3a18d6d9f8bd1d576cd1ffe0d479ba7dd38b0aeb20066"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:f076b779aa3dc179aa3ed861be063a313ed4e48ae9f6a8370a9b1295d4502111"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:1b1d7d9aabd8629a31d63cd106d56cca0e6420f38e50563278b520f385c0d86e"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37df8f6006286a5896d1cbc3efb8471ced42e3568d38e6cb00857277047b0d63"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56320c401f544d762fc35766936178fbceb1d9261cd7b24fbfbc8fb6f67aa8a5"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbd705d5f3c3d1ff2d169e418bb789ff07ab3c70d567cc6ba6b72b04b9143481"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a167081c75cf66b32f30e2f1eaee9365af935a86dbd76788169911bed9b5d5"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c42748ccc451dfcd9cef6c5447a7ab727351fd9747ad431db5ebb18a9b78a4d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf62da7a4cdec9a4b2981fcbd5e08053edffccf20e845c0b6ec1e77eb7fab61d"}, - {file = "pymongo-4.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b5bbb87fa0511bd313d9a2c90294c88db837667c2bda2ea3fa7a35b59fd93b1f"}, - {file = "pymongo-4.5.0-cp39-cp39-win32.whl", hash = "sha256:465fd5b040206f8bce7016b01d7e7f79d2fcd7c2b8e41791be9632a9df1b4999"}, - {file = "pymongo-4.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:63d8019eee119df308a075b8a7bdb06d4720bf791e2b73d5ab0e7473c115d79c"}, - {file = "pymongo-4.5.0.tar.gz", hash = "sha256:681f252e43b3ef054ca9161635f81b730f4d8cadd28b3f2b2004f5a72f853982"}, + {file = "pymongo-4.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c011bd5ad03cc096f99ffcfdd18a1817354132c1331bed7a837a25226659845f"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:5e63146dbdb1eac207464f6e0cfcdb640c9c5ff0f57b754fa96fe252314a1dc6"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:2972dd1f1285866aba027eff2f4a2bbf8aa98563c2ced14cb34ee5602b36afdf"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:a0be99b599da95b7a90a918dd927b20c434bea5e1c9b3efc6a3c6cd67c23f813"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:9b0f98481ad5dc4cb430a60bbb8869f05505283b9ae1c62bdb65eb5e020ee8e3"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:256c503a75bd71cf7fb9ebf889e7e222d49c6036a48aad5a619f98a0adf0e0d7"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux2014_x86_64.whl", hash = 
"sha256:b4ad70d7cac4ca0c7b31444a0148bd3af01a2662fa12b1ad6f57cd4a04e21766"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5717a308a703dda2886a5796a07489c698b442f5e409cf7dc2ac93de8d61d764"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f7f9feecae53fa18d6a3ea7c75f9e9a1d4d20e5c3f9ce3fba83f07bcc4eee2"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128b1485753106c54af481789cdfea12b90a228afca0b11fb3828309a907e10e"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3077a31633beef77d057c6523f5de7271ddef7bde5e019285b00c0cc9cac1e3"}, + {file = "pymongo-4.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebf02c32afa6b67e5861a27183dd98ed88419a94a2ab843cc145fb0bafcc5b28"}, + {file = "pymongo-4.6.0-cp310-cp310-win32.whl", hash = "sha256:b14dd73f595199f4275bed4fb509277470d9b9059310537e3b3daba12b30c157"}, + {file = "pymongo-4.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:8adf014f2779992eba3b513e060d06f075f0ab2fb3ad956f413a102312f65cdf"}, + {file = "pymongo-4.6.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba51129fcc510824b6ca6e2ce1c27e3e4d048b6e35d3ae6f7e517bed1b8b25ce"}, + {file = "pymongo-4.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2973f113e079fb98515722cd728e1820282721ec9fd52830e4b73cabdbf1eb28"}, + {file = "pymongo-4.6.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af425f323fce1b07755edd783581e7283557296946212f5b1a934441718e7528"}, + {file = "pymongo-4.6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ec71ac633b126c0775ed4604ca8f56c3540f5c21a1220639f299e7a544b55f9"}, + {file = "pymongo-4.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ec6c20385c5a58e16b1ea60c5e4993ea060540671d7d12664f385f2fb32fe79"}, + {file = "pymongo-4.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:85f2cdc400ee87f5952ebf2a117488f2525a3fb2e23863a8efe3e4ee9e54e4d1"}, + {file = "pymongo-4.6.0-cp311-cp311-win32.whl", hash = "sha256:7fc2bb8a74dcfcdd32f89528e38dcbf70a3a6594963d60dc9595e3b35b66e414"}, + {file = "pymongo-4.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:6695d7136a435c1305b261a9ddb9b3ecec9863e05aab3935b96038145fd3a977"}, + {file = "pymongo-4.6.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d603edea1ff7408638b2504905c032193b7dcee7af269802dbb35bc8c3310ed5"}, + {file = "pymongo-4.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79f41576b3022c2fe9780ae3e44202b2438128a25284a8ddfa038f0785d87019"}, + {file = "pymongo-4.6.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f2af6cf82509b15093ce3569229e0d53c90ad8ae2eef940652d4cf1f81e045"}, + {file = "pymongo-4.6.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecd9e1fa97aa11bf67472220285775fa15e896da108f425e55d23d7540a712ce"}, + {file = "pymongo-4.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d2be5c9c3488fa8a70f83ed925940f488eac2837a996708d98a0e54a861f212"}, + {file = "pymongo-4.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ab6bcc8e424e07c1d4ba6df96f7fb963bcb48f590b9456de9ebd03b88084fe8"}, + {file = 
"pymongo-4.6.0-cp312-cp312-win32.whl", hash = "sha256:47aa128be2e66abd9d1a9b0437c62499d812d291f17b55185cb4aa33a5f710a4"}, + {file = "pymongo-4.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:014e7049dd019a6663747ca7dae328943e14f7261f7c1381045dfc26a04fa330"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:288c21ab9531b037f7efa4e467b33176bc73a0c27223c141b822ab4a0e66ff2a"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:747c84f4e690fbe6999c90ac97246c95d31460d890510e4a3fa61b7d2b87aa34"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:055f5c266e2767a88bb585d01137d9c7f778b0195d3dbf4a487ef0638be9b651"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:82e620842e12e8cb4050d2643a81c8149361cd82c0a920fa5a15dc4ca8a4000f"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:6b18276f14b4b6d92e707ab6db19b938e112bd2f1dc3f9f1a628df58e4fd3f0d"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:680fa0fc719e1a3dcb81130858368f51d83667d431924d0bcf249644bce8f303"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:3919708594b86d0f5cdc713eb6fccd3f9b9532af09ea7a5d843c933825ef56c4"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db082f728160369d9a6ed2e722438291558fc15ce06d0a7d696a8dad735c236b"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e4ed21029d80c4f62605ab16398fe1ce093fff4b5f22d114055e7d9fbc4adb0"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bea9138b0fc6e2218147e9c6ce1ff76ff8e29dc00bb1b64842bd1ca107aee9f"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a0269811661ba93c472c8a60ea82640e838c2eb148d252720a09b5123f2c2fe"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d6a1b1361f118e7fefa17ae3114e77f10ee1b228b20d50c47c9f351346180c8"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e3b0127b260d4abae7b62203c4c7ef0874c901b55155692353db19de4b18bc4"}, + {file = "pymongo-4.6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a49aca4d961823b2846b739380c847e8964ff7ae0f0a683992b9d926054f0d6d"}, + {file = "pymongo-4.6.0-cp37-cp37m-win32.whl", hash = "sha256:09c7de516b08c57647176b9fc21d929d628e35bcebc7422220c89ae40b62126a"}, + {file = "pymongo-4.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:81dd1308bd5630d2bb5980f00aa163b986b133f1e9ed66c66ce2a5bc3572e891"}, + {file = "pymongo-4.6.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:2f8c04277d879146eacda920476e93d520eff8bec6c022ac108cfa6280d84348"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:5802acc012bbb4bce4dff92973dff76482f30ef35dd4cb8ab5b0e06aa8f08c80"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ccd785fafa1c931deff6a7116e9a0d402d59fabe51644b0d0c268295ff847b25"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fe03bf25fae4b95d8afe40004a321df644400fdcba4c8e5e1a19c1085b740888"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:2ca0ba501898b2ec31e6c3acf90c31910944f01d454ad8e489213a156ccf1bda"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = 
"sha256:10a379fb60f1b2406ae57b8899bacfe20567918c8e9d2d545e1b93628fcf2050"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:a4dc1319d0c162919ee7f4ee6face076becae2abbd351cc14f1fe70af5fb20d9"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:ddef295aaf80cefb0c1606f1995899efcb17edc6b327eb6589e234e614b87756"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:518c90bdd6e842c446d01a766b9136fec5ec6cc94f3b8c3f8b4a332786ee6b64"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b80a4ee19b3442c57c38afa978adca546521a8822d663310b63ae2a7d7b13f3a"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb438a8bf6b695bf50d57e6a059ff09652a07968b2041178b3744ea785fcef9b"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3db7d833a7c38c317dc95b54e27f1d27012e031b45a7c24e360b53197d5f6e7"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3729b8db02063da50eeb3db88a27670d85953afb9a7f14c213ac9e3dca93034b"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:39a1cd5d383b37285641d5a7a86be85274466ae336a61b51117155936529f9b3"}, + {file = "pymongo-4.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7b0e6361754ac596cd16bfc6ed49f69ffcd9b60b7bc4bcd3ea65c6a83475e4ff"}, + {file = "pymongo-4.6.0-cp38-cp38-win32.whl", hash = "sha256:806e094e9e85d8badc978af8c95b69c556077f11844655cb8cd2d1758769e521"}, + {file = "pymongo-4.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1394c4737b325166a65ae7c145af1ebdb9fb153ebedd37cf91d676313e4a67b8"}, + {file = "pymongo-4.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8273e1abbcff1d7d29cbbb1ea7e57d38be72f1af3c597c854168508b91516c2"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:e16ade71c93f6814d095d25cd6d28a90d63511ea396bd96e9ffcb886b278baaa"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:325701ae7b56daa5b0692305b7cb505ca50f80a1288abb32ff420a8a209b01ca"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:cc94f9fea17a5af8cf1a343597711a26b0117c0b812550d99934acb89d526ed2"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:21812453354b151200034750cd30b0140e82ec2a01fd4357390f67714a1bfbde"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:0634994b026336195778e5693583c060418d4ab453eff21530422690a97e1ee8"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:ad4f66fbb893b55f96f03020e67dcab49ffde0177c6565ccf9dec4fdf974eb61"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:2703a9f8f5767986b4f51c259ff452cc837c5a83c8ed5f5361f6e49933743b2f"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bafea6061d63059d8bc2ffc545e2f049221c8a4457d236c5cd6a66678673eab"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f28ae33dc5a0b9cee06e95fd420e42155d83271ab75964baf747ce959cac5f52"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16a534da0e39785687b7295e2fcf9a339f4a20689024983d11afaa4657f8507"}, + {file = 
"pymongo-4.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef67fedd863ffffd4adfd46d9d992b0f929c7f61a8307366d664d93517f2c78e"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05c30fd35cc97f14f354916b45feea535d59060ef867446b5c3c7f9b609dd5dc"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c63e3a2e8fb815c4b1f738c284a4579897e37c3cfd95fdb199229a1ccfb638a"}, + {file = "pymongo-4.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e5e193f89f4f8c1fe273f9a6e6df915092c9f2af6db2d1afb8bd53855025c11f"}, + {file = "pymongo-4.6.0-cp39-cp39-win32.whl", hash = "sha256:a09bfb51953930e7e838972ddf646c5d5f984992a66d79da6ba7f6a8d8a890cd"}, + {file = "pymongo-4.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:107a234dc55affc5802acb3b6d83cbb8c87355b38a9457fcd8806bdeb8bce161"}, + {file = "pymongo-4.6.0.tar.gz", hash = "sha256:fb1c56d891f9e34303c451998ef62ba52659648bb0d75b03c5e4ac223a3342c2"}, ] [package.dependencies] @@ -4533,6 +4537,7 @@ encryption = ["certifi", "pymongo[aws]", "pymongocrypt (>=1.6.0,<2.0.0)"] gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] snappy = ["python-snappy"] +test = ["pytest (>=7)"] zstd = ["zstandard"] [[package]] @@ -4887,18 +4892,17 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qtconsole" -version = "5.4.4" +version = "5.5.0" description = "Jupyter Qt console" optional = false -python-versions = ">= 3.7" +python-versions = ">= 3.8" files = [ - {file = "qtconsole-5.4.4-py3-none-any.whl", hash = "sha256:a3b69b868e041c2c698bdc75b0602f42e130ffb256d6efa48f9aa756c97672aa"}, - {file = "qtconsole-5.4.4.tar.gz", hash = "sha256:b7ffb53d74f23cee29f4cdb55dd6fabc8ec312d94f3c46ba38e1dde458693dfb"}, + {file = "qtconsole-5.5.0-py3-none-any.whl", hash = "sha256:6b6bcf8f834c6df1579a3e6623c8531b85d3e723997cee3a1156296df14716c8"}, + {file = "qtconsole-5.5.0.tar.gz", hash = "sha256:ea8b4a07d7dc915a1b1238fbfe2c9aea570640402557b64615e09a4bc60df47c"}, ] [package.dependencies] ipykernel = ">=4.1" -ipython-genutils = "*" jupyter-client = ">=4.1" jupyter-core = "*" packaging = "*" @@ -5137,110 +5141,110 @@ files = [ [[package]] name = "rpds-py" -version = "0.10.6" +version = "0.12.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.10.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6"}, - {file = "rpds_py-0.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3"}, - {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31"}, - {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35"}, - {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094"}, - {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed"}, - {file = 
"rpds_py-0.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d"}, - {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c"}, - {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b"}, - {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9"}, - {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063"}, - {file = "rpds_py-0.10.6-cp310-none-win32.whl", hash = "sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad"}, - {file = "rpds_py-0.10.6-cp310-none-win_amd64.whl", hash = "sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7"}, - {file = "rpds_py-0.10.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069"}, - {file = "rpds_py-0.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80"}, - {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532"}, - {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c"}, - {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066"}, - {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281"}, - {file = "rpds_py-0.10.6-cp311-none-win32.whl", hash = "sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116"}, - {file = "rpds_py-0.10.6-cp311-none-win_amd64.whl", hash = "sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57"}, - {file = "rpds_py-0.10.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6"}, - {file = "rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31"}, - {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7"}, - {file = 
"rpds_py-0.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175"}, - {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0"}, - {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071"}, - {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9"}, - {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a"}, - {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6"}, - {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d"}, - {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586"}, - {file = "rpds_py-0.10.6-cp312-none-win32.whl", hash = "sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02"}, - {file = "rpds_py-0.10.6-cp312-none-win_amd64.whl", hash = "sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2"}, - {file = "rpds_py-0.10.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53"}, - {file = "rpds_py-0.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13"}, - {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4"}, - {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977"}, - {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b"}, - {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801"}, - {file = "rpds_py-0.10.6-cp38-none-win32.whl", hash = "sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1"}, - {file = "rpds_py-0.10.6-cp38-none-win_amd64.whl", hash = "sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf"}, - {file = 
"rpds_py-0.10.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5"}, - {file = "rpds_py-0.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5"}, - {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014"}, - {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7"}, - {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647"}, - {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3"}, - {file = "rpds_py-0.10.6-cp39-none-win32.whl", hash = "sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d"}, - {file = "rpds_py-0.10.6-cp39-none-win_amd64.whl", hash = "sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4"}, - {file = 
"rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487"}, - {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977"}, - {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403"}, - {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971"}, - {file = "rpds_py-0.10.6.tar.gz", hash = "sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50"}, + {file = "rpds_py-0.12.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:c694bee70ece3b232df4678448fdda245fd3b1bb4ba481fb6cd20e13bb784c46"}, + {file = "rpds_py-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30e5ce9f501fb1f970e4a59098028cf20676dee64fc496d55c33e04bbbee097d"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d72a4315514e5a0b9837a086cb433b004eea630afb0cc129de76d77654a9606f"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebaf8c76c39604d52852366249ab807fe6f7a3ffb0dd5484b9944917244cdbe"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a239303acb0315091d54c7ff36712dba24554993b9a93941cf301391d8a997ee"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ced40cdbb6dd47a032725a038896cceae9ce267d340f59508b23537f05455431"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c8c0226c71bd0ce9892eaf6afa77ae8f43a3d9313124a03df0b389c01f832de"}, + {file = "rpds_py-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8e11715178f3608874508f08e990d3771e0b8c66c73eb4e183038d600a9b274"}, + {file = "rpds_py-0.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5210a0018c7e09c75fa788648617ebba861ae242944111d3079034e14498223f"}, + {file = "rpds_py-0.12.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:171d9a159f1b2f42a42a64a985e4ba46fc7268c78299272ceba970743a67ee50"}, + {file = "rpds_py-0.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:57ec6baec231bb19bb5fd5fc7bae21231860a1605174b11585660236627e390e"}, + {file = "rpds_py-0.12.0-cp310-none-win32.whl", hash = "sha256:7188ddc1a8887194f984fa4110d5a3d5b9b5cd35f6bafdff1b649049cbc0ce29"}, + {file = "rpds_py-0.12.0-cp310-none-win_amd64.whl", hash = "sha256:1e04581c6117ad9479b6cfae313e212fe0dfa226ac727755f0d539cd54792963"}, + {file = "rpds_py-0.12.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:0a38612d07a36138507d69646c470aedbfe2b75b43a4643f7bd8e51e52779624"}, + {file = "rpds_py-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f12d69d568f5647ec503b64932874dade5a20255736c89936bf690951a5e79f5"}, + {file = "rpds_py-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f8a1d990dc198a6c68ec3d9a637ba1ce489b38cbfb65440a27901afbc5df575"}, + {file = "rpds_py-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c567c664fc2f44130a20edac73e0a867f8e012bf7370276f15c6adc3586c37c"}, + {file = "rpds_py-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e9e976e0dbed4f51c56db10831c9623d0fd67aac02853fe5476262e5a22acb7"}, + {file = "rpds_py-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efddca2d02254a52078c35cadad34762adbae3ff01c6b0c7787b59d038b63e0d"}, + {file = 
"rpds_py-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9e7f29c00577aff6b318681e730a519b235af292732a149337f6aaa4d1c5e31"}, + {file = "rpds_py-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:389c0e38358fdc4e38e9995e7291269a3aead7acfcf8942010ee7bc5baee091c"}, + {file = "rpds_py-0.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33ab498f9ac30598b6406e2be1b45fd231195b83d948ebd4bd77f337cb6a2bff"}, + {file = "rpds_py-0.12.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d56b1cd606ba4cedd64bb43479d56580e147c6ef3f5d1c5e64203a1adab784a2"}, + {file = "rpds_py-0.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fa73ed22c40a1bec98d7c93b5659cd35abcfa5a0a95ce876b91adbda170537c"}, + {file = "rpds_py-0.12.0-cp311-none-win32.whl", hash = "sha256:dbc25baa6abb205766fb8606f8263b02c3503a55957fcb4576a6bb0a59d37d10"}, + {file = "rpds_py-0.12.0-cp311-none-win_amd64.whl", hash = "sha256:c6b52b7028b547866c2413f614ee306c2d4eafdd444b1ff656bf3295bf1484aa"}, + {file = "rpds_py-0.12.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:9620650c364c01ed5b497dcae7c3d4b948daeae6e1883ae185fef1c927b6b534"}, + {file = "rpds_py-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2124f9e645a94ab7c853bc0a3644e0ca8ffbe5bb2d72db49aef8f9ec1c285733"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281c8b219d4f4b3581b918b816764098d04964915b2f272d1476654143801aa2"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27ccc93c7457ef890b0dd31564d2a05e1aca330623c942b7e818e9e7c2669ee4"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1c562a9bb72244fa767d1c1ab55ca1d92dd5f7c4d77878fee5483a22ffac808"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e57919c32ee295a2fca458bb73e4b20b05c115627f96f95a10f9f5acbd61172d"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa35ad36440aaf1ac8332b4a4a433d4acd28f1613f0d480995f5cfd3580e90b7"}, + {file = "rpds_py-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e6aea5c0eb5b0faf52c7b5c4a47c8bb64437173be97227c819ffa31801fa4e34"}, + {file = "rpds_py-0.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:81cf9d306c04df1b45971c13167dc3bad625808aa01281d55f3cf852dde0e206"}, + {file = "rpds_py-0.12.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:08e6e7ff286254016b945e1ab632ee843e43d45e40683b66dd12b73791366dd1"}, + {file = "rpds_py-0.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d0a675a7acbbc16179188d8c6d0afb8628604fc1241faf41007255957335a0b"}, + {file = "rpds_py-0.12.0-cp312-none-win32.whl", hash = "sha256:b2287c09482949e0ca0c0eb68b2aca6cf57f8af8c6dfd29dcd3bc45f17b57978"}, + {file = "rpds_py-0.12.0-cp312-none-win_amd64.whl", hash = "sha256:8015835494b21aa7abd3b43fdea0614ee35ef6b03db7ecba9beb58eadf01c24f"}, + {file = "rpds_py-0.12.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6174d6ad6b58a6bcf67afbbf1723420a53d06c4b89f4c50763d6fa0a6ac9afd2"}, + {file = "rpds_py-0.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a689e1ded7137552bea36305a7a16ad2b40be511740b80748d3140614993db98"}, + {file = "rpds_py-0.12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45321224144c25a62052035ce96cbcf264667bcb0d81823b1bbc22c4addd194"}, + {file = 
"rpds_py-0.12.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa32205358a76bf578854bf31698a86dc8b2cb591fd1d79a833283f4a403f04b"}, + {file = "rpds_py-0.12.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91bd2b7cf0f4d252eec8b7046fa6a43cee17e8acdfc00eaa8b3dbf2f9a59d061"}, + {file = "rpds_py-0.12.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3acadbab8b59f63b87b518e09c4c64b142e7286b9ca7a208107d6f9f4c393c5c"}, + {file = "rpds_py-0.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:429349a510da82c85431f0f3e66212d83efe9fd2850f50f339341b6532c62fe4"}, + {file = "rpds_py-0.12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05942656cb2cb4989cd50ced52df16be94d344eae5097e8583966a1d27da73a5"}, + {file = "rpds_py-0.12.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0c5441b7626c29dbd54a3f6f3713ec8e956b009f419ffdaaa3c80eaf98ddb523"}, + {file = "rpds_py-0.12.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:b6b0e17d39d21698185097652c611f9cf30f7c56ccec189789920e3e7f1cee56"}, + {file = "rpds_py-0.12.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3b7a64d43e2a1fa2dd46b678e00cabd9a49ebb123b339ce799204c44a593ae1c"}, + {file = "rpds_py-0.12.0-cp38-none-win32.whl", hash = "sha256:e5bbe011a2cea9060fef1bb3d668a2fd8432b8888e6d92e74c9c794d3c101595"}, + {file = "rpds_py-0.12.0-cp38-none-win_amd64.whl", hash = "sha256:bec29b801b4adbf388314c0d050e851d53762ab424af22657021ce4b6eb41543"}, + {file = "rpds_py-0.12.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:1096ca0bf2d3426cbe79d4ccc91dc5aaa73629b08ea2d8467375fad8447ce11a"}, + {file = "rpds_py-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48aa98987d54a46e13e6954880056c204700c65616af4395d1f0639eba11764b"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7979d90ee2190d000129598c2b0c82f13053dba432b94e45e68253b09bb1f0f6"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:88857060b690a57d2ea8569bca58758143c8faa4639fb17d745ce60ff84c867e"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4eb74d44776b0fb0782560ea84d986dffec8ddd94947f383eba2284b0f32e35e"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f62581d7e884dd01ee1707b7c21148f61f2febb7de092ae2f108743fcbef5985"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f5dcb658d597410bb7c967c1d24eaf9377b0d621358cbe9d2ff804e5dd12e81"}, + {file = "rpds_py-0.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9bf9acce44e967a5103fcd820fc7580c7b0ab8583eec4e2051aec560f7b31a63"}, + {file = "rpds_py-0.12.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:240687b5be0f91fbde4936a329c9b7589d9259742766f74de575e1b2046575e4"}, + {file = "rpds_py-0.12.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:25740fb56e8bd37692ed380e15ec734be44d7c71974d8993f452b4527814601e"}, + {file = "rpds_py-0.12.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a54917b7e9cd3a67e429a630e237a90b096e0ba18897bfb99ee8bd1068a5fea0"}, + {file = "rpds_py-0.12.0-cp39-none-win32.whl", hash = "sha256:b92aafcfab3d41580d54aca35a8057341f1cfc7c9af9e8bdfc652f83a20ced31"}, + {file = "rpds_py-0.12.0-cp39-none-win_amd64.whl", hash = "sha256:cd316dbcc74c76266ba94eb021b0cc090b97cca122f50bd7a845f587ff4bf03f"}, + {file = 
"rpds_py-0.12.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0853da3d5e9bc6a07b2486054a410b7b03f34046c123c6561b535bb48cc509e1"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cb41ad20064e18a900dd427d7cf41cfaec83bcd1184001f3d91a1f76b3fcea4e"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bf7e7ae61957d5c4026b486be593ed3ec3dca3e5be15e0f6d8cf5d0a4990"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a952ae3eb460c6712388ac2ec706d24b0e651b9396d90c9a9e0a69eb27737fdc"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bedd91ae1dd142a4dc15970ed2c729ff6c73f33a40fa84ed0cdbf55de87c777"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:761531076df51309075133a6bc1db02d98ec7f66e22b064b1d513bc909f29743"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2baa6be130e8a00b6cbb9f18a33611ec150b4537f8563bddadb54c1b74b8193"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f05450fa1cd7c525c0b9d1a7916e595d3041ac0afbed2ff6926e5afb6a781b7f"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:81c4d1a3a564775c44732b94135d06e33417e829ff25226c164664f4a1046213"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e888be685fa42d8b8a3d3911d5604d14db87538aa7d0b29b1a7ea80d354c732d"}, + {file = "rpds_py-0.12.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6f8d7fe73d1816eeb5378409adc658f9525ecbfaf9e1ede1e2d67a338b0c7348"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0831d3ecdea22e4559cc1793f22e77067c9d8c451d55ae6a75bf1d116a8e7f42"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:513ccbf7420c30e283c25c82d5a8f439d625a838d3ba69e79a110c260c46813f"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:301bd744a1adaa2f6a5e06c98f1ac2b6f8dc31a5c23b838f862d65e32fca0d4b"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8832a4f83d4782a8f5a7b831c47e8ffe164e43c2c148c8160ed9a6d630bc02a"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2416ed743ec5debcf61e1242e012652a4348de14ecc7df3512da072b074440"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35585a8cb5917161f42c2104567bb83a1d96194095fc54a543113ed5df9fa436"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d389ff1e95b6e46ebedccf7fd1fadd10559add595ac6a7c2ea730268325f832c"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b007c2444705a2dc4a525964fd4dd28c3320b19b3410da6517cab28716f27d3"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:188912b22b6c8225f4c4ffa020a2baa6ad8fabb3c141a12dbe6edbb34e7f1425"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b4cf9ab9a0ae0cb122685209806d3f1dcb63b9fccdf1424fb42a129dc8c2faa"}, + {file = "rpds_py-0.12.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:2d34a5450a402b00d20aeb7632489ffa2556ca7b26f4a63c35f6fccae1977427"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:466030a42724780794dea71eb32db83cc51214d66ab3fb3156edd88b9c8f0d78"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68172622a5a57deb079a2c78511c40f91193548e8ab342c31e8cb0764d362459"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54cdfcda59251b9c2f87a05d038c2ae02121219a04d4a1e6fc345794295bdc07"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b75b912a0baa033350367a8a07a8b2d44fd5b90c890bfbd063a8a5f945f644b"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47aeceb4363851d17f63069318ba5721ae695d9da55d599b4d6fb31508595278"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0525847f83f506aa1e28eb2057b696fe38217e12931c8b1b02198cfe6975e142"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efbe0b5e0fd078ed7b005faa0170da4f72666360f66f0bb2d7f73526ecfd99f9"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0fadfdda275c838cba5102c7f90a20f2abd7727bf8f4a2b654a5b617529c5c18"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:56dd500411d03c5e9927a1eb55621e906837a83b02350a9dc401247d0353717c"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:6915fc9fa6b3ec3569566832e1bb03bd801c12cea030200e68663b9a87974e76"}, + {file = "rpds_py-0.12.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5f1519b080d8ce0a814f17ad9fb49fb3a1d4d7ce5891f5c85fc38631ca3a8dc4"}, + {file = "rpds_py-0.12.0.tar.gz", hash = "sha256:7036316cc26b93e401cedd781a579be606dad174829e6ad9e9c5a0da6e036f80"}, ] [[package]] @@ -5908,60 +5912,60 @@ test = ["pytest"] [[package]] name = "sqlalchemy" -version = "2.0.22" +version = "2.0.23" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f146c61ae128ab43ea3a0955de1af7e1633942c2b2b4985ac51cc292daf33222"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:875de9414393e778b655a3d97d60465eb3fae7c919e88b70cc10b40b9f56042d"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13790cb42f917c45c9c850b39b9941539ca8ee7917dacf099cc0b569f3d40da7"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e04ab55cf49daf1aeb8c622c54d23fa4bec91cb051a43cc24351ba97e1dd09f5"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a42c9fa3abcda0dcfad053e49c4f752eef71ecd8c155221e18b99d4224621176"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14cd3bcbb853379fef2cd01e7c64a5d6f1d005406d877ed9509afb7a05ff40a5"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-win32.whl", hash = "sha256:d143c5a9dada696bcfdb96ba2de4a47d5a89168e71d05a076e88a01386872f97"}, - {file = "SQLAlchemy-2.0.22-cp310-cp310-win_amd64.whl", hash = "sha256:ccd87c25e4c8559e1b918d46b4fa90b37f459c9b4566f1dfbce0eb8122571547"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:4f6ff392b27a743c1ad346d215655503cec64405d3b694228b3454878bf21590"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f776c2c30f0e5f4db45c3ee11a5f2a8d9de68e81eb73ec4237de1e32e04ae81c"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f1792d20d2f4e875ce7a113f43c3561ad12b34ff796b84002a256f37ce9437"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80eeb5189d7d4b1af519fc3f148fe7521b9dfce8f4d6a0820e8f5769b005051"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:69fd9e41cf9368afa034e1c81f3570afb96f30fcd2eb1ef29cb4d9371c6eece2"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54bcceaf4eebef07dadfde424f5c26b491e4a64e61761dea9459103ecd6ccc95"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-win32.whl", hash = "sha256:7ee7ccf47aa503033b6afd57efbac6b9e05180f492aeed9fcf70752556f95624"}, - {file = "SQLAlchemy-2.0.22-cp311-cp311-win_amd64.whl", hash = "sha256:b560f075c151900587ade06706b0c51d04b3277c111151997ea0813455378ae0"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2c9bac865ee06d27a1533471405ad240a6f5d83195eca481f9fc4a71d8b87df8"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:625b72d77ac8ac23da3b1622e2da88c4aedaee14df47c8432bf8f6495e655de2"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39a6e21110204a8c08d40ff56a73ba542ec60bab701c36ce721e7990df49fb9"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53a766cb0b468223cafdf63e2d37f14a4757476157927b09300c8c5832d88560"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0e1ce8ebd2e040357dde01a3fb7d30d9b5736b3e54a94002641dfd0aa12ae6ce"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:505f503763a767556fa4deae5194b2be056b64ecca72ac65224381a0acab7ebe"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-win32.whl", hash = "sha256:154a32f3c7b00de3d090bc60ec8006a78149e221f1182e3edcf0376016be9396"}, - {file = "SQLAlchemy-2.0.22-cp312-cp312-win_amd64.whl", hash = "sha256:129415f89744b05741c6f0b04a84525f37fbabe5dc3774f7edf100e7458c48cd"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3940677d341f2b685a999bffe7078697b5848a40b5f6952794ffcf3af150c301"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55914d45a631b81a8a2cb1a54f03eea265cf1783241ac55396ec6d735be14883"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2096d6b018d242a2bcc9e451618166f860bb0304f590d205173d317b69986c95"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:19c6986cf2fb4bc8e0e846f97f4135a8e753b57d2aaaa87c50f9acbe606bd1db"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ac28bd6888fe3c81fbe97584eb0b96804bd7032d6100b9701255d9441373ec1"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-win32.whl", hash = "sha256:cb9a758ad973e795267da334a92dd82bb7555cb36a0960dcabcf724d26299db8"}, - {file = "SQLAlchemy-2.0.22-cp37-cp37m-win_amd64.whl", hash = "sha256:40b1206a0d923e73aa54f0a6bd61419a96b914f1cd19900b6c8226899d9742ad"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:3aa1472bf44f61dd27987cd051f1c893b7d3b17238bff8c23fceaef4f1133868"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:56a7e2bb639df9263bf6418231bc2a92a773f57886d371ddb7a869a24919face"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccca778c0737a773a1ad86b68bda52a71ad5950b25e120b6eb1330f0df54c3d0"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6c3e9350f9fb16de5b5e5fbf17b578811a52d71bb784cc5ff71acb7de2a7f9"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:564e9f9e4e6466273dbfab0e0a2e5fe819eec480c57b53a2cdee8e4fdae3ad5f"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af66001d7b76a3fab0d5e4c1ec9339ac45748bc4a399cbc2baa48c1980d3c1f4"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-win32.whl", hash = "sha256:9e55dff5ec115316dd7a083cdc1a52de63693695aecf72bc53a8e1468ce429e5"}, - {file = "SQLAlchemy-2.0.22-cp38-cp38-win_amd64.whl", hash = "sha256:4e869a8ff7ee7a833b74868a0887e8462445ec462432d8cbeff5e85f475186da"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9886a72c8e6371280cb247c5d32c9c8fa141dc560124348762db8a8b236f8692"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a571bc8ac092a3175a1d994794a8e7a1f2f651e7c744de24a19b4f740fe95034"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db5ba8b7da759b727faebc4289a9e6a51edadc7fc32207a30f7c6203a181592"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0b3f2686c3f162123adba3cb8b626ed7e9b8433ab528e36ed270b4f70d1cdb"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c1fea8c0abcb070ffe15311853abfda4e55bf7dc1d4889497b3403629f3bf00"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4bb062784f37b2d75fd9b074c8ec360ad5df71f933f927e9e95c50eb8e05323c"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-win32.whl", hash = "sha256:58a3aba1bfb32ae7af68da3f277ed91d9f57620cf7ce651db96636790a78b736"}, - {file = "SQLAlchemy-2.0.22-cp39-cp39-win_amd64.whl", hash = "sha256:92e512a6af769e4725fa5b25981ba790335d42c5977e94ded07db7d641490a85"}, - {file = "SQLAlchemy-2.0.22-py3-none-any.whl", hash = "sha256:3076740335e4aaadd7deb3fe6dcb96b3015f1613bd190a4e1634e1b99b02ec86"}, - {file = "SQLAlchemy-2.0.22.tar.gz", hash = "sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:638c2c0b6b4661a4fd264f6fb804eccd392745c5887f9317feb64bb7cb03b3ea"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b5036aa326dc2df50cba3c958e29b291a80f604b1afa4c8ce73e78e1c9f01d"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:787af80107fb691934a01889ca8f82a44adedbf5ef3d6ad7d0f0b9ac557e0c34"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c14eba45983d2f48f7546bb32b47937ee2cafae353646295f0e99f35b14286ab"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0666031df46b9badba9bed00092a1ffa3aa063a5e68fa244acd9f08070e936d3"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89a01238fcb9a8af118eaad3ffcc5dedaacbd429dc6fdc43fe430d3a941ff965"}, + {file = 
"SQLAlchemy-2.0.23-cp310-cp310-win32.whl", hash = "sha256:cabafc7837b6cec61c0e1e5c6d14ef250b675fa9c3060ed8a7e38653bd732ff8"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-win_amd64.whl", hash = "sha256:87a3d6b53c39cd173990de2f5f4b83431d534a74f0e2f88bd16eabb5667e65c6"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d5578e6863eeb998980c212a39106ea139bdc0b3f73291b96e27c929c90cd8e1"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62d9e964870ea5ade4bc870ac4004c456efe75fb50404c03c5fd61f8bc669a72"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c80c38bd2ea35b97cbf7c21aeb129dcbebbf344ee01a7141016ab7b851464f8e"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75eefe09e98043cff2fb8af9796e20747ae870c903dc61d41b0c2e55128f958d"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd45a5b6c68357578263d74daab6ff9439517f87da63442d244f9f23df56138d"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a86cb7063e2c9fb8e774f77fbf8475516d270a3e989da55fa05d08089d77f8c4"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-win32.whl", hash = "sha256:b41f5d65b54cdf4934ecede2f41b9c60c9f785620416e8e6c48349ab18643855"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-win_amd64.whl", hash = "sha256:9ca922f305d67605668e93991aaf2c12239c78207bca3b891cd51a4515c72e22"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0f7fb0c7527c41fa6fcae2be537ac137f636a41b4c5a4c58914541e2f436b45"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c424983ab447dab126c39d3ce3be5bee95700783204a72549c3dceffe0fc8f4"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f508ba8f89e0a5ecdfd3761f82dda2a3d7b678a626967608f4273e0dba8f07ac"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6463aa765cf02b9247e38b35853923edbf2f6fd1963df88706bc1d02410a5577"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e599a51acf3cc4d31d1a0cf248d8f8d863b6386d2b6782c5074427ebb7803bda"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd54601ef9cc455a0c61e5245f690c8a3ad67ddb03d3b91c361d076def0b4c60"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-win32.whl", hash = "sha256:42d0b0290a8fb0165ea2c2781ae66e95cca6e27a2fbe1016ff8db3112ac1e846"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-win_amd64.whl", hash = "sha256:227135ef1e48165f37590b8bfc44ed7ff4c074bf04dc8d6f8e7f1c14a94aa6ca"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:14aebfe28b99f24f8a4c1346c48bc3d63705b1f919a24c27471136d2f219f02d"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e983fa42164577d073778d06d2cc5d020322425a509a08119bdcee70ad856bf"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e0dc9031baa46ad0dd5a269cb7a92a73284d1309228be1d5935dac8fb3cae24"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5f94aeb99f43729960638e7468d4688f6efccb837a858b34574e01143cf11f89"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:63bfc3acc970776036f6d1d0e65faa7473be9f3135d37a463c5eba5efcdb24c8"}, + {file = 
"SQLAlchemy-2.0.23-cp37-cp37m-win32.whl", hash = "sha256:f48ed89dd11c3c586f45e9eec1e437b355b3b6f6884ea4a4c3111a3358fd0c18"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-win_amd64.whl", hash = "sha256:1e018aba8363adb0599e745af245306cb8c46b9ad0a6fc0a86745b6ff7d940fc"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64ac935a90bc479fee77f9463f298943b0e60005fe5de2aa654d9cdef46c54df"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c4722f3bc3c1c2fcc3702dbe0016ba31148dd6efcd2a2fd33c1b4897c6a19693"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af79c06825e2836de21439cb2a6ce22b2ca129bad74f359bddd173f39582bf5"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:683ef58ca8eea4747737a1c35c11372ffeb84578d3aab8f3e10b1d13d66f2bc4"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d4041ad05b35f1f4da481f6b811b4af2f29e83af253bf37c3c4582b2c68934ab"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aeb397de65a0a62f14c257f36a726945a7f7bb60253462e8602d9b97b5cbe204"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-win32.whl", hash = "sha256:42ede90148b73fe4ab4a089f3126b2cfae8cfefc955c8174d697bb46210c8306"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-win_amd64.whl", hash = "sha256:964971b52daab357d2c0875825e36584d58f536e920f2968df8d581054eada4b"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:616fe7bcff0a05098f64b4478b78ec2dfa03225c23734d83d6c169eb41a93e55"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e680527245895aba86afbd5bef6c316831c02aa988d1aad83c47ffe92655e74"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9585b646ffb048c0250acc7dad92536591ffe35dba624bb8fd9b471e25212a35"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4895a63e2c271ffc7a81ea424b94060f7b3b03b4ea0cd58ab5bb676ed02f4221"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cc1d21576f958c42d9aec68eba5c1a7d715e5fc07825a629015fe8e3b0657fb0"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:967c0b71156f793e6662dd839da54f884631755275ed71f1539c95bbada9aaab"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-win32.whl", hash = "sha256:0a8c6aa506893e25a04233bc721c6b6cf844bafd7250535abb56cb6cc1368884"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-win_amd64.whl", hash = "sha256:f3420d00d2cb42432c1d0e44540ae83185ccbbc67a6054dcc8ab5387add6620b"}, + {file = "SQLAlchemy-2.0.23-py3-none-any.whl", hash = "sha256:31952bbc527d633b9479f5f81e8b9dfada00b91d6baba021a869095f1a97006d"}, + {file = "SQLAlchemy-2.0.23.tar.gz", hash = "sha256:c1bda93cbbe4aa2aa0aa8655c5aeda505cd219ff3e8da91d1d329e143e4aff69"}, ] [package.dependencies] @@ -5970,6 +5974,7 @@ typing-extensions = ">=4.2.0" [package.extras] aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] @@ -5980,7 +5985,7 @@ mssql-pyodbc = ["pyodbc"] mypy = ["mypy (>=0.910)"] mysql = ["mysqlclient (>=1.4.0)"] mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7)"] +oracle = ["cx-oracle (>=8)"] oracle-oracledb = 
["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] @@ -6416,13 +6421,13 @@ files = [ [[package]] name = "tomlkit" -version = "0.12.1" +version = "0.12.2" description = "Style preserving TOML library" optional = false python-versions = ">=3.7" files = [ - {file = "tomlkit-0.12.1-py3-none-any.whl", hash = "sha256:712cbd236609acc6a3e2e97253dfc52d4c2082982a88f61b640ecf0817eab899"}, - {file = "tomlkit-0.12.1.tar.gz", hash = "sha256:38e1ff8edb991273ec9f6181244a6a391ac30e9f5098e7535640ea6be97a7c86"}, + {file = "tomlkit-0.12.2-py3-none-any.whl", hash = "sha256:eeea7ac7563faeab0a1ed8fe12c2e5a51c61f933f2502f7e9db0241a65163ad0"}, + {file = "tomlkit-0.12.2.tar.gz", hash = "sha256:df32fab589a81f0d7dc525a4267b6d7a64ee99619cbd1eeb0fae32c1dd426977"}, ] [[package]] @@ -6560,13 +6565,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.6.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "transformers" -version = "4.34.1" +version = "4.35.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = true python-versions = ">=3.8.0" files = [ - {file = "transformers-4.34.1-py3-none-any.whl", hash = "sha256:d06ac09151d7b845e4a4acd6b143a591d946031ee67b4cbb20693b241920ffc0"}, - {file = "transformers-4.34.1.tar.gz", hash = "sha256:1d0258d5a18063b66005bbe1e3276ec5943d9ab4ab47f020db1fd485cc40ea22"}, + {file = "transformers-4.35.0-py3-none-any.whl", hash = "sha256:45aa9370d7d9ba1c43e6bfa04d7f8b61238497d4b646e573fd95e597fe4040ff"}, + {file = "transformers-4.35.0.tar.gz", hash = "sha256:e4b41763f651282fc979348d3aa148244387ddc9165f4b18455798c770ae23b9"}, ] [package.dependencies] @@ -6592,13 +6597,12 @@ all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", 
"timeout-decorator", "timm", "tokenizers (>=0.14,<0.15)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.15)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.15)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece 
(>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.15)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.15)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.15)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.15)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] docs-specific = ["hf-doc-builder"] -fairscale = ["fairscale (>0.3)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] @@ -6618,7 +6622,7 @@ serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder 
(>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] @@ -7031,13 +7035,13 @@ files = [ [[package]] name = "weasel" -version = "0.3.3" +version = "0.3.4" description = "Weasel: A small and easy workflow system" optional = true python-versions = ">=3.6" files = [ - {file = "weasel-0.3.3-py3-none-any.whl", hash = "sha256:141b12fd0d38599ff8c567208d1db0f5af1b532363fadeba27d7bc87d751d88a"}, - {file = "weasel-0.3.3.tar.gz", hash = "sha256:924962dfc9d89602552e7332846e95d264eca18aebe2b96c2527d46b7bb7cf9c"}, + {file = "weasel-0.3.4-py3-none-any.whl", hash = "sha256:ee48a944f051d007201c2ea1661d0c41035028c5d5a8bcb29a0b10f1100206ae"}, + {file = "weasel-0.3.4.tar.gz", hash = "sha256:eb16f92dc9f1a3ffa89c165e3a9acd28018ebb656e0da4da02c0d7d8ae3f6178"}, ] [package.dependencies] @@ -7415,4 +7419,4 @@ query-tools = ["guidance", "jsonpath-ng", "lm-format-enforcer", "rank-bm25", "sc [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<3.12" -content-hash = "49dac3d429fb46da489295718d09ef8636315e5681dbf1e912491efad6992601" +content-hash = "2723c22f4863dc503fbbb3763ac44f391d22e0fb8a6b776b84bdd2790cb9652e" diff --git a/pyproject.toml b/pyproject.toml index cd5a20ae037cf0bcb25b65032a460e313621e013..4c92e57aa06d21584ea89b5d1e04b061dcf0efc5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ langchain = ">=0.0.303" nest-asyncio = "^1.5.8" nltk = "^3.8.1" numpy = "*" -openai = "<1" +openai = ">=1.0.0" pandas = "*" python = ">=3.8.1,<3.12" tenacity = ">=8.2.0,<9.0.0" diff --git a/tests/agent/openai/test_openai_agent.py b/tests/agent/openai/test_openai_agent.py index de38e78258e968a78e92887dba22be7c7ce7ea88..a24523409c0771a436ae88845493fb6b6f29a623 100644 --- a/tests/agent/openai/test_openai_agent.py +++ b/tests/agent/openai/test_openai_agent.py @@ -1,4 +1,5 @@ from typing import Any, List, Sequence +from unittest.mock import MagicMock, patch import pytest from llama_index.agent.openai_agent import OpenAIAgent @@ -7,29 +8,32 @@ from llama_index.llms.base import ChatMessage, ChatResponse from llama_index.llms.mock import MockLLM from llama_index.llms.openai import OpenAI from llama_index.tools.function_tool import FunctionTool -from pytest import MonkeyPatch +from openai.types.chat.chat_completion import ChatCompletion, Choice +from openai.types.chat.chat_completion_message import ChatCompletionMessage -def mock_chat_completion(*args: Any, **kwargs: Any) -> dict: +def mock_chat_completion(*args: Any, **kwargs: Any) -> ChatCompletion: if "functions" in kwargs: if not kwargs["functions"]: raise ValueError("functions must not be empty") # Example taken from 
diff --git a/tests/agent/openai/test_openai_agent.py b/tests/agent/openai/test_openai_agent.py
index de38e78258e968a78e92887dba22be7c7ce7ea88..a24523409c0771a436ae88845493fb6b6f29a623 100644
--- a/tests/agent/openai/test_openai_agent.py
+++ b/tests/agent/openai/test_openai_agent.py
@@ -1,4 +1,5 @@
 from typing import Any, List, Sequence
+from unittest.mock import MagicMock, patch
 
 import pytest
 from llama_index.agent.openai_agent import OpenAIAgent
@@ -7,29 +8,32 @@ from llama_index.llms.base import ChatMessage, ChatResponse
 from llama_index.llms.mock import MockLLM
 from llama_index.llms.openai import OpenAI
 from llama_index.tools.function_tool import FunctionTool
-from pytest import MonkeyPatch
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 
 
-def mock_chat_completion(*args: Any, **kwargs: Any) -> dict:
+def mock_chat_completion(*args: Any, **kwargs: Any) -> ChatCompletion:
     if "functions" in kwargs:
         if not kwargs["functions"]:
             raise ValueError("functions must not be empty")
 
     # Example taken from https://platform.openai.com/docs/api-reference/chat/create
-    return {
-        "id": "chatcmpl-abc123",
-        "object": "chat.completion",
-        "created": 1677858242,
-        "model": "gpt-3.5-turbo-0301",
-        "usage": {"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
-        "choices": [
-            {
-                "message": {"role": "assistant", "content": "\n\nThis is a test!"},
-                "finish_reason": "stop",
-                "index": 0,
-            }
+    return ChatCompletion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
+        choices=[
+            Choice(
+                message=ChatCompletionMessage(
+                    role="assistant", content="\n\nThis is a test!"
+                ),
+                finish_reason="stop",
+                index=0,
+            )
         ],
-    }
+    )
 
 
 @pytest.fixture()
@@ -67,13 +71,10 @@ Answer: 2
 """
 
 
-def test_chat_basic(
-    add_tool: FunctionTool,
-    monkeypatch: MonkeyPatch,
-) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.completion_with_retry", mock_chat_completion
-    )
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat_basic(MockSyncOpenAI: MagicMock, add_tool: FunctionTool) -> None:
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.chat.completions.create.return_value = mock_chat_completion()
 
     llm = OpenAI(model="gpt-3.5-turbo")
 
@@ -86,12 +87,10 @@ def test_chat_basic(
     assert response.response == "\n\nThis is a test!"
 
 
-def test_chat_no_functions(
-    monkeypatch: MonkeyPatch,
-) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.completion_with_retry", mock_chat_completion
-    )
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat_no_functions(MockSyncOpenAI: MagicMock) -> None:
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.chat.completions.create.return_value = mock_chat_completion()
 
     llm = OpenAI(model="gpt-3.5-turbo")
diff --git a/tests/conftest.py b/tests/conftest.py
index cc6dac336c5b9cc265550570762a851cce00e1e3..9bdf83dabc734c5a47560138ddf51e78ce02a20d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -115,15 +115,11 @@ class CachedOpenAIApiKeys:
             os.environ["OPENAI_API_KEY"] = str(self.set_env_key_to)
             os.environ["OPENAI_API_TYPE"] = str(self.set_env_type_to)
-        openai.api_key = self.set_library_key_to
-        openai.api_type = self.set_library_type_to
 
         if self.set_fake_key:
-            openai.api_key = "sk-" + "a" * 48
+            os.environ["OPENAI_API_KEY"] = "sk-" + "a" * 48
 
     # No matter what, set the environment variable back to what it was
     def __exit__(self, *exc: object) -> None:
         os.environ["OPENAI_API_KEY"] = str(self.api_env_variable_was)
         os.environ["OPENAI_API_TYPE"] = str(self.api_env_type_was)
-        openai.api_key = self.openai_api_key_was
-        openai.api_type = self.openai_api_type_was
diff --git a/tests/embeddings/test_base.py b/tests/embeddings/test_base.py
index 715803afc23365ddb196f2fb78152fb25b288b1f..3eb3d1bec96df1a6e3a0171e36f535156e2f4dcb 100644
--- a/tests/embeddings/test_base.py
+++ b/tests/embeddings/test_base.py
@@ -3,8 +3,6 @@ import os
 from typing import Any, List
 from unittest.mock import patch
 
-import openai
-import pytest
 from llama_index.embeddings.base import SimilarityMode, mean_agg
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -103,17 +101,13 @@ def test_mean_agg() -> None:
 
 def test_validates_api_key_is_present() -> None:
     with CachedOpenAIApiKeys():
-        with pytest.raises(ValueError, match="No API key found for OpenAI."):
-            OpenAIEmbedding()
-
         os.environ["OPENAI_API_KEY"] = "sk-" + ("a" * 48)
 
         # We can create a new LLM when the env variable is set
         assert OpenAIEmbedding()
 
         os.environ["OPENAI_API_KEY"] = ""
-        openai.api_key = "sk-" + ("a" * 48)
 
         # We can create a new LLM when the api_key is set on the
-        # library directly
-        assert OpenAIEmbedding()
+        # class directly
+        assert OpenAIEmbedding(api_key="sk-" + ("a" * 48))
diff --git a/tests/llms/test_localai.py b/tests/llms/test_localai.py
index 5dafe59b4951705803c626326fab78d15bfbb7b9..682b494b5d5c3ffbccede92df824912cddfcc2ae 100644
--- a/tests/llms/test_localai.py
+++ b/tests/llms/test_localai.py
@@ -1,8 +1,10 @@
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch
 
-import pytest
 from llama_index.llms import LocalAI
 from llama_index.llms.base import ChatMessage
+from openai.types import Completion, CompletionChoice
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 
 
 def test_interfaces() -> None:
@@ -11,75 +13,66 @@ def test_interfaces() -> None:
     assert llm.model == "placeholder"
 
 
-def test_completion() -> None:
+def mock_chat_completion(text: str) -> ChatCompletion:
+    return ChatCompletion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
+        choices=[
+            Choice(
+                message=ChatCompletionMessage(role="assistant", content=text),
+                finish_reason="stop",
+                index=0,
+            )
+        ],
+    )
+
+
+def mock_completion(text: str) -> Completion:
+    return Completion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
+        choices=[
+            CompletionChoice(
+                text=text,
+                finish_reason="stop",
+                index=0,
+            )
+        ],
+    )
+
+
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_completion(MockSyncOpenAI: MagicMock) -> None:
+    text = "placeholder"
+
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.completions.create.return_value = mock_completion(text)
+
     llm = LocalAI(model="models/placeholder.gguf")
-    text = "...\n\nIt was just another day at the office. The sun had ris"
-    with patch(
-        "llama_index.llms.openai.completion_with_retry",
-        return_value={
-            "id": "123",
-            "object": "text_completion",
-            "created": 1696036786,
-            "model": "models/placeholder.gguf",
-            "choices": [
-                {
-                    "text": text,
-                    "index": 0,
-                    "logprobs": None,
-                    "finish_reason": "length",
-                }
-            ],
-            "usage": {"prompt_tokens": 13, "completion_tokens": 16, "total_tokens": 29},
-        },
-    ) as mock_completion:
-        response = llm.complete(
-            "A long time ago in a galaxy far, far away", use_chat_completions=False
-        )
+
+    response = llm.complete(
+        "A long time ago in a galaxy far, far away", use_chat_completions=False
+    )
     assert response.text == text
-    mock_completion.assert_called_once()
-    # Check we remove the max_tokens if unspecified
-    assert "max_tokens" not in mock_completion.call_args.kwargs
 
 
-def test_chat() -> None:
-    llm = LocalAI(model="models/placeholder.gguf", globally_use_chat_completions=True)
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat(MockSyncOpenAI: MagicMock) -> None:
     content = "placeholder"
-    with patch(
-        "llama_index.llms.openai.completion_with_retry",
-        return_value={
-            "id": "123",
-            "object": "chat.completion",
-            "created": 1696283017,
-            "model": "models/placeholder.gguf",
-            "choices": [
-                {
-                    "index": 0,
-                    "message": {
-                        "role": "assistant",
-                        "content": content,
-                    },
-                    "finish_reason": "length",
-                }
-            ],
-            "usage": {"prompt_tokens": 5, "completion_tokens": 16, "total_tokens": 21},
-        },
-    ) as mock_chat:
-        response = llm.chat([ChatMessage(role="user", content="test message")])
-        assert response.message.content == content
-        mock_chat.assert_called_once()
-        # Check we remove the max_tokens if unspecified
-        assert "max_tokens" not in mock_chat.call_args.kwargs
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.chat.completions.create.return_value = mock_chat_completion(content)
 
-def test_forgetting_kwarg() -> None:
-    llm = LocalAI(model="models/placeholder.gguf")
+    llm = LocalAI(model="models/placeholder.gguf", globally_use_chat_completions=True)
 
-    with patch(
-        "llama_index.llms.openai.completion_with_retry", return_value={}
-    ) as mock_completion, pytest.raises(NotImplementedError, match="/chat/completions"):
-        llm.complete("A long time ago in a galaxy far, far away")
-        mock_completion.assert_not_called()
+    response = llm.chat([ChatMessage(role="user", content="test message")])
+    assert response.message.content == content
 
 
 def test_serialization() -> None:
diff --git a/tests/llms/test_openai.py b/tests/llms/test_openai.py
index 710ff129b476795c7e34fe8fa7f729d58d5925f5..9921739e81321dcc93581e6b9f88b455996bf56c 100644
--- a/tests/llms/test_openai.py
+++ b/tests/llms/test_openai.py
@@ -1,11 +1,18 @@
 import os
 from typing import Any, AsyncGenerator, Generator
+from unittest.mock import AsyncMock, MagicMock, patch
 
-import openai
 import pytest
 from llama_index.llms.base import ChatMessage
 from llama_index.llms.openai import OpenAI
-from pytest import MonkeyPatch
+from openai.types.chat.chat_completion import (
+    ChatCompletion,
+    ChatCompletionMessage,
+    Choice,
+)
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta
+from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice
+from openai.types.completion import Completion, CompletionChoice, CompletionUsage
 
 from tests.conftest import CachedOpenAIApiKeys
 
@@ -29,10 +36,32 @@ def mock_completion(*args: Any, **kwargs: Any) -> dict:
     }
 
 
+def mock_completion_v1(*args: Any, **kwargs: Any) -> Completion:
+    return Completion(
+        id="cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+        object="text_completion",
+        created=1589478378,
+        model="text-davinci-003",
+        choices=[
+            CompletionChoice(
+                text="\n\nThis is indeed a test",
+                index=0,
+                logprobs=None,
+                finish_reason="length",
+            )
+        ],
+        usage=CompletionUsage(prompt_tokens=5, completion_tokens=7, total_tokens=12),
+    )
+
+
 async def mock_async_completion(*args: Any, **kwargs: Any) -> dict:
     return mock_completion(*args, **kwargs)
 
 
+async def mock_async_completion_v1(*args: Any, **kwargs: Any) -> Completion:
+    return mock_completion_v1(*args, **kwargs)
+
+
 def mock_chat_completion(*args: Any, **kwargs: Any) -> dict:
     # Example taken from https://platform.openai.com/docs/api-reference/chat/create
     return {
@@ -51,6 +80,25 @@ def mock_chat_completion(*args: Any, **kwargs: Any) -> dict:
     }
 
 
+def mock_chat_completion_v1(*args: Any, **kwargs: Any) -> ChatCompletion:
+    return ChatCompletion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage=CompletionUsage(prompt_tokens=13, completion_tokens=7, total_tokens=20),
+        choices=[
+            Choice(
+                message=ChatCompletionMessage(
+                    role="assistant", content="\n\nThis is a test!"
+                ),
+                finish_reason="stop",
+                index=0,
+            )
+        ],
+    )
+
+
 def mock_completion_stream(*args: Any, **kwargs: Any) -> Generator[dict, None, None]:
     # Example taken from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
     responses = [
@@ -72,6 +120,28 @@ def mock_completion_stream(*args: Any, **kwargs: Any) -> Generator[dict, None, N
     yield from responses
 
 
+def mock_completion_stream_v1(
+    *args: Any, **kwargs: Any
+) -> Generator[Completion, None, None]:
+    responses = [
+        Completion(
+            id="cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+            object="text_completion",
+            created=1589478378,
+            model="text-davinci-003",
+            choices=[CompletionChoice(text="1", finish_reason="stop", index=0)],
+        ),
+        Completion(
+            id="cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+            object="text_completion",
+            created=1589478378,
+            model="text-davinci-003",
+            choices=[CompletionChoice(text="2", finish_reason="stop", index=0)],
+        ),
+    ]
+    yield from responses
+
+
 async def mock_async_completion_stream(
     *args: Any, **kwargs: Any
 ) -> AsyncGenerator[dict, None]:
@@ -82,6 +152,16 @@ async def mock_async_completion_stream(
     return gen()
 
 
+async def mock_async_completion_stream_v1(
+    *args: Any, **kwargs: Any
+) -> AsyncGenerator[Completion, None]:
+    async def gen() -> AsyncGenerator[Completion, None]:
+        for response in mock_completion_stream_v1(*args, **kwargs):
+            yield response
+
+    return gen()
+
+
 def mock_chat_completion_stream(
     *args: Any, **kwargs: Any
 ) -> Generator[dict, None, None]:
@@ -123,11 +203,57 @@ def mock_chat_completion_stream(
     yield from responses
 
 
-def test_completion_model_basic(monkeypatch: MonkeyPatch) -> None:
+def mock_chat_completion_stream_v1(
+    *args: Any, **kwargs: Any
+) -> Generator[ChatCompletionChunk, None, None]:
+    responses = [
+        ChatCompletionChunk(
+            id="chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+            object="chat.completion.chunk",
+            created=1677825464,
+            model="gpt-3.5-turbo-0301",
+            choices=[
+                ChunkChoice(
+                    delta=ChoiceDelta(role="assistant"), finish_reason=None, index=0
+                )
+            ],
+        ),
+        ChatCompletionChunk(
+            id="chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+            object="chat.completion.chunk",
+            created=1677825464,
+            model="gpt-3.5-turbo-0301",
+            choices=[
+                ChunkChoice(
+                    delta=ChoiceDelta(content="\n\n"), finish_reason=None, index=0
+                )
+            ],
+        ),
+        ChatCompletionChunk(
+            id="chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+            object="chat.completion.chunk",
+            created=1677825464,
+            model="gpt-3.5-turbo-0301",
+            choices=[
+                ChunkChoice(delta=ChoiceDelta(content="2"), finish_reason=None, index=0)
+            ],
+        ),
+        ChatCompletionChunk(
+            id="chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+            object="chat.completion.chunk",
+            created=1677825464,
+            model="gpt-3.5-turbo-0301",
+            choices=[ChunkChoice(delta=ChoiceDelta(), finish_reason="stop", index=0)],
+        ),
+    ]
+    yield from responses
+
+
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_completion_model_basic(MockSyncOpenAI: MagicMock) -> None:
     with CachedOpenAIApiKeys(set_fake_key=True):
-        monkeypatch.setattr(
-            "llama_index.llms.openai.completion_with_retry", mock_completion
-        )
+        mock_instance = MockSyncOpenAI.return_value
+        mock_instance.completions.create.return_value = mock_completion_v1()
 
         llm = OpenAI(model="text-davinci-003")
         prompt = "test prompt"
@@ -140,11 +266,11 @@ def test_completion_model_basic(monkeypatch: MonkeyPatch) -> None:
         assert chat_response.message.content == "\n\nThis is indeed a test"
 
 
-def test_chat_model_basic(monkeypatch: MonkeyPatch) -> None:
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat_model_basic(MockSyncOpenAI: MagicMock) -> None:
     with CachedOpenAIApiKeys(set_fake_key=True):
-        monkeypatch.setattr(
-            "llama_index.llms.openai.completion_with_retry", mock_chat_completion
-        )
+        mock_instance = MockSyncOpenAI.return_value
+        mock_instance.chat.completions.create.return_value = mock_chat_completion_v1()
 
         llm = OpenAI(model="gpt-3.5-turbo")
         prompt = "test prompt"
@@ -157,47 +283,58 @@ def test_chat_model_basic(monkeypatch: MonkeyPatch) -> None:
         assert chat_response.message.content == "\n\nThis is a test!"
 
 
-def test_completion_model_streaming(monkeypatch: MonkeyPatch) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.completion_with_retry", mock_completion_stream
-    )
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_completion_model_streaming(MockSyncOpenAI: MagicMock) -> None:
+    with CachedOpenAIApiKeys(set_fake_key=True):
+        mock_instance = MockSyncOpenAI.return_value
+        mock_instance.completions.create.return_value = mock_completion_stream_v1()
 
-    llm = OpenAI(model="text-davinci-003")
-    prompt = "test prompt"
-    message = ChatMessage(role="user", content="test message")
+        llm = OpenAI(model="text-davinci-003")
+        prompt = "test prompt"
+        message = ChatMessage(role="user", content="test message")
 
-    response_gen = llm.stream_complete(prompt)
-    responses = list(response_gen)
-    assert responses[-1].text == "12"
-    chat_response_gen = llm.stream_chat([message])
-    chat_responses = list(chat_response_gen)
-    assert chat_responses[-1].message.content == "12"
+        response_gen = llm.stream_complete(prompt)
+        responses = list(response_gen)
+        assert responses[-1].text == "12"
 
+        mock_instance.completions.create.return_value = mock_completion_stream_v1()
+        chat_response_gen = llm.stream_chat([message])
+        chat_responses = list(chat_response_gen)
+        assert chat_responses[-1].message.content == "12"
 
-def test_chat_model_streaming(monkeypatch: MonkeyPatch) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.completion_with_retry", mock_chat_completion_stream
-    )
 
-    llm = OpenAI(model="gpt-3.5-turbo")
-    prompt = "test prompt"
-    message = ChatMessage(role="user", content="test message")
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat_model_streaming(MockSyncOpenAI: MagicMock) -> None:
+    with CachedOpenAIApiKeys(set_fake_key=True):
+        mock_instance = MockSyncOpenAI.return_value
+        mock_instance.chat.completions.create.return_value = (
+            mock_chat_completion_stream_v1()
+        )
+
+        llm = OpenAI(model="gpt-3.5-turbo")
+        prompt = "test prompt"
+        message = ChatMessage(role="user", content="test message")
 
-    response_gen = llm.stream_complete(prompt)
-    responses = list(response_gen)
-    assert responses[-1].text == "\n\n2"
+        response_gen = llm.stream_complete(prompt)
+        responses = list(response_gen)
+        assert responses[-1].text == "\n\n2"
 
-    chat_response_gen = llm.stream_chat([message])
-    chat_responses = list(chat_response_gen)
-    assert chat_responses[-1].message.content == "\n\n2"
-    assert chat_responses[-1].message.role == "assistant"
+        mock_instance.chat.completions.create.return_value = (
+            mock_chat_completion_stream_v1()
+        )
+        chat_response_gen = llm.stream_chat([message])
+        chat_responses = list(chat_response_gen)
+        assert chat_responses[-1].message.content == "\n\n2"
+        assert chat_responses[-1].message.role == "assistant"
 
 
 @pytest.mark.asyncio()
-async def test_completion_model_async(monkeypatch: MonkeyPatch) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.acompletion_with_retry", mock_async_completion
-    )
+@patch("llama_index.llms.openai.AsyncOpenAI")
+async def test_completion_model_async(MockAsyncOpenAI: MagicMock) -> None:
+    mock_instance = MockAsyncOpenAI.return_value
+    create_fn = AsyncMock()
+    create_fn.side_effect = mock_async_completion_v1
+    mock_instance.completions.create = create_fn
 
     llm = OpenAI(model="text-davinci-003")
     prompt = "test prompt"
@@ -211,11 +348,12 @@ async def test_completion_model_async(monkeypatch: MonkeyPatch) -> None:
 
 
 @pytest.mark.asyncio()
-async def test_completion_model_async_streaming(monkeypatch: MonkeyPatch) -> None:
-    monkeypatch.setattr(
-        "llama_index.llms.openai.acompletion_with_retry",
-        mock_async_completion_stream,
-    )
+@patch("llama_index.llms.openai.AsyncOpenAI")
+async def test_completion_model_async_streaming(MockAsyncOpenAI: MagicMock) -> None:
+    mock_instance = MockAsyncOpenAI.return_value
+    create_fn = AsyncMock()
+    create_fn.side_effect = mock_async_completion_stream_v1
+    mock_instance.completions.create = create_fn
 
     llm = OpenAI(model="text-davinci-003")
     prompt = "test prompt"
@@ -224,6 +362,7 @@ async def test_completion_model_async_streaming(monkeypatch: MonkeyPatch) -> Non
     response_gen = await llm.astream_complete(prompt)
     responses = [item async for item in response_gen]
     assert responses[-1].text == "12"
+
     chat_response_gen = await llm.astream_chat([message])
     chat_responses = [item async for item in chat_response_gen]
     assert chat_responses[-1].message.content == "12"
@@ -231,17 +370,13 @@ async def test_completion_model_async_streaming(monkeypatch: MonkeyPatch) -> Non
 
 def test_validates_api_key_is_present() -> None:
     with CachedOpenAIApiKeys():
-        with pytest.raises(ValueError, match="No API key found for OpenAI."):
-            OpenAI()
-
-        os.environ["OPENAI_API_KEY"] = "sk-" + ("a" * 48)
+        os.environ["OPENAI_API_KEY"] = "sk-" + ("a" * 48)
 
-        # We can create a new LLM when the env variable is set
-        assert OpenAI()
+        # We can create a new LLM when the env variable is set
+        assert OpenAI()
 
-        os.environ["OPENAI_API_KEY"] = ""
-        openai.api_key = "sk-" + ("a" * 48)
+        os.environ["OPENAI_API_KEY"] = ""
 
-        # We can create a new LLM when the api_key is set on the
-        # library directly
-        assert OpenAI()
+        # We can create a new LLM when the api_key is set on the
+        # class directly
+        assert OpenAI(api_key="sk-" + ("a" * 48))
diff --git a/tests/llms/test_openai_like.py b/tests/llms/test_openai_like.py
index b1d03ea37ff20c8554c8381dfa27b4e02155da6d..bbd97d6b48eaabb2e4d72f9cf41923312c9477ab 100644
--- a/tests/llms/test_openai_like.py
+++ b/tests/llms/test_openai_like.py
@@ -1,8 +1,11 @@
 from typing import List
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch
 
 from llama_index.llms import OpenAILike
 from llama_index.llms.base import ChatMessage
+from openai.types import Completion, CompletionChoice
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 
 
 class MockTokenizer:
@@ -16,7 +19,47 @@ def test_interfaces() -> None:
     assert llm.model == "placeholder"
 
 
-def test_completion() -> None:
+def mock_chat_completion(text: str) -> ChatCompletion:
+    return ChatCompletion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
+        choices=[
+            Choice(
+                message=ChatCompletionMessage(role="assistant", content=text),
+                finish_reason="stop",
+                index=0,
+            )
+        ],
+    )
+
+
+def mock_completion(text: str) -> Completion:
+    return Completion(
+        id="chatcmpl-abc123",
+        object="chat.completion",
+        created=1677858242,
+        model="gpt-3.5-turbo-0301",
+        usage={"prompt_tokens": 13, "completion_tokens": 7, "total_tokens": 20},
+        choices=[
+            CompletionChoice(
+                text=text,
+                finish_reason="stop",
+                index=0,
+            )
+        ],
+    )
+
+
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_completion(MockSyncOpenAI: MagicMock) -> None:
+    text = "placeholder"
+
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.completions.create.return_value = mock_completion(text)
+
     llm = OpenAILike(
         model="placeholder",
         is_chat_model=False,
@@ -24,58 +67,23 @@ def test_completion() -> None:
         tokenizer=MockTokenizer(),
     )
 
-    text = "...\n\nIt was just another day at the office. The sun had ris"
-    with patch(
-        "llama_index.llms.openai.completion_with_retry",
-        return_value={
-            "id": "123",
-            "object": "text_completion",
-            "created": 1696036786,
-            "model": "models/placeholder.gguf",
-            "choices": [
-                {
-                    "text": text,
-                    "index": 0,
-                    "logprobs": None,
-                    "finish_reason": "length",
-                }
-            ],
-            "usage": {"prompt_tokens": 13, "completion_tokens": 16, "total_tokens": 29},
-        },
-    ) as mock_completion:
-        response = llm.complete("A long time ago in a galaxy far, far away")
+    response = llm.complete("A long time ago in a galaxy far, far away")
     assert response.text == text
-    mock_completion.assert_called_once()
 
 
-def test_chat() -> None:
+@patch("llama_index.llms.openai.SyncOpenAI")
+def test_chat(MockSyncOpenAI: MagicMock) -> None:
+    content = "placeholder"
+
+    mock_instance = MockSyncOpenAI.return_value
+    mock_instance.chat.completions.create.return_value = mock_chat_completion(content)
+
     llm = OpenAILike(
         model="models/placeholder", is_chat_model=True, tokenizer=MockTokenizer()
     )
-    content = "placeholder"
-    with patch(
-        "llama_index.llms.openai.completion_with_retry",
-        return_value={
-            "id": "123",
-            "object": "chat.completion",
-            "created": 1696283017,
-            "model": "models/placeholder.gguf",
-            "choices": [
-                {
-                    "index": 0,
-                    "message": {
-                        "role": "assistant",
-                        "content": content,
-                    },
-                    "finish_reason": "length",
-                }
-            ],
-            "usage": {"prompt_tokens": 5, "completion_tokens": 16, "total_tokens": 21},
-        },
-    ) as mock_chat:
-        response = llm.chat([ChatMessage(role="user", content="test message")])
+
+    response = llm.chat([ChatMessage(role="user", content="test message")])
     assert response.message.content == content
-    mock_chat.assert_called_once()
 
 
 def test_serialization() -> None:
diff --git a/tests/llms/test_openai_utils.py b/tests/llms/test_openai_utils.py
index ab7e30c203d429c06bd51a2455d10591b749f666..a42b7bbb1d3a0c737ff573d65aa6171915235fe1 100644
--- a/tests/llms/test_openai_utils.py
+++ b/tests/llms/test_openai_utils.py
@@ -1,13 +1,15 @@
 from typing import List
 
-import openai
 import pytest
 from llama_index.llms.base import ChatMessage, MessageRole
 from llama_index.llms.openai_utils import (
-    create_retry_decorator,
-    from_openai_message_dicts,
+    from_openai_messages,
     to_openai_message_dicts,
 )
+from openai.types.chat.chat_completion_message import (
+    ChatCompletionMessage,
+    FunctionCall,
+)
 
 
 @pytest.fixture()
@@ -35,40 +37,41 @@ def chat_messages_with_function_calling() -> List[ChatMessage]:
 
 
 @pytest.fixture()
-def openi_message_dicts_with_function_calling() -> List[dict]:
+def openi_message_dicts_with_function_calling() -> List[ChatCompletionMessage]:
     return [
-        {"role": "user", "content": "test question with functions"},
-        {
-            "role": "assistant",
-            "content": None,
-            "function_call": {
-                "name": "get_current_weather",
-                "arguments": '{ "location": "Boston, MA"}',
-            },
-        },
-        {
-            "role": "function",
-            "content": '{"temperature": "22", "unit": "celsius", '
+        ChatCompletionMessage(role="user", content="test question with functions"),
+        ChatCompletionMessage(
+            role="assistant",
+            content=None,
+            function_call=FunctionCall(
+                name="get_current_weather",
+                arguments='{ "location": "Boston, MA"}',
+            ),
+        ),
+        ChatCompletionMessage(
+            role="function",
+            content='{"temperature": "22", "unit": "celsius", '
             '"description": "Sunny"}',
-            "name": "get_current_weather",
-        },
+            name="get_current_weather",
+        ),
     ]
 
 
 @pytest.fixture()
-def azure_openi_message_dicts_with_function_calling() -> List[dict]:
+def azure_openi_message_dicts_with_function_calling() -> List[ChatCompletionMessage]:
     """
     Taken from:
     - https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling.
     """
     return [
-        {
-            "role": "assistant",
-            "function_call": {
-                "name": "search_hotels",
-                "arguments": '{\n "location": "San Diego",\n "max_price": 300,\n "features": "beachfront,free breakfast"\n}',
-            },
-        }
+        ChatCompletionMessage(
+            role="assistant",
+            content=None,
+            function_call=FunctionCall(
+                name="search_hotels",
+                arguments='{\n "location": "San Diego",\n "max_price": 300,\n "features": "beachfront,free breakfast"\n}',
+            ),
+        )
     ]
@@ -114,58 +117,39 @@ def test_to_openai_message_dicts_basic_string() -> None:
 
 def test_to_openai_message_dicts_function_calling(
     chat_messages_with_function_calling: List[ChatMessage],
-    openi_message_dicts_with_function_calling: List[dict],
+    openi_message_dicts_with_function_calling: List[ChatCompletionMessage],
 ) -> None:
     openai_messages = to_openai_message_dicts(chat_messages_with_function_calling)
-    assert openai_messages == openi_message_dicts_with_function_calling
+    for openai_message_dict, openai_message in zip(
+        openai_messages, openi_message_dicts_with_function_calling
+    ):
+        for key in openai_message_dict:
+            assert openai_message_dict[key] == getattr(openai_message, key, None)  # type: ignore
 
 
 def test_from_openai_message_dicts_function_calling(
-    openi_message_dicts_with_function_calling: List[dict],
+    openi_message_dicts_with_function_calling: List[ChatCompletionMessage],
     chat_messages_with_function_calling: List[ChatMessage],
 ) -> None:
-    chat_messages = from_openai_message_dicts(openi_message_dicts_with_function_calling)
-    assert chat_messages == chat_messages_with_function_calling
+    chat_messages = from_openai_messages(openi_message_dicts_with_function_calling)
+
+    # assert attributes match
+    for chat_message, chat_message_with_function_calling in zip(
+        chat_messages, chat_messages_with_function_calling
+    ):
+        for key in chat_message.additional_kwargs:
+            assert chat_message.additional_kwargs[
+                key
+            ] == chat_message_with_function_calling.additional_kwargs.get(key, None)
+        assert chat_message.content == chat_message_with_function_calling.content
+        assert chat_message.role == chat_message_with_function_calling.role
 
 
 def test_from_openai_message_dicts_function_calling_azure(
-    azure_openi_message_dicts_with_function_calling: List[dict],
+    azure_openi_message_dicts_with_function_calling: List[ChatCompletionMessage],
     azure_chat_messages_with_function_calling: List[ChatMessage],
 ) -> None:
-    chat_messages = from_openai_message_dicts(
+    chat_messages = from_openai_messages(
         azure_openi_message_dicts_with_function_calling
     )
     assert chat_messages == azure_chat_messages_with_function_calling
-
-
-def test_create_retry_decorator() -> None:
-    test_retry_decorator = create_retry_decorator(
-        max_retries=6,
-        random_exponential=False,
-        stop_after_delay_seconds=10,
-        min_seconds=2,
-        max_seconds=5,
-    )
-
-    @test_retry_decorator
-    def mock_function() -> str:
-        # Simulate OpenAI API call with potential errors
-        if mock_function.retry.statistics["attempt_number"] == 1:
-            raise openai.error.Timeout(message="Timeout error")
-        elif mock_function.retry.statistics["attempt_number"] == 2:
-            raise openai.error.APIError(message="API error")
-        elif mock_function.retry.statistics["attempt_number"] == 3:
-            raise openai.error.APIConnectionError(message="API connection error")
-        elif mock_function.retry.statistics["attempt_number"] == 4:
-            raise openai.error.ServiceUnavailableError(
-                message="Service Unavailable error"
-            )
-        elif mock_function.retry.statistics["attempt_number"] == 5:
-            raise openai.error.RateLimitError("Rate limit error")
-        else:
-            # Succeed on the final attempt
-            return "Success"
-
-    # Test that the decorator retries as expected
-    with pytest.raises(openai.error.RateLimitError, match="Rate limit error"):
-        mock_function()
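
The deleted test above is the tail of the old tenacity-based retry layer: `create_retry_decorator` goes away along with the `openai.error` exception namespace it matched on. In the v1 SDK the client retries retryable failures (connection errors, 429s, and 5xx responses) internally, and the exception classes live at the top of the package, so equivalent handling looks roughly like this (retry count, model, and message are illustrative only):

import openai
from openai import OpenAI

# max_retries replaces the hand-rolled tenacity decorator; the client backs
# off between attempts on retryable failures before raising.
client = OpenAI(max_retries=5)

try:
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
except openai.RateLimitError:
    # Raised once retries are exhausted; openai.error.RateLimitError no longer exists.
    ...
except openai.APIConnectionError:
    ...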