From d76d9ab5ffc846a6626d0d219534757173068628 Mon Sep 17 00:00:00 2001
From: Adnan Alkattan <Adnankattan9@gmail.com>
Date: Fri, 13 Sep 2024 22:49:57 +0300
Subject: [PATCH] Fix Pydantic models definition (#16008)

---
 .../llama_index/agent/lats/types.py | 4 ++--
 .../agent/llama-index-agent-lats/pyproject.toml | 2 +-
 .../llama_index/embeddings/alephalpha/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/embeddings/anyscale/base.py | 6 ++----
 .../llama-index-embeddings-anyscale/pyproject.toml | 2 +-
 .../llama_index/embeddings/databricks/base.py | 6 ++----
 .../pyproject.toml | 2 +-
 .../llama_index/embeddings/mixedbreadai/base.py | 2 +-
 .../pyproject.toml | 2 +-
 .../llama_index/embeddings/nvidia/base.py | 4 ++--
 .../llama-index-embeddings-nvidia/pyproject.toml | 2 +-
 .../llama_index/embeddings/ollama/base.py | 2 +-
 .../llama-index-embeddings-ollama/pyproject.toml | 2 +-
 .../llama_index/embeddings/openai/base.py | 6 ++----
 .../llama-index-embeddings-openai/pyproject.toml | 2 +-
 .../embeddings/sagemaker_endpoint/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/extractors/entity/base.py | 4 ++--
 .../llama-index-extractors-entity/pyproject.toml | 2 +-
 .../llama_index/llms/ai21/base.py | 8 ++++----
 .../llms/llama-index-llms-ai21/pyproject.toml | 2 +-
 .../llama_index/llms/alephalpha/base.py | 14 +++++++-------
 .../llama-index-llms-alephalpha/pyproject.toml | 2 +-
 .../llama_index/llms/anthropic/base.py | 8 ++++----
 .../llms/llama-index-llms-anthropic/pyproject.toml | 2 +-
 .../llama_index/llms/bedrock_converse/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/llms/dashscope/base.py | 6 +++---
 .../llms/llama-index-llms-dashscope/pyproject.toml | 2 +-
 .../llama_index/llms/deepinfra/base.py | 8 ++++----
 .../llms/llama-index-llms-deepinfra/pyproject.toml | 2 +-
 .../llama_index/llms/gemini/base.py | 4 ++--
 .../llms/llama-index-llms-gemini/pyproject.toml | 2 +-
 .../llama_index/llms/huggingface/base.py | 8 ++++----
 .../llama-index-llms-huggingface/pyproject.toml | 2 +-
 .../llama_index/llms/konko/base.py | 6 +++---
 .../llms/llama-index-llms-konko/pyproject.toml | 2 +-
 .../llama_index/llms/litellm/base.py | 4 ++--
 .../llms/llama-index-llms-litellm/pyproject.toml | 2 +-
 .../llama_index/llms/llama_cpp/base.py | 4 ++--
 .../llms/llama-index-llms-llama-cpp/pyproject.toml | 2 +-
 .../llama_index/llms/llamafile/base.py | 4 ++--
 .../llms/llama-index-llms-llamafile/pyproject.toml | 2 +-
 .../llama_index/llms/lmstudio/base.py | 6 +++---
 .../llms/llama-index-llms-lmstudio/pyproject.toml | 2 +-
 .../llama_index/llms/mistral_rs/base.py | 4 ++--
 .../llama-index-llms-mistral-rs/pyproject.toml | 2 +-
 .../llama_index/llms/mistralai/base.py | 8 ++++----
 .../llms/llama-index-llms-mistralai/pyproject.toml | 2 +-
 .../llama_index/llms/octoai/base.py | 6 +++---
 .../llms/llama-index-llms-octoai/pyproject.toml | 2 +-
 .../llama_index/llms/ollama/base.py | 4 ++--
 .../llms/llama-index-llms-ollama/pyproject.toml | 2 +-
 .../llama_index/llms/predibase/base.py | 4 ++--
 .../llms/llama-index-llms-predibase/pyproject.toml | 2 +-
 .../llama_index/llms/replicate/base.py | 4 ++--
 .../llms/llama-index-llms-replicate/pyproject.toml | 2 +-
 .../llama_index/llms/rungpt/base.py | 4 ++--
 .../llms/llama-index-llms-rungpt/pyproject.toml | 2 +-
 .../llama_index/llms/sagemaker_endpoint/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llms/text_generation_inference/base.py | 8 ++++----
 .../pyproject.toml | 2 +-
 .../llama_index/llms/upstage/base.py | 12 ++++++------
 .../llms/llama-index-llms-upstage/pyproject.toml | 2 +-
 .../llama_index/llms/xinference/base.py | 2 +-
 .../llama-index-llms-xinference/pyproject.toml | 2 +-
 .../llama_index/multi_modal_llms/anthropic/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/multi_modal_llms/dashscope/base.py | 2 +-
 .../pyproject.toml | 2 +-
 .../llama_index/multi_modal_llms/gemini/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/multi_modal_llms/ollama/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 .../llama_index/multi_modal_llms/openai/base.py | 4 ++--
 .../pyproject.toml | 2 +-
 78 files changed, 138 insertions(+), 144 deletions(-)

diff --git a/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py b/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
index 88f69b301..e863d0ef8 100644
--- a/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
+++ b/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
@@ -51,8 +51,8 @@ class Evaluation(BaseModel):

     score: int = Field(
         description="Score of the reflection indicating **correctness**. Integer from 1-10",
-        lte=10,
-        gte=0,
+        le=10,
+        ge=0,
     )
     is_done: bool = Field(
         False, description="Whether the answer is found yet (**completeness**)."
diff --git a/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml b/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
index 77f54b534..d35b6dd24 100644
--- a/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
+++ b/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
@@ -31,7 +31,7 @@ license = "MIT"
 name = "llama-index-agent-lats"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
index 40da8ec5e..78ab769cf 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
@@ -46,10 +46,10 @@ class AlephAlphaEmbedding(BaseEmbedding):
         default=DEFAULT_ALEPHALPHA_HOST, description="The hostname of the API base_url."
     )
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     normalize: Optional[bool] = Field(
         default=False, description="Return normalized embeddings."
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
index 6386b3332..e4e9bcd1c 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-alephalpha"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
index 77bb0016d..8ad18aa45 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
@@ -133,10 +133,8 @@ class AnyscaleEmbedding(BaseEmbedding):

     api_base: str = Field(description="The base URL for Anyscale API.")
     api_version: str = Field(description="The version for OpenAI API.")
-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
index fb47cc2b3..d5baa8694 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
@@ -28,7 +28,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-anyscale"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
index a85bcda5a..3dcc9eeee 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
@@ -59,10 +59,8 @@ class DatabricksEmbedding(BaseEmbedding):

     api_key: str = Field(description="The Databricks API key.")
     endpoint: str = Field(description="The Databricks API endpoint.")
-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
index b25139ae1..7e01ffe9b 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-embeddings-databricks"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
index 1f1e7d659..af4475d0a 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
@@ -61,7 +61,7 @@ class MixedbreadAIEmbedding(BaseEmbedding):
         min_length=1,
     )
     embed_batch_size: int = Field(
-        default=128, description="The batch size for embedding calls.", gt=0, lte=256
+        default=128, description="The batch size for embedding calls.", gt=0, le=256
     )

     _client: MixedbreadAI = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
index c4f0e12c4..590df5120 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 name = "llama-index-embeddings-mixedbreadai"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
index 6fe80b70b..503fe3ae8 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
@@ -55,13 +55,13 @@ class NVIDIAEmbedding(BaseEmbedding):
     )

     timeout: float = Field(
-        default=120, description="The timeout for the API request in seconds.", gte=0
+        default=120, description="The timeout for the API request in seconds.", ge=0
     )

     max_retries: int = Field(
         default=5,
         description="The maximum number of retries for the API request.",
-        gte=0,
+        ge=0,
     )

     _client: Any = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
index ffd04f391..03f6e73cb 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-nvidia"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
index 30898e014..c4112f2c7 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
@@ -18,7 +18,7 @@ class OllamaEmbedding(BaseEmbedding):
         default=DEFAULT_EMBED_BATCH_SIZE,
         description="The batch size for embedding calls.",
         gt=0,
-        lte=2048,
+        le=2048,
     )
     ollama_additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the Ollama API."
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
index df3d89f36..ead202066 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-ollama"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
index 9dcc315dd..de1314516 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
@@ -253,10 +253,8 @@ class OpenAIEmbedding(BaseEmbedding):
         default=DEFAULT_OPENAI_API_VERSION, description="The version for OpenAI API."
     )

-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
index aeff16ebb..935fb80ca 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-openai"
 readme = "README.md"
-version = "0.2.4"
+version = "0.2.5"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
index 0f5866790..8927f1e2f 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
@@ -43,12 +43,12 @@ class SageMakerEmbedding(BaseEmbedding):
     max_retries: Optional[int] = Field(
         default=3,
         description="The maximum number of API retries.",
-        gte=0,
+        ge=0,
     )
     timeout: Optional[float] = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     _client: Any = PrivateAttr()
     _verbose: bool = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
index bc0f43a7f..26c0d433f 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-sagemaker-endpoint"
 readme = "README.md"
-version = "0.2.2"
+version = "0.2.3"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py b/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
index a737d6644..b3299931a 100644
--- a/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
+++ b/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
@@ -43,8 +43,8 @@ class EntityExtractor(BaseExtractor):
     prediction_threshold: float = Field(
         default=0.5,
         description="The confidence threshold for accepting predictions.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     span_joiner: str = Field(
         default=" ", description="The separator between entity names."
diff --git a/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml b/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
index 2fa359696..3c00c23c3 100644
--- a/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
+++ b/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-extractors-entity"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py b/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
index ce80a3a38..2179df3f5 100644
--- a/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
@@ -75,16 +75,16 @@ class AI21(FunctionCallingLLM):
     temperature: float = Field(
         description="The temperature to use for sampling.",
         default=_DEFAULT_TEMPERATURE,
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     base_url: Optional[str] = Field(default=None, description="The base URL to use.")
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )

     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )

     additional_kwargs: Dict[str, Any] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
index c3e6179dd..cbac1b0b2 100644
--- a/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ai21"
 readme = "README.md"
-version = "0.3.3"
+version = "0.3.4"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py b/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
index bfe3e2c30..7a02f7ae6 100644
--- a/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
@@ -40,8 +40,8 @@ class AlephAlpha(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_ALEPHALPHA_MAX_TOKENS,
@@ -52,10 +52,10 @@ class AlephAlpha(LLM):
         default=DEFAULT_ALEPHALPHA_HOST, description="The hostname of the API base_url."
     )
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     hosting: Optional[str] = Field(default=None, description="The hosting to use.")
     nice: bool = Field(default=False, description="Whether to be nice to the API.")
@@ -74,13 +74,13 @@ class AlephAlpha(LLM):
     sequence_penalty: float = Field(
         default=0.7,
         description="The sequence penalty to use. Increasing the sequence penalty reduces the likelihood of reproducing token sequences that already appear in the prompt",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     sequence_penalty_min_length: int = Field(
         default=3,
         description="Minimal number of tokens to be considered as sequence. Must be greater or equal 2.",
-        gte=2,
+        ge=2,
     )
     stop_sequences: List[str] = Field(
         default=["\n\n"], description="The stop sequences to use."
diff --git a/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
index 37822621f..22133de70 100644
--- a/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
@@ -30,7 +30,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-alephalpha"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py b/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
index 0bcdc5744..dca36b45e 100644
--- a/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
@@ -85,8 +85,8 @@ class Anthropic(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_ANTHROPIC_MAX_TOKENS,
@@ -96,10 +96,10 @@ class Anthropic(FunctionCallingLLM):
     )
     base_url: Optional[str] = Field(default=None, description="The base URL to use.")
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the anthropic API."
diff --git a/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
index b21063f45..cf083e27b 100644
--- a/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-anthropic"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
index 23c9c03d7..d1d4b59e0 100644
--- a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
@@ -79,8 +79,8 @@ class BedrockConverse(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(description="The maximum number of tokens to generate.")
     profile_name: Optional[str] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
index 0aac7aa1e..08ad5bb0c 100644
--- a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-bedrock-converse"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
index 137f702a4..38e753096 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
@@ -131,8 +131,8 @@ class DashScope(CustomLLM):
     temperature: Optional[float] = Field(
         description="The temperature to use during generation.",
         default=DEFAULT_TEMPERATURE,
-        gte=0.0,
-        lte=2.0,
+        ge=0.0,
+        le=2.0,
     )
     top_k: Optional[int] = Field(
         description="Sample counter when generate.", default=None
@@ -141,7 +141,7 @@
         description="Sample probability threshold when generate."
     )
     seed: Optional[int] = Field(
-        description="Random seed when generate.", default=1234, gte=0
+        description="Random seed when generate.", default=1234, ge=0
     )
     repetition_penalty: Optional[float] = Field(
         description="Penalty for repeated words in generated text; \
diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
index 13cb6de9c..2aea283c6 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-dashscope"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py b/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
index e63565e69..abcfde793 100644
--- a/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
@@ -76,8 +76,8 @@ class DeepInfraLLM(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         default=DEFAULT_MAX_TOKENS,
@@ -86,10 +86,10 @@ class DeepInfraLLM(FunctionCallingLLM):
     )

     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )

     _api_key: Optional[str] = PrivateAttr()
diff --git a/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
index 1e3615cf4..d5089df77 100644
--- a/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-deepinfra"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
index d1e448af4..2c6e20f68 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
@@ -69,8 +69,8 @@ class Gemini(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
index b1df21e41..f2c8ed2ba 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-gemini"
 readme = "README.md"
-version = "0.3.4"
+version = "0.3.5"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py b/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
index 94503c0aa..59dc2b1e5 100644
--- a/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
@@ -708,8 +708,8 @@ class TextGenerationInference(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description=("The temperature to use for sampling."),
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
@@ -724,10 +724,10 @@ class TextGenerationInference(FunctionCallingLLM):
         ),
     )
     timeout: float = Field(
-        default=120, description=("The timeout to use in seconds."), gte=0
+        default=120, description=("The timeout to use in seconds."), ge=0
     )
     max_retries: int = Field(
-        default=5, description=("The maximum number of API retries."), gte=0
+        default=5, description=("The maximum number of API retries."), ge=0
     )
     headers: Optional[Dict[str, str]] = Field(
         default=None,
diff --git a/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
index 32995db78..4f23d5380 100644
--- a/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
@@ -28,7 +28,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-huggingface"
 readme = "README.md"
-version = "0.3.2"
+version = "0.3.3"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py b/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
index bfd5d1525..5a010ffe1 100644
--- a/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
@@ -82,8 +82,8 @@ class Konko(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         default=DEFAULT_NUM_OUTPUTS,
@@ -94,7 +94,7 @@
         default_factory=dict, description="Additional kwargs for the konko API."
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     konko_api_key: str = Field(default=None, description="The konko API key.")

diff --git a/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
index 4c81fb498..b0fca4e4a 100644
--- a/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-konko"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py b/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
index 631cad25d..cfb89b5d1 100644
--- a/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
@@ -79,8 +79,8 @@ class LiteLLM(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate.",
diff --git a/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
index fd8a1f685..3b2928eb8 100644
--- a/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-litellm"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py b/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
index 743f260a1..002c94fae 100644
--- a/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
@@ -103,8 +103,8 @@ class LlamaCPP(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_new_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
index 850c8083e..e2b8fc9ff 100644
--- a/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-llama-cpp"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py b/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
index e4715a97f..6fc44eca4 100644
--- a/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
@@ -57,8 +57,8 @@ class Llamafile(CustomLLM):
     temperature: float = Field(
         default=0.8,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     seed: int = Field(default=0, description="Random seed")

diff --git a/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
index 07a3c5641..b3943726c 100644
--- a/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-llamafile"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py b/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
index 578b761af..c07337879 100644
--- a/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
@@ -75,12 +75,12 @@ class LMStudio(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description=("The temperature to use for sampling."),
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )

     timeout: float = Field(
-        default=120, description=("The timeout to use in seconds."), gte=0
+        default=120, description=("The timeout to use in seconds."), ge=0
     )

     additional_kwargs: Dict[str, Any] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
index b2c35cf4e..250183f91 100644
--- a/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
@@ -27,7 +27,7 @@ license = "MIT"
 name = "llama-index-llms-lmstudio"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py b/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
index 78bc17a1b..d56bc8f4b 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
@@ -133,8 +133,8 @@ class MistralRS(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_new_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
index 5fd8b7f75..a36afca23 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
@@ -32,7 +32,7 @@ maintainers = ["jerryjliu"]
 name = "llama-index-llms-mistral-rs"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
index c2a123b11..59b24cf94 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
@@ -108,8 +108,8 @@ class MistralAI(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_MISTRALAI_MAX_TOKENS,
@@ -118,10 +118,10 @@ class MistralAI(FunctionCallingLLM):
     )

     timeout: float = Field(
-        default=120, description="The timeout to use in seconds.", gte=0
+        default=120, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=5, description="The maximum number of API retries.", gte=0
+        default=5, description="The maximum number of API retries.", ge=0
     )
     random_seed: Optional[int] = Field(
         default=None, description="The random seed to use for sampling."
diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
index 1296975f0..985addceb 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-mistralai"
 readme = "README.md"
-version = "0.2.3"
+version = "0.2.4"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py b/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
index 601d72da3..303ac1824 100644
--- a/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
@@ -49,15 +49,15 @@ class OctoAI(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate.",
         gt=0,
     )
     timeout: float = Field(
-        default=120, description="The timeout to use in seconds.", gte=0
+        default=120, description="The timeout to use in seconds.", ge=0
     )
     additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the OctoAI SDK."
diff --git a/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
index f34a827ad..5a0d0d70a 100644
--- a/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 name = "llama-index-llms-octoai"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
index 50d44a6ca..b9a6cee87 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
@@ -72,8 +72,8 @@ class Ollama(FunctionCallingLLM):
     temperature: float = Field(
         default=0.75,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
index 1e9e1a34c..43973bfa6 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ollama"
 readme = "README.md"
-version = "0.3.1"
+version = "0.3.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py b/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
index d6976ec9a..13c7dcf05 100644
--- a/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
@@ -84,8 +84,8 @@ class PredibaseLLM(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
diff --git a/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
index 5bb84b888..c5a919676 100644
--- a/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-predibase"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py b/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
index 3ecc49ed0..27dc84978 100644
--- a/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
@@ -49,8 +49,8 @@ class Replicate(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_REPLICATE_TEMP,
         description="The temperature to use for sampling.",
-        gte=0.01,
-        lte=1.0,
+        ge=0.01,
+        le=1.0,
     )
     image: str = Field(
         default="", description="The image file for multimodal model to use. (optional)"
diff --git a/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
index 23de01f95..712557913 100644
--- a/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-replicate"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py b/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
index 592038ac9..bc5ff57c8 100644
--- a/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
@@ -47,8 +47,8 @@ class RunGptLLM(LLM):
     temperature: float = Field(
         default=DEFAULT_RUNGPT_TEMP,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
index 7495951db..afe34ce23 100644
--- a/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-rungpt"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
index a419a2a6b..fef6cded1 100644
--- a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
@@ -110,12 +110,12 @@ class SageMakerLLM(LLM):
     max_retries: Optional[int] = Field(
         default=3,
         description="The maximum number of API retries.",
-        gte=0,
+        ge=0,
     )
     timeout: Optional[float] = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     _client: Any = PrivateAttr()
     _completion_to_prompt: Callable[[str, Optional[str]], str] = PrivateAttr()
diff --git a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
index 03b44a9a7..4cf38eea1 100644
--- a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-sagemaker-endpoint"
 readme = "README.md"
-version = "0.2.2"
+version = "0.2.3"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py
index 595b48f6c..88ff00c48 100644
--- a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py
@@ -57,8 +57,8 @@ class TextGenerationInference(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description=("The temperature to use for sampling."),
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
@@ -73,10 +73,10 @@ class TextGenerationInference(FunctionCallingLLM):
         ),
     )
     timeout: float = Field(
-        default=120, description=("The timeout to use in seconds."), gte=0
+        default=120, description=("The timeout to use in seconds."), ge=0
     )
     max_retries: int = Field(
-        default=5, description=("The maximum number of API retries."), gte=0
+        default=5, description=("The maximum number of API retries."), ge=0
     )
     headers: Optional[Dict[str, str]] = Field(
         default=None,
diff --git a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml
index 13182cf6f..6d060a1de 100644
--- a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-text-generation-inference"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py b/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py
index 96d96ea74..fa518eefe 100644
--- a/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py
@@ -51,8 +51,8 @@ class Upstage(OpenAI):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate."
@@ -63,17 +63,17 @@ class Upstage(OpenAI):
     top_logprobs: int = Field(
         description="The number of top token logprobs to return.",
         default=0,
-        gte=0,
-        lte=20,
+        ge=0,
+        le=20,
     )
     additional_kwargs: Dict[str, Any] = Field(
         description="Additional kwargs for the Upstage API.", default_factory=dict
     )
     max_retries: int = Field(
-        description="The maximum number of API retries.", default=3, gte=0
+        description="The maximum number of API retries.", default=3, ge=0
     )
     timeout: float = Field(
-        description="The timeout, in seconds, for API requests.", default=60.0, gte=0.0
+        description="The timeout, in seconds, for API requests.", default=60.0, ge=0.0
     )
     reuse_client: bool = Field(
         description=(
diff --git a/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml
index 1ffd9fe07..715875782 100644
--- a/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-llms-upstage"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py b/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py
index ea7b066c3..5f13ec241 100644
--- a/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py
@@ -56,7 +56,7 @@ class Xinference(CustomLLM):
     model_uid: str = Field(description="The Xinference model to use.")
     endpoint: str = Field(description="The Xinference endpoint URL to use.")
     temperature: float = Field(
-        description="The temperature to use for sampling.", gte=0.0, lte=1.0
+        description="The temperature to use for sampling.", ge=0.0, le=1.0
     )
     max_tokens: int = Field(
         description="The maximum new tokens to generate as answer.", gt=0
diff --git a/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml
index 5cf001c33..27e167988 100644
--- a/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-xinference"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py
index 78027ad59..d11682cab 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py
@@ -46,12 +46,12 @@ class AnthropicMultiModal(MultiModalLLM):
     max_retries: int = Field(
         default=3,
         description="Maximum number of retries.",
-        gte=0,
+        ge=0,
     )
     timeout: float = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     api_key: str = Field(
         default=None, description="The Anthropic API key.", exclude=True
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml
index 3d8f3c95b..7df3d5ce2 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-anthropic"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
index dc19ae2ad..8d7bb3b3e 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
@@ -85,7 +85,7 @@ class DashScopeMultiModal(MultiModalLLM):
         description="Sample probability threshold when generate."
     )
     seed: Optional[int] = Field(
-        description="Random seed when generate.", default=1234, gte=0
+        description="Random seed when generate.", default=1234, ge=0
     )
     api_key: Optional[str] = Field(
         default=None, description="The DashScope API key.", exclude=True
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
index 5cd06596e..03eed4dec 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-dashscope"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
index c4dfab02f..e3354983a 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
@@ -54,8 +54,8 @@ class GeminiMultiModal(MultiModalLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
index 3dbb459ab..a20c63e26 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-gemini"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
index ca34aeae8..a35476c21 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@@ -58,8 +58,8 @@ class OllamaMultiModal(MultiModalLLM):
     temperature: float = Field(
         default=0.75,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
index 93a5f36bd..2b32474d5 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-ollama"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
index 58681dbd2..d71711460 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
@@ -61,12 +61,12 @@ class OpenAIMultiModal(MultiModalLLM):
     max_retries: int = Field(
         default=3,
         description="Maximum number of retries.",
-        gte=0,
+        ge=0,
     )
     timeout: float = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     api_key: str = Field(default=None, description="The OpenAI API key.", exclude=True)
     api_base: str = Field(default=None, description="The base URL for OpenAI API.")
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
index f57977bfc..b3550d597 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-openai"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"

 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
--
GitLab
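
Why the rename matters: `gt`/`ge`/`lt`/`le` are the numeric-constraint keywords that Pydantic's `Field()` actually recognizes, while the `gte`/`lte` spellings used previously were accepted as extra keyword arguments and never enforced, so out-of-range values passed validation silently (Pydantic v2 also warns that extra `Field()` kwargs are deprecated). Below is a minimal sketch of the corrected behavior, using a hypothetical stand-in model rather than any class from this diff:

```python
from pydantic import BaseModel, Field, ValidationError

class Score(BaseModel):
    # ge/le are the bound keywords Field() understands; the old
    # gte/lte spellings were ignored as extras, so out-of-range
    # values used to pass validation silently.
    value: int = Field(default=5, ge=0, le=10)

Score(value=7)        # accepted: within [0, 10]
try:
    Score(value=42)   # rejected: violates le=10
except ValidationError as err:
    print(err)
```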