diff --git a/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py b/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
index 88f69b30179ba461adc76e20fa3231964386dc61..e863d0ef8ef2a6598c113c7818d745585da06e31 100644
--- a/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
+++ b/llama-index-integrations/agent/llama-index-agent-lats/llama_index/agent/lats/types.py
@@ -51,8 +51,8 @@ class Evaluation(BaseModel):
     score: int = Field(
         description="Score of the reflection indicating **correctness**. Integer from 1-10",
-        lte=10,
-        gte=0,
+        le=10,
+        ge=0,
     )
     is_done: bool = Field(
         False, description="Whether the answer is found yet (**completeness**)."
diff --git a/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml b/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
index 77f54b5344d1f7d8d473c988978b4debd7b486b6..d35b6dd24e24397cd803e38ae9c94cbda9cb2481 100644
--- a/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
+++ b/llama-index-integrations/agent/llama-index-agent-lats/pyproject.toml
@@ -31,7 +31,7 @@ license = "MIT"
 name = "llama-index-agent-lats"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
index 40da8ec5e5d98f0de397318c0f04b30878f3dd7a..78ab769cfe1b00f2466cfb11653d109a2545ea38 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/llama_index/embeddings/alephalpha/base.py
@@ -46,10 +46,10 @@ class AlephAlphaEmbedding(BaseEmbedding):
         default=DEFAULT_ALEPHALPHA_HOST, description="The hostname of the API base_url."
     )
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     normalize: Optional[bool] = Field(
         default=False, description="Return normalized embeddings."
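[Reviewer note, not part of the patch] This rename is the substance of the whole PR: `gte`/`lte` were never valid comparison-constraint keywords for Pydantic's `Field`, so the intended bounds were silently dropped, while `ge`/`le` are the spellings Pydantic actually enforces. A minimal sketch of the corrected behavior (assuming Pydantic v2; the `Evaluation` model below is an illustrative stand-in, not the repo's class):

    from pydantic import BaseModel, Field, ValidationError

    class Evaluation(BaseModel):
        # ge/le are the constraint kwargs Pydantic recognizes and enforces.
        score: int = Field(description="Integer from 0-10", ge=0, le=10)

    Evaluation(score=5)  # accepted

    try:
        Evaluation(score=11)
    except ValidationError as err:
        print(err)  # "Input should be less than or equal to 10"

With the old `lte=10`/`gte=0` spellings, the `score=11` call above would have validated successfully.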
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
index 6386b33322619f74b023306c95579a218c6e7012..e4e9bcd1ce784bedaa4d1f91823b063e7fc9acde 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-alephalpha/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-alephalpha"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
index 77bb0016dea9224bb31e8366b8919608114cddc4..8ad18aa45f6851b71d09d96624ee2711344e6ce1 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/llama_index/embeddings/anyscale/base.py
@@ -133,10 +133,8 @@ class AnyscaleEmbedding(BaseEmbedding):
     api_base: str = Field(description="The base URL for Anyscale API.")
     api_version: str = Field(description="The version for OpenAI API.")
 
-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
index fb47cc2b30120f4b440fec49ddae60ff6f74a651..d5baa8694c208ebedc6ecff921a613fa61171265 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-anyscale/pyproject.toml
@@ -28,7 +28,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-anyscale"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
index a85bcda5aca0a2da8ac5ec104a9c0c82e27842ea..3dcc9eeee31cffb4abd0f3f0ea3161298fe16b72 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py
@@ -59,10 +59,8 @@ class DatabricksEmbedding(BaseEmbedding):
     api_key: str = Field(description="The Databricks API key.")
     endpoint: str = Field(description="The Databricks API endpoint.")
 
-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
     )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
index b25139ae1f81bb8abd26e632c43d5a8b7f86c1e9..7e01ffe9bb82c2a54a7f84c410e0bfa83c587180 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-databricks/pyproject.toml
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-embeddings-databricks"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
index 1f1e7d659f23f7803b9ba0ad06dc6f978ab1f71d..af4475d0a894336ddfbe113c12ee6204fe7868f4 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/llama_index/embeddings/mixedbreadai/base.py
@@ -61,7 +61,7 @@ class MixedbreadAIEmbedding(BaseEmbedding):
         min_length=1,
     )
     embed_batch_size: int = Field(
-        default=128, description="The batch size for embedding calls.", gt=0, lte=256
+        default=128, description="The batch size for embedding calls.", gt=0, le=256
     )
 
     _client: MixedbreadAI = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
index c4f0e12c4e29eb25ee78aa51c1f1cf2e4134c271..590df51206d909b834794fb77e79d52b152ce0ec 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-mixedbreadai/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 name = "llama-index-embeddings-mixedbreadai"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
index 6fe80b70b89b73150a5baf02463b6cc0204305b8..503fe3ae8731bce57183136c70fd51bd1af5044a 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
@@ -55,13 +55,13 @@ class NVIDIAEmbedding(BaseEmbedding):
     )
 
     timeout: float = Field(
-        default=120, description="The timeout for the API request in seconds.", gte=0
+        default=120, description="The timeout for the API request in seconds.", ge=0
     )
     max_retries: int = Field(
         default=5,
         description="The maximum number of retries for the API request.",
-        gte=0,
+        ge=0,
     )
 
     _client: Any = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
index ffd04f39180dadc1aa8bf04bdae6888320de4ee2..03f6e73cb330d7aa6620961d037c1708e69a9d31 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-nvidia"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
index 30898e01424128cf9e5152250f71b1b1fc860db9..c4112f2c7d27a3987d2e95bd52ba56178d0a786e 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
@@ -18,7 +18,7 @@ class OllamaEmbedding(BaseEmbedding):
         default=DEFAULT_EMBED_BATCH_SIZE,
         description="The batch size for embedding calls.",
         gt=0,
-        lte=2048,
+        le=2048,
     )
     ollama_additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the Ollama API."
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
index df3d89f366a8c04954c5f87abdba2ddcbe324b2b..ead202066aba7464b79d33132e5b77704173f7ea 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-ollama"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
index 9dcc315dda306391ad83da2b1f478d102996b4bf..de1314516389d2572d2bbad237ab175fc6ed4436 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-openai/llama_index/embeddings/openai/base.py
@@ -253,10 +253,8 @@ class OpenAIEmbedding(BaseEmbedding):
         default=DEFAULT_OPENAI_API_VERSION, description="The version for OpenAI API."
     )
 
-    max_retries: int = Field(
-        default=10, description="Maximum number of retries.", gte=0
-    )
-    timeout: float = Field(default=60.0, description="Timeout for each request.", gte=0)
+    max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
+    timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
     default_headers: Optional[Dict[str, str]] = Field(
         default=None, description="The default headers for API requests."
    )
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
index aeff16ebb0670088684b3f628fcff596c4735777..935fb80ca9a87042dc171819f9044c51c3c64680 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-openai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-openai"
 readme = "README.md"
-version = "0.2.4"
+version = "0.2.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
index 0f58667906dba118ceb9dcc54813803a44ca1bd0..8927f1e2f469e2ed721771d56e468110ee41d95a 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/llama_index/embeddings/sagemaker_endpoint/base.py
@@ -43,12 +43,12 @@ class SageMakerEmbedding(BaseEmbedding):
     max_retries: Optional[int] = Field(
         default=3,
         description="The maximum number of API retries.",
-        gte=0,
+        ge=0,
     )
     timeout: Optional[float] = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     _client: Any = PrivateAttr()
     _verbose: bool = PrivateAttr()
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
index bc0f43a7f55f65e192ab69e2a3d4817a3dd840ba..26c0d433ffa049c2835eda1219647a66e78b033a 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-sagemaker-endpoint/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-sagemaker-endpoint"
 readme = "README.md"
-version = "0.2.2"
+version = "0.2.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py b/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
index a737d66449d2e3bc38329f1ec6ff4d89bd9cc987..b3299931a008a0e13fa38fd8760d16a760a7748b 100644
--- a/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
+++ b/llama-index-integrations/extractors/llama-index-extractors-entity/llama_index/extractors/entity/base.py
@@ -43,8 +43,8 @@ class EntityExtractor(BaseExtractor):
     prediction_threshold: float = Field(
         default=0.5,
         description="The confidence threshold for accepting predictions.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     span_joiner: str = Field(
         default=" ", description="The separator between entity names."
diff --git a/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml b/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
index 2fa359696c3312acc58d9a26284f6d7a878aa1c5..3c00c23c322c55905bdc9a1d110b2d79ffdcbf89 100644
--- a/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
+++ b/llama-index-integrations/extractors/llama-index-extractors-entity/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-extractors-entity"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py b/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
index ce80a3a38bb189b382ace6a67991c4b6646609c3..2179df3f56a6d8042d4cec6b2c3abcd89caf985c 100644
--- a/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py
@@ -75,16 +75,16 @@ class AI21(FunctionCallingLLM):
     temperature: float = Field(
         description="The temperature to use for sampling.",
         default=_DEFAULT_TEMPERATURE,
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     base_url: Optional[str] = Field(default=None, description="The base URL to use.")
 
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
 
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     additional_kwargs: Dict[str, Any] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
index c3e6179ddb9728014ee1a0a91c936700a83c39c7..cbac1b0b22bbb0fa33815e132a9b17ca53884256 100644
--- a/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-ai21/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ai21"
 readme = "README.md"
-version = "0.3.3"
+version = "0.3.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py b/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
index bfe3e2c3067025a0e4a05ac19740c02e61e097a8..7a02f7ae65c1286ef1bdc530dc86defb45f37c21 100644
--- a/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-alephalpha/llama_index/llms/alephalpha/base.py
@@ -40,8 +40,8 @@ class AlephAlpha(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_ALEPHALPHA_MAX_TOKENS,
@@ -52,10 +52,10 @@
         default=DEFAULT_ALEPHALPHA_HOST, description="The hostname of the API base_url."
     )
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     hosting: Optional[str] = Field(default=None, description="The hosting to use.")
     nice: bool = Field(default=False, description="Whether to be nice to the API.")
@@ -74,13 +74,13 @@ class AlephAlpha(LLM):
     sequence_penalty: float = Field(
         default=0.7,
         description="The sequence penalty to use. Increasing the sequence penalty reduces the likelihood of reproducing token sequences that already appear in the prompt",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     sequence_penalty_min_length: int = Field(
         default=3,
         description="Minimal number of tokens to be considered as sequence. Must be greater or equal 2.",
-        gte=2,
+        ge=2,
     )
     stop_sequences: List[str] = Field(
         default=["\n\n"], description="The stop sequences to use."
diff --git a/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
index 37822621fcc453a7495c7d36b541183f947f927a..22133de702bd109fb1335ebb6ad007fb22542256 100644
--- a/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-alephalpha/pyproject.toml
@@ -30,7 +30,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-alephalpha"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py b/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
index 0bcdc5744f5f70c373b76978ded83aa4dc5babd3..dca36b45ee8a8d5ebacd75c278b15e3608821c09 100644
--- a/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-anthropic/llama_index/llms/anthropic/base.py
@@ -85,8 +85,8 @@ class Anthropic(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_ANTHROPIC_MAX_TOKENS,
@@ -96,10 +96,10 @@
     base_url: Optional[str] = Field(default=None, description="The base URL to use.")
 
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
     additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the anthropic API."
diff --git a/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
index b21063f45399313ed717e3e21e966b87ad814c62..cf083e27b1c87efa97627ab61fc1a108f8966899 100644
--- a/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-anthropic/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-anthropic"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
index 23c9c03d772af61f9a86890e9d8542b009375e11..d1d4b59e0e6130f5f6f6d3743b3e5a3ac25c230a 100644
--- a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py
@@ -79,8 +79,8 @@ class BedrockConverse(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(description="The maximum number of tokens to generate.")
     profile_name: Optional[str] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
index 0aac7aa1e43120a26b1115f5716f28f150f80944..08ad5bb0c255a5b4cbba55c6d10da96a6ccfb4b2 100644
--- a/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-bedrock-converse/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-bedrock-converse"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
index 137f702a46c575239f7f8ce250c01c868d6b04dd..38e753096c422706302639f1447e4e94723f255a 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/llama_index/llms/dashscope/base.py
@@ -131,8 +131,8 @@ class DashScope(CustomLLM):
     temperature: Optional[float] = Field(
         description="The temperature to use during generation.",
         default=DEFAULT_TEMPERATURE,
-        gte=0.0,
-        lte=2.0,
+        ge=0.0,
+        le=2.0,
     )
     top_k: Optional[int] = Field(
         description="Sample counter when generate.", default=None
@@ -141,7 +141,7 @@
         description="Sample probability threshold when generate."
     )
     seed: Optional[int] = Field(
-        description="Random seed when generate.", default=1234, gte=0
+        description="Random seed when generate.", default=1234, ge=0
     )
     repetition_penalty: Optional[float] = Field(
         description="Penalty for repeated words in generated text; \
diff --git a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
index 13cb6de9c4de7e9640ccdcd01f95c6bb65f26dd9..2aea283c6583e78b13c4a40bd968bb8f7354b6c4 100644
--- a/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-dashscope/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-dashscope"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py b/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
index e63565e69f86d63830b2b5a12882d21ad68df86a..abcfde7930f929b1182441c8ffd334697e9d8ceb 100644
--- a/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/base.py
@@ -76,8 +76,8 @@ class DeepInfraLLM(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         default=DEFAULT_MAX_TOKENS,
@@ -86,10 +86,10 @@
     )
 
     timeout: Optional[float] = Field(
-        default=None, description="The timeout to use in seconds.", gte=0
+        default=None, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
 
     _api_key: Optional[str] = PrivateAttr()
diff --git a/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
index 1e3615cf450be36d64893ae056e84cd2b50afe20..d5089df77918fa36bb1b26e46aac1e7b6928ce00 100644
--- a/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-deepinfra/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-deepinfra"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
index d1e448af4a2459fa3ace8d39797b24aabd460cc4..2c6e20f68f7b4f039b1ba038358e39be99daea3b 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/base.py
@@ -69,8 +69,8 @@ class Gemini(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
index b1df21e413979251bba4f65688fb0de2432d669c..f2c8ed2bae3d6e969a03e51e210bfde94a059a5c 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-gemini"
 readme = "README.md"
-version = "0.3.4"
+version = "0.3.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py b/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
index 94503c0aa4f5a82482b8f61f6c50ba362170e939..59dc2b1e512b6bc848ac1e5f4db9e2107821efa0 100644
--- a/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-huggingface/llama_index/llms/huggingface/base.py
@@ -708,8 +708,8 @@ class TextGenerationInference(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description=("The temperature to use for sampling."),
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
@@ -724,10 +724,10 @@
         ),
     )
     timeout: float = Field(
-        default=120, description=("The timeout to use in seconds."), gte=0
+        default=120, description=("The timeout to use in seconds."), ge=0
     )
     max_retries: int = Field(
-        default=5, description=("The maximum number of API retries."), gte=0
+        default=5, description=("The maximum number of API retries."), ge=0
     )
     headers: Optional[Dict[str, str]] = Field(
         default=None,
diff --git a/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
index 32995db7872b19bd158fa8b88da9f7365afb3676..4f23d5380e102b91e57d28230466c3e01651f4de 100644
--- a/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-huggingface/pyproject.toml
@@ -28,7 +28,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-huggingface"
 readme = "README.md"
-version = "0.3.2"
+version = "0.3.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py b/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
index bfd5d1525cc1677ea004cd9ad81d96bb890c7ca2..5a010ffe13deee43f9cfeec35b1a9759a3ad67d4 100644
--- a/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py
@@ -82,8 +82,8 @@ class Konko(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         default=DEFAULT_NUM_OUTPUTS,
@@ -94,7 +94,7 @@
         default_factory=dict, description="Additional kwargs for the konko API."
     )
     max_retries: int = Field(
-        default=10, description="The maximum number of API retries.", gte=0
+        default=10, description="The maximum number of API retries.", ge=0
     )
 
     konko_api_key: str = Field(default=None, description="The konko API key.")
diff --git a/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
index 4c81fb49858613b20a35c71af731a632c95fa667..b0fca4e4af59954c17bbc742463aad1b223ee591 100644
--- a/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-konko/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-konko"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py b/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
index 631cad25dcf2a0d99343d7bd6fc1bfcc421c826b..cfb89b5d19068b82e48eba7aa97b6b099d5d7094 100644
--- a/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
@@ -79,8 +79,8 @@ class LiteLLM(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate.",
diff --git a/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
index fd8a1f685f01af91369228f062c910ba8fb0ba34..3b2928eb863505549440350e54870f05e76155cd 100644
--- a/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-litellm/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-litellm"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py b/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
index 743f260a19fb6322d46880c3cb96abc1e42d71c5..002c94fae9b030bf1acea9e07275aee9bd00c6d4 100644
--- a/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-llama-cpp/llama_index/llms/llama_cpp/base.py
@@ -103,8 +103,8 @@ class LlamaCPP(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_new_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
index 850c8083e148e4baa910e44aa87b13c8f62b3815..e2b8fc9ff326ec9e957f0b1f342401bac219bc83 100644
--- a/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-llama-cpp/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-llama-cpp"
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py b/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
index e4715a97f91d3111a13e6bb22ce55ddf56950f93..6fc44eca4b08452caf626f142661e966753a55f3 100644
--- a/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-llamafile/llama_index/llms/llamafile/base.py
@@ -57,8 +57,8 @@ class Llamafile(CustomLLM):
     temperature: float = Field(
         default=0.8,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
 
     seed: int = Field(default=0, description="Random seed")
diff --git a/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
index 07a3c5641d6b239cf67c3866fdf8d7226cb938e0..b3943726cd4cd8265ebcf3c0d3b236c0071b7ccf 100644
--- a/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-llamafile/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-llamafile"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py b/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
index 578b761aff43c96e376767bfb5aba9ce046c0bf1..c073378794069b9aff046cbc50e63a8720808cc6 100644
--- a/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-lmstudio/llama_index/llms/lmstudio/base.py
@@ -75,12 +75,12 @@ class LMStudio(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description=("The temperature to use for sampling."),
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
 
     timeout: float = Field(
-        default=120, description=("The timeout to use in seconds."), gte=0
+        default=120, description=("The timeout to use in seconds."), ge=0
     )
 
     additional_kwargs: Dict[str, Any] = Field(
diff --git a/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
index b2c35cf4e5b43d243cfd6475c584fd48858e3f21..250183f91e306fc6a792e51197c2fdb420fe990e 100644
--- a/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-lmstudio/pyproject.toml
@@ -27,7 +27,7 @@ license = "MIT"
 name = "llama-index-llms-lmstudio"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py b/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
index 78bc17a1b1ab7317b4c97a286d6108706e23215a..d56bc8f4ba2cdb35e2dfd9026bbb41918268ac6e 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-mistral-rs/llama_index/llms/mistral_rs/base.py
@@ -133,8 +133,8 @@ class MistralRS(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_new_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
index 5fd8b7f755e5f00918e707f9c4d67d2bee79e1e4..a36afca23d7974050d265df331961093e30b94c3 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mistral-rs/pyproject.toml
@@ -32,7 +32,7 @@ maintainers = ["jerryjliu"]
 name = "llama-index-llms-mistral-rs"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
index c2a123b11dc1c58d7beaca86b3e6c49acdc4e2ba..59b24cf945c6261bc93de35f3324993fb1969238 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
@@ -108,8 +108,8 @@ class MistralAI(FunctionCallingLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_MISTRALAI_MAX_TOKENS,
@@ -118,10 +118,10 @@
     )
 
     timeout: float = Field(
-        default=120, description="The timeout to use in seconds.", gte=0
+        default=120, description="The timeout to use in seconds.", ge=0
     )
     max_retries: int = Field(
-        default=5, description="The maximum number of API retries.", gte=0
+        default=5, description="The maximum number of API retries.", ge=0
     )
     random_seed: Optional[int] = Field(
         default=None, description="The random seed to use for sampling."
diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
index 1296975f0f0948b2f7a940d4ecc94d93b3f82357..985addcebdbfb0cee5b6568f2c14952d7b7fb09b 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-mistralai"
 readme = "README.md"
-version = "0.2.3"
+version = "0.2.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py b/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
index 601d72da32e4f8c25cd1c6d4ddcc8031e600e8c1..303ac18242d86675bec6773aea1e3e8f1fae4e6d 100644
--- a/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-octoai/llama_index/llms/octoai/base.py
@@ -49,15 +49,15 @@ class OctoAI(LLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: Optional[int] = Field(
         description="The maximum number of tokens to generate.",
         gt=0,
     )
     timeout: float = Field(
-        default=120, description="The timeout to use in seconds.", gte=0
+        default=120, description="The timeout to use in seconds.", ge=0
     )
     additional_kwargs: Dict[str, Any] = Field(
         default_factory=dict, description="Additional kwargs for the OctoAI SDK."
diff --git a/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
index f34a827ad29e286128d4aa49b36c2fba23be2afe..5a0d0d70a5450450f06a9571b61d68b4304a4737 100644
--- a/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-octoai/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 name = "llama-index-llms-octoai"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
index 50d44a6ca055410707e89ea6d1b06685ee42f669..b9a6cee87cd93f6e8a5314f00a5cfae5ed2ea2d6 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
@@ -72,8 +72,8 @@ class Ollama(FunctionCallingLLM):
     temperature: float = Field(
         default=0.75,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
index 1e9e1a34c8d1725c78831952e9e178f39cafcb17..43973bfa61d9d4a1df3bcbccedc967be6c8652f0 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ollama"
 readme = "README.md"
-version = "0.3.1"
+version = "0.3.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py b/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
index d6976ec9aa401a2984a82134e04c285f3c792319..13c7dcf0505fc56e300881af9e665e40a8012de6 100644
--- a/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-predibase/llama_index/llms/predibase/base.py
@@ -84,8 +84,8 @@ class PredibaseLLM(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
diff --git a/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
index 5bb84b888f283efcb79a59700d931d75ef9f8f77..c5a919676fa0e08bf5b2586470c3aad521a85fcb 100644
--- a/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-predibase/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-predibase"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py b/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
index 3ecc49ed0693b14248e21f8c513bf3e8a3bfc270..27dc849786d0e4d7e793c0978a43378b80a4d4f6 100644
--- a/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-replicate/llama_index/llms/replicate/base.py
@@ -49,8 +49,8 @@ class Replicate(CustomLLM):
     temperature: float = Field(
         default=DEFAULT_REPLICATE_TEMP,
         description="The temperature to use for sampling.",
-        gte=0.01,
-        lte=1.0,
+        ge=0.01,
+        le=1.0,
     )
     image: str = Field(
         default="", description="The image file for multimodal model to use. (optional)"
diff --git a/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
index 23de01f95380e0c7bf48c1735865154cf5210e92..7125579131969e348c5d8cdb880489368cc32134 100644
--- a/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-replicate/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-replicate"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py b/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
index 592038ac911f6a3c6f87a883cb7cbe05f6de265d..bc5ff57c837e370ac9590077bb09b3de343eb9df 100644
--- a/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-rungpt/llama_index/llms/rungpt/base.py
@@ -47,8 +47,8 @@ class RunGptLLM(LLM):
     temperature: float = Field(
         default=DEFAULT_RUNGPT_TEMP,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
index 7495951dbf55b28b785dc0a3bf34125aaad92c87..afe34ce23cb2298da1150c9b1db5f5d0203a533e 100644
--- a/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-rungpt/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-rungpt"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
index a419a2a6bf46aa00dd63a966858327ef3a83684e..fef6cded1349ff55192fd09165fc9e6e52fcb28f 100644
--- a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py
@@ -110,12 +110,12 @@ class SageMakerLLM(LLM):
     max_retries: Optional[int] = Field(
         default=3,
         description="The maximum number of API retries.",
-        gte=0,
+        ge=0,
     )
     timeout: Optional[float] = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     _client: Any = PrivateAttr()
     _completion_to_prompt: Callable[[str, Optional[str]], str] = PrivateAttr()
diff --git a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
index 03b44a9a7a4f79ab763e7f1486f16a31e6d1a2be..4cf38eea1ee0f9e2a8d3836fbd0b24ce5b6621d0 100644
--- a/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-sagemaker-endpoint"
 readme = "README.md"
-version = "0.2.2"
+version = "0.2.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py index 595b48f6cca2537e27f93e4e1f7210100e65ab53..88ff00c486427181ed1e0c8098e523908b44d2bc 100644 --- a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py +++ b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/llama_index/llms/text_generation_inference/base.py @@ -57,8 +57,8 @@ class TextGenerationInference(FunctionCallingLLM): temperature: float = Field( default=DEFAULT_TEMPERATURE, description=("The temperature to use for sampling."), - gte=0.0, - lte=1.0, + ge=0.0, + le=1.0, ) max_tokens: int = Field( default=DEFAULT_NUM_OUTPUTS, @@ -73,10 +73,10 @@ class TextGenerationInference(FunctionCallingLLM): ), ) timeout: float = Field( - default=120, description=("The timeout to use in seconds."), gte=0 + default=120, description=("The timeout to use in seconds."), ge=0 ) max_retries: int = Field( - default=5, description=("The maximum number of API retries."), gte=0 + default=5, description=("The maximum number of API retries."), ge=0 ) headers: Optional[Dict[str, str]] = Field( default=None, diff --git a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml index 13182cf6fe9c1373aa55afb5ff5b87d6da6d001f..6d060a1de402e3522c294c2e427a93414240fb95 100644 --- a/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-text-generation-inference/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-llms-text-generation-inference" readme = "README.md" -version = "0.2.1" +version = "0.2.2" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" diff --git a/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py b/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py index 96d96ea74d96e2b88ac14c878244e67c7be82bd5..fa518eefeb6d6b5ee9f665a8ad1aba393cd77ffa 100644 --- a/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py +++ b/llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py @@ -51,8 +51,8 @@ class Upstage(OpenAI): temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use during generation.", - gte=0.0, - lte=1.0, + ge=0.0, + le=1.0, ) max_tokens: Optional[int] = Field( description="The maximum number of tokens to generate." 
@@ -63,17 +63,17 @@ class Upstage(OpenAI): top_logprobs: int = Field( description="The number of top token logprobs to return.", default=0, - gte=0, - lte=20, + ge=0, + le=20, ) additional_kwargs: Dict[str, Any] = Field( description="Additional kwargs for the Upstage API.", default_factory=dict ) max_retries: int = Field( - description="The maximum number of API retries.", default=3, gte=0 + description="The maximum number of API retries.", default=3, ge=0 ) timeout: float = Field( - description="The timeout, in seconds, for API requests.", default=60.0, gte=0.0 + description="The timeout, in seconds, for API requests.", default=60.0, ge=0.0 ) reuse_client: bool = Field( description=( diff --git a/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml index 1ffd9fe07103e8039cfbb6a4d8a8aa213a161f0e..7158757828426c64d0f2a1a2f4ea6f7eed2434be 100644 --- a/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-upstage/pyproject.toml @@ -30,7 +30,7 @@ license = "MIT" name = "llama-index-llms-upstage" packages = [{include = "llama_index/"}] readme = "README.md" -version = "0.2.0" +version = "0.2.1" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" diff --git a/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py b/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py index ea7b066c3f4577d8bf3b14503689b785a45a2aa3..5f13ec2414c34f583c0301e9c7c5dddcdb320257 100644 --- a/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py +++ b/llama-index-integrations/llms/llama-index-llms-xinference/llama_index/llms/xinference/base.py @@ -56,7 +56,7 @@ class Xinference(CustomLLM): model_uid: str = Field(description="The Xinference model to use.") endpoint: str = Field(description="The Xinference endpoint URL to use.") temperature: float = Field( - description="The temperature to use for sampling.", gte=0.0, lte=1.0 + description="The temperature to use for sampling.", ge=0.0, le=1.0 ) max_tokens: int = Field( description="The maximum new tokens to generate as answer.", gt=0 diff --git a/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml index 5cf001c33d1eee701580b599af5899faad0b93b6..27e167988c26993e8f69bfa789c11604c8ba844a 100644 --- a/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-xinference/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-llms-xinference" readme = "README.md" -version = "0.2.1" +version = "0.2.2" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py index 78027ad599323c69c209982df90c32f1456da7ab..d11682cab9b1f55542b0449c17432884c2f28b78 100644 --- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py +++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/llama_index/multi_modal_llms/anthropic/base.py @@ -46,12 +46,12 @@ class 
AnthropicMultiModal(MultiModalLLM): max_retries: int = Field( default=3, description="Maximum number of retries.", - gte=0, + ge=0, ) timeout: float = Field( default=60.0, description="The timeout, in seconds, for API requests.", - gte=0, + ge=0, ) api_key: str = Field( default=None, description="The Anthropic API key.", exclude=True diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml index 3d8f3c95b9ec367a0aae4de53974818772692c3e..7df3d5ce2d40d1ca476c2529b2ad600b4dfe3e1b 100644 --- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml +++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-anthropic/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-multi-modal-llms-anthropic" readme = "README.md" -version = "0.2.1" +version = "0.2.2" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py index dc19ae2ad096e964172e1d8e994bd88244e95f94..8d7bb3b3e3748208fb4bd87f95901166595c0869 100644 --- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py +++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py @@ -85,7 +85,7 @@ class DashScopeMultiModal(MultiModalLLM): description="Sample probability threshold when generate." 
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
index dc19ae2ad096e964172e1d8e994bd88244e95f94..8d7bb3b3e3748208fb4bd87f95901166595c0869 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/llama_index/multi_modal_llms/dashscope/base.py
@@ -85,7 +85,7 @@ class DashScopeMultiModal(MultiModalLLM):
         description="Sample probability threshold when generate."
     )
     seed: Optional[int] = Field(
-        description="Random seed when generate.", default=1234, gte=0
+        description="Random seed when generate.", default=1234, ge=0
     )
     api_key: Optional[str] = Field(
         default=None, description="The DashScope API key.", exclude=True
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
index 5cd06596e9601f2aecc0117c32ba90d28859a031..03eed4decfdeeafa8eca0d21b9542c999e81c74a 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-dashscope/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-dashscope"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
index c4dfab02f5248f7608ae3fad6c80447a8e831271..e3354983a1b363c7b12088492a69974a787d20da 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/llama_index/multi_modal_llms/gemini/base.py
@@ -54,8 +54,8 @@ class GeminiMultiModal(MultiModalLLM):
     temperature: float = Field(
         default=DEFAULT_TEMPERATURE,
         description="The temperature to use during generation.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     max_tokens: int = Field(
         default=DEFAULT_NUM_OUTPUTS,
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
index 3dbb459abe644b460ad81432e75bda101062b680..a20c63e26a79f8f02f61654770d1ed33bc4a7849 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-gemini"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
index ca34aeae82d5d3ad2c801f4ef852d3361ac887ba..a35476c214834d547f25e4d8f9853ff20e7083fa 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/llama_index/multi_modal_llms/ollama/base.py
@@ -58,8 +58,8 @@ class OllamaMultiModal(MultiModalLLM):
     temperature: float = Field(
         default=0.75,
         description="The temperature to use for sampling.",
-        gte=0.0,
-        lte=1.0,
+        ge=0.0,
+        le=1.0,
     )
     context_window: int = Field(
         default=DEFAULT_CONTEXT_WINDOW,
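
The temperature bounds above follow the same pattern, and with the recognized keywords the constraints also surface in the generated JSON schema. A sketch with a hypothetical Sampler model, assuming Pydantic v2's model_json_schema():

from pydantic import BaseModel, Field

class Sampler(BaseModel):
    temperature: float = Field(default=0.75, ge=0.0, le=1.0)

# ge/le map to the schema's minimum/maximum, so API docs and clients
# see the same bounds that validation enforces.
print(Sampler.model_json_schema()["properties"]["temperature"])
# e.g. {'default': 0.75, 'maximum': 1.0, 'minimum': 0.0, 'title': 'Temperature', 'type': 'number'}
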
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
index 93a5f36bd2b25b8d62b670fab5168c8f470927d0..2b32474d55b551c14f1e8f3cbaf0bfb14cd0cf54 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-ollama"
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
index 58681dbd2c5b44efdbb6c6b7408a4e1dc2c65e89..d71711460d37bd450318d1f2c5c05161ba6b85d6 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/base.py
@@ -61,12 +61,12 @@ class OpenAIMultiModal(MultiModalLLM):
     max_retries: int = Field(
         default=3,
         description="Maximum number of retries.",
-        gte=0,
+        ge=0,
     )
     timeout: float = Field(
         default=60.0,
         description="The timeout, in seconds, for API requests.",
-        gte=0,
+        ge=0,
     )
     api_key: str = Field(default=None, description="The OpenAI API key.", exclude=True)
     api_base: str = Field(default=None, description="The base URL for OpenAI API.")
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
index f57977bfc356c2aac8463bd3c55c9baf1b763d6e..b3550d59797468bc1699a5551588698d3396b1c9 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-openai"
 readme = "README.md"
-version = "0.2.0"
+version = "0.2.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
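
Because the same two-character rename repeats across every package touched here, a mechanical sweep is one way to confirm no stragglers remain. A rough sketch, not the tooling actually used for this patch; the regex assumes gte=/lte= appear only as Field() keyword arguments:

import re
from pathlib import Path

RENAME = {"gte": "ge", "lte": "le"}
PATTERN = re.compile(r"\b(gte|lte)=")

for path in Path("llama-index-integrations").rglob("*.py"):
    text = path.read_text()
    fixed = PATTERN.sub(lambda m: RENAME[m.group(1)] + "=", text)
    if fixed != text:
        path.write_text(fixed)  # rewrite in place
        print("rewrote", path)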