From 0ade38608e2efb9fbbaed4eda2bb8d3089e87468 Mon Sep 17 00:00:00 2001
From: Andrei Fajardo <92402603+nerdai@users.noreply.github.com>
Date: Wed, 20 Mar 2024 16:45:04 -0400
Subject: [PATCH] [version] bump version to 0.10.21 (#12132)

* wip

* bump core oops

* bump
---
 CHANGELOG.md                                  | 86 +++++++++++++++++++
 llama-index-cli/pyproject.toml                |  2 +-
 llama-index-core/llama_index/core/__init__.py |  2 +-
 llama-index-core/pyproject.toml               |  2 +-
 .../pyproject.toml                            |  2 +-
 .../pyproject.toml                            |  2 +-
 .../llama-index-llms-gemini/pyproject.toml    |  2 +-
 .../llms/llama-index-llms-palm/pyproject.toml |  2 +-
 .../pyproject.toml                            |  2 +-
 .../pyproject.toml                            |  2 +-
 .../pyproject.toml                            |  2 +-
 poetry.lock                                   | 10 +--
 pyproject.toml                                |  4 +-
 13 files changed, 103 insertions(+), 17 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e83c2999ad..d881644901 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,91 @@
 # ChangeLog
 
+## [2024-03-20]
+
+### `llama-index-core` [0.10.21]
+
+- Lazy init for async elements StreamingAgentChatResponse (#12116)
+- Fix streaming generators get bug by SynthesisEndEvent (#12092)
+- CLIP embedding more models (#12063)
+
+### `llama-index-packs-raptor` [0.1.3]
+
+- Add `num_workers` to summary module (#)
+
+### `llama-index-readers-telegram` [0.1.5]
+
+- Fix datetime fields (#12112)
+- Add ability to select time period of posts/messages (#12078)
+
+### `llama-index-embeddings-openai` [0.1.7]
+
+- Add api version/base api as optional for open ai embedding (#12091)
+
+### `llama-index-networks` [0.2.1]
+
+- Add node postprocessing to network retriever (#12027)
+- Add privacy-safe networks demo (#12027)
+
+### `llama-index-callbacks-langfuse` [0.1.3]
+
+- Chore: bumps min version of langfuse dep (#12077)
+
+### `llama-index-embeddings-google` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-embeddings-gemini` [0.1.5]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-llms-gemini` [0.1.6]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-llms-palm` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-multi-modal-llms-google` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-vector-stores-google` [0.1.5]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-storage-kvstore-elasticsearch` [0.1.0]
+
+- New integration (#12068)
+
+### `llama-index-readers-google` [0.1.7]
+
+- Fix - Google Drive Issue of not loading same name files (#12022)
+
+### `llama-index-vector-stores-upstash` [0.1.3]
+
+- Adding Metadata Filtering support for UpstashVectorStore (#12054)
+
+### `llama-index-packs-raptor` [0.1.2]
+
+- Fix: prevent RaptorPack infinite recursion (#12008)
+
+### `llama-index-embeddings-huggingface-optimum` [0.1.4]
+
+- Fix(OptimumEmbedding): removing token_type_ids causing ONNX validation issues
+
+### `llama-index-llms-anthropic` [0.1.7]
+
+- Fix: Anthropic LLM merge consecutive messages with same role (#12013)
+
+### `llama-index-packs-diff-private-simple-dataset` [0.1.0]
+
+- DiffPrivacy ICL Pack - OpenAI Completion LLMs (#11881)
+
+### `llama-index-cli` [0.1.11]
+
+- Remove llama_hub_url keyword from download_llama_dataset of command (#12038)
+
 ## [2024-03-14]
 
 ### `llama-index-core` [0.10.20]
diff --git a/llama-index-cli/pyproject.toml b/llama-index-cli/pyproject.toml
index a1d1521ccd..c18741fb57 100644
--- a/llama-index-cli/pyproject.toml
+++ b/llama-index-cli/pyproject.toml
@@ -32,7 +32,7 @@ maintainers = [
 name = "llama-index-cli"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.1.10"
+version = "0.1.11"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py
index 4981cfc768..0d2328e57a 100644
--- a/llama-index-core/llama_index/core/__init__.py
+++ b/llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
 """Init file of LlamaIndex."""
 
-__version__ = "0.10.20.post3"
+__version__ = "0.10.21"
 
 import logging
 from logging import NullHandler
diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index 2fe61d01da..bba3d7f063 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -43,7 +43,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.20.post3"
+version = "0.10.21.post1"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
index d1c859afe3..d3f79db7f9 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-gemini"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
index c0cce9c511..85a2e04d98 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
@@ -29,7 +29,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-google"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
index 9d03b89c82..f99a0de31f 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-gemini"
 readme = "README.md"
-version = "0.1.5"
+version = "0.1.6"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
index 3243a348a1..9ecc6ab187 100644
--- a/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-palm"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
index 8d25902984..e245ee65fb 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-gemini"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml b/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
index 0cb7f2ffb7..5607edf848 100644
--- a/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
+++ b/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 maintainers = ["diicell"]
 name = "llama-index-readers-telegram"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
index dfe95c3f5b..436323a170 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-google"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/poetry.lock b/poetry.lock
index fdd9fb1f2a..c902db3433 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1518,13 +1518,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.20"
+version = "0.10.21.post1"
 description = "Interface between LLMs and your data"
 optional = false
-python-versions = ">=3.8.1,<4.0"
+python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "llama_index_core-0.10.20-py3-none-any.whl", hash = "sha256:db764e480b4f53095db9f0d39792a54c8ebfd9cde216d0a37d9c16344ea4a664"},
-    {file = "llama_index_core-0.10.20.tar.gz", hash = "sha256:7780cf83644dda33c4d7b19c313e82755cbbadacad1d185afb7eba998b736bb6"},
+    {file = "llama_index_core-0.10.21.post1-py3-none-any.whl", hash = "sha256:acfec6e39f9f38a1c64e60b953bdf787862abd9a797c0dc3348be34ed67493e7"},
+    {file = "llama_index_core-0.10.21.post1.tar.gz", hash = "sha256:0c212d14ffc368114099e881e9e3fa2d073d5186be07ec9db46070d475db14e0"},
 ]
 
 [package.dependencies]
@@ -4309,4 +4309,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "4adead56bef8bfedcfd46741463c3ad8758de84d9c2c03ef493861f598b9e267"
+content-hash = "4157199988ba7ed03ed0d3b3a94988bcc69ad693ef696b39edaa71421061e956"
diff --git a/pyproject.toml b/pyproject.toml
index 04e46b34fa..9e1ff001cd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.20"
+version = "0.10.21"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
@@ -57,7 +57,7 @@ llama-index-agent-openai = "^0.1.4"
 llama-index-readers-file = "^0.1.4"
 llama-index-readers-llama-parse = "^0.1.2"
 llama-index-indices-managed-llama-cloud = "^0.1.2"
-llama-index-core = "^0.10.20"
+llama-index-core = "^0.10.21.post1"
 llama-index-multi-modal-llms-openai = "^0.1.3"
 llama-index-cli = "^0.1.2"
--
GitLab