From 8d18926e1da65d8f04fa3a684ceeb5c18627bf76 Mon Sep 17 00:00:00 2001 From: Andrei Fajardo <92402603+nerdai@users.noreply.github.com> Date: Thu, 9 May 2024 12:04:13 -0400 Subject: [PATCH] [version] bump version to 0.10.36 (#13396) * vbump * wip * rm openvino tests --- CHANGELOG.md | 68 +++++++++++++++++++ llama-index-core/llama_index/core/__init__.py | 2 +- llama-index-core/pyproject.toml | 2 +- .../llama-index-llms-cohere/pyproject.toml | 2 +- .../llama-index-llms-openvino/tests/BUILD | 1 - .../tests/__init__.py | 0 .../tests/test_llms_openvino.py | 7 -- .../pyproject.toml | 2 +- pyproject.toml | 2 +- 9 files changed, 73 insertions(+), 13 deletions(-) delete mode 100644 llama-index-integrations/llms/llama-index-llms-openvino/tests/BUILD delete mode 100644 llama-index-integrations/llms/llama-index-llms-openvino/tests/__init__.py delete mode 100644 llama-index-integrations/llms/llama-index-llms-openvino/tests/test_llms_openvino.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f101fb07d..bfdb78bbef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,73 @@ # ChangeLog +## [2024-05-09] + +### `llama-index-core` [0.10.36] + +- add start_char_idx and end_char_idx with MarkdownElementParser (#13377) +- use handlers from global default (#13368) + +### `llama-index-readers-pebblo` [0.1.0] + +- Initial release (#13128) + +### `llama-index-llms-cohere` [0.1.7] + +- Call Cohere RAG inference with documents argument (#13196) + +### `llama-index-vector-stores-kdbai` [0.1.6] + +- update add method decode utf-8 (#13194) + +### `llama-index-vector-stores-alibabacloud-opensearch` [0.1.0] + +- Initial release (#13286) + +### `llama-index-tools-multion` [0.2.0] + +- update tool to use updated api/sdk (#13373) + +### `llama-index-vector-stores-weaviate` [1.0.0] + +- Update to weaviate client v4 (#13229) + +### `llama-index-readers-file` [0.1.22] + +- fix bug where PDFReader ignores extra_info (#13369) + +### `llama-index-llms-azure-openai` [0.1.8] + +- Add sync httpx client 
support (#13370) + +### `llama-index-llms-openai` [0.1.18] + +- Add sync httpx client support (#13370) +- Add missing openai model token context (#13337) + +### `llama-index-readers-github` [0.1.9] + +- Add fail_on_http_error (#13366) + +### `llama-index-vector-stores-pinecone` [0.1.7] + +- Add attribution tag for pinecone (#13329) + +### `llama-index-llms-nvidia` [0.1.1] + +- set default max_tokens to 1024 (#13371) + +### `llama-index-readers-papers` [0.1.5] + +- Fix hidden temp directory issue for arxiv reader (#13351) + +### `llama-index-embeddings-nvidia` [0.1.1] + +- fix truncate passing aget_query_embedding and get_text_embedding (#13367) + +### `llama-index-llms-anyscale` [0.1.4] + +- Add llama-3 models (#13336) + ## [2024-05-07] ### `llama-index-agent-introspective` [0.1.0] diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py index 72960a02d0..6f4f0bb663 100644 --- a/llama-index-core/llama_index/core/__init__.py +++ b/llama-index-core/llama_index/core/__init__.py @@ -1,6 +1,6 @@ """Init file of LlamaIndex.""" -__version__ = "0.10.35" +__version__ = "0.10.36" import logging from logging import NullHandler diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml index 2f0b826e05..cd2fdd700b 100644 --- a/llama-index-core/pyproject.toml +++ b/llama-index-core/pyproject.toml @@ -43,7 +43,7 @@ name = "llama-index-core" packages = [{include = "llama_index"}] readme = "README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.10.35.post1" +version = "0.10.36" [tool.poetry.dependencies] SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"} diff --git a/llama-index-integrations/llms/llama-index-llms-cohere/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-cohere/pyproject.toml index de7400b8b1..68f2b54969 100644 --- a/llama-index-integrations/llms/llama-index-llms-cohere/pyproject.toml +++ 
b/llama-index-integrations/llms/llama-index-llms-cohere/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-llms-cohere" readme = "README.md" -version = "0.1.6" +version = "0.1.7" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" diff --git a/llama-index-integrations/llms/llama-index-llms-openvino/tests/BUILD b/llama-index-integrations/llms/llama-index-llms-openvino/tests/BUILD deleted file mode 100644 index dabf212d7e..0000000000 --- a/llama-index-integrations/llms/llama-index-llms-openvino/tests/BUILD +++ /dev/null @@ -1 +0,0 @@ -python_tests() diff --git a/llama-index-integrations/llms/llama-index-llms-openvino/tests/__init__.py b/llama-index-integrations/llms/llama-index-llms-openvino/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/llama-index-integrations/llms/llama-index-llms-openvino/tests/test_llms_openvino.py b/llama-index-integrations/llms/llama-index-llms-openvino/tests/test_llms_openvino.py deleted file mode 100644 index 44af44ab11..0000000000 --- a/llama-index-integrations/llms/llama-index-llms-openvino/tests/test_llms_openvino.py +++ /dev/null @@ -1,7 +0,0 @@ -from llama_index.core.base.llms.base import BaseLLM -from llama_index.llms.openvino import OpenVINOLLM - - -def test_embedding_class(): - names_of_base_classes = [b.__name__ for b in OpenVINOLLM.__mro__] - assert BaseLLM.__name__ in names_of_base_classes diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-pinecone/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-pinecone/pyproject.toml index 7ce00b436d..fd247e1e06 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-pinecone/pyproject.toml +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-pinecone/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-vector-stores-pinecone" readme = "README.md" -version = "0.1.6" +version = 
"0.1.7" [tool.poetry.dependencies] python = ">=3.8.1,<3.13" diff --git a/pyproject.toml b/pyproject.toml index 842300ee92..1f41fe8e77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ name = "llama-index" packages = [{from = "_llama-index", include = "llama_index"}] readme = "README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.10.35" +version = "0.10.36" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -- GitLab