diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8350f838c0f61e96ee5e631b0b2cea7a406d278..287cd79589b64754a2aaafde9eda7b2ec23b3a8a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,71 @@
 # ChangeLog
 
+## [2024-04-09]
+
+### `llama-index-core` [0.10.28]
+
+- Support indented code block fences in markdown node parser (#12393)
+- Pass in output parser to guideline evaluator (#12646)
+- Added example of query pipeline + memory (#12654)
+- Add missing node postprocessor in CondensePlusContextChatEngine async mode (#12663)
+- Added `return_direct` option to tools / tool metadata (#12587)
+- Add retry for batch eval runner (#12647)
+- Thread-safe instrumentation (#12638)
+- Coroutine-safe instrumentation Spans (#12589)
+- Add in-memory loading for non-default filesystems in PDFReader (#12659)
+- Remove redundant tokenizer call in sentence splitter (#12655)
+- Add SynthesizeComponent import to shortcut imports (#12655)
+- Improved truncation in SimpleSummarize (#12655)
+- adding err handling in eval_utils default_parser for correctness (#12624)
+- Add async_postprocess_nodes at RankGPT Postprocessor Nodes (#12620)
+- Fix MarkdownNodeParser ref_doc_id (#12615)
+
+### `llama-index-embeddings-openvino` [0.1.5]
+
+- Added initial support for openvino embeddings (#12643)
+
+### `llama-index-llms-anthropic` [0.1.9]
+
+- add anthropic tool calling (#12591)
+
+### `llama-index-llms-ipex-llm` [0.1.1]
+
+- add ipex-llm integration (#12322)
+- add more data types support to ipex-llm llm integration (#12635)
+
+### `llama-index-llms-openllm` [0.1.4]
+
+- Proper PrivateAttr usage in OpenLLM (#12655)
+
+### `llama-index-multi-modal-llms-anthropic` [0.1.4]
+
+- Bumped anthropic dep version (#12655)
+
+### `llama-index-multi-modal-llms-gemini` [0.1.5]
+
+- bump generativeai dep (#12645)
+
+### `llama-index-packs-dense-x-retrieval` [0.1.4]
+
+- Add streaming support for DenseXRetrievalPack (#12607)
+
+### `llama-index-readers-mongodb` [0.1.4]
+
+- Improve efficiency of MongoDB reader (#12664)
+
+### `llama-index-readers-wikipedia` [0.1.4]
+
+- Added multilingual support for the Wikipedia reader (#12616)
+
+### `llama-index-storage-index-store-elasticsearch` [0.1.3]
+
+- remove invalid chars from default collection name (#12672)
+
+### `llama-index-vector-stores-milvus` [0.1.8]
+
+- Added support to retrieve metadata fields from milvus (#12626)
+- Bug fix - Similarity metric is always IP for MilvusVectorStore (#12611)
+
 ## [2024-04-04]
 
 ### `llama-index-agent-openai` [0.2.2]
diff --git a/docs/docs/CHANGELOG.md b/docs/docs/CHANGELOG.md
index c8350f838c0f61e96ee5e631b0b2cea7a406d278..287cd79589b64754a2aaafde9eda7b2ec23b3a8a 100644
--- a/docs/docs/CHANGELOG.md
+++ b/docs/docs/CHANGELOG.md
@@ -1,5 +1,71 @@
 # ChangeLog
 
+## [2024-04-09]
+
+### `llama-index-core` [0.10.28]
+
+- Support indented code block fences in markdown node parser (#12393)
+- Pass in output parser to guideline evaluator (#12646)
+- Added example of query pipeline + memory (#12654)
+- Add missing node postprocessor in CondensePlusContextChatEngine async mode (#12663)
+- Added `return_direct` option to tools / tool metadata (#12587)
+- Add retry for batch eval runner (#12647)
+- Thread-safe instrumentation (#12638)
+- Coroutine-safe instrumentation Spans (#12589)
+- Add in-memory loading for non-default filesystems in PDFReader (#12659)
+- Remove redundant tokenizer call in sentence splitter (#12655)
+- Add SynthesizeComponent import to shortcut imports (#12655)
+- Improved truncation in SimpleSummarize (#12655)
+- adding err handling in eval_utils default_parser for correctness (#12624)
+- Add async_postprocess_nodes at RankGPT Postprocessor Nodes (#12620)
+- Fix MarkdownNodeParser ref_doc_id (#12615)
+
+### `llama-index-embeddings-openvino` [0.1.5]
+
+- Added initial support for openvino embeddings (#12643)
+
+### `llama-index-llms-anthropic` [0.1.9]
+
+- add anthropic tool calling (#12591)
+
+### `llama-index-llms-ipex-llm` [0.1.1]
+
+- add ipex-llm integration (#12322)
+- add more data types support to ipex-llm llm integration (#12635)
+
+### `llama-index-llms-openllm` [0.1.4]
+
+- Proper PrivateAttr usage in OpenLLM (#12655)
+
+### `llama-index-multi-modal-llms-anthropic` [0.1.4]
+
+- Bumped anthropic dep version (#12655)
+
+### `llama-index-multi-modal-llms-gemini` [0.1.5]
+
+- bump generativeai dep (#12645)
+
+### `llama-index-packs-dense-x-retrieval` [0.1.4]
+
+- Add streaming support for DenseXRetrievalPack (#12607)
+
+### `llama-index-readers-mongodb` [0.1.4]
+
+- Improve efficiency of MongoDB reader (#12664)
+
+### `llama-index-readers-wikipedia` [0.1.4]
+
+- Added multilingual support for the Wikipedia reader (#12616)
+
+### `llama-index-storage-index-store-elasticsearch` [0.1.3]
+
+- remove invalid chars from default collection name (#12672)
+
+### `llama-index-vector-stores-milvus` [0.1.8]
+
+- Added support to retrieve metadata fields from milvus (#12626)
+- Bug fix - Similarity metric is always IP for MilvusVectorStore (#12611)
+
 ## [2024-04-04]
 
 ### `llama-index-agent-openai` [0.2.2]
diff --git a/docs/docs/api_reference/embeddings/openvino.md b/docs/docs/api_reference/embeddings/openvino.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0fda93818862c86d8bf56f613b4e89e9b098fb9
--- /dev/null
+++ b/docs/docs/api_reference/embeddings/openvino.md
@@ -0,0 +1,4 @@
+::: llama_index.embeddings.openvino
+    options:
+      members:
+        - OpenVINOEmbedding
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index ed95b1f5b8f99d6e310cdaf3e0c74d2a0ca571c4..833f5c1dfa730bb35924f9a40dcdcf94ca9d0273 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -186,6 +186,7 @@ nav:
           - ./examples/embeddings/alephalpha.ipynb
           - ./examples/embeddings/itrex.ipynb
           - ./examples/embeddings/cloudflare_workersai.ipynb
+          - ./examples/embeddings/openvino.ipynb
       - Evaluation:
           - ./examples/evaluation/TonicValidateEvaluators.ipynb
           - ./examples/evaluation/semantic_similarity_eval.ipynb
@@ -700,6 +701,7 @@ nav:
           - ./api_reference/embeddings/nomic.md
           - ./api_reference/embeddings/ollama.md
           - ./api_reference/embeddings/openai.md
+          - ./api_reference/embeddings/openvino.md
           - ./api_reference/embeddings/premai.md
           - ./api_reference/embeddings/sagemaker_endpoint.md
           - ./api_reference/embeddings/text_embeddings_inference.md
@@ -1786,6 +1788,7 @@ plugins:
             - ../llama-index-integrations/vector_stores/llama-index-vector-stores-neptune
             - ../llama-index-integrations/embeddings/llama-index-embeddings-cloudflare-workersai
             - ../llama-index-integrations/llms/llama-index-llms-databricks
+            - ../llama-index-integrations/embeddings/llama-index-embeddings-huggingface-openvino
   - redirects:
       redirect_maps:
         ./api/llama_index.vector_stores.MongoDBAtlasVectorSearch.html: api_reference/storage/vector_store/mongodb.md
diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py
index b01adf52dfa742474d0793170bf8a48d118826ef..8552dd373e485188c67dc4a3b6da1183fbf48aa3 100644
--- a/llama-index-core/llama_index/core/__init__.py
+++ b/llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
 """Init file of LlamaIndex."""
 
-__version__ = "0.10.27"
+__version__ = "0.10.28"
 
 import logging
 from logging import NullHandler
diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index 7b1dd1109dc601918549d452eb93d53f88224260..e26730c05514a6d22589556a6ebd086c0c11f8c4 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -43,7 +43,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.27"
+version = "0.10.28"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py
index 419685f81383e1e854188fa133cdf373f4fafa39..f0f7842dc85c7d7244ccbf307abf274745ebcc8c 100644
--- a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py
+++ b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/utils.py
@@ -42,6 +42,8 @@ GPT4_MODELS: Dict[str, int] = {
     "gpt-4-turbo-preview": 128000,
     # multimodal model
     "gpt-4-vision-preview": 128000,
+    "gpt-4-turbo-2024-04-09": 128000,
+    "gpt-4-turbo": 128000,
     # 0613 models (function calling):
     #   https://openai.com/blog/function-calling-and-other-api-updates
     "gpt-4-0613": 8192,
diff --git a/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml
index 71ee8fb78853869d726da913fd0ab87774f0567a..ecdf4e7d1c758ee222693caf07a2bff31a44e408 100644
--- a/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-openai/pyproject.toml
@@ -29,7 +29,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-openai"
 readme = "README.md"
-version = "0.1.14"
+version = "0.1.15"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/utils.py b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/utils.py
index 3ee2dec2a12d7f6cdb0b8f93318ac3f1b4f709c4..047a6cf8b681427565ec75b55bc011b4d34fa5a0 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/utils.py
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/llama_index/multi_modal_llms/openai/utils.py
@@ -11,6 +11,8 @@ DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
 
 GPT4V_MODELS = {
     "gpt-4-vision-preview": 128000,
+    "gpt-4-turbo-2024-04-09": 128000,
+    "gpt-4-turbo": 128000,
 }
 
 
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
index bd2a4abe24d19e6cf575df7d13b7280ae8430eda..3835bcd46647a1b2cb4e93f703a0e550310c2fd9 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-openai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-openai"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/poetry.lock b/poetry.lock
index 3d673729765685515d8aafb4f2c7065c58189517..30bb79f05167f607c0926dc8af8df0e03eda9dd1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -817,13 +817,13 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc
 
 [[package]]
 name = "filelock"
-version = "3.13.3"
+version = "3.13.4"
 description = "A platform independent file lock."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"},
-    {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"},
+    {file = "filelock-3.13.4-py3-none-any.whl", hash = "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f"},
+    {file = "filelock-3.13.4.tar.gz", hash = "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4"},
 ]
 
 [package.extras]
@@ -1293,13 +1293,13 @@ i18n = ["Babel (>=2.7)"]
 
 [[package]]
 name = "joblib"
-version = "1.3.2"
+version = "1.4.0"
 description = "Lightweight pipelining with Python functions"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"},
-    {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"},
+    {file = "joblib-1.4.0-py3-none-any.whl", hash = "sha256:42942470d4062537be4d54c83511186da1fc14ba354961a2114da91efa9a4ed7"},
+    {file = "joblib-1.4.0.tar.gz", hash = "sha256:1eb0dc091919cd384490de890cb5dfd538410a6d4b3b54eef09fb8c50b409b1c"},
 ]
 
 [[package]]
@@ -1505,13 +1505,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.27"
+version = "0.10.28"
 description = "Interface between LLMs and your data"
 optional = false
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "llama_index_core-0.10.27-py3-none-any.whl", hash = "sha256:805b20a16a417180a32a31956710637af75e22cd0849fec2729447d182197d39"},
-    {file = "llama_index_core-0.10.27.tar.gz", hash = "sha256:01881a1943cb7b37f9f8147212d4a55caeef2d68ec498a0a1b864f79cf9d2be4"},
+    {file = "llama_index_core-0.10.28-py3-none-any.whl", hash = "sha256:73e2ec86ea42a0efdc73dcfe46e03ba39ab7725699dcd791464c160d24a02cb1"},
+    {file = "llama_index_core-0.10.28.tar.gz", hash = "sha256:b454640396daa2678b638299e2a5e2539fa3548afd280d45d224fd3b303c20f4"},
 ]
 
 [package.dependencies]
@@ -1678,13 +1678,13 @@ llama-index-program-openai = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-readers-file"
-version = "0.1.13"
+version = "0.1.16"
 description = "llama-index readers file integration"
 optional = false
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "llama_index_readers_file-0.1.13-py3-none-any.whl", hash = "sha256:692988b8c3ca2807d21a171351078b634ada2ff1682ebe4a484f82da384dfc55"},
-    {file = "llama_index_readers_file-0.1.13.tar.gz", hash = "sha256:830f06ec7b34437fc3bb5f268d235c5c7640296adb148d8f92277dceb7f0846d"},
+    {file = "llama_index_readers_file-0.1.16-py3-none-any.whl", hash = "sha256:1a3f3bc1f09c6894318a5392edc43b118fabc2f2820f446c51cad73676662f73"},
+    {file = "llama_index_readers_file-0.1.16.tar.gz", hash = "sha256:7f25b8fddaf1a91bac06c66727b913b48e26ef68714fcd5197051b3f710f4cfa"},
 ]
 
 [package.dependencies]
@@ -2396,18 +2396,18 @@ xml = ["lxml (>=4.6.3)"]
 
 [[package]]
 name = "parso"
-version = "0.8.3"
+version = "0.8.4"
 description = "A Python Parser"
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"},
-    {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"},
+    {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"},
+    {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"},
 ]
 
 [package.extras]
-qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
-testing = ["docopt", "pytest (<6.0.0)"]
+qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
+testing = ["docopt", "pytest"]
 
 [[package]]
 name = "pathspec"
@@ -2809,17 +2809,17 @@ files = [
 
 [[package]]
 name = "pypdf"
-version = "4.1.0"
+version = "4.2.0"
 description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files"
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "pypdf-4.1.0-py3-none-any.whl", hash = "sha256:16cac912a05200099cef3f347c4c7e0aaf0a6d027603b8f9a973c0ea500dff89"},
-    {file = "pypdf-4.1.0.tar.gz", hash = "sha256:01c3257ec908676efd60a4537e525b89d48e0852bc92b4e0aa4cc646feda17cc"},
+    {file = "pypdf-4.2.0-py3-none-any.whl", hash = "sha256:dc035581664e0ad717e3492acebc1a5fc23dba759e788e3d4a9fc9b1a32e72c1"},
+    {file = "pypdf-4.2.0.tar.gz", hash = "sha256:fe63f3f7d1dcda1c9374421a94c1bba6c6f8c4a62173a59b64ffd52058f846b1"},
 ]
 
 [package.dependencies]
-typing_extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.10\""}
+typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
 
 [package.extras]
 crypto = ["PyCryptodome", "cryptography"]
@@ -3912,24 +3912,24 @@ files = [
 
 [[package]]
 name = "types-docutils"
-version = "0.20.0.20240331"
+version = "0.20.0.20240406"
 description = "Typing stubs for docutils"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-docutils-0.20.0.20240331.tar.gz", hash = "sha256:ac99cdf34040c982081f54237d6017f8f5dafe0bebb818a598bf97a65f5b1715"},
-    {file = "types_docutils-0.20.0.20240331-py3-none-any.whl", hash = "sha256:b9042e1cf064b4a82c87a71ed3c5f0f96e81fb6d402ca4daa6ced65a91397679"},
+    {file = "types-docutils-0.20.0.20240406.tar.gz", hash = "sha256:e8ec4a5a125d06d8632bbaac8a11fbea18a1edfa94df9c5129dc45980915b84b"},
+    {file = "types_docutils-0.20.0.20240406-py3-none-any.whl", hash = "sha256:f1966b05087c0e1227c399281ecc796cb67d0afc23398d7c5c95b3355de14113"},
 ]
 
 [[package]]
 name = "types-protobuf"
-version = "4.24.0.20240311"
+version = "4.24.0.20240408"
 description = "Typing stubs for protobuf"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-protobuf-4.24.0.20240311.tar.gz", hash = "sha256:c80426f9fb9b21aee514691e96ab32a5cd694a82e2ac07964b352c3e7e0182bc"},
-    {file = "types_protobuf-4.24.0.20240311-py3-none-any.whl", hash = "sha256:8e039486df058141cb221ab99f88c5878c08cca4376db1d84f63279860aa09cd"},
+    {file = "types-protobuf-4.24.0.20240408.tar.gz", hash = "sha256:c03a44357b03c233c8c5864ce3e07dd9c766a00497d271496923f7ae3cb9e1de"},
+    {file = "types_protobuf-4.24.0.20240408-py3-none-any.whl", hash = "sha256:9b87cd279378693071247227f52e89738af7c8d6f06dbdd749b0cf473c4916ce"},
 ]
 
 [[package]]
@@ -4013,13 +4013,13 @@ files = [
 
 [[package]]
 name = "typing-extensions"
-version = "4.10.0"
+version = "4.11.0"
 description = "Backported and Experimental Type Hints for Python 3.8+"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"},
-    {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"},
+    {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"},
+    {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"},
 ]
 
 [[package]]
@@ -4296,4 +4296,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "3244d95a92b45cf5c2bed300304b4aaafc7a936d46c5d6ece802945910793111"
+content-hash = "6591c90ec5685466794b11efc950fc74b97a6f392356622db3a7a075ed4817b4"
diff --git a/pyproject.toml b/pyproject.toml
index 0c11d03ac875c7547ea2164841e8842daf16d3db..469b36fa1485a94a597dd1eb7dc08fcee7919c08 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.27"
+version = "0.10.28"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
@@ -57,7 +57,7 @@ llama-index-agent-openai = ">=0.1.4,<0.3.0"
 llama-index-readers-file = "^0.1.4"
 llama-index-readers-llama-parse = "^0.1.2"
 llama-index-indices-managed-llama-cloud = "^0.1.2"
-llama-index-core = "^0.10.27"
+llama-index-core = "^0.10.28"
 llama-index-multi-modal-llms-openai = "^0.1.3"
 llama-index-cli = "^0.1.2"