diff --git a/CHANGELOG.md b/CHANGELOG.md
index e133830408f082e9dd5570eca88b76c2b623ae5d..4d0c8c3bd4a77b3bf89e94836a903dccf91732c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,55 @@
 # ChangeLog
 
+## [2024-03-07]
+
+### `llama-index-callbacks-deepeval` [0.1.3]
+
+- Update import path for callback handler (#11754)
+
+### `llama-index-core` [0.10.18]
+
+- Ensure `LoadAndSearchToolSpec` loads document objects (#11733)
+- Fixed bug for no nodes in `QueryFusionRetriever` (#11759)
+- Allow using different runtime kwargs for different evaluators in `BatchEvalRunner` (#11727)
+- Fixed issues with fsspec + `SimpleDirectoryReader` (#11665)
+- Remove `asyncio.run()` requirement from guideline evaluator (#11719)
+
+### `llama-index-embeddings-voyageai` [0.1.3]
+
+- Update voyage embeddings to use proper clients (#11721)
+
+### `llama-index-indices-managed-vectara` [0.1.3]
+
+- Fixed issues with vectara query engine in non-summary mode (#11668)
+
+### `llama-index-llms-mymagic` [0.1.5]
+
+- Add `return_output` option for json output with query and response (#11761)
+
+### `llama-index-packs-code-hierarchy` [0.1.0]
+
+- Added support for a `CodeHierarchyAgentPack` that allows for agentic traversal of a codebase (#10671)
+
+### `llama-index-packs-cohere-citation-chat` [0.1.3]
+
+- Added a new llama-pack for citations + chat with cohere (#11697)
+
+### `llama-index-vector-stores-milvus` [0.1.6]
+
+- Prevent forced `flush()` on document add (#11734)
+
+### `llama-index-vector-stores-opensearch` [0.1.7]
+
+- Small typo in metadata column name (#11751)
+
+### `llama-index-vector-stores-tidbvector` [0.1.0]
+
+- Initial support for TiDB vector store (#11635)
+
+### `llama-index-vector-stores-weaviate` [0.1.4]
+
+- Small fix for `int` fields in metadata filters (#11742)
+
 ## [2024-03-06]
 
 New format! Going to try out reporting changes per package.
diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py
index 4e6e3bbe4fa1ad0d8bada3bced3bd3b91edcc13b..a6428c565a142c18665627913d14547f708695ff 100644
--- a/llama-index-core/llama_index/core/__init__.py
+++ b/llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
 """Init file of LlamaIndex."""
 
-__version__ = "0.10.17"
+__version__ = "0.10.18.post1"
 
 import logging
 from logging import NullHandler
diff --git a/llama-index-core/llama_index/core/command_line/mappings.json b/llama-index-core/llama_index/core/command_line/mappings.json
index a3f397589d7cb2dabd25725c1ba3b7a91bd1cbec..15d6126eff904df07afab3b326a518c46a13f010 100644
--- a/llama-index-core/llama_index/core/command_line/mappings.json
+++ b/llama-index-core/llama_index/core/command_line/mappings.json
@@ -279,6 +279,7 @@
   "LLMQuestionGenerator": "llama_index.core.question_gen",
   "SubQuestionOutputParser": "llama_index.core.question_gen",
   "ReaderConfig": "llama_index.core.readers",
+  "CodeHierarchyAgentPack": "llama_index.packs.code_hierarchy",
   "CodeHierarchyNodeParser": "llama_index.packs.code_hierarchy",
   "CodeHierarchyKeywordQueryEngine": "llama_index.packs.code_hierarchy",
   "StringIterableReader": "llama_index.core.readers",
diff --git a/llama-index-core/llama_index/core/utilities/gemini_utils.py b/llama-index-core/llama_index/core/utilities/gemini_utils.py
index 3ed4df6c848d77726db19e810b6e3e1ac7b54d9e..d34b1958bb5e183626de7351ef017546489477b9 100644
--- a/llama-index-core/llama_index/core/utilities/gemini_utils.py
+++ b/llama-index-core/llama_index/core/utilities/gemini_utils.py
@@ -10,6 +10,7 @@ ROLES_TO_GEMINI: Dict[MessageRole, MessageRole] = {
     MessageRole.ASSISTANT: MessageRole.MODEL,
     ## Gemini only has user and model roles. Put the rest in user role.
     MessageRole.SYSTEM: MessageRole.USER,
+    MessageRole.MODEL: MessageRole.MODEL,
 }
 ROLES_FROM_GEMINI: Dict[MessageRole, MessageRole] = {
     ## Gemini only has user and model roles.
diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index 1e55343da9ad48c070c5ea9b0a69321c20d1c29e..9af62c102dc105a24af8a88f4b43d22e76773138 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -43,7 +43,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.17"
+version = "0.10.18.post1"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml
index c662bd0baaa7106381ba8b3aa040f2b58fdbc63a..ebad2fa805168a3f2e992118112137195398efb3 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-voyageai"
 readme = "README.md"
-version = "0.1.2"
+version = "0.1.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml b/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
index 30a7071e64cf030100242126fd78583afc039333..65d6887bc3b4887410b4a410b0070683a055211d 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
+++ b/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
@@ -31,7 +31,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-indices-managed-vectara"
 readme = "README.md"
-version = "0.1.2"
+version = "0.1.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml
index c47eaf72790993d2375b3512cee9030931c8cfdf..0e7ba7cea8313e8818ee2bdfb419df90d7064c3f 100644
--- a/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-mymagic"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/poetry.lock b/poetry.lock
index bf0c7b82ed43090700748718b03bcfddf652cf0d..2c5fc1f4437d55508e01bc6faa769c0ed80b8bd3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
@@ -1065,13 +1065,13 @@ typing = ["typing-extensions (>=4.8)"]
 
 [[package]]
 name = "flatbuffers"
-version = "23.5.26"
+version = "24.3.7"
 description = "The FlatBuffers serialization format for Python"
 optional = false
 python-versions = "*"
 files = [
-    {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"},
-    {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"},
+    {file = "flatbuffers-24.3.7-py2.py3-none-any.whl", hash = "sha256:80c4f5dcad0ee76b7e349671a0d657f2fbba927a0244f88dd3f5ed6a3694e1fc"},
+    {file = "flatbuffers-24.3.7.tar.gz", hash = "sha256:0895c22b9a6019ff2f4de2e5e2f7cd15914043e6e7033a94c0c6369422690f22"},
 ]
 
 [[package]]
@@ -1608,13 +1608,13 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs
 
 [[package]]
 name = "importlib-resources"
-version = "6.1.2"
+version = "6.1.3"
 description = "Read resources from Python packages"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "importlib_resources-6.1.2-py3-none-any.whl", hash = "sha256:9a0a862501dc38b68adebc82970140c9e4209fc99601782925178f8386339938"},
-    {file = "importlib_resources-6.1.2.tar.gz", hash = "sha256:308abf8474e2dba5f867d279237cd4076482c3de7104a40b41426370e891549b"},
+    {file = "importlib_resources-6.1.3-py3-none-any.whl", hash = "sha256:4c0269e3580fe2634d364b39b38b961540a7738c02cb984e98add8b4221d793d"},
+    {file = "importlib_resources-6.1.3.tar.gz", hash = "sha256:56fb4525197b78544a3354ea27793952ab93f935bb4bf746b846bb1015020f2b"},
 ]
 
 [package.dependencies]
@@ -1622,7 +1622,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
 
 [package.extras]
 docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"]
+testing = ["jaraco.collections", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"]
 
 [[package]]
 name = "ipykernel"
@@ -1969,13 +1969,13 @@ llama-index-llms-openai = ">=0.1.5,<0.2.0"
 
 [[package]]
 name = "llama-index-cli"
-version = "0.1.7"
+version = "0.1.8"
 description = "llama-index cli"
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_index_cli-0.1.7-py3-none-any.whl", hash = "sha256:48a81fc33d4005dbe91b77ebe840ac69e0102e7e2a59770308f94fae1b792544"},
-    {file = "llama_index_cli-0.1.7.tar.gz", hash = "sha256:55a77e3c370eb760c42cb74a0df6f650e41ec17928b72b07ff8b927cb94b15b4"},
+    {file = "llama_index_cli-0.1.8-py3-none-any.whl", hash = "sha256:4e300f06206862d6d7eedde95632c6b61a5ebb5162454f1ac7a3c3c9b3ebb05f"},
+    {file = "llama_index_cli-0.1.8.tar.gz", hash = "sha256:776a96917965d0df6e7e272d6278394a4f7c922e57973a75e2645609727fa4b1"},
 ]
 
 [package.dependencies]
@@ -1986,13 +1986,13 @@ llama-index-vector-stores-chroma = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.17"
+version = "0.10.18"
 description = "Interface between LLMs and your data"
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_index_core-0.10.17-py3-none-any.whl", hash = "sha256:da13e609f015e87fbe985a4d607a8368d74e84b002d3c7ba4125ddea54dde452"},
-    {file = "llama_index_core-0.10.17.tar.gz", hash = "sha256:bbc1a0f5e457e6f44769ad70e6398a4f1d233ddf70bfa5dc41a925b360445b55"},
+    {file = "llama_index_core-0.10.18-py3-none-any.whl", hash = "sha256:a298116213a3323139736ae20a5d12525fdf6ffcf34fff7f3bb2fae27753e6e2"},
+    {file = "llama_index_core-0.10.18.tar.gz", hash = "sha256:0a7174cf4c88597d0cbb547768f198b68c2ab781127ea149ebdf3a9c70539fb8"},
 ]
 
 [package.dependencies]
@@ -2208,13 +2208,13 @@ tokenizers = ">=0.15.1,<0.16.0"
 
 [[package]]
 name = "llama-parse"
-version = "0.3.7"
+version = "0.3.8"
 description = "Parse files into RAG-Optimized formats."
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_parse-0.3.7-py3-none-any.whl", hash = "sha256:7d866940fa0604e0cef496a7c160fb9ef8fcd6bd3a11a2292d100f43cc6ad469"},
-    {file = "llama_parse-0.3.7.tar.gz", hash = "sha256:31706a610d28729c2b4741455c9a9c1edf471171b4b9d2a7138aa064656712a6"},
+    {file = "llama_parse-0.3.8-py3-none-any.whl", hash = "sha256:2f6222c4f9f8b70622a799fca8438972e3e2f19fa8273e2c8be46af314c9a367"},
+    {file = "llama_parse-0.3.8.tar.gz", hash = "sha256:3d4739726687e6602e7cacbc9f17d438d39989a4a73324fc99122b3aefa384a4"},
 ]
 
 [package.dependencies]
@@ -3824,7 +3824,6 @@ python-versions = ">=3.8"
 files = [
     {file = "PyMuPDFb-1.23.22-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9085a1e2fbf16f2820f9f7ad3d25e85f81d9b9eb0409110c1670d4cf5a27a678"},
     {file = "PyMuPDFb-1.23.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:01016dd33220cef4ecaf929d09fd27a584dc3ec3e5c9f4112dfe63613ea35135"},
-    {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf50e814db91f2a2325219302fbac229a23682c372cf8232aabd51ea3f18210e"},
     {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ffa713ad18e816e584c8a5f569995c32d22f8ac76ab6e4a61f2d2983c4b73d9"},
     {file = "PyMuPDFb-1.23.22-py3-none-win32.whl", hash = "sha256:d00e372452845aea624659c302d25e935052269fd3aafe26948301576d6f2ee8"},
     {file = "PyMuPDFb-1.23.22-py3-none-win_amd64.whl", hash = "sha256:7c9c157281fdee9f296e666a323307dbf74cb38f017921bb131fa7bfcd39c2bd"},
@@ -3973,6 +3972,7 @@ files = [
     {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
     {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
     {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
     {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -5185,13 +5185,13 @@ files = [
 
 [[package]]
 name = "types-docutils"
-version = "0.20.0.20240304"
+version = "0.20.0.20240308"
 description = "Typing stubs for docutils"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-docutils-0.20.0.20240304.tar.gz", hash = "sha256:c35ae35ca835a5aeead758df411cd46cfb7e7f19f2b223c413dae7e069d5b0be"},
-    {file = "types_docutils-0.20.0.20240304-py3-none-any.whl", hash = "sha256:ef02f9d05f2b61500638b1358cdf3fbf975cc5dedaa825a2eb5ea71b7318a760"},
+    {file = "types-docutils-0.20.0.20240308.tar.gz", hash = "sha256:7630e1f7e68197224e6834afad512e86618e079e73780a15af83d90a384efc90"},
+    {file = "types_docutils-0.20.0.20240308-py3-none-any.whl", hash = "sha256:f28b3cf72b6901281ac62f8531d1d0ab575f5fe2ce96fbad7bc4e41aa91177b4"},
 ]
 
 [[package]]
@@ -5823,4 +5823,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "2c9fd1b6639cfaa94772778c890a0e9e06d400f2799893cdf0f1d0d465183482"
+content-hash = "6cd8fa15b9b93c4fb40bacfddd498f97094bb2849a28903cf806c8f15d6805f9"
diff --git a/pyproject.toml b/pyproject.toml
index 2d76659c0dc0f78602cd3cd306d7f9a08f540039..879c7613e1d65c7d83e6e24a413c16c12ba6f495 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.17"
+version = "0.10.18"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
@@ -57,7 +57,7 @@ llama-index-agent-openai = "^0.1.4"
 llama-index-readers-file = "^0.1.4"
 llama-index-readers-llama-parse = "^0.1.2"
 llama-index-indices-managed-llama-cloud = "^0.1.2"
-llama-index-core = "^0.10.17"
+llama-index-core = "^0.10.18"
 llama-index-multi-modal-llms-openai = "^0.1.3"
 llama-index-cli = "^0.1.2"