diff --git a/CHANGELOG.md b/CHANGELOG.md
index b2676ba77f4daf92a55ec4bbf0dea7f57efb414c..e7226c08e0c4fe80649c6b61179190f77801b107 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,39 @@
 # ChangeLog
 
+## [0.10.17] - 2024-03-06
+
+### `llama-index-cli` [0.1.8]
+
+- Update mappings for `upgrade` command (#11699)
+
+### `llama-index-vector-stores-astra-db` [0.1.4]
+
+- Update wording in warning message (#11702)
+
+### `llama-index-vector-stores-qdrant` [0.1.4]
+
+- Catch RPC errors (#11657)
+
+### `llama-index-vector-stores-opensearch` [0.1.7]
+
+- Avoid calling `nest_asyncio.apply()` in code to avoid confusing errors for users (#11707)
+
+### `llama-index-multi-modal-llms-anthropic` [0.1.2]
+
+- Added support for new multi-modal models `haiku` and `sonnet` (#11656)
+
+### `llama-index-readers-file` [0.1.8]
+
+- Added support for checking if NLTK files are already downloaded (#11676)
+
+### `llama-index-readers-json` [0.1.4]
+
+- Use the metadata passed in when creating documents (#11626)
+
+### `llama-index-packs-finchat` [0.1.0]
+
+- Added a new llama-pack for hierarchical agents + finance chat (#11387)
+
 ## [0.10.16] - 2024-03-05
 
 ### New Features
diff --git a/llama-index-cli/pyproject.toml b/llama-index-cli/pyproject.toml
index 392231d2a0ce119c83f58d565e708c63691b7182..5a0fa68a7c3ead10586c58fcb61df10b18b11a73 100644
--- a/llama-index-cli/pyproject.toml
+++ b/llama-index-cli/pyproject.toml
@@ -32,7 +32,7 @@ maintainers = [
 name = "llama-index-cli"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.1.7"
+version = "0.1.8"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py
index 53f85eb7b4cb1fe5d4760534497a3088add9cf96..4e6e3bbe4fa1ad0d8bada3bced3bd3b91edcc13b 100644
--- a/llama-index-core/llama_index/core/__init__.py
+++ b/llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
 """Init file of LlamaIndex."""
 
-__version__ = "0.10.16"
+__version__ = "0.10.17"
 
 import logging
 from logging import NullHandler
diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index 766f8c69c9560984712495824dcb6f4025194e7f..1e55343da9ad48c070c5ea9b0a69321c20d1c29e 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -43,7 +43,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.16"
+version = "0.10.17"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-integrations/readers/llama-index-readers-json/pyproject.toml b/llama-index-integrations/readers/llama-index-readers-json/pyproject.toml
index 5657a59721ba9bdcbc9f922dc7bdfae6191f8b1e..9723c9a47f47afd6c269fa6c78a29b8a16a3c4b2 100644
--- a/llama-index-integrations/readers/llama-index-readers-json/pyproject.toml
+++ b/llama-index-integrations/readers/llama-index-readers-json/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 maintainers = ["yisding"]
 name = "llama-index-readers-json"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db/pyproject.toml
index b31f2452b0a0a2f9232c643875c142e210ba50b2..59af73d47a6636327317d4a048a7977edecbf70a 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-astra-db"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/pyproject.toml
index 47512de40ba3dea6f336c42be219be694069ebde..6759b5239520da0e7d33e4d66dff78e4293e3f02 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-qdrant/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-qdrant"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/poetry.lock b/poetry.lock
index 063032b09a51d8d56da1f8f8d72ca22e85a79ba4..bf0c7b82ed43090700748718b03bcfddf652cf0d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
@@ -1506,13 +1506,13 @@ socks = ["socksio (==1.*)"]
 
 [[package]]
 name = "huggingface-hub"
-version = "0.21.3"
+version = "0.21.4"
 description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
 optional = false
 python-versions = ">=3.8.0"
 files = [
-    {file = "huggingface_hub-0.21.3-py3-none-any.whl", hash = "sha256:b183144336fdf2810a8c109822e0bb6ef1fd61c65da6fb60e8c3f658b7144016"},
-    {file = "huggingface_hub-0.21.3.tar.gz", hash = "sha256:26a15b604e4fc7bad37c467b76456543ec849386cbca9cd7e1e135f53e500423"},
+    {file = "huggingface_hub-0.21.4-py3-none-any.whl", hash = "sha256:df37c2c37fc6c82163cdd8a67ede261687d80d1e262526d6c0ce73b6b3630a7b"},
+    {file = "huggingface_hub-0.21.4.tar.gz", hash = "sha256:e1f4968c93726565a80edf6dc309763c7b546d0cfe79aa221206034d50155531"},
 ]
 
 [package.dependencies]
@@ -1986,13 +1986,13 @@ llama-index-vector-stores-chroma = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.16"
+version = "0.10.17"
 description = "Interface between LLMs and your data"
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_index_core-0.10.16-py3-none-any.whl", hash = "sha256:189c808b9c92d871b4c27f78e71186f5edbe202c9fafa28ff4e276197c1a05ff"},
-    {file = "llama_index_core-0.10.16.tar.gz", hash = "sha256:a20b246b75e2e5b2dd679953fcede921e8b9896f06e61f473a31ee96d24cd446"},
+    {file = "llama_index_core-0.10.17-py3-none-any.whl", hash = "sha256:da13e609f015e87fbe985a4d607a8368d74e84b002d3c7ba4125ddea54dde452"},
+    {file = "llama_index_core-0.10.17.tar.gz", hash = "sha256:bbc1a0f5e457e6f44769ad70e6398a4f1d233ddf70bfa5dc41a925b360445b55"},
 ]
 
 [package.dependencies]
@@ -2158,13 +2158,13 @@ llama-index-program-openai = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-readers-file"
-version = "0.1.7"
+version = "0.1.8"
 description = "llama-index readers file integration"
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_index_readers_file-0.1.7-py3-none-any.whl", hash = "sha256:46cf03a141b3fa5fd50c81fa607e9de3060aa67ab9a79dd64bea18962776d2de"},
-    {file = "llama_index_readers_file-0.1.7.tar.gz", hash = "sha256:63827ab51e8f66d97f08e1a20be67f86b92484ae120253b5f756fef2371d61bf"},
+    {file = "llama_index_readers_file-0.1.8-py3-none-any.whl", hash = "sha256:f58c72e2c2ed9f36b5308b4b9ee3142f3848156f0e3b85e813db0a26b8d03290"},
+    {file = "llama_index_readers_file-0.1.8.tar.gz", hash = "sha256:f23417a2afc8461a32f08f057e85c8d09b1c687ba16ca6a6a08f08f319eca26a"},
 ]
 
 [package.dependencies]
@@ -2208,13 +2208,13 @@ tokenizers = ">=0.15.1,<0.16.0"
 
 [[package]]
 name = "llama-parse"
-version = "0.3.5"
+version = "0.3.7"
 description = "Parse files into RAG-Optimized formats."
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_parse-0.3.5-py3-none-any.whl", hash = "sha256:8e6e7a0986ad30cb82c5c67a29b7e2c3892620dd2a422afc909654a9d0f1c82c"},
-    {file = "llama_parse-0.3.5.tar.gz", hash = "sha256:736a80e4fc5970b9cbef1048171908021ebd26be43f07b806889f0d1bb3875fe"},
+    {file = "llama_parse-0.3.7-py3-none-any.whl", hash = "sha256:7d866940fa0604e0cef496a7c160fb9ef8fcd6bd3a11a2292d100f43cc6ad469"},
+    {file = "llama_parse-0.3.7.tar.gz", hash = "sha256:31706a610d28729c2b4741455c9a9c1edf471171b4b9d2a7138aa064656712a6"},
 ]
 
 [package.dependencies]
@@ -3824,6 +3824,7 @@ python-versions = ">=3.8"
 files = [
     {file = "PyMuPDFb-1.23.22-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9085a1e2fbf16f2820f9f7ad3d25e85f81d9b9eb0409110c1670d4cf5a27a678"},
     {file = "PyMuPDFb-1.23.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:01016dd33220cef4ecaf929d09fd27a584dc3ec3e5c9f4112dfe63613ea35135"},
+    {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf50e814db91f2a2325219302fbac229a23682c372cf8232aabd51ea3f18210e"},
     {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ffa713ad18e816e584c8a5f569995c32d22f8ac76ab6e4a61f2d2983c4b73d9"},
     {file = "PyMuPDFb-1.23.22-py3-none-win32.whl", hash = "sha256:d00e372452845aea624659c302d25e935052269fd3aafe26948301576d6f2ee8"},
     {file = "PyMuPDFb-1.23.22-py3-none-win_amd64.whl", hash = "sha256:7c9c157281fdee9f296e666a323307dbf74cb38f017921bb131fa7bfcd39c2bd"},
@@ -3972,7 +3973,6 @@ files = [
     {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
-    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
     {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
     {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
     {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -5823,4 +5823,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "b3cad5844057965a829ca5df97dfcce50bf430eef20b0b69e7be04a0288acef9"
+content-hash = "2c9fd1b6639cfaa94772778c890a0e9e06d400f2799893cdf0f1d0d465183482"
diff --git a/pyproject.toml b/pyproject.toml
index 7ef492487c9b42b58fda4734f913a216fc6755e1..2d76659c0dc0f78602cd3cd306d7f9a08f540039 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.16"
+version = "0.10.17"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
@@ -57,7 +57,7 @@ llama-index-agent-openai = "^0.1.4"
 llama-index-readers-file = "^0.1.4"
 llama-index-readers-llama-parse = "^0.1.2"
 llama-index-indices-managed-llama-cloud = "^0.1.2"
-llama-index-core = "^0.10.16"
+llama-index-core = "^0.10.17"
 llama-index-multi-modal-llms-openai = "^0.1.3"
 llama-index-cli = "^0.1.2"