diff --git a/CHANGELOG.md b/CHANGELOG.md
index e83c2999ad1149747a3ae48a8a8528a6b96cbf9e..d881644901546e2d6071883749b8cb677b74a7e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,91 @@
 # ChangeLog
 
+## [2024-03-20]
+
+### `llama-index-core` [0.10.21]
+
+- Lazy init for async elements in StreamingAgentChatResponse (#12116)
+- Fix streaming generator bug caused by SynthesisEndEvent (#12092)
+- Support more CLIP embedding models (#12063)
+
+### `llama-index-packs-raptor` [0.1.3]
+
+- Add `num_workers` to summary module (#)
+
+### `llama-index-readers-telegram` [0.1.5]
+
+- Fix datetime fields (#12112)
+- Add ability to select time period of posts/messages (#12078)
+
+### `llama-index-embeddings-openai` [0.1.7]
+
+- Add API version/API base as optional parameters for OpenAI embeddings (#12091)
+
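+A rough, hedged sketch of the entry above (model, key, and endpoint values are placeholders, not taken from this patch):
+
+```python
+from llama_index.embeddings.openai import OpenAIEmbedding
+
+# api_base / api_version are optional; pass them only when routing through
+# a custom or versioned endpoint (placeholder values below).
+embed_model = OpenAIEmbedding(
+    model="text-embedding-3-small",
+    api_key="sk-...",                        # placeholder
+    api_base="https://my-proxy.example/v1",  # optional custom base URL (assumed placeholder)
+    api_version="2024-02-01",                # optional, only for versioned gateways
+)
+
+embedding = embed_model.get_text_embedding("hello world")
+print(len(embedding))
+```
+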
+### `llama-index-networks` [0.2.1]
+
+- Add node postprocessing to network retriever (#12027)
+- Add privacy-safe networks demo (#12027)
+
+### `llama-index-callbacks-langfuse` [0.1.3]
+
+- Chore: bumps min version of langfuse dep (#12077)
+
+### `llama-index-embeddings-google` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-embeddings-gemini` [0.1.5]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-llms-gemini` [0.1.6]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-llms-palm` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-multi-modal-llms-gemini` [0.1.4]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-vector-stores-google` [0.1.5]
+
+- Chore: bumps google-generativeai dep (#12085)
+
+### `llama-index-storage-kvstore-elasticsearch` [0.1.0]
+
+- New integration (#12068)
+
+### `llama-index-readers-google` [0.1.7]
+
+- Fix Google Drive issue where files with the same name were not loaded (#12022)
+
+### `llama-index-vector-stores-upstash` [0.1.3]
+
+- Add metadata filtering support for UpstashVectorStore (#12054)
+
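+A minimal sketch of the metadata-filtering entry above, assuming the standard `MetadataFilters`/`ExactMatchFilter` classes and an `UpstashVectorStore(url=..., token=...)` constructor (URL, token, and filter key are placeholders):
+
+```python
+from llama_index.core import VectorStoreIndex
+from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
+from llama_index.vector_stores.upstash import UpstashVectorStore
+
+# Connect to an existing Upstash Vector index (placeholder credentials).
+vector_store = UpstashVectorStore(url="https://example-vector.upstash.io", token="...")
+index = VectorStoreIndex.from_vector_store(vector_store)
+
+# Restrict retrieval to nodes whose metadata matches the filter.
+retriever = index.as_retriever(
+    filters=MetadataFilters(filters=[ExactMatchFilter(key="author", value="paul")])
+)
+nodes = retriever.retrieve("What did the author work on?")
+```
+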
+### `llama-index-packs-raptor` [0.1.2]
+
+- Fix: prevent RaptorPack infinite recursion (#12008)
+
+### `llama-index-embeddings-huggingface-optimum` [0.1.4]
+
+- Fix(OptimumEmbedding): removing token_type_ids causing ONNX validation issues
+
+### `llama-index-llms-anthropic` [0.1.7]
+
+- Fix: Anthropic LLM now merges consecutive messages with the same role (#12013)
+
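+For context on the Anthropic fix above, a hedged sketch of the scenario it addresses: consecutive messages with the same role are merged before being sent to the API (model name and key are placeholders):
+
+```python
+from llama_index.core.llms import ChatMessage
+from llama_index.llms.anthropic import Anthropic
+
+llm = Anthropic(model="claude-3-sonnet-20240229", api_key="...")  # placeholders
+
+# Two consecutive "user" messages previously triggered an API error;
+# with this fix they should be merged into a single user turn.
+messages = [
+    ChatMessage(role="user", content="Summarize the release notes."),
+    ChatMessage(role="user", content="Keep it to one sentence."),
+]
+print(llm.chat(messages))
+```
+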
+### `llama-index-packs-diff-private-simple-dataset` [0.1.0]
+
+- DiffPrivacy ICL Pack - OpenAI Completion LLMs (#11881)
+
+### `llama-index-cli` [0.1.11]
+
+- Remove llama_hub_url keyword from the download_llama_dataset command (#12038)
+
 ## [2024-03-14]
 
 ### `llama-index-core` [0.10.20]
diff --git a/llama-index-cli/pyproject.toml b/llama-index-cli/pyproject.toml
index a1d1521ccde8a9aba1319a03ef5fb966d92a576a..c18741fb572bccf279b5903453c51c1e5b55f97e 100644
--- a/llama-index-cli/pyproject.toml
+++ b/llama-index-cli/pyproject.toml
@@ -32,7 +32,7 @@ maintainers = [
 name = "llama-index-cli"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.1.10"
+version = "0.1.11"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py
index 4981cfc76821b2f6c47bcda36e6e07f0334bb212..0d2328e57af93f3685fc699ad62d46e8255afaa1 100644
--- a/llama-index-core/llama_index/core/__init__.py
+++ b/llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
 """Init file of LlamaIndex."""
 
-__version__ = "0.10.20.post3"
+__version__ = "0.10.21.post1"
 
 import logging
 from logging import NullHandler
diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index 2fe61d01da856f7d799407330571ad3191cc9765..bba3d7f06380796d070857e02548e76125523a60 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -43,7 +43,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.20.post3"
+version = "0.10.21.post1"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
index d1c859afe38c19a1a0e799c74e887ca92b6af4f3..d3f79db7f9ebc53873afa17f4e808e50c05ea801 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-gemini"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
index c0cce9c511319775056dbb43a2586577bcf39bf5..85a2e04d985afb2e8bd7ff10f6f1b2de3a0f615d 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-google/pyproject.toml
@@ -29,7 +29,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-google"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
index 9d03b89c826c48e6c72717fafb0970cb1d3d8e0e..f99a0de31f7c14868b3302ecd6ff628083cd5e9f 100644
--- a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-gemini"
 readme = "README.md"
-version = "0.1.5"
+version = "0.1.6"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
index 3243a348a18d83628e453bf968f277515190f604..9ecc6ab187eafd8161d7f49356a26071ff128029 100644
--- a/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-palm/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-palm"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
index 8d259029847ef74569c75698e6d3eb0cde17f549..e245ee65fb4f965a66f2e8e1187ace5feec35d43 100644
--- a/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
+++ b/llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-multi-modal-llms-gemini"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml b/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
index 0cb7f2ffb709d9595ccdc01068a56a94b1b25fd6..5607edf848fe94d119e0becefcd44792a84bb407 100644
--- a/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
+++ b/llama-index-integrations/readers/llama-index-readers-telegram/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 maintainers = ["diicell"]
 name = "llama-index-readers-telegram"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
index dfe95c3f5be9e055e2c5c1e95b716d3fa28d2e2c..436323a1708a45e53a406da22aa8c056b9d1f048 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-google/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-google"
 readme = "README.md"
-version = "0.1.4"
+version = "0.1.5"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/poetry.lock b/poetry.lock
index fdd9fb1f2a6a38267706e566fef89cbe08dbecb8..c902db34332f73294a5052d4c023a53aa997d481 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1518,13 +1518,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0"
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.20"
+version = "0.10.21.post1"
 description = "Interface between LLMs and your data"
 optional = false
-python-versions = ">=3.8.1,<4.0"
+python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "llama_index_core-0.10.20-py3-none-any.whl", hash = "sha256:db764e480b4f53095db9f0d39792a54c8ebfd9cde216d0a37d9c16344ea4a664"},
-    {file = "llama_index_core-0.10.20.tar.gz", hash = "sha256:7780cf83644dda33c4d7b19c313e82755cbbadacad1d185afb7eba998b736bb6"},
+    {file = "llama_index_core-0.10.21.post1-py3-none-any.whl", hash = "sha256:acfec6e39f9f38a1c64e60b953bdf787862abd9a797c0dc3348be34ed67493e7"},
+    {file = "llama_index_core-0.10.21.post1.tar.gz", hash = "sha256:0c212d14ffc368114099e881e9e3fa2d073d5186be07ec9db46070d475db14e0"},
 ]
 
 [package.dependencies]
@@ -4309,4 +4309,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "4adead56bef8bfedcfd46741463c3ad8758de84d9c2c03ef493861f598b9e267"
+content-hash = "4157199988ba7ed03ed0d3b3a94988bcc69ad693ef696b39edaa71421061e956"
diff --git a/pyproject.toml b/pyproject.toml
index 04e46b34fa75edc1aa3ef2b1a8821b0ac5f8e827..9e1ff001cd82c5f42ee22d7c5897c5f27ab5167b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,7 +44,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.20"
+version = "0.10.21"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
@@ -57,7 +57,7 @@ llama-index-agent-openai = "^0.1.4"
 llama-index-readers-file = "^0.1.4"
 llama-index-readers-llama-parse = "^0.1.2"
 llama-index-indices-managed-llama-cloud = "^0.1.2"
-llama-index-core = "^0.10.20"
+llama-index-core = "^0.10.21.post1"
 llama-index-multi-modal-llms-openai = "^0.1.3"
 llama-index-cli = "^0.1.2"