diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml
index a59ec960cbeee3a0550bd2b3ef73416f0bd763d4..fd64f393c9ab5d5a96ccd3c8f7cef738c45c3a3a 100644
--- a/llama-index-core/pyproject.toml
+++ b/llama-index-core/pyproject.toml
@@ -42,7 +42,7 @@ name = "llama-index-core"
 packages = [{include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.3"
+version = "0.10.5"
 
 [tool.poetry.dependencies]
 SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
diff --git a/llama-index-finetuning/llama_index/finetuning/embeddings/adapter.py b/llama-index-finetuning/llama_index/finetuning/embeddings/adapter.py
index 1d1a394832ef3dfb89af5ede1f9db9d6cbdaff7f..b719ae619d56f8d77fbc4fa8e657917e6bae418e 100644
--- a/llama-index-finetuning/llama_index/finetuning/embeddings/adapter.py
+++ b/llama-index-finetuning/llama_index/finetuning/embeddings/adapter.py
@@ -50,7 +50,7 @@ class EmbeddingAdapterFinetuneEngine(BaseEmbeddingFinetuneEngine):
     ) -> None:
         """Init params."""
         import torch
-        from llama_index.finetuning.embeddings.adapter_utils import (
+        from llama_index.embeddings.adapter import (
             BaseAdapter,
             LinearLayer,
         )
@@ -108,7 +108,7 @@ class EmbeddingAdapterFinetuneEngine(BaseEmbeddingFinetuneEngine):
             **kwargs (Any): Additional kwargs (see __init__)
 
         """
-        from llama_index.finetuning.embeddings.adapter_utils import LinearLayer
+        from llama_index.embeddings.adapter import LinearLayer
 
         model_cls = model_cls or LinearLayer
         model = model_cls.load(model_path)
diff --git a/llama-index-finetuning/llama_index/finetuning/embeddings/adapter_utils.py b/llama-index-finetuning/llama_index/finetuning/embeddings/adapter_utils.py
index 495f6689c9b0253d29cd7849bc1683282e70291e..5f51874f8dae35901be379d9fdbbe09fe5067d42 100644
--- a/llama-index-finetuning/llama_index/finetuning/embeddings/adapter_utils.py
+++ b/llama-index-finetuning/llama_index/finetuning/embeddings/adapter_utils.py
@@ -6,7 +6,7 @@ from typing import Any, Callable, Dict, List, Optional, Type
 import torch
 import transformers
 from llama_index.core.utils import print_text
-from llama_index.embeddings.adapter.utils import BaseAdapter
+from llama_index.embeddings.adapter import BaseAdapter
 from sentence_transformers.util import cos_sim
 from torch import Tensor, nn
 from torch.optim import Optimizer
diff --git a/llama-index-finetuning/pyproject.toml b/llama-index-finetuning/pyproject.toml
index b073a6e23459ced30ac09122a59c533d51be40a1..6c5b63661f15c67135b745c65057eb0718d01d64 100644
--- a/llama-index-finetuning/pyproject.toml
+++ b/llama-index-finetuning/pyproject.toml
@@ -24,7 +24,7 @@ description = "llama-index finetuning"
 license = "MIT"
 name = "llama-index-finetuning"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
@@ -32,7 +32,7 @@ llama-index-core = "^0.10.1"
 llama-index-llms-openai = "^0.1.1"
 llama-index-llms-gradient = "^0.1.1"
 llama-index-postprocessor-cohere-rerank = "^0.1.1"
-llama-index-embeddings-adapter = "^0.1.1"
+llama-index-embeddings-adapter = "^0.1.2"
 sentence-transformers = "^2.3.0"
 
 [tool.poetry.group.dev.dependencies]
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-adapter/llama_index/embeddings/adapter/__init__.py b/llama-index-integrations/embeddings/llama-index-embeddings-adapter/llama_index/embeddings/adapter/__init__.py
index 2376cd580b2d718365f179f9e0f614349ed4abda..bbdfb36e5536a399c9c3408a9ec0f94c54ba2c85 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-adapter/llama_index/embeddings/adapter/__init__.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-adapter/llama_index/embeddings/adapter/__init__.py
@@ -1,3 +1,12 @@
-from llama_index.embeddings.adapter.base import AdapterEmbeddingModel
+from llama_index.embeddings.adapter.base import (
+    AdapterEmbeddingModel,
+    LinearAdapterEmbeddingModel,
+)
+from llama_index.embeddings.adapter.utils import BaseAdapter, LinearLayer
 
-__all__ = ["AdapterEmbeddingModel"]
+__all__ = [
+    "AdapterEmbeddingModel",
+    "LinearAdapterEmbeddingModel",
+    "BaseAdapter",
+    "LinearLayer",
+]
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-adapter/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-adapter/pyproject.toml
index 9a8c719a27aa8e3567dc281f6eb2505564109427..c3c2fc05ac5a7d21d0bc1590b6a0755d85faf1d5 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-adapter/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-adapter/pyproject.toml
@@ -24,7 +24,7 @@ description = "llama-index embeddings adapter integration"
 license = "MIT"
 name = "llama-index-embeddings-adapter"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nomic/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-nomic/pyproject.toml
index 9bf3662c665e0fc032d47044a1292bbdebe18f73..e6e3b0c19139a7184fcec2a677352323d7733c39 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nomic/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nomic/pyproject.toml
@@ -8,7 +8,7 @@ check-hidden = true
 skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
 
 [tool.llamahub]
-classes = ["NomicEmbedding"]
+classes = ["NomicEmbedding", "NomicHFEmbedding"]
 contains_example = false
 import_path = "llama_index.embeddings.nomic"
 
@@ -24,7 +24,7 @@ description = "llama-index embeddings nomic integration"
 license = "MIT"
 name = "llama-index-embeddings-nomic"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
diff --git a/llama-index-integrations/llms/llama-index-llms-nvidia-tensorrt/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-nvidia-tensorrt/pyproject.toml
index a76c0adfc1576411d352da8cc739a157d0759f3c..8620cd2ef1fee4580d9861b159c6f0fe79176388 100644
--- a/llama-index-integrations/llms/llama-index-llms-nvidia-tensorrt/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-nvidia-tensorrt/pyproject.toml
@@ -24,7 +24,7 @@ description = "llama-index llms nvidia tensorrt integration"
 license = "MIT"
 name = "llama-index-llms-nvidia-tensorrt"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
diff --git a/llama-index-integrations/llms/llama-index-llms-vllm/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-vllm/pyproject.toml
index bd5499519609c1c6e7f62d8e3eb5ef4b72ebd236..3e08676380b091d9b9ea4db095f0ffb635f02d21 100644
--- a/llama-index-integrations/llms/llama-index-llms-vllm/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-vllm/pyproject.toml
@@ -24,7 +24,7 @@ description = "llama-index llms vllm integration"
 license = "MIT"
 name = "llama-index-llms-vllm"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
diff --git a/llama-index-integrations/readers/llama-index-readers-minio/README.md b/llama-index-integrations/readers/llama-index-readers-minio/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0bb5d20dd2d13cda46ddf991fffc178f5a661dd
--- /dev/null
+++ b/llama-index-integrations/readers/llama-index-readers-minio/README.md
@@ -0,0 +1,29 @@
+# Minio Reader
+
+## Install
+
+`pip install llama-index-readers-minio`
+
+## Import
+
+`from llama_index.readers.minio import MinioReader, BotoMinioReader`
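+
+## Usage
+
+A minimal sketch of loading every object in a bucket as documents. The bucket name,
+endpoint, and credential values are placeholders, and the keyword parameter names are
+assumptions; check the reader's signature for your installed version.
+
+```python
+from llama_index.readers.minio import MinioReader
+
+# Connect to a MinIO deployment and read the bucket contents into Documents.
+reader = MinioReader(
+    bucket="my-bucket",                # assumed bucket name
+    minio_endpoint="localhost:9000",   # assumed local MinIO endpoint
+    minio_secure=False,
+    minio_access_key="YOUR_ACCESS_KEY",
+    minio_secret_key="YOUR_SECRET_KEY",
+)
+documents = reader.load_data()
+```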
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-mongodb/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-mongodb/pyproject.toml
index b9d0fc834ab4c33313a279653e7f3bb4fc4b227c..de01ba5aae5e6c185054219d9e2ae6742ad95905 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-mongodb/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-mongodb/pyproject.toml
@@ -24,7 +24,7 @@ description = "llama-index vector_stores mongodb integration"
 license = "MIT"
 name = "llama-index-vector-stores-mongodb"
 readme = "README.md"
-version = "0.1.2"
+version = "0.1.3"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
diff --git a/pyproject.toml b/pyproject.toml
index 619039efd0206918cb7065078d5d7e2d06d730a8..aff6f76f1c2dd2a38fb8c3fd333c7057ce47b619 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -41,7 +41,7 @@ name = "llama-index"
 packages = [{from = "_llama-index", include = "llama_index"}]
 readme = "README.md"
 repository = "https://github.com/run-llama/llama_index"
-version = "0.10.3"
+version = "0.10.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"