diff --git a/llama-index-cli/CHANGELOG.MD b/llama-index-cli/CHANGELOG.MD
index 109adf94b711b175987f0b62b5c48503f884f232..1e362cae2bfa8c59d3814f07dcc232f9ad5162c3 100644
--- a/llama-index-cli/CHANGELOG.MD
+++ b/llama-index-cli/CHANGELOG.MD
@@ -1,5 +1,10 @@
 # CHANGELOG
 
+## [0.1.1] - 2024-02-20
+
+- Upgrades ported over from core
+- Add tool.llamahub section in pyproject template
+
 ## [0.1.0] - 2024-02-15
 
 - Initial release of cli (ripped out from core)
diff --git a/llama-index-cli/llama_index/cli/command_line.py b/llama-index-cli/llama_index/cli/command_line.py
index 34af87a864c570a7e231a369dd93ae2f5bba4a2e..34d092eafddf4bdb4702007fd5dae1fe6920217a 100644
--- a/llama-index-cli/llama_index/cli/command_line.py
+++ b/llama-index-cli/llama_index/cli/command_line.py
@@ -1,9 +1,8 @@
 import argparse
 from typing import Any, Optional
 
-from llama_index.cli.rag.base import RagCLI, default_ragcli_persist_dir
-from llama_index.cli.upgrade.base import upgrade_dir, upgrade_file
-from llama_index.cli.new_package.base import init_new_package
+from llama_index.cli.rag import RagCLI, default_ragcli_persist_dir
+from llama_index.cli.upgrade import upgrade_dir, upgrade_file
 from llama_index.core.ingestion import IngestionCache, IngestionPipeline
 from llama_index.core.llama_dataset.download import (
     LLAMA_DATASETS_LFS_URL,
@@ -18,6 +17,8 @@ from llama_index.core.llama_pack.download import (
 from llama_index.core.storage.docstore import SimpleDocumentStore
 from llama_index.core.text_splitter import SentenceSplitter
 
+from llama_index.cli.new_package.base import init_new_package
+
 
 def handle_init_package(
     name: str, kind: str, prefix: Optional[str] = None, **kwargs: Any
@@ -98,7 +99,7 @@ def default_rag_cli() -> RagCLI:
 
         from llama_index.vector_stores.chroma import (
             ChromaVectorStore,
-        )
+        )  # pants: no-infer-dep
     except ImportError:
         ChromaVectorStore = None
 
@@ -133,7 +134,7 @@ def default_rag_cli() -> RagCLI:
         print(
             "Default RagCLI was not built. There are packages missing. Please"
             " install required dependencies by running "
-            "`pip install llama-index-embeddings-openai llama-index-llms-openai chromadb llama-index-vector-stores-chroma`"
+            "`pip install llama-index-embeddings-openai llama-index-llms-openai chromadb llama-index-vector-stores-chroma`"
         )
         return None
 
@@ -146,7 +147,7 @@ def main() -> None:
 
     # llama rag command
     llamarag_parser = subparsers.add_parser(
-        "rag", help="Ask a question to a document or a directory of documents."
+        "rag", help="Ask a question to a document / a directory of documents."
     )
     RagCLI.add_parser_args(llamarag_parser, default_rag_cli)
 
diff --git a/llama-index-cli/llama_index/cli/new_package/__init__.py b/llama-index-cli/llama_index/cli/new_package/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..ccf81ae51a5138a160493adec508bc4d630bebcf 100644
--- a/llama-index-cli/llama_index/cli/new_package/__init__.py
+++ b/llama-index-cli/llama_index/cli/new_package/__init__.py
@@ -0,0 +1,4 @@
+from llama_index.cli.new_package.base import init_new_package
+
+
+__all__ = ["init_new_package"]
diff --git a/llama-index-cli/llama_index/cli/new_package/templates/pyproject.py b/llama-index-cli/llama_index/cli/new_package/templates/pyproject.py
index 7efdadb178a284ef21cbe1fb2dafec03038ef8b3..7c40fa5a4bc08f9a7a67175a99aeb24acd6039cc 100644
--- a/llama-index-cli/llama_index/cli/new_package/templates/pyproject.py
+++ b/llama-index-cli/llama_index/cli/new_package/templates/pyproject.py
@@ -9,6 +9,13 @@ check-hidden = true
 # work through many typos (--write-changes and --interactive will help)
 skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
 
+# [tool.llamahub]
+# contains_example = false
+# import_path = "<import-path>"
+
+# [tool.llamahub.class_authors]
+# CLASS = "github-username"
+
 [tool.mypy]
 disallow_untyped_defs = true
 # Remove venv skip when integrated with pre-commit
diff --git a/llama-index-cli/llama_index/cli/rag/__init__.py b/llama-index-cli/llama_index/cli/rag/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..39d59f694eb8c497579113d4d754abe8546523fd 100644
--- a/llama-index-cli/llama_index/cli/rag/__init__.py
+++ b/llama-index-cli/llama_index/cli/rag/__init__.py
@@ -0,0 +1,4 @@
+from llama_index.cli.rag.base import RagCLI, default_ragcli_persist_dir
+
+
+__all__ = ["RagCLI", "default_ragcli_persist_dir"]
diff --git a/llama-index-cli/llama_index/cli/upgrade/__init__.py b/llama-index-cli/llama_index/cli/upgrade/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..ccf81ae51a5138a160493adec508bc4d630bebcf 100644
--- a/llama-index-cli/llama_index/cli/upgrade/__init__.py
+++ b/llama-index-cli/llama_index/cli/upgrade/__init__.py
@@ -0,0 +1,4 @@
+from llama_index.cli.upgrade.base import upgrade_dir, upgrade_file
+
+
+__all__ = ["upgrade_dir", "upgrade_file"]
diff --git a/llama-index-cli/llama_index/cli/upgrade/base.py b/llama-index-cli/llama_index/cli/upgrade/base.py
index 20db64956f3aaca298042c3adb5e09e7ea88148e..291a53f72d62d5f82d6c1b88fe68fdc6c9af062a 100644
--- a/llama-index-cli/llama_index/cli/upgrade/base.py
+++ b/llama-index-cli/llama_index/cli/upgrade/base.py
@@ -48,12 +48,16 @@ def _parse_from_imports(
                         new_imports[new_import_parent].append(module)
                 else:
                     print(f"Module not found: {module}\nSwitching to core")
+                    # get back the llama_index module that's being imported.
                     new_import_parent = (
-                        imported_modules[0]
-                        .split(" import ")[0]
-                        .split("from ")[-1]
-                        .replace("llama_index", "llama_index.core")
+                        imported_modules[0].split(" import ")[0].split("from ")[-1]
                     )
+                    # if the parent contains `llama_index.core` already, then skip
+                    if "llama_index.core" not in new_import_parent:
+                        new_import_parent = new_import_parent.replace(
+                            "llama_index", "llama_index.core"
+                        )
+
                     if new_import_parent not in new_imports:
                         new_imports[new_import_parent] = [module]
                     else:
diff --git a/llama-index-cli/poetry.lock b/llama-index-cli/poetry.lock
index 3ed6c4dc07ba23ee22ef1231aa37c3ce4d52f989..67a96dd151ac57d4c069b4d97e2b9aca8d5c21c9 100644
--- a/llama-index-cli/poetry.lock
+++ b/llama-index-cli/poetry.lock
@@ -2268,13 +2268,13 @@ files = [
 
 [[package]]
 name = "llama-index-core"
-version = "0.10.3"
+version = "0.10.8.post1"
 description = "Interface between LLMs and your data"
 optional = false
 python-versions = ">=3.8.1,<4.0"
 files = [
-    {file = "llama_index_core-0.10.3-py3-none-any.whl", hash = "sha256:711e2766cb1690a394a209dc6155d1b7a05b44fd6b7e08084d6b96c00bef5dd0"},
-    {file = "llama_index_core-0.10.3.tar.gz", hash = "sha256:5cced2c56bd640311835094fe6946ce6498e6d31dffcdb3df7583ff1aa3861b8"},
+    {file = "llama_index_core-0.10.8.post1-py3-none-any.whl", hash = "sha256:51b736b22818cb0b117a1486be54cf05f9496e79feb5de7262be5ec5480d85ec"},
+    {file = "llama_index_core-0.10.8.post1.tar.gz", hash = "sha256:449fdd206901ca1e403b0c7fd021e52f9f0aa6fd121793f3062c442429a11ffd"},
 ]
 
 [package.dependencies]
@@ -2284,6 +2284,7 @@ deprecated = ">=1.2.9.3"
 dirtyjson = ">=1.0.8,<2.0.0"
 fsspec = ">=2023.5.0"
 httpx = "*"
+llamaindex-py-client = ">=0.1.13,<0.2.0"
 nest-asyncio = ">=1.5.8,<2.0.0"
 networkx = ">=3.0"
 nltk = ">=3.8.1,<4.0.0"
@@ -2353,6 +2354,21 @@ llama-index-core = ">=0.10.1,<0.11.0"
 onnxruntime = ">=1.17.0,<2.0.0"
 tokenizers = ">=0.15.1,<0.16.0"
 
+[[package]]
+name = "llamaindex-py-client"
+version = "0.1.13"
+description = ""
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+    {file = "llamaindex_py_client-0.1.13-py3-none-any.whl", hash = "sha256:02400c90655da80ae373e0455c829465208607d72462f1898fd383fdfe8dabce"},
+    {file = "llamaindex_py_client-0.1.13.tar.gz", hash = "sha256:3bd9b435ee0a78171eba412dea5674d813eb5bf36e577d3c7c7e90edc54900d9"},
+]
+
+[package.dependencies]
+httpx = ">=0.20.0"
+pydantic = ">=1.10"
+
 [[package]]
 name = "markupsafe"
 version = "2.1.5"
@@ -5999,4 +6015,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<3.12"
-content-hash = "a0c6f7be872d5ca99b5cbba9da4916e5b54805026cdebbda71f7320e0a0f03df"
+content-hash = "fe397103ccb40b32f60b0547640e6670c4ee906e1f0bdd8e78d7034400e865f8"
diff --git a/llama-index-cli/pyproject.toml b/llama-index-cli/pyproject.toml
index ea9fda4493b1eef6aa2d7ca071a902cff5590c61..29e7c5c87562707e456bdb1e41d5bab4b8f25ab3 100644
--- a/llama-index-cli/pyproject.toml
+++ b/llama-index-cli/pyproject.toml
@@ -31,11 +31,11 @@ maintainers = [
 name = "llama-index-cli"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.1.0"
+version = "0.1.1"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.12"
-llama-index-core = "^0.10.3"
+llama-index-core = "^0.10.8.post1"
 llama-index-vector-stores-chroma = "^0.1.1"
 llama-index-embeddings-openai = "^0.1.1"
 llama-index-llms-openai = "^0.1.1"