diff --git a/helpers/index.ts b/helpers/index.ts
index 237bbecae695ca7912c2660d28a574c620d89df2..70a112444f03b7c4083e23664b007f9fb50c59b0 100644
--- a/helpers/index.ts
+++ b/helpers/index.ts
@@ -60,6 +60,12 @@ const createEnvLocalFile = async (
       content += `PG_CONNECTION_STRING=\n`;
       break;
     }
+    case "pinecone": {
+      content += `PINECONE_API_KEY=\n`;
+      content += `PINECONE_ENVIRONMENT=\n`;
+      content += `PINECONE_INDEX_NAME=\n`;
+      break;
+    }
   }
 
   switch (opts?.dataSource?.type) {
diff --git a/helpers/python.ts b/helpers/python.ts
index 6ba0a63e1b6de278e64514c5f4e418c806fbe4c1..39be2f32e8eacbf8b8213558aeb89373770465b8 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -6,8 +6,12 @@ import terminalLink from "terminal-link";
 import { copy } from "./copy";
 import { templatesDir } from "./dir";
 import { isPoetryAvailable, tryPoetryInstall } from "./poetry";
-import { getToolConfig } from "./tools";
-import { InstallTemplateArgs, TemplateVectorDB } from "./types";
+import { Tool } from "./tools";
+import {
+  InstallTemplateArgs,
+  TemplateDataSource,
+  TemplateVectorDB,
+} from "./types";
 
 interface Dependency {
   name: string;
@@ -15,25 +19,60 @@ interface Dependency {
   extras?: string[];
 }
 
-const getAdditionalDependencies = (vectorDb?: TemplateVectorDB) => {
+const getAdditionalDependencies = (
+  vectorDb?: TemplateVectorDB,
+  dataSource?: TemplateDataSource,
+  tools?: Tool[],
+) => {
   const dependencies: Dependency[] = [];
 
+  // Add vector db dependencies
   switch (vectorDb) {
     case "mongo": {
       dependencies.push({
-        name: "pymongo",
-        version: "^4.6.1",
+        name: "llama-index-vector-stores-mongodb",
+        version: "^0.1.3",
       });
       break;
     }
     case "pg": {
       dependencies.push({
-        name: "llama-index",
-        extras: ["postgres"],
+        name: "llama-index-vector-stores-postgres",
+        version: "^0.1.1",
       });
+      break;
     }
+    case "pinecone": {
+      dependencies.push({
+        name: "llama-index-vector-stores-pinecone",
+        version: "^0.1.3",
+      });
+      break;
+    }
   }
 
+  // Add data source dependencies
+  const dataSourceType = dataSource?.type;
+  if (dataSourceType === "file" || dataSourceType === "folder") {
+    // llama-index-readers-file (pdf, excel, csv) is already included in llama_index package
+    dependencies.push({
+      name: "docx2txt",
+      version: "^0.8",
+    });
+  } else if (dataSourceType === "web") {
+    dependencies.push({
+      name: "llama-index-readers-web",
+      version: "^0.1.6",
+    });
+  }
+
+  // Add tools dependencies
+  tools?.forEach((tool) => {
+    tool.dependencies?.forEach((dep) => {
+      dependencies.push(dep);
+    });
+  });
+
   return dependencies;
 };
 
@@ -190,7 +228,7 @@ export const installPythonTemplate = async ({
       // Write tools_config.json
       const configContent: Record<string, any> = {};
       tools.forEach((tool) => {
-        configContent[tool] = getToolConfig(tool) ?? {};
+        configContent[tool.name] = tool.config ?? {};
       });
       const configFilePath = path.join(root, "tools_config.json");
       await fs.writeFile(
@@ -217,7 +255,11 @@ export const installPythonTemplate = async ({
     }
   }
 
-  const addOnDependencies = getAdditionalDependencies(vectorDb);
+  const addOnDependencies = getAdditionalDependencies(
+    vectorDb,
+    dataSource,
+    tools,
+  );
   await addDependencies(root, addOnDependencies);
 
   if (postInstallAction !== "none") {
diff --git a/helpers/tools.ts b/helpers/tools.ts
index eae53d205a20e4a8b188c9522fd0c989c8e47813..f7b844cbb195bfc4c69ebf390a78d884887df2e3 100644
--- a/helpers/tools.ts
+++ b/helpers/tools.ts
@@ -1,33 +1,71 @@
+import { red } from "picocolors";
+
 export type Tool = {
   display: string;
   name: string;
   config?: Record<string, any>;
+  dependencies?: ToolDependency[];
+};
+export type ToolDependency = {
+  name: string;
+  version?: string;
 };
 
 export const supportedTools: Tool[] = [
   {
     display: "Google Search (configuration required after installation)",
-    name: "google_search",
+    name: "google.GoogleSearchToolSpec",
     config: {
       engine:
         "Your search engine id, see https://developers.google.com/custom-search/v1/overview#prerequisites",
       key: "Your search api key",
       num: 2,
     },
+    dependencies: [
+      {
+        name: "llama-index-tools-google",
+        version: "^0.1.2",
+      },
+    ],
   },
   {
     display: "Wikipedia",
-    name: "wikipedia",
+    name: "wikipedia.WikipediaToolSpec",
+    dependencies: [
+      {
+        name: "llama-index-tools-wikipedia",
+        version: "^0.1.2",
+      },
+    ],
   },
 ];
 
-export const getToolConfig = (name: string) => {
-  return supportedTools.find((tool) => tool.name === name)?.config;
+export const getTool = (toolName: string): Tool | undefined => {
+  return supportedTools.find((tool) => tool.name === toolName);
+};
+
+export const getTools = (toolsName: string[]): Tool[] => {
+  const tools: Tool[] = [];
+  for (const toolName of toolsName) {
+    const tool = getTool(toolName);
+    if (!tool) {
+      console.error(
+        red(
+          `Error: Tool '${toolName}' is not supported. Supported tools are: ${supportedTools
+            .map((t) => t.name)
+            .join(", ")}`,
+        ),
+      );
+      process.exit(1);
+    }
+    tools.push(tool);
+  }
+  return tools;
 };
 
-export const toolsRequireConfig = (tools?: string[]): boolean => {
+export const toolsRequireConfig = (tools?: Tool[]): boolean => {
   if (tools) {
-    return tools.some((tool) => getToolConfig(tool));
+    return tools.some((tool) => Object.keys(tool.config || {}).length > 0);
   }
   return false;
 };
diff --git a/helpers/types.ts b/helpers/types.ts
index 19253cc152f7aaa286e3b9554b1adf0518bbb473..019a1e2a29e7a8747eed7ca0564e1e1220aca27e 100644
--- a/helpers/types.ts
+++ b/helpers/types.ts
@@ -1,4 +1,5 @@
 import { PackageManager } from "../helpers/get-pkg-manager";
+import { Tool } from "./tools";
 
 export type TemplateType = "simple" | "streaming" | "community" | "llamapack";
 export type TemplateFramework = "nextjs" | "express" | "fastapi";
@@ -41,5 +42,5 @@ export interface InstallTemplateArgs {
   vectorDb?: TemplateVectorDB;
   externalPort?: number;
   postInstallAction?: TemplatePostInstallAction;
-  tools?: string[];
+  tools?: Tool[];
 }
diff --git a/index.ts b/index.ts
index 6fa19cc5501966ac6bca23de0dc417cf306e9c52..3dbc9ffd0f1ead174a8c6f80117acffee5458af6 100644
--- a/index.ts
+++ b/index.ts
@@ -11,7 +11,7 @@ import { createApp } from "./create-app";
 import { getPkgManager } from "./helpers/get-pkg-manager";
 import { isFolderEmpty } from "./helpers/is-folder-empty";
 import { runApp } from "./helpers/run-app";
-import { supportedTools } from "./helpers/tools";
+import { getTools } from "./helpers/tools";
 import { validateNpmName } from "./helpers/validate-pkg";
 import packageJson from "./package.json";
 import { QuestionArgs, askQuestions, onPromptState } from "./questions";
@@ -153,7 +153,7 @@ const program = new Commander.Command(packageJson.name)
     "--tools <tools>",
     `
 
-  Specify the tools you want to use by providing a comma-separated list. For example, 'google_search,wikipedia'. Use 'none' to not using any tools.
+  Specify the tools you want to use by providing a comma-separated list. For example, 'wikipedia.WikipediaToolSpec,google.GoogleSearchToolSpec'. Use 'none' if you don't want to use any tools.
 `,
   )
   .allowUnknownOption()
@@ -168,19 +168,7 @@ if (process.argv.includes("--tools")) {
   if (program.tools === "none") {
     program.tools = [];
   } else {
-    program.tools = program.tools.split(",");
-    // Check if tools are available
-    const toolsName = supportedTools.map((tool) => tool.name);
-    program.tools.forEach((tool: string) => {
-      if (!toolsName.includes(tool)) {
-        console.error(
-          `Error: Tool '${tool}' is not supported. Supported tools are: ${toolsName.join(
-            ", ",
-          )}`,
-        );
-        process.exit(1);
-      }
-    });
+    program.tools = getTools(program.tools.split(","));
   }
 }
 
diff --git a/questions.ts b/questions.ts
index b0cca28cf09f31036a18e46a68f18f13f9bb830f..f8a086a49836f38156109750f9ec2e1e0df51654 100644
--- a/questions.ts
+++ b/questions.ts
@@ -583,13 +583,16 @@ export const askQuestions = async (
         title: tool.display,
         value: tool.name,
       }));
-      const { tools } = await prompts({
+      const { toolsName } = await prompts({
         type: "multiselect",
-        name: "tools",
+        name: "toolsName",
         message:
           "Would you like to build an agent using tools? If so, select the tools here, otherwise just press enter",
         choices: toolChoices,
       });
+      const tools = toolsName?.map((tool: string) =>
+        supportedTools.find((t) => t.name === tool),
+      );
       program.tools = tools;
       preferences.tools = tools;
     }
diff --git a/templates/components/engines/python/agent/__init__.py b/templates/components/engines/python/agent/__init__.py
index 86e777edbd3f21c3e62b7b20b4cdff59c4d545b6..1826d0e0a9f0fccb38b3a27a5098870286988ada 100644
--- a/templates/components/engines/python/agent/__init__.py
+++ b/templates/components/engines/python/agent/__init__.py
@@ -1,33 +1,8 @@
-import os
-
-from typing import Any, Optional
-from llama_index.llms import LLM
-from llama_index.agent import AgentRunner
-
+from llama_index.core.settings import Settings
+from llama_index.core.agent import AgentRunner
+from llama_index.core.tools.query_engine import QueryEngineTool
 from app.engine.tools import ToolFactory
 from app.engine.index import get_index
-from llama_index.agent import ReActAgent
-from llama_index.tools.query_engine import QueryEngineTool
-
-
-def create_agent_from_llm(
-    llm: Optional[LLM] = None,
-    **kwargs: Any,
-) -> AgentRunner:
-    from llama_index.agent import OpenAIAgent, ReActAgent
-    from llama_index.llms.openai import OpenAI
-    from llama_index.llms.openai_utils import is_function_calling_model
-
-    if isinstance(llm, OpenAI) and is_function_calling_model(llm.model):
-        return OpenAIAgent.from_tools(
-            llm=llm,
-            **kwargs,
-        )
-    else:
-        return ReActAgent.from_tools(
-            llm=llm,
-            **kwargs,
-        )
 
 
 def get_chat_engine():
@@ -35,7 +10,6 @@ def get_chat_engine():
 
     # Add query tool
     index = get_index()
-    llm = index.service_context.llm
     query_engine = index.as_query_engine(similarity_top_k=3)
     query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
     tools.append(query_engine_tool)
@@ -43,8 +17,8 @@ def get_chat_engine():
     # Add additional tools
     tools += ToolFactory.from_env()
 
-    return create_agent_from_llm(
-        llm=llm,
+    return AgentRunner.from_llm(
+        llm=Settings.llm,
         tools=tools,
         verbose=True,
     )
diff --git a/templates/components/engines/python/agent/tools.py b/templates/components/engines/python/agent/tools.py
index 9fb9d488f74192852094e90644582a2ae896e0ee..fafc1fcc874477893e7ecc4ae9add4bcb5e062b1 100644
--- a/templates/components/engines/python/agent/tools.py
+++ b/templates/components/engines/python/agent/tools.py
@@ -1,8 +1,8 @@
 import json
 import importlib
 
-from llama_index.tools.tool_spec.base import BaseToolSpec
-from llama_index.tools.function_tool import FunctionTool
+from llama_index.core.tools.tool_spec.base import BaseToolSpec
+from llama_index.core.tools.function_tool import FunctionTool
 
 
 class ToolFactory:
@@ -10,9 +10,9 @@ class ToolFactory:
     @staticmethod
     def create_tool(tool_name: str, **kwargs) -> list[FunctionTool]:
         try:
-            module_name = f"llama_hub.tools.{tool_name}.base"
+            tool_package, tool_cls_name = tool_name.split(".")
+            module_name = f"llama_index.tools.{tool_package}"
             module = importlib.import_module(module_name)
-            tool_cls_name = tool_name.title().replace("_", "") + "ToolSpec"
             tool_class = getattr(module, tool_cls_name)
             tool_spec: BaseToolSpec = tool_class(**kwargs)
             return tool_spec.to_tool_list()
diff --git a/templates/components/loaders/python/file/loader.py b/templates/components/loaders/python/file/loader.py
index d343cec34a60f4c3004d9c00c53f7f3b7734bf82..dd7627a7199cacf625bfd14b0e33ab1bd1e75892 100644
--- a/templates/components/loaders/python/file/loader.py
+++ b/templates/components/loaders/python/file/loader.py
@@ -1,7 +1,5 @@
-import os
+from llama_index.core.readers import SimpleDirectoryReader
 from app.engine.constants import DATA_DIR
-from llama_index import VectorStoreIndex, download_loader
-from llama_index import SimpleDirectoryReader
 
 
 def get_documents():
diff --git a/templates/components/loaders/python/web/loader.py b/templates/components/loaders/python/web/loader.py
index 026dd1019e555d438b896a2cf9a7e7161fa1d189..bc6d0496dda8fc7a9e0b1a79e1a4084fec6a6cb6 100644
--- a/templates/components/loaders/python/web/loader.py
+++ b/templates/components/loaders/python/web/loader.py
@@ -1,10 +1,8 @@
 import os
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.readers.web import WholeSiteReader
 
 
 def get_documents():
-    WholeSiteReader = download_loader("WholeSiteReader")
-
     # Initialize the scraper with a prefix URL and maximum depth
     scraper = WholeSiteReader(
         prefix=os.environ.get("URL_PREFIX"), max_depth=int(os.environ.get("MAX_DEPTH"))
diff --git a/templates/components/vectordbs/python/mongo/context.py b/templates/components/vectordbs/python/mongo/context.py
deleted file mode 100644
index ceb8a50ae0cd02425aeb5fa4436df374590a4fad..0000000000000000000000000000000000000000
--- a/templates/components/vectordbs/python/mongo/context.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from llama_index import ServiceContext
-
-from app.context import create_base_context
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-
-
-def create_service_context():
-    base = create_base_context()
-    return ServiceContext.from_defaults(
-        llm=base.llm,
-        embed_model=base.embed_model,
-        chunk_size=CHUNK_SIZE,
-        chunk_overlap=CHUNK_OVERLAP,
-    )
diff --git a/templates/components/vectordbs/python/mongo/generate.py b/templates/components/vectordbs/python/mongo/generate.py
index f52b3b48b1fdecd2102b172d1a85d356a2b2338c..667308394fa302c9fb7a2a4af48882109bad040c 100644
--- a/templates/components/vectordbs/python/mongo/generate.py
+++ b/templates/components/vectordbs/python/mongo/generate.py
@@ -1,26 +1,21 @@
 from dotenv import load_dotenv
 
 load_dotenv()
+
 import os
 import logging
-from llama_index.vector_stores import MongoDBAtlasVectorSearch
-
+from llama_index.core.storage import StorageContext
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
+from app.engine.settings import init_settings
 from app.engine.constants import DATA_DIR
-from app.engine.context import create_service_context
 from app.engine.loader import get_documents
 
-
-from llama_index import (
-    SimpleDirectoryReader,
-    VectorStoreIndex,
-    StorageContext,
-)
-
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def generate_datasource(service_context):
+def generate_datasource():
     logger.info("Creating new index")
     # load the documents and create the index
     documents = get_documents()
@@ -32,7 +27,6 @@ def generate_datasource(service_context):
     storage_context = StorageContext.from_defaults(vector_store=store)
     VectorStoreIndex.from_documents(
         documents,
-        service_context=service_context,
         storage_context=storage_context,
         show_progress=True,  # this will show you a progress bar as the embeddings are created
     )
@@ -46,4 +40,5 @@ See https://github.com/run-llama/mongodb-demo/tree/main?tab=readme-ov-file#creat
 
 
 if __name__ == "__main__":
-    generate_datasource(create_service_context())
+    init_settings()
+    generate_datasource()
diff --git a/templates/components/vectordbs/python/mongo/index.py b/templates/components/vectordbs/python/mongo/index.py
index 173e7b57735246613ac94f310936fb6de2257a46..6dba7c1d05ddd8e77853f081632b3232a89bc7e2 100644
--- a/templates/components/vectordbs/python/mongo/index.py
+++ b/templates/components/vectordbs/python/mongo/index.py
@@ -1,23 +1,20 @@
 import logging
 import os
 
-from llama_index import (
-    VectorStoreIndex,
-)
-from llama_index.vector_stores import MongoDBAtlasVectorSearch
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
 
-from app.engine.context import create_service_context
+
+logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    service_context = create_service_context()
-    logger = logging.getLogger("uvicorn")
     logger.info("Connecting to index from MongoDB...")
     store = MongoDBAtlasVectorSearch(
         db_name=os.environ["MONGODB_DATABASE"],
         collection_name=os.environ["MONGODB_VECTORS"],
         index_name=os.environ["MONGODB_VECTOR_INDEX"],
     )
-    index = VectorStoreIndex.from_vector_store(store, service_context)
+    index = VectorStoreIndex.from_vector_store(store)
     logger.info("Finished connecting to index from MongoDB.")
     return index
diff --git a/templates/components/vectordbs/python/mongo/settings.py b/templates/components/vectordbs/python/mongo/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..d41f022766dba1af855fd29121ff87040f407e76
--- /dev/null
+++ b/templates/components/vectordbs/python/mongo/settings.py
@@ -0,0 +1,10 @@
+from llama_index.core.settings import Settings
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
+from app.settings import init_base_settings
+
+
+def init_settings():
+    init_base_settings()
+
+    Settings.chunk_size = CHUNK_SIZE
+    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/none/context.py b/templates/components/vectordbs/python/none/context.py
deleted file mode 100644
index ceb8a50ae0cd02425aeb5fa4436df374590a4fad..0000000000000000000000000000000000000000
--- a/templates/components/vectordbs/python/none/context.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from llama_index import ServiceContext
-
-from app.context import create_base_context
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-
-
-def create_service_context():
-    base = create_base_context()
-    return ServiceContext.from_defaults(
-        llm=base.llm,
-        embed_model=base.embed_model,
-        chunk_size=CHUNK_SIZE,
-        chunk_overlap=CHUNK_OVERLAP,
-    )
diff --git a/templates/components/vectordbs/python/none/generate.py b/templates/components/vectordbs/python/none/generate.py
index 7ff20012e9fbd1d3189ccf9527ef928ec1a85a12..29140e11299e022cc0a5ad3705ba966e0675d27f 100644
--- a/templates/components/vectordbs/python/none/generate.py
+++ b/templates/components/vectordbs/python/none/generate.py
@@ -1,32 +1,32 @@
-import logging
-
 from dotenv import load_dotenv
 
-from app.engine.constants import DATA_DIR, STORAGE_DIR
-from app.engine.context import create_service_context
-from app.engine.loader import get_documents
-
 load_dotenv()
 
-from llama_index import (
-    SimpleDirectoryReader,
+import logging
+from llama_index.core.indices import (
     VectorStoreIndex,
 )
+from app.engine.constants import DATA_DIR, STORAGE_DIR
+from app.engine.loader import get_documents
+from app.engine.settings import init_settings
+
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def generate_datasource(service_context):
+def generate_datasource():
     logger.info("Creating new index")
     # load the documents and create the index
     documents = get_documents()
-    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+    index = VectorStoreIndex.from_documents(
+        documents,
+    )
     # store it for later
     index.storage_context.persist(STORAGE_DIR)
     logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
 
 
 if __name__ == "__main__":
-    service_context = create_service_context()
-    generate_datasource(service_context)
+    init_settings()
+    generate_datasource()
diff --git a/templates/components/vectordbs/python/none/index.py b/templates/components/vectordbs/python/none/index.py
index 8e16975b5c12161aef0f1c35cab9de8fce18a025..4446eed0e9e2eeb0e33f93d34120d299b03c7495 100644
--- a/templates/components/vectordbs/python/none/index.py
+++ b/templates/components/vectordbs/python/none/index.py
@@ -2,24 +2,26 @@ import logging
 import os
 
 from app.engine.constants import STORAGE_DIR
-from app.engine.context import create_service_context
-from llama_index import (
-    StorageContext,
-    load_index_from_storage,
-)
+from app.engine.settings import init_settings
+from llama_index.core.storage import StorageContext
+from llama_index.core.indices import load_index_from_storage
+
+logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    service_context = create_service_context()
+    # Init default app global settings
+    init_settings()
+
     # check if storage already exists
     if not os.path.exists(STORAGE_DIR):
         raise Exception(
             "StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first"
         )
-    logger = logging.getLogger("uvicorn")
+
     # load the existing index
     logger.info(f"Loading index from {STORAGE_DIR}...")
     storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-    index = load_index_from_storage(storage_context, service_context=service_context)
+    index = load_index_from_storage(storage_context)
     logger.info(f"Finished loading index from {STORAGE_DIR}")
     return index
diff --git a/templates/components/vectordbs/python/none/settings.py b/templates/components/vectordbs/python/none/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..d41f022766dba1af855fd29121ff87040f407e76
--- /dev/null
+++ b/templates/components/vectordbs/python/none/settings.py
@@ -0,0 +1,10 @@
+from llama_index.core.settings import Settings
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
+from app.settings import init_base_settings
+
+
+def init_settings():
+    init_base_settings()
+
+    Settings.chunk_size = CHUNK_SIZE
+    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/pg/context.py b/templates/components/vectordbs/python/pg/context.py
deleted file mode 100644
index ceb8a50ae0cd02425aeb5fa4436df374590a4fad..0000000000000000000000000000000000000000
--- a/templates/components/vectordbs/python/pg/context.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from llama_index import ServiceContext
-
-from app.context import create_base_context
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-
-
-def create_service_context():
-    base = create_base_context()
-    return ServiceContext.from_defaults(
-        llm=base.llm,
-        embed_model=base.embed_model,
-        chunk_size=CHUNK_SIZE,
-        chunk_overlap=CHUNK_OVERLAP,
-    )
diff --git a/templates/components/vectordbs/python/pg/generate.py b/templates/components/vectordbs/python/pg/generate.py
index 5c77ee07e910349c19bd6320c359d3885aff879b..b184d3a4917972fdf05e77ac943b13883155a473 100644
--- a/templates/components/vectordbs/python/pg/generate.py
+++ b/templates/components/vectordbs/python/pg/generate.py
@@ -1,24 +1,21 @@
 from dotenv import load_dotenv
 
 load_dotenv()
+
 import logging
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.core.storage import StorageContext
 
 from app.engine.constants import DATA_DIR
-from app.engine.context import create_service_context
-from app.engine.utils import init_pg_vector_store_from_env
 from app.engine.loader import get_documents
-
-from llama_index import (
-    SimpleDirectoryReader,
-    VectorStoreIndex,
-    StorageContext,
-)
+from app.engine.settings import init_settings
+from app.engine.utils import init_pg_vector_store_from_env
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def generate_datasource(service_context):
+def generate_datasource():
     logger.info("Creating new index")
     # load the documents and create the index
     documents = get_documents()
@@ -26,7 +23,6 @@ def generate_datasource(service_context):
     storage_context = StorageContext.from_defaults(vector_store=store)
     VectorStoreIndex.from_documents(
         documents,
-        service_context=service_context,
         storage_context=storage_context,
         show_progress=True,  # this will show you a progress bar as the embeddings are created
     )
@@ -36,4 +32,5 @@ def generate_datasource(service_context):
 
 
 if __name__ == "__main__":
-    generate_datasource(create_service_context())
+    init_settings()
+    generate_datasource()
diff --git a/templates/components/vectordbs/python/pg/index.py b/templates/components/vectordbs/python/pg/index.py
index 368fb4321dc225a9020a7a514370ac93a72c42a5..9cce1b95b8c7474675043a920a9529dbb7c992dd 100644
--- a/templates/components/vectordbs/python/pg/index.py
+++ b/templates/components/vectordbs/python/pg/index.py
@@ -1,16 +1,17 @@
 import logging
-from llama_index import (
-    VectorStoreIndex,
-)
-from app.engine.context import create_service_context
+from llama_index.core.indices.vector_store import VectorStoreIndex
+from app.engine.settings import init_settings
 from app.engine.utils import init_pg_vector_store_from_env
 
+logger = logging.getLogger("uvicorn")
+
 
 def get_index():
-    service_context = create_service_context()
-    logger = logging.getLogger("uvicorn")
+    # Init default app global settings
+    init_settings()
+
     logger.info("Connecting to index from PGVector...")
     store = init_pg_vector_store_from_env()
-    index = VectorStoreIndex.from_vector_store(store, service_context)
+    index = VectorStoreIndex.from_vector_store(store)
     logger.info("Finished connecting to index from PGVector.")
     return index
diff --git a/templates/components/vectordbs/python/pg/settings.py b/templates/components/vectordbs/python/pg/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..d41f022766dba1af855fd29121ff87040f407e76
--- /dev/null
+++ b/templates/components/vectordbs/python/pg/settings.py
@@ -0,0 +1,10 @@
+from llama_index.core.settings import Settings
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
+from app.settings import init_base_settings
+
+
+def init_settings():
+    init_base_settings()
+
+    Settings.chunk_size = CHUNK_SIZE
+    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/pg/utils.py b/templates/components/vectordbs/python/pg/utils.py
index 808fda2c675e042c7748e2ad86c57d19ba707a60..39127846dfddb706a22fc1b20e3cef1bf98751a7 100644
--- a/templates/components/vectordbs/python/pg/utils.py
+++ b/templates/components/vectordbs/python/pg/utils.py
@@ -1,5 +1,5 @@
 import os
-from llama_index.vector_stores import PGVectorStore
+from llama_index.vector_stores.postgres import PGVectorStore
 from urllib.parse import urlparse
 from app.engine.constants import PGVECTOR_SCHEMA, PGVECTOR_TABLE
 
diff --git a/templates/components/vectordbs/python/pinecone/context.py b/templates/components/vectordbs/python/pinecone/context.py
deleted file mode 100644
index ceb8a50ae0cd02425aeb5fa4436df374590a4fad..0000000000000000000000000000000000000000
--- a/templates/components/vectordbs/python/pinecone/context.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from llama_index import ServiceContext
-
-from app.context import create_base_context
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-
-
-def create_service_context():
-    base = create_base_context()
-    return ServiceContext.from_defaults(
-        llm=base.llm,
-        embed_model=base.embed_model,
-        chunk_size=CHUNK_SIZE,
-        chunk_overlap=CHUNK_OVERLAP,
-    )
diff --git a/templates/components/vectordbs/python/pinecone/generate.py b/templates/components/vectordbs/python/pinecone/generate.py
index 8c0e1c0b42a88f23723bf8f54cdde1eb2f8b0ad4..0e6486f86f85021d768fa3feeb938c2067f12484 100644
--- a/templates/components/vectordbs/python/pinecone/generate.py
+++ b/templates/components/vectordbs/python/pinecone/generate.py
@@ -1,26 +1,21 @@
 from dotenv import load_dotenv
 
 load_dotenv()
+
 import os
 import logging
-from llama_index.vector_stores import PineconeVectorStore
-
+from llama_index.core.storage import StorageContext
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.vector_stores.pinecone import PineconeVectorStore
+from app.engine.settings import init_settings
 from app.engine.constants import DATA_DIR
-from app.engine.context import create_service_context
 from app.engine.loader import get_documents
 
-
-from llama_index import (
-    SimpleDirectoryReader,
-    VectorStoreIndex,
-    StorageContext,
-)
-
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def generate_datasource(service_context):
+def generate_datasource():
     logger.info("Creating new index")
     # load the documents and create the index
     documents = get_documents()
@@ -32,7 +27,6 @@ def generate_datasource(service_context):
     storage_context = StorageContext.from_defaults(vector_store=store)
     VectorStoreIndex.from_documents(
         documents,
-        service_context=service_context,
         storage_context=storage_context,
         show_progress=True,  # this will show you a progress bar as the embeddings are created
     )
@@ -42,4 +36,5 @@ def generate_datasource(service_context):
 
 
 if __name__ == "__main__":
-    generate_datasource(create_service_context())
+    init_settings()
+    generate_datasource()
diff --git a/templates/components/vectordbs/python/pinecone/index.py b/templates/components/vectordbs/python/pinecone/index.py
index 6e9b88102f97fe510e2772a8a92ce5e8111772e3..98824ffdc5f197ad9d3d0a3b546ffbee64f4f7ed 100644
--- a/templates/components/vectordbs/python/pinecone/index.py
+++ b/templates/components/vectordbs/python/pinecone/index.py
@@ -1,23 +1,20 @@
 import logging
 import os
 
-from llama_index import (
-    VectorStoreIndex,
-)
-from llama_index.vector_stores import PineconeVectorStore
+from llama_index.core.indices import VectorStoreIndex
+from llama_index.vector_stores.pinecone import PineconeVectorStore
 
-from app.engine.context import create_service_context
+
+logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    service_context = create_service_context()
-    logger = logging.getLogger("uvicorn")
     logger.info("Connecting to index from Pinecone...")
     store = PineconeVectorStore(
         api_key=os.environ["PINECONE_API_KEY"],
         index_name=os.environ["PINECONE_INDEX_NAME"],
         environment=os.environ["PINECONE_ENVIRONMENT"],
     )
-    index = VectorStoreIndex.from_vector_store(store, service_context)
+    index = VectorStoreIndex.from_vector_store(store)
     logger.info("Finished connecting to index from Pinecone.")
     return index
diff --git a/templates/components/vectordbs/python/pinecone/settings.py b/templates/components/vectordbs/python/pinecone/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..d41f022766dba1af855fd29121ff87040f407e76
--- /dev/null
+++ b/templates/components/vectordbs/python/pinecone/settings.py
@@ -0,0 +1,10 @@
+from llama_index.core.settings import Settings
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
+from app.settings import init_base_settings
+
+
+def init_settings():
+    init_base_settings()
+
+    Settings.chunk_size = CHUNK_SIZE
+    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/types/simple/fastapi/app/api/routers/chat.py b/templates/types/simple/fastapi/app/api/routers/chat.py
index 09efdcaabca8e33493cddc64ea6d9f0fdb8956e3..8405f2ac592bb5666ed60a127f7f2b8c22c3e154 100644
--- a/templates/types/simple/fastapi/app/api/routers/chat.py
+++ b/templates/types/simple/fastapi/app/api/routers/chat.py
@@ -1,10 +1,8 @@
 from typing import List
-
-from fastapi import APIRouter, Depends, HTTPException, status
-from llama_index.chat_engine.types import BaseChatEngine
-from llama_index.llms.base import ChatMessage
-from llama_index.llms.types import MessageRole
 from pydantic import BaseModel
+from fastapi import APIRouter, Depends, HTTPException, status
+from llama_index.core.chat_engine.types import BaseChatEngine
+from llama_index.core.llms import ChatMessage, MessageRole
 from app.engine import get_chat_engine
 
 chat_router = r = APIRouter()
diff --git a/templates/types/simple/fastapi/app/context.py b/templates/types/simple/fastapi/app/context.py
deleted file mode 100644
index ae00de217c8741e080c981cc3fed21f24fe19961..0000000000000000000000000000000000000000
--- a/templates/types/simple/fastapi/app/context.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import os
-
-from llama_index import ServiceContext
-from llama_index.llms import OpenAI
-
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
diff --git a/templates/types/simple/fastapi/app/engine/__init__.py b/templates/types/simple/fastapi/app/engine/__init__.py
index 663b595a40c17f6090bb933bd73ea08ae71286de..50f9237407597be4f37227c6c60a378258e0a975 100644
--- a/templates/types/simple/fastapi/app/engine/__init__.py
+++ b/templates/types/simple/fastapi/app/engine/__init__.py
@@ -1,7 +1,7 @@
-from llama_index.chat_engine import SimpleChatEngine
-
-from app.context import create_base_context
+from llama_index.core.chat_engine import SimpleChatEngine
+from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    return SimpleChatEngine.from_defaults(service_context=create_base_context())
+    init_base_settings()
+    return SimpleChatEngine.from_defaults()
diff --git a/templates/types/simple/fastapi/app/settings.py b/templates/types/simple/fastapi/app/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..fec9955efbaf9c771aa20f7040dfa119214836b0
--- /dev/null
+++ b/templates/types/simple/fastapi/app/settings.py
@@ -0,0 +1,8 @@
+import os
+from llama_index.llms.openai import OpenAI
+from llama_index.core.settings import Settings
+
+
+def init_base_settings():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    Settings.llm = OpenAI(model=model)
diff --git a/templates/types/simple/fastapi/pyproject.toml b/templates/types/simple/fastapi/pyproject.toml
index 46448617f175723997c2e4e03e8efd7ab93ee20a..45c3ae567a6a1b7392255a6aa49fe896dbb12f3f 100644
--- a/templates/types/simple/fastapi/pyproject.toml
+++ b/templates/types/simple/fastapi/pyproject.toml
@@ -9,12 +9,8 @@ readme = "README.md"
 python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
-llama-index = "^0.9.19"
-pypdf = "^3.17.0"
 python-dotenv = "^1.0.0"
-docx2txt = "^0.8"
-llama-hub = "^0.0.77"
-wikipedia = "^1.4.0"
+llama-index = "^0.10.7"
 
 [build-system]
 requires = ["poetry-core"]
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat.py b/templates/types/streaming/fastapi/app/api/routers/chat.py
index 0afe14e4eed670b99fe5d7d4d44710517590c2cc..278a9a753d626d79fb0ca9109893a70fb400bfa6 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat.py
@@ -1,13 +1,10 @@
 from typing import List
-
+from pydantic import BaseModel
 from fastapi.responses import StreamingResponse
-from llama_index.chat_engine.types import BaseChatEngine
-
-from app.engine import get_chat_engine
 from fastapi import APIRouter, Depends, HTTPException, Request, status
-from llama_index.llms.base import ChatMessage
-from llama_index.llms.types import MessageRole
-from pydantic import BaseModel
+from llama_index.core.chat_engine.types import BaseChatEngine
+from llama_index.core.llms import ChatMessage, MessageRole
+from app.engine import get_chat_engine
 
 chat_router = r = APIRouter()
 
diff --git a/templates/types/streaming/fastapi/app/context.py b/templates/types/streaming/fastapi/app/context.py
deleted file mode 100644
index ae00de217c8741e080c981cc3fed21f24fe19961..0000000000000000000000000000000000000000
--- a/templates/types/streaming/fastapi/app/context.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import os
-
-from llama_index import ServiceContext
-from llama_index.llms import OpenAI
-
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
diff --git a/templates/types/streaming/fastapi/app/engine/__init__.py b/templates/types/streaming/fastapi/app/engine/__init__.py
index 663b595a40c17f6090bb933bd73ea08ae71286de..50f9237407597be4f37227c6c60a378258e0a975 100644
--- a/templates/types/streaming/fastapi/app/engine/__init__.py
+++ b/templates/types/streaming/fastapi/app/engine/__init__.py
@@ -1,7 +1,7 @@
-from llama_index.chat_engine import SimpleChatEngine
-
-from app.context import create_base_context
+from llama_index.core.chat_engine import SimpleChatEngine
+from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    return SimpleChatEngine.from_defaults(service_context=create_base_context())
+    init_base_settings()
+    return SimpleChatEngine.from_defaults()
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/types/streaming/fastapi/app/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..fec9955efbaf9c771aa20f7040dfa119214836b0
--- /dev/null
+++ b/templates/types/streaming/fastapi/app/settings.py
@@ -0,0 +1,8 @@
+import os
+from llama_index.llms.openai import OpenAI
+from llama_index.core.settings import Settings
+
+
+def init_base_settings():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    Settings.llm = OpenAI(model=model)
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 46448617f175723997c2e4e03e8efd7ab93ee20a..45c3ae567a6a1b7392255a6aa49fe896dbb12f3f 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -9,12 +9,8 @@ readme = "README.md"
 python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
-llama-index = "^0.9.19"
-pypdf = "^3.17.0"
 python-dotenv = "^1.0.0"
-docx2txt = "^0.8"
-llama-hub = "^0.0.77"
-wikipedia = "^1.4.0"
+llama-index = "^0.10.7"
 
 [build-system]
 requires = ["poetry-core"]