diff --git a/packages/create-llama/templates/components/loaders/python/file/loader.py b/packages/create-llama/templates/components/loaders/python/file/loader.py
index dd7627a7199cacf625bfd14b0e33ab1bd1e75892..d4f4fcf9ab70c44611a7d65d16296002ff2d9870 100644
--- a/packages/create-llama/templates/components/loaders/python/file/loader.py
+++ b/packages/create-llama/templates/components/loaders/python/file/loader.py
@@ -1,5 +1,6 @@
 from llama_index.core.readers import SimpleDirectoryReader
-from app.engine.constants import DATA_DIR
+
+DATA_DIR = "data"  # directory containing the documents to index
 
 
 def get_documents():
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/constants.py b/packages/create-llama/templates/components/vectordbs/python/mongo/constants.py
deleted file mode 100644
index 547ff9a8b9a7df6a752e07084f22d9bd771ce20c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/mongo/constants.py
+++ /dev/null
@@ -1,3 +0,0 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py b/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py
index 667308394fa302c9fb7a2a4af48882109bad040c..69d520711537c0c2b35f7be5bb2697cb9690cf5c 100644
--- a/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py
+++ b/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py
@@ -7,8 +7,7 @@ import logging
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
-from app.engine.settings import init_settings
-from app.engine.constants import DATA_DIR
+from app.settings import init_settings
 from app.engine.loader import get_documents
 
 logging.basicConfig(level=logging.INFO)
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/settings.py b/packages/create-llama/templates/components/vectordbs/python/mongo/settings.py
deleted file mode 100644
index d41f022766dba1af855fd29121ff87040f407e76..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/mongo/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/constants.py b/packages/create-llama/templates/components/vectordbs/python/none/constants.py
index 4180edc4b440cafc26aef00530dc3ba2af3cbdf6..254998ebbda96cd491b7914ed795eb6b5cfe0d39 100644
--- a/packages/create-llama/templates/components/vectordbs/python/none/constants.py
+++ b/packages/create-llama/templates/components/vectordbs/python/none/constants.py
@@ -1,4 +1 @@
 STORAGE_DIR = "storage"  # directory to cache the generated index
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/generate.py b/packages/create-llama/templates/components/vectordbs/python/none/generate.py
index 29140e11299e022cc0a5ad3705ba966e0675d27f..3c8055f3794669d37d599a502a6f7eb802202606 100644
--- a/packages/create-llama/templates/components/vectordbs/python/none/generate.py
+++ b/packages/create-llama/templates/components/vectordbs/python/none/generate.py
@@ -6,9 +6,9 @@ import logging
 from llama_index.core.indices import (
     VectorStoreIndex,
 )
-from app.engine.constants import DATA_DIR, STORAGE_DIR
+from app.engine.constants import STORAGE_DIR
 from app.engine.loader import get_documents
-from app.engine.settings import init_settings
+from app.settings import init_settings
 
 
 logging.basicConfig(level=logging.INFO)
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/index.py b/packages/create-llama/templates/components/vectordbs/python/none/index.py
index 4446eed0e9e2eeb0e33f93d34120d299b03c7495..4dcc858a03ff1f0f8226236f436add8e705bb80f 100644
--- a/packages/create-llama/templates/components/vectordbs/python/none/index.py
+++ b/packages/create-llama/templates/components/vectordbs/python/none/index.py
@@ -2,7 +2,6 @@ import logging
 import os
 
 from app.engine.constants import STORAGE_DIR
-from app.engine.settings import init_settings
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import load_index_from_storage
 
@@ -10,9 +9,6 @@ logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    # Init default app global settings
-    init_settings()
-
     # check if storage already exists
     if not os.path.exists(STORAGE_DIR):
         raise Exception(
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/settings.py b/packages/create-llama/templates/components/vectordbs/python/none/settings.py
deleted file mode 100644
index d41f022766dba1af855fd29121ff87040f407e76..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/none/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/constants.py b/packages/create-llama/templates/components/vectordbs/python/pg/constants.py
index efc5105a0eceeffa847797dfa2ad7223a6842a79..a4ebd91831da4f3e6ff585106eee69fcf6993b0e 100644
--- a/packages/create-llama/templates/components/vectordbs/python/pg/constants.py
+++ b/packages/create-llama/templates/components/vectordbs/python/pg/constants.py
@@ -1,5 +1,2 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
 PGVECTOR_SCHEMA = "public"
 PGVECTOR_TABLE = "llamaindex_embedding"
\ No newline at end of file
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/generate.py b/packages/create-llama/templates/components/vectordbs/python/pg/generate.py
index b184d3a4917972fdf05e77ac943b13883155a473..608beb2e81bc84a67a18fdd21a820eb207c4ad70 100644
--- a/packages/create-llama/templates/components/vectordbs/python/pg/generate.py
+++ b/packages/create-llama/templates/components/vectordbs/python/pg/generate.py
@@ -6,9 +6,8 @@ import logging
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.core.storage import StorageContext
 
-from app.engine.constants import DATA_DIR
 from app.engine.loader import get_documents
-from app.engine.settings import init_settings
+from app.settings import init_settings
 from app.engine.utils import init_pg_vector_store_from_env
 
 logging.basicConfig(level=logging.INFO)
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/index.py b/packages/create-llama/templates/components/vectordbs/python/pg/index.py
index 9cce1b95b8c7474675043a920a9529dbb7c992dd..3c4f31800b4f06fd286e8c23ab3fbdca393c4fca 100644
--- a/packages/create-llama/templates/components/vectordbs/python/pg/index.py
+++ b/packages/create-llama/templates/components/vectordbs/python/pg/index.py
@@ -1,15 +1,11 @@
 import logging
 from llama_index.core.indices.vector_store import VectorStoreIndex
-from app.engine.settings import init_settings
 from app.engine.utils import init_pg_vector_store_from_env
 
 logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    # Init default app global settings
-    init_settings()
-
     logger.info("Connecting to index from PGVector...")
     store = init_pg_vector_store_from_env()
     index = VectorStoreIndex.from_vector_store(store)
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/settings.py b/packages/create-llama/templates/components/vectordbs/python/pg/settings.py
deleted file mode 100644
index d41f022766dba1af855fd29121ff87040f407e76..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pg/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/constants.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/constants.py
deleted file mode 100644
index 0dd46619b2a2532e46e30a1abde8efcf94cfda06..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pinecone/constants.py
+++ /dev/null
@@ -1,3 +0,0 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 512
-CHUNK_OVERLAP = 20
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py
index 0e6486f86f85021d768fa3feeb938c2067f12484..4e14648b0b008d244e37532dfdd4f1304c95853c 100644
--- a/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py
+++ b/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py
@@ -7,8 +7,7 @@ import logging
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.vector_stores.pinecone import PineconeVectorStore
-from app.engine.settings import init_settings
-from app.engine.constants import DATA_DIR
+from app.settings import init_settings
 from app.engine.loader import get_documents
 
 logging.basicConfig(level=logging.INFO)
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/settings.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/settings.py
deleted file mode 100644
index d41f022766dba1af855fd29121ff87040f407e76..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pinecone/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
index 50f9237407597be4f37227c6c60a378258e0a975..fd8bb96a812db057eb2b0bcd6e1b8a17d221f76e 100644
--- a/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
+++ b/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
@@ -1,7 +1,5 @@
 from llama_index.core.chat_engine import SimpleChatEngine
-from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    init_base_settings()
     return SimpleChatEngine.from_defaults()
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/settings.py b/packages/create-llama/templates/types/simple/fastapi/app/settings.py
index fec9955efbaf9c771aa20f7040dfa119214836b0..e221a6b4cf0576a933dbd93c2956d07bfa7fa829 100644
--- a/packages/create-llama/templates/types/simple/fastapi/app/settings.py
+++ b/packages/create-llama/templates/types/simple/fastapi/app/settings.py
@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
 from llama_index.core.settings import Settings
 
 
-def init_base_settings():
+def init_settings():
     model = os.getenv("MODEL", "gpt-3.5-turbo")
     Settings.llm = OpenAI(model=model)
+    Settings.chunk_size = 1024
+    Settings.chunk_overlap = 20
diff --git a/packages/create-llama/templates/types/simple/fastapi/main.py b/packages/create-llama/templates/types/simple/fastapi/main.py
index ba56f0345bacc5ad73e4218a781bee57427e1ec9..d9291060d141f06f7de78b1e91cea2330e94ee48 100644
--- a/packages/create-llama/templates/types/simple/fastapi/main.py
+++ b/packages/create-llama/templates/types/simple/fastapi/main.py
@@ -5,12 +5,15 @@ load_dotenv()
 import logging
 import os
 import uvicorn
-from app.api.routers.chat import chat_router
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from app.api.routers.chat import chat_router
+from app.settings import init_settings
 
 app = FastAPI()
 
+init_settings()
+
 environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'development' if not set
 
 
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
index 50f9237407597be4f37227c6c60a378258e0a975..fd8bb96a812db057eb2b0bcd6e1b8a17d221f76e 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
@@ -1,7 +1,5 @@
 from llama_index.core.chat_engine import SimpleChatEngine
-from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    init_base_settings()
     return SimpleChatEngine.from_defaults()
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/settings.py b/packages/create-llama/templates/types/streaming/fastapi/app/settings.py
index fec9955efbaf9c771aa20f7040dfa119214836b0..e221a6b4cf0576a933dbd93c2956d07bfa7fa829 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/app/settings.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/settings.py
@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
 from llama_index.core.settings import Settings
 
 
-def init_base_settings():
+def init_settings():
     model = os.getenv("MODEL", "gpt-3.5-turbo")
     Settings.llm = OpenAI(model=model)
+    Settings.chunk_size = 1024
+    Settings.chunk_overlap = 20
diff --git a/packages/create-llama/templates/types/streaming/fastapi/main.py b/packages/create-llama/templates/types/streaming/fastapi/main.py
index ba56f0345bacc5ad73e4218a781bee57427e1ec9..ed73c268cf119cd453144f198653224b2c57b3aa 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/main.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/main.py
@@ -5,12 +5,16 @@ load_dotenv()
 import logging
 import os
 import uvicorn
-from app.api.routers.chat import chat_router
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from app.api.routers.chat import chat_router
+from app.settings import init_settings
+
 
 app = FastAPI()
 
+init_settings()
+
 environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'development' if not set