From 1fa881df5300783312e7e19f8f8a2006282dc9ff Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Fri, 23 Feb 2024 11:14:02 +0700
Subject: [PATCH] refactor: simplify generated python code (#558)

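The generated FastAPI projects previously duplicated settings code per
vector store: each app/engine/settings.py wrapped init_base_settings()
and pulled CHUNK_SIZE / CHUNK_OVERLAP from a per-store constants.py, and
get_index() / get_chat_engine() re-initialized settings on every call.
This patch drops the per-store settings and constants modules, renames
init_base_settings() to init_settings() in app/settings.py (which now
also sets the chunk size and overlap), calls it once at startup in
main.py, and moves DATA_DIR into the file loader. Note that the Pinecone
template previously used CHUNK_SIZE = 512; with the shared settings it
now uses the common value of 1024.

A rough sketch of the resulting startup flow, condensed from the diff
below (file contents abridged; see templates/types/*/fastapi/):

    # app/settings.py
    import os

    from llama_index.core.settings import Settings
    from llama_index.llms.openai import OpenAI

    def init_settings():
        # Configure the shared llama-index Settings singleton once.
        model = os.getenv("MODEL", "gpt-3.5-turbo")
        Settings.llm = OpenAI(model=model)
        Settings.chunk_size = 1024
        Settings.chunk_overlap = 20

    # main.py
    from fastapi import FastAPI
    from app.settings import init_settings

    app = FastAPI()
    init_settings()  # global settings are set up once at startup
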
Co-authored-by: leehuwuj <leehuwuj@gmail.com>
---
 templates/components/loaders/python/file/loader.py     |  3 ++-
 .../components/vectordbs/python/mongo/constants.py     |  3 ---
 .../components/vectordbs/python/mongo/generate.py      |  3 +--
 .../components/vectordbs/python/mongo/settings.py      | 10 ----------
 .../components/vectordbs/python/none/constants.py      |  3 ---
 templates/components/vectordbs/python/none/generate.py |  4 ++--
 templates/components/vectordbs/python/none/index.py    |  4 ----
 templates/components/vectordbs/python/none/settings.py | 10 ----------
 templates/components/vectordbs/python/pg/constants.py  |  3 ---
 templates/components/vectordbs/python/pg/generate.py   |  3 +--
 templates/components/vectordbs/python/pg/index.py      |  4 ----
 templates/components/vectordbs/python/pg/settings.py   | 10 ----------
 .../components/vectordbs/python/pinecone/constants.py  |  3 ---
 .../components/vectordbs/python/pinecone/generate.py   |  3 +--
 .../components/vectordbs/python/pinecone/settings.py   | 10 ----------
 templates/types/simple/fastapi/app/engine/__init__.py  |  2 --
 templates/types/simple/fastapi/app/settings.py         |  4 +++-
 templates/types/simple/fastapi/main.py                 |  5 ++++-
 .../types/streaming/fastapi/app/engine/__init__.py     |  2 --
 templates/types/streaming/fastapi/app/settings.py      |  4 +++-
 templates/types/streaming/fastapi/main.py              |  6 +++++-
 21 files changed, 22 insertions(+), 77 deletions(-)
 delete mode 100644 templates/components/vectordbs/python/mongo/constants.py
 delete mode 100644 templates/components/vectordbs/python/mongo/settings.py
 delete mode 100644 templates/components/vectordbs/python/none/settings.py
 delete mode 100644 templates/components/vectordbs/python/pg/settings.py
 delete mode 100644 templates/components/vectordbs/python/pinecone/constants.py
 delete mode 100644 templates/components/vectordbs/python/pinecone/settings.py

diff --git a/templates/components/loaders/python/file/loader.py b/templates/components/loaders/python/file/loader.py
index dd7627a7..d4f4fcf9 100644
--- a/templates/components/loaders/python/file/loader.py
+++ b/templates/components/loaders/python/file/loader.py
@@ -1,5 +1,6 @@
 from llama_index.core.readers import SimpleDirectoryReader
-from app.engine.constants import DATA_DIR
+
+DATA_DIR = "data"  # directory to cache the generated index
 
 
 def get_documents():
diff --git a/templates/components/vectordbs/python/mongo/constants.py b/templates/components/vectordbs/python/mongo/constants.py
deleted file mode 100644
index 547ff9a8..00000000
--- a/templates/components/vectordbs/python/mongo/constants.py
+++ /dev/null
@@ -1,3 +0,0 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
diff --git a/templates/components/vectordbs/python/mongo/generate.py b/templates/components/vectordbs/python/mongo/generate.py
index 66730839..69d52071 100644
--- a/templates/components/vectordbs/python/mongo/generate.py
+++ b/templates/components/vectordbs/python/mongo/generate.py
@@ -7,8 +7,7 @@ import logging
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
-from app.engine.settings import init_settings
-from app.engine.constants import DATA_DIR
+from app.settings import init_settings
 from app.engine.loader import get_documents
 
 logging.basicConfig(level=logging.INFO)
diff --git a/templates/components/vectordbs/python/mongo/settings.py b/templates/components/vectordbs/python/mongo/settings.py
deleted file mode 100644
index d41f0227..00000000
--- a/templates/components/vectordbs/python/mongo/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/none/constants.py b/templates/components/vectordbs/python/none/constants.py
index 4180edc4..254998eb 100644
--- a/templates/components/vectordbs/python/none/constants.py
+++ b/templates/components/vectordbs/python/none/constants.py
@@ -1,4 +1 @@
 STORAGE_DIR = "storage"  # directory to cache the generated index
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
diff --git a/templates/components/vectordbs/python/none/generate.py b/templates/components/vectordbs/python/none/generate.py
index 29140e11..3c8055f3 100644
--- a/templates/components/vectordbs/python/none/generate.py
+++ b/templates/components/vectordbs/python/none/generate.py
@@ -6,9 +6,9 @@ import logging
 from llama_index.core.indices import (
     VectorStoreIndex,
 )
-from app.engine.constants import DATA_DIR, STORAGE_DIR
+from app.engine.constants import STORAGE_DIR
 from app.engine.loader import get_documents
-from app.engine.settings import init_settings
+from app.settings import init_settings
 
 
 logging.basicConfig(level=logging.INFO)
diff --git a/templates/components/vectordbs/python/none/index.py b/templates/components/vectordbs/python/none/index.py
index 4446eed0..4dcc858a 100644
--- a/templates/components/vectordbs/python/none/index.py
+++ b/templates/components/vectordbs/python/none/index.py
@@ -2,7 +2,6 @@ import logging
 import os
 
 from app.engine.constants import STORAGE_DIR
-from app.engine.settings import init_settings
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import load_index_from_storage
 
@@ -10,9 +9,6 @@ logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    # Init default app global settings
-    init_settings()
-
     # check if storage already exists
     if not os.path.exists(STORAGE_DIR):
         raise Exception(
diff --git a/templates/components/vectordbs/python/none/settings.py b/templates/components/vectordbs/python/none/settings.py
deleted file mode 100644
index d41f0227..00000000
--- a/templates/components/vectordbs/python/none/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/pg/constants.py b/templates/components/vectordbs/python/pg/constants.py
index efc5105a..a4ebd918 100644
--- a/templates/components/vectordbs/python/pg/constants.py
+++ b/templates/components/vectordbs/python/pg/constants.py
@@ -1,5 +1,2 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 1024
-CHUNK_OVERLAP = 20
 PGVECTOR_SCHEMA = "public"
 PGVECTOR_TABLE = "llamaindex_embedding"
\ No newline at end of file
diff --git a/templates/components/vectordbs/python/pg/generate.py b/templates/components/vectordbs/python/pg/generate.py
index b184d3a4..608beb2e 100644
--- a/templates/components/vectordbs/python/pg/generate.py
+++ b/templates/components/vectordbs/python/pg/generate.py
@@ -6,9 +6,8 @@ import logging
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.core.storage import StorageContext
 
-from app.engine.constants import DATA_DIR
 from app.engine.loader import get_documents
-from app.engine.settings import init_settings
+from app.settings import init_settings
 from app.engine.utils import init_pg_vector_store_from_env
 
 logging.basicConfig(level=logging.INFO)
diff --git a/templates/components/vectordbs/python/pg/index.py b/templates/components/vectordbs/python/pg/index.py
index 9cce1b95..3c4f3180 100644
--- a/templates/components/vectordbs/python/pg/index.py
+++ b/templates/components/vectordbs/python/pg/index.py
@@ -1,15 +1,11 @@
 import logging
 from llama_index.core.indices.vector_store import VectorStoreIndex
-from app.engine.settings import init_settings
 from app.engine.utils import init_pg_vector_store_from_env
 
 logger = logging.getLogger("uvicorn")
 
 
 def get_index():
-    # Init default app global settings
-    init_settings()
-
     logger.info("Connecting to index from PGVector...")
     store = init_pg_vector_store_from_env()
     index = VectorStoreIndex.from_vector_store(store)
diff --git a/templates/components/vectordbs/python/pg/settings.py b/templates/components/vectordbs/python/pg/settings.py
deleted file mode 100644
index d41f0227..00000000
--- a/templates/components/vectordbs/python/pg/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/components/vectordbs/python/pinecone/constants.py b/templates/components/vectordbs/python/pinecone/constants.py
deleted file mode 100644
index 0dd46619..00000000
--- a/templates/components/vectordbs/python/pinecone/constants.py
+++ /dev/null
@@ -1,3 +0,0 @@
-DATA_DIR = "data"  # directory containing the documents to index
-CHUNK_SIZE = 512
-CHUNK_OVERLAP = 20
diff --git a/templates/components/vectordbs/python/pinecone/generate.py b/templates/components/vectordbs/python/pinecone/generate.py
index 0e6486f8..4e14648b 100644
--- a/templates/components/vectordbs/python/pinecone/generate.py
+++ b/templates/components/vectordbs/python/pinecone/generate.py
@@ -7,8 +7,7 @@ import logging
 from llama_index.core.storage import StorageContext
 from llama_index.core.indices import VectorStoreIndex
 from llama_index.vector_stores.pinecone import PineconeVectorStore
-from app.engine.settings import init_settings
-from app.engine.constants import DATA_DIR
+from app.settings import init_settings
 from app.engine.loader import get_documents
 
 logging.basicConfig(level=logging.INFO)
diff --git a/templates/components/vectordbs/python/pinecone/settings.py b/templates/components/vectordbs/python/pinecone/settings.py
deleted file mode 100644
index d41f0227..00000000
--- a/templates/components/vectordbs/python/pinecone/settings.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from llama_index.core.settings import Settings
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
-from app.settings import init_base_settings
-
-
-def init_settings():
-    init_base_settings()
-
-    Settings.chunk_size = CHUNK_SIZE
-    Settings.chunk_overlap = CHUNK_OVERLAP
diff --git a/templates/types/simple/fastapi/app/engine/__init__.py b/templates/types/simple/fastapi/app/engine/__init__.py
index 50f92374..fd8bb96a 100644
--- a/templates/types/simple/fastapi/app/engine/__init__.py
+++ b/templates/types/simple/fastapi/app/engine/__init__.py
@@ -1,7 +1,5 @@
 from llama_index.core.chat_engine import SimpleChatEngine
-from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    init_base_settings()
     return SimpleChatEngine.from_defaults()
diff --git a/templates/types/simple/fastapi/app/settings.py b/templates/types/simple/fastapi/app/settings.py
index fec9955e..e221a6b4 100644
--- a/templates/types/simple/fastapi/app/settings.py
+++ b/templates/types/simple/fastapi/app/settings.py
@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
 from llama_index.core.settings import Settings
 
 
-def init_base_settings():
+def init_settings():
     model = os.getenv("MODEL", "gpt-3.5-turbo")
     Settings.llm = OpenAI(model=model)
+    Settings.chunk_size = 1024
+    Settings.chunk_overlap = 20
diff --git a/templates/types/simple/fastapi/main.py b/templates/types/simple/fastapi/main.py
index ba56f034..d9291060 100644
--- a/templates/types/simple/fastapi/main.py
+++ b/templates/types/simple/fastapi/main.py
@@ -5,12 +5,15 @@ load_dotenv()
 import logging
 import os
 import uvicorn
-from app.api.routers.chat import chat_router
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from app.api.routers.chat import chat_router
+from app.settings import init_settings
 
 app = FastAPI()
 
+init_settings()
+
 environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'development' if not set
 
 
diff --git a/templates/types/streaming/fastapi/app/engine/__init__.py b/templates/types/streaming/fastapi/app/engine/__init__.py
index 50f92374..fd8bb96a 100644
--- a/templates/types/streaming/fastapi/app/engine/__init__.py
+++ b/templates/types/streaming/fastapi/app/engine/__init__.py
@@ -1,7 +1,5 @@
 from llama_index.core.chat_engine import SimpleChatEngine
-from app.settings import init_base_settings
 
 
 def get_chat_engine():
-    init_base_settings()
     return SimpleChatEngine.from_defaults()
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/types/streaming/fastapi/app/settings.py
index fec9955e..e221a6b4 100644
--- a/templates/types/streaming/fastapi/app/settings.py
+++ b/templates/types/streaming/fastapi/app/settings.py
@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
 from llama_index.core.settings import Settings
 
 
-def init_base_settings():
+def init_settings():
     model = os.getenv("MODEL", "gpt-3.5-turbo")
     Settings.llm = OpenAI(model=model)
+    Settings.chunk_size = 1024
+    Settings.chunk_overlap = 20
diff --git a/templates/types/streaming/fastapi/main.py b/templates/types/streaming/fastapi/main.py
index ba56f034..ed73c268 100644
--- a/templates/types/streaming/fastapi/main.py
+++ b/templates/types/streaming/fastapi/main.py
@@ -5,12 +5,16 @@ load_dotenv()
 import logging
 import os
 import uvicorn
-from app.api.routers.chat import chat_router
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from app.api.routers.chat import chat_router
+from app.settings import init_settings
+
 
 app = FastAPI()
 
+init_settings()
+
 environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'development' if not set
 
 
-- 
GitLab