Skip to content
Snippets Groups Projects
Commit 1fa881df authored by Marcus Schiesser's avatar Marcus Schiesser Committed by GitHub
Browse files

refactor: simplify generated python code (#558)


Co-authored-by: default avatarleehuwuj <leehuwuj@gmail.com>
parent c74fec2b
No related branches found
No related tags found
No related merge requests found
Showing
with 17 additions and 76 deletions
from llama_index.core.readers import SimpleDirectoryReader
from app.engine.constants import DATA_DIR
DATA_DIR = "data" # directory to cache the generated index
def get_documents():
......
DATA_DIR = "data" # directory containing the documents to index
CHUNK_SIZE = 1024
CHUNK_OVERLAP = 20
......@@ -7,8 +7,7 @@ import logging
from llama_index.core.storage import StorageContext
from llama_index.core.indices import VectorStoreIndex
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from app.engine.settings import init_settings
from app.engine.constants import DATA_DIR
from app.settings import init_settings
from app.engine.loader import get_documents
logging.basicConfig(level=logging.INFO)
......
from llama_index.core.settings import Settings
from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
from app.settings import init_base_settings
def init_settings():
    """Apply the app-wide base settings, then override the chunking parameters.

    Delegates LLM/provider configuration to ``init_base_settings`` and only
    adjusts the global ``Settings`` chunking knobs from the engine constants.
    """
    init_base_settings()
    # The two assignments are independent of each other; both must follow the
    # base initialization so they are not clobbered by it.
    Settings.chunk_overlap = CHUNK_OVERLAP
    Settings.chunk_size = CHUNK_SIZE
STORAGE_DIR = "storage" # directory to cache the generated index
DATA_DIR = "data" # directory containing the documents to index
CHUNK_SIZE = 1024
CHUNK_OVERLAP = 20
......@@ -6,9 +6,9 @@ import logging
from llama_index.core.indices import (
VectorStoreIndex,
)
from app.engine.constants import DATA_DIR, STORAGE_DIR
from app.engine.constants import STORAGE_DIR
from app.engine.loader import get_documents
from app.engine.settings import init_settings
from app.settings import init_settings
logging.basicConfig(level=logging.INFO)
......
......@@ -2,7 +2,6 @@ import logging
import os
from app.engine.constants import STORAGE_DIR
from app.engine.settings import init_settings
from llama_index.core.storage import StorageContext
from llama_index.core.indices import load_index_from_storage
......@@ -10,9 +9,6 @@ logger = logging.getLogger("uvicorn")
def get_index():
# Init default app global settings
init_settings()
# check if storage already exists
if not os.path.exists(STORAGE_DIR):
raise Exception(
......
from llama_index.core.settings import Settings
from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
from app.settings import init_base_settings
def init_settings():
    """Initialize global LlamaIndex settings for this engine.

    Runs the shared base initialization first, then pins the chunking
    configuration to the engine-level constants.
    """
    init_base_settings()
    Settings.chunk_overlap = CHUNK_OVERLAP
    Settings.chunk_size = CHUNK_SIZE
DATA_DIR = "data" # directory containing the documents to index
CHUNK_SIZE = 1024
CHUNK_OVERLAP = 20
PGVECTOR_SCHEMA = "public"
PGVECTOR_TABLE = "llamaindex_embedding"
\ No newline at end of file
......@@ -6,9 +6,8 @@ import logging
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.storage import StorageContext
from app.engine.constants import DATA_DIR
from app.engine.loader import get_documents
from app.engine.settings import init_settings
from app.settings import init_settings
from app.engine.utils import init_pg_vector_store_from_env
logging.basicConfig(level=logging.INFO)
......
import logging
from llama_index.core.indices.vector_store import VectorStoreIndex
from app.engine.settings import init_settings
from app.engine.utils import init_pg_vector_store_from_env
logger = logging.getLogger("uvicorn")
def get_index():
# Init default app global settings
init_settings()
logger.info("Connecting to index from PGVector...")
store = init_pg_vector_store_from_env()
index = VectorStoreIndex.from_vector_store(store)
......
from llama_index.core.settings import Settings
from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
from app.settings import init_base_settings
def init_settings():
    """Configure the global ``Settings`` object for this vector-store engine.

    Base settings (LLM, etc.) come from ``init_base_settings``; chunking is
    taken from the engine constants afterwards.
    """
    init_base_settings()
    Settings.chunk_overlap = CHUNK_OVERLAP
    Settings.chunk_size = CHUNK_SIZE
DATA_DIR = "data" # directory containing the documents to index
CHUNK_SIZE = 512
CHUNK_OVERLAP = 20
......@@ -7,8 +7,7 @@ import logging
from llama_index.core.storage import StorageContext
from llama_index.core.indices import VectorStoreIndex
from llama_index.vector_stores.pinecone import PineconeVectorStore
from app.engine.settings import init_settings
from app.engine.constants import DATA_DIR
from app.settings import init_settings
from app.engine.loader import get_documents
logging.basicConfig(level=logging.INFO)
......
from llama_index.core.settings import Settings
from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
from app.settings import init_base_settings
def init_settings():
    """Set up shared LlamaIndex defaults plus this engine's chunking values."""
    init_base_settings()
    # Override chunking after base init so the constants take precedence.
    Settings.chunk_overlap = CHUNK_OVERLAP
    Settings.chunk_size = CHUNK_SIZE
from llama_index.core.chat_engine import SimpleChatEngine
from app.settings import init_base_settings
def get_chat_engine():
    """Return a default ``SimpleChatEngine`` after applying base settings.

    No index or retriever is involved — this is the plain-LLM chat path.
    """
    init_base_settings()
    engine = SimpleChatEngine.from_defaults()
    return engine
......@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
from llama_index.core.settings import Settings
def init_base_settings():
def init_settings():
    """Populate the global ``Settings``: OpenAI LLM plus default chunking.

    The model name is read from the ``MODEL`` environment variable, falling
    back to ``gpt-3.5-turbo`` when unset.
    """
    model_name = os.getenv("MODEL", "gpt-3.5-turbo")
    Settings.llm = OpenAI(model=model_name)
    # Hard-coded project defaults for document chunking.
    Settings.chunk_overlap = 20
    Settings.chunk_size = 1024
......@@ -5,12 +5,15 @@ load_dotenv()
import logging
import os
import uvicorn
from app.api.routers.chat import chat_router
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api.routers.chat import chat_router
from app.settings import init_settings
app = FastAPI()
init_settings()
environment = os.getenv("ENVIRONMENT", "dev") # Default to 'development' if not set
......
from llama_index.core.chat_engine import SimpleChatEngine
from app.settings import init_base_settings
def get_chat_engine():
    """Build and return the index-free chat engine with base settings applied."""
    init_base_settings()
    chat_engine = SimpleChatEngine.from_defaults()
    return chat_engine
......@@ -3,6 +3,8 @@ from llama_index.llms.openai import OpenAI
from llama_index.core.settings import Settings
def init_base_settings():
def init_settings():
    """Configure global LlamaIndex settings (LLM and chunking defaults).

    ``MODEL`` env var selects the OpenAI model; defaults to ``gpt-3.5-turbo``.
    """
    chosen_model = os.getenv("MODEL", "gpt-3.5-turbo")
    Settings.llm = OpenAI(model=chosen_model)
    # Project-wide chunking defaults.
    Settings.chunk_overlap = 20
    Settings.chunk_size = 1024
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment