diff --git a/templates/components/vectordbs/python/none/context.py b/templates/components/vectordbs/python/none/context.py index 271ac87267c12de5f0b588d9ac68ba17a7fddbb6..4756d813d812c7244df2818ef81f426e6fe13ca0 100644 --- a/templates/components/vectordbs/python/none/context.py +++ b/templates/components/vectordbs/python/none/context.py @@ -1,15 +1,7 @@ -import os - from llama_index import ServiceContext -from llama_index.llms import OpenAI -from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP - -def create_base_context(): - model = os.getenv("MODEL", "gpt-3.5-turbo") - return ServiceContext.from_defaults( - llm=OpenAI(model=model), - ) +from app.context import create_base_context +from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP def create_service_context(): diff --git a/templates/types/simple/fastapi/app/context.py b/templates/types/simple/fastapi/app/context.py new file mode 100644 index 0000000000000000000000000000000000000000..ae00de217c8741e080c981cc3fed21f24fe19961 --- /dev/null +++ b/templates/types/simple/fastapi/app/context.py @@ -0,0 +1,11 @@ +import os + +from llama_index import ServiceContext +from llama_index.llms import OpenAI + + +def create_base_context(): + model = os.getenv("MODEL", "gpt-3.5-turbo") + return ServiceContext.from_defaults( + llm=OpenAI(model=model), + ) diff --git a/templates/types/simple/fastapi/app/engine/index.py b/templates/types/simple/fastapi/app/engine/index.py index 47a9b083c914ffa96c7412d849b7de713c9c559f..663b595a40c17f6090bb933bd73ea08ae71286de 100644 --- a/templates/types/simple/fastapi/app/engine/index.py +++ b/templates/types/simple/fastapi/app/engine/index.py @@ -1,15 +1,6 @@ -import os - -from llama_index import ServiceContext from llama_index.chat_engine import SimpleChatEngine -from llama_index.llms import OpenAI - -def create_base_context(): - model = os.getenv("MODEL", "gpt-3.5-turbo") - return ServiceContext.from_defaults( - llm=OpenAI(model=model), - ) +from app.context import create_base_context
def get_chat_engine(): diff --git a/templates/types/simple/fastapi/main.py b/templates/types/simple/fastapi/main.py index 00cb79c44d58f4819243b8ee16c3108bef950382..ba56f0345bacc5ad73e4218a781bee57427e1ec9 100644 --- a/templates/types/simple/fastapi/main.py +++ b/templates/types/simple/fastapi/main.py @@ -1,4 +1,5 @@ from dotenv import load_dotenv + load_dotenv() import logging diff --git a/templates/types/streaming/fastapi/app/context.py b/templates/types/streaming/fastapi/app/context.py new file mode 100644 index 0000000000000000000000000000000000000000..ae00de217c8741e080c981cc3fed21f24fe19961 --- /dev/null +++ b/templates/types/streaming/fastapi/app/context.py @@ -0,0 +1,11 @@ +import os + +from llama_index import ServiceContext +from llama_index.llms import OpenAI + + +def create_base_context(): + model = os.getenv("MODEL", "gpt-3.5-turbo") + return ServiceContext.from_defaults( + llm=OpenAI(model=model), + ) diff --git a/templates/types/streaming/fastapi/app/engine/index.py b/templates/types/streaming/fastapi/app/engine/index.py index 47a9b083c914ffa96c7412d849b7de713c9c559f..663b595a40c17f6090bb933bd73ea08ae71286de 100644 --- a/templates/types/streaming/fastapi/app/engine/index.py +++ b/templates/types/streaming/fastapi/app/engine/index.py @@ -1,15 +1,6 @@ -import os - -from llama_index import ServiceContext from llama_index.chat_engine import SimpleChatEngine -from llama_index.llms import OpenAI - -def create_base_context(): - model = os.getenv("MODEL", "gpt-3.5-turbo") - return ServiceContext.from_defaults( - llm=OpenAI(model=model), - ) +from app.context import create_base_context def get_chat_engine():