From 935bc5223974780424e0d56f32121d1242e84a74 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Tue, 26 Dec 2023 16:43:34 +0700
Subject: [PATCH] fix: use base service context

---
 .../components/vectordbs/python/none/context.py     | 12 ++----------
 .../templates/types/simple/fastapi/app/context.py   | 11 +++++++++++
 .../types/simple/fastapi/app/engine/index.py        | 11 +----------
 .../templates/types/simple/fastapi/main.py          |  1 +
 .../types/streaming/fastapi/app/context.py          | 11 +++++++++++
 .../types/streaming/fastapi/app/engine/index.py     | 11 +----------
 6 files changed, 27 insertions(+), 30 deletions(-)
 create mode 100644 packages/create-llama/templates/types/simple/fastapi/app/context.py
 create mode 100644 packages/create-llama/templates/types/streaming/fastapi/app/context.py

diff --git a/packages/create-llama/templates/components/vectordbs/python/none/context.py b/packages/create-llama/templates/components/vectordbs/python/none/context.py
index 271ac8726..4756d813d 100644
--- a/packages/create-llama/templates/components/vectordbs/python/none/context.py
+++ b/packages/create-llama/templates/components/vectordbs/python/none/context.py
@@ -1,15 +1,7 @@
-import os
-
 from llama_index import ServiceContext
-from llama_index.llms import OpenAI
-from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
 
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
+from app.context import create_base_context
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
 
 
 def create_service_context():
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/context.py b/packages/create-llama/templates/types/simple/fastapi/app/context.py
new file mode 100644
index 000000000..ae00de217
--- /dev/null
+++ b/packages/create-llama/templates/types/simple/fastapi/app/context.py
@@ -0,0 +1,11 @@
+import os
+
+from llama_index import ServiceContext
+from llama_index.llms import OpenAI
+
+
+def create_base_context():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    return ServiceContext.from_defaults(
+        llm=OpenAI(model=model),
+    )
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py b/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py
index 47a9b083c..663b595a4 100644
--- a/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py
+++ b/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py
@@ -1,15 +1,6 @@
-import os
-
-from llama_index import ServiceContext
 from llama_index.chat_engine import SimpleChatEngine
-from llama_index.llms import OpenAI
 
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
+from app.context import create_base_context
 
 
 def get_chat_engine():
diff --git a/packages/create-llama/templates/types/simple/fastapi/main.py b/packages/create-llama/templates/types/simple/fastapi/main.py
index 00cb79c44..ba56f0345 100644
--- a/packages/create-llama/templates/types/simple/fastapi/main.py
+++ b/packages/create-llama/templates/types/simple/fastapi/main.py
@@ -1,4 +1,5 @@
 from dotenv import load_dotenv
+
 load_dotenv()
 
 import logging
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/context.py b/packages/create-llama/templates/types/streaming/fastapi/app/context.py
new file mode 100644
index 000000000..ae00de217
--- /dev/null
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/context.py
@@ -0,0 +1,11 @@
+import os
+
+from llama_index import ServiceContext
+from llama_index.llms import OpenAI
+
+
+def create_base_context():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    return ServiceContext.from_defaults(
+        llm=OpenAI(model=model),
+    )
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py b/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py
index 47a9b083c..663b595a4 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py
@@ -1,15 +1,6 @@
-import os
-
-from llama_index import ServiceContext
 from llama_index.chat_engine import SimpleChatEngine
-from llama_index.llms import OpenAI
 
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
+from app.context import create_base_context
 
 
 def get_chat_engine():
--
GitLab
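
Note (appended after the signature, so `git am` ignores it): this commit deduplicates the `create_base_context()` definitions into a single `app/context.py` that each engine module imports. The hunks above truncate the bodies of `create_service_context()` and `get_chat_engine()`, so the sketch below is hypothetical, not taken from the patch; it only illustrates how a caller might layer settings on top of the shared base context, assuming the llama_index 0.9.x API these templates target (`ServiceContext.from_defaults` with `chunk_size`/`chunk_overlap`, and `llm`/`embed_model` attributes on `ServiceContext`).

    # Hypothetical sketch -- the real function body is elided in the hunks above.
    # Assumes llama_index 0.9.x.
    from llama_index import ServiceContext

    from app.context import create_base_context
    from app.engine.constants import CHUNK_OVERLAP, CHUNK_SIZE


    def create_service_context():
        # Reuse the shared base context and extend it with chunking settings.
        base = create_base_context()
        return ServiceContext.from_defaults(
            llm=base.llm,
            embed_model=base.embed_model,
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
        )

Keeping the base context in one module means a change such as swapping the default model only has to be made once, instead of in every engine variant.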