From 2e4be99f4f035620970653f098ff2fbcca1bdf9a Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Fri, 22 Dec 2023 15:16:09 +0700
Subject: [PATCH] feat[cl-fastapi]: test and document new fastapi structure

---
 templates/types/streaming/fastapi/README-template.md | 10 ++++++++--
 .../types/streaming/fastapi/app/engine/constants.py  |  4 ++--
 .../types/streaming/fastapi/app/engine/generate.py   |  3 ++-
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/templates/types/streaming/fastapi/README-template.md b/templates/types/streaming/fastapi/README-template.md
index 77fa8793..a91b9f15 100644
--- a/templates/types/streaming/fastapi/README-template.md
+++ b/templates/types/streaming/fastapi/README-template.md
@@ -11,13 +11,19 @@ poetry shell
 
 By default, we use the OpenAI LLM (though you can customize, see `app/context.py`). As a result you need to specify an `OPENAI_API_KEY` in an .env file in this directory.
 
-Example `backend/.env` file:
+Example `.env` file:
 
 ```
 OPENAI_API_KEY=<openai_api_key>
 ```
 
-Second, run the development server:
+Second, generate the embeddings of the documents in the `./data` directory:
+
+```
+python app/engine/generate.py
+```
+
+Third, run the development server:
 
 ```
 python main.py
diff --git a/templates/types/streaming/fastapi/app/engine/constants.py b/templates/types/streaming/fastapi/app/engine/constants.py
index 6dba7d2e..4180edc4 100644
--- a/templates/types/streaming/fastapi/app/engine/constants.py
+++ b/templates/types/streaming/fastapi/app/engine/constants.py
@@ -1,4 +1,4 @@
-STORAGE_DIR = "./storage" # directory to cache the generated index
-DATA_DIR = "./data" # directory containing the documents to index
+STORAGE_DIR = "storage" # directory to cache the generated index
+DATA_DIR = "data" # directory containing the documents to index
 CHUNK_SIZE = 1024
 CHUNK_OVERLAP = 20
diff --git a/templates/types/streaming/fastapi/app/engine/generate.py b/templates/types/streaming/fastapi/app/engine/generate.py
index 3abb7491..3c4cd6a9 100644
--- a/templates/types/streaming/fastapi/app/engine/generate.py
+++ b/templates/types/streaming/fastapi/app/engine/generate.py
@@ -12,7 +12,8 @@ from llama_index import (
     VectorStoreIndex,
 )
 
-logger = logging.getLogger("uvicorn")
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
 
 
 def generate_datasource(service_context):
--
GitLab
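
For context on the new README step (`python app/engine/generate.py`), here is a minimal sketch of what `app/engine/generate.py` could look like after this patch. It is an assumption, not the file's actual contents: only the `VectorStoreIndex` import, the logging setup, the `generate_datasource(service_context)` signature, and the `STORAGE_DIR`/`DATA_DIR`/`CHUNK_SIZE`/`CHUNK_OVERLAP` constants appear in the diff; `SimpleDirectoryReader`, `ServiceContext.from_defaults`, and the `__main__` block are filled in from the llama_index 0.9-era API, and the import of `app.engine.constants` assumes the `app` package is importable (e.g. after `poetry install`, or by running `python -m app.engine.generate` from the project root).

```python
# Hypothetical sketch, not the actual patched file: build a vector index over
# DATA_DIR and persist it to STORAGE_DIR, matching the constants and imports
# shown in the diff. Uses the llama_index 0.9-era API.
import logging

from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
)

from app.engine.constants import CHUNK_OVERLAP, CHUNK_SIZE, DATA_DIR, STORAGE_DIR

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


def generate_datasource(service_context):
    logger.info("Creating new index")
    # Read every document in the data directory and embed it into a vector index.
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    # Persist the index to STORAGE_DIR so the FastAPI server can load it at startup.
    index.storage_context.persist(STORAGE_DIR)
    logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")


if __name__ == "__main__":
    # Assumed entry point; the real template may build its service context
    # elsewhere (e.g. app/context.py).
    service_context = ServiceContext.from_defaults(
        chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP
    )
    generate_datasource(service_context)
```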