diff --git a/templates/types/streaming/fastapi/README-template.md b/templates/types/streaming/fastapi/README-template.md
index 77fa879319be255a6cb422b65c0b9602e7125076..a91b9f15a4d1d359934c2987702f883514927b8a 100644
--- a/templates/types/streaming/fastapi/README-template.md
+++ b/templates/types/streaming/fastapi/README-template.md
@@ -11,13 +11,19 @@ poetry shell
 
 By default, we use the OpenAI LLM (though you can customize, see `app/context.py`). As a result you need to specify an `OPENAI_API_KEY` in an .env file in this directory.
 
-Example `backend/.env` file:
+Example `.env` file:
 
 ```
 OPENAI_API_KEY=<openai_api_key>
 ```
 
-Second, run the development server:
+Second, generate the embeddings of the documents in the `./data` directory:
+
+```
+python app/engine/generate.py
+```
+
+Third, run the development server:
 
 ```
 python main.py
diff --git a/templates/types/streaming/fastapi/app/engine/constants.py b/templates/types/streaming/fastapi/app/engine/constants.py
index 6dba7d2e0b870bfbec91f19e054d775ab25ceb23..4180edc4b440cafc26aef00530dc3ba2af3cbdf6 100644
--- a/templates/types/streaming/fastapi/app/engine/constants.py
+++ b/templates/types/streaming/fastapi/app/engine/constants.py
@@ -1,4 +1,4 @@
-STORAGE_DIR = "./storage"  # directory to cache the generated index
-DATA_DIR = "./data"  # directory containing the documents to index
+STORAGE_DIR = "storage"  # directory to cache the generated index
+DATA_DIR = "data"  # directory containing the documents to index
 CHUNK_SIZE = 1024
 CHUNK_OVERLAP = 20
diff --git a/templates/types/streaming/fastapi/app/engine/generate.py b/templates/types/streaming/fastapi/app/engine/generate.py
index 3abb7491abe405d9d5377e37f78604ee1f9bd2b1..3c4cd6a9e310f3f2e2f7e4709e94b9073282f151 100644
--- a/templates/types/streaming/fastapi/app/engine/generate.py
+++ b/templates/types/streaming/fastapi/app/engine/generate.py
@@ -12,7 +12,8 @@ from llama_index import (
     VectorStoreIndex,
 )
 
-logger = logging.getLogger("uvicorn")
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
 
 
 def generate_datasource(service_context):
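
For context, a minimal sketch of what the whole `app/engine/generate.py` might look like after this change. The patch only shows the logging lines and the tail of the import block, so the exact import list, the `generate_datasource` body, and the `create_base_context` helper (assumed to live in `app/context.py`, which the README points to for customization) are assumptions based on the llama_index 0.9-era API, not code confirmed by the diff:

```python
import logging

# Hypothetical helper assumed to build the ServiceContext; not shown in the diff.
from app.context import create_base_context
from app.engine.constants import DATA_DIR, STORAGE_DIR
from llama_index import (
    SimpleDirectoryReader,
    VectorStoreIndex,
)

# Configure the root logger directly: this script is run standalone
# (`python app/engine/generate.py`), so uvicorn's handlers are never installed.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


def generate_datasource(service_context):
    logger.info("Creating new index")
    # Load every document under DATA_DIR and build a vector index over them.
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(
        documents, service_context=service_context
    )
    # Persist the index so the server can load it from STORAGE_DIR at startup.
    index.storage_context.persist(STORAGE_DIR)
    logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")


if __name__ == "__main__":
    generate_datasource(create_base_context())
```

The switch away from `logging.getLogger("uvicorn")` matters for exactly this standalone use: when the module is executed as a script rather than inside the running server, the "uvicorn" logger has no handlers configured, so its INFO messages would be silently dropped; `basicConfig` plus the root logger keeps the progress messages visible.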