    SERVER_PORT=3001
    
JWT_SECRET="my-random-string-for-seeding" # Please generate a random string at least 12 chars long.

SIG_KEY='passphrase' # Please generate a random string at least 32 chars long.
SIG_SALT='salt' # Please generate a random string at least 32 chars long.
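# One way to generate suitable values (assumes OpenSSL is installed; any random-string
# generator works): `openssl rand -hex 32` prints a 64-character hex string.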
    
    
    ###########################################
######## LLM API SELECTION ################
    ###########################################
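# Uncomment exactly one LLM_PROVIDER block below, along with its associated settings,
# and leave the remaining provider blocks commented out.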
    
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=sk-xxxx
# OPEN_MODEL_PREF='gpt-4o'
    
    # LLM_PROVIDER='gemini'
    # GEMINI_API_KEY=
    # GEMINI_LLM_MODEL_PREF='gemini-pro'
    
    
    # LLM_PROVIDER='azure'
    # AZURE_OPENAI_ENDPOINT=
    # AZURE_OPENAI_KEY=
    # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
    # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
    
    
    # LLM_PROVIDER='anthropic'
    # ANTHROPIC_API_KEY=sk-ant-xxxx
    # ANTHROPIC_MODEL_PREF='claude-2'
    
    
    # LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
    
    
    # LLM_PROVIDER='localai'
    # LOCAL_AI_BASE_PATH='http://localhost:8080/v1'
    # LOCAL_AI_MODEL_PREF='luna-ai-llama2'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"
    
    # LLM_PROVIDER='ollama'
    # OLLAMA_BASE_PATH='http://host.docker.internal:11434'
    # OLLAMA_MODEL_PREF='llama2'
    # OLLAMA_MODEL_TOKEN_LIMIT=4096
    
    
    # LLM_PROVIDER='togetherai'
    # TOGETHER_AI_API_KEY='my-together-ai-key'
    # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
    
    
    # LLM_PROVIDER='fireworksai'
    # FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
    # FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
    
    
    # LLM_PROVIDER='perplexity'
    # PERPLEXITY_API_KEY='my-perplexity-key'
    # PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
    
    
    # LLM_PROVIDER='openrouter'
    # OPENROUTER_API_KEY='my-openrouter-key'
    # OPENROUTER_MODEL_PREF='openrouter/auto'
    
    
    # LLM_PROVIDER='mistral'
    # MISTRAL_API_KEY='example-mistral-ai-api-key'
    # MISTRAL_MODEL_PREF='mistral-tiny'
    
    
    # LLM_PROVIDER='huggingface'
    # HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
    # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
    # HUGGING_FACE_LLM_TOKEN_LIMIT=8000
    
    
    # LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192
    
    # LLM_PROVIDER='koboldcpp'
    # KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
    # KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
    # KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
    
    
    # LLM_PROVIDER='textgenwebui'
    # TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
    
    # LLM_PROVIDER='generic-openai'
    # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
    # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
    # GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
    # GENERIC_OPEN_AI_API_KEY=sk-123abc
    
    
    # LLM_PROVIDER='litellm'
    # LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
    # LITE_LLM_MODEL_TOKEN_LIMIT=4096
    # LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
    # LITE_LLM_API_KEY='sk-123abc'
    
    
    # LLM_PROVIDER='novita'
# NOVITA_LLM_API_KEY='your-novita-api-key-here' # Get your key at https://novita.ai/settings#key-management
    # NOVITA_LLM_MODEL_PREF='gryphe/mythomax-l2-13b'
    
    
    # LLM_PROVIDER='cohere'
    # COHERE_API_KEY=
    # COHERE_MODEL_PREF='command-r'
    
    
    # LLM_PROVIDER='apipie'
    # APIPIE_LLM_API_KEY='sk-123abc'
    # APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
    
    
    # LLM_PROVIDER='xai'
    # XAI_LLM_API_KEY='xai-your-api-key-here'
    # XAI_LLM_MODEL_PREF='grok-beta'
    
    
    ###########################################
######## Embedding API SELECTION ##########
    ###########################################
# Only used if you are using an LLM provider that does not natively support embeddings (OpenAI and Azure do).
    # EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
    
    # EMBEDDING_ENGINE='azure'
    # AZURE_OPENAI_ENDPOINT=
    # AZURE_OPENAI_KEY=
    # EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
    
# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The maximum length, in characters, of a string to embed
    
    # EMBEDDING_ENGINE='ollama'
    # EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
    # EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
    # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
    
    
    # EMBEDDING_ENGINE='lmstudio'
# EMBEDDING_BASE_PATH='http://localhost:1234/v1'
    # EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
    # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
    
    
    # EMBEDDING_ENGINE='cohere'
    # COHERE_API_KEY=
    # EMBEDDING_MODEL_PREF='embed-english-v3.0'
    
    
    # EMBEDDING_ENGINE='voyageai'
    # VOYAGEAI_API_KEY=
    # EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
    
    
    # EMBEDDING_ENGINE='litellm'
    # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
    # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
    # LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
    # LITE_LLM_API_KEY='sk-123abc'
    
    
    # EMBEDDING_ENGINE='generic-openai'
    # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
    # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
    # EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
    # GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
    
    
    ###########################################
    ######## Vector Database Selection ########
    ###########################################
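# Set exactly one VECTOR_DB value. LanceDB (enabled below) is the default and runs
# embedded with the server, so it needs no external service or extra configuration.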
    
    # Enable all below if you are using vector database: Chroma.
    # VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://localhost:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"
    
    
# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
    # PINECONE_API_KEY=
    # PINECONE_INDEX=
    
    # Enable all below if you are using vector database: Astra DB.
    # VECTOR_DB="astra"
    # ASTRA_DB_APPLICATION_TOKEN=
    # ASTRA_DB_ENDPOINT=
    
    
# Enable all below if you are using vector database: LanceDB.
VECTOR_DB="lancedb"
    
    # Enable all below if you are using vector database: Weaviate.
    # VECTOR_DB="weaviate"
    # WEAVIATE_ENDPOINT="http://localhost:8080"
    # WEAVIATE_API_KEY=
    
    
    # Enable all below if you are using vector database: Qdrant.
    # VECTOR_DB="qdrant"
    # QDRANT_ENDPOINT="http://localhost:6333"
    # QDRANT_API_KEY=
    
    
    # Enable all below if you are using vector database: Milvus.
    # VECTOR_DB="milvus"
    # MILVUS_ADDRESS="http://localhost:19530"
    # MILVUS_USERNAME=
    # MILVUS_PASSWORD=
    
    # Enable all below if you are using vector database: Zilliz Cloud.
    # VECTOR_DB="zilliz"
    # ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
    # ZILLIZ_API_TOKEN=api-token-here
    
    
    ###########################################
    ######## Audio Model Selection ############
    ###########################################
# (default) Use the built-in whisper-small model.
    WHISPER_PROVIDER="local"
    
# Use the OpenAI-hosted Whisper model.
    # WHISPER_PROVIDER="openai"
    # OPEN_AI_KEY=sk-xxxxxxxx
    
    
    ###########################################
    ######## TTS/STT Model Selection ##########
    ###########################################
    TTS_PROVIDER="native"
    
    # TTS_PROVIDER="openai"
    # TTS_OPEN_AI_KEY=sk-example
    # TTS_OPEN_AI_VOICE_MODEL=nova
    
    # TTS_PROVIDER="elevenlabs"
    # TTS_ELEVEN_LABS_KEY=
    # TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
    
    
    # TTS_PROVIDER="generic-openai"
    # TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
    # TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
    # TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
    
    
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application when hosting remotely.
    
    # STORAGE_DIR= # absolute filesystem path with no trailing slash
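# e.g. STORAGE_DIR="/app/server/storage" (example path only; adjust for your deployment)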
    
    
    ###########################################
    ######## PASSWORD COMPLEXITY ##############
    ###########################################
    # Enforce a password schema for your organization users.
# See https://github.com/kamronbatman/joi-password-complexity for documentation on these options.
    #PASSWORDMINCHAR=8
    #PASSWORDMAXCHAR=250
    #PASSWORDLOWERCASE=1
    #PASSWORDUPPERCASE=1
    #PASSWORDNUMERIC=1
    #PASSWORDSYMBOL=1
    #PASSWORDREQUIREMENTS=4
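# With the example values above, passwords must be 8-250 characters long and satisfy at
# least 4 of the character-class rules (lowercase, uppercase, numeric, symbol);
# PASSWORDREQUIREMENTS presumably corresponds to the library's requirementCount option.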
    
    
    ###########################################
    ######## ENABLE HTTPS SERVER ##############
    ###########################################
    # By enabling this and providing the path/filename for the key and cert,
    # the server will use HTTPS instead of HTTP.
    #ENABLE_HTTPS="true"
    #HTTPS_CERT_PATH="sslcert/cert.pem"
    #HTTPS_KEY_PATH="sslcert/key.pem"
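# One way to create a self-signed certificate for local testing (assumes OpenSSL is installed):
#   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
#     -keyout sslcert/key.pem -out sslcert/cert.pem -subj "/CN=localhost"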
    
    
    ###########################################
    ######## AGENT SERVICE KEYS ###############
    ###########################################
    
    #------ SEARCH ENGINES -------
    #=============================
    #------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
    # AGENT_GSE_KEY=
    # AGENT_GSE_CTX=
    
    
    #------ SearchApi.io ----------- https://www.searchapi.io/
    # AGENT_SEARCHAPI_API_KEY=
    # AGENT_SEARCHAPI_ENGINE=google
    
    
#------ Serper.dev ----------- https://serper.dev/
# AGENT_SERPER_DEV_KEY=
    
    #------ Bing Search ----------- https://portal.azure.com/
    # AGENT_BING_SEARCH_API_KEY=
    
    
    #------ Serply.io ----------- https://serply.io/
    # AGENT_SERPLY_API_KEY=
    
    
#------ SearXNG ----------- https://github.com/searxng/searxng
# AGENT_SEARXNG_API_URL=
    
    ###########################################
    ######## Other Configurations ############
    ###########################################
    
# Disable viewing chat history from the UI and frontend APIs.
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
# DISABLE_VIEW_CHAT_HISTORY=1
    
    # Enable simple SSO passthrough to pre-authenticate users from a third party service.
    # See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
    # SIMPLE_SSO_ENABLED=1