|
|
SERVER_PORT=3001 |
|
|
STORAGE_DIR="/app/server/storage" |
|
|
UID='1000' |
|
|
GID='1000' |
|
|
# SIG_KEY='passphrase' # Please generate random string at least 32 chars long. |
|
|
# SIG_SALT='salt' # Please generate random string at least 32 chars long. |
|
|
# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long. |
|
|
# JWT_EXPIRY="30d" # (optional) Duration string, see https://github.com/vercel/ms for accepted formats.
|
|
|
|
|
########################################### |
|
|
######## LLM API SELECTION ################
|
|
########################################### |
|
|
# LLM_PROVIDER='openai' |
|
|
# OPEN_AI_KEY= |
|
|
# OPEN_MODEL_PREF='gpt-4o' |
|
|
|
|
|
# LLM_PROVIDER='gemini' |
|
|
# GEMINI_API_KEY= |
|
|
# GEMINI_LLM_MODEL_PREF='gemini-2.0-flash-lite' |
|
|
|
|
|
# LLM_PROVIDER='azure' |
|
|
# AZURE_OPENAI_ENDPOINT= |
|
|
# AZURE_OPENAI_KEY= |
|
|
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model. |
|
|
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002 |
|
|
|
|
|
# LLM_PROVIDER='anthropic' |
|
|
# ANTHROPIC_API_KEY=sk-ant-xxxx |
|
|
# ANTHROPIC_MODEL_PREF='claude-2' |
|
|
|
|
|
# LLM_PROVIDER='lmstudio' |
|
|
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1' |
|
|
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17 |
|
|
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096 |
|
|
|
|
|
# LLM_PROVIDER='localai' |
|
|
# LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1' |
|
|
# LOCAL_AI_MODEL_PREF='luna-ai-llama2' |
|
|
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096 |
|
|
# LOCAL_AI_API_KEY="sk-123abc" |
|
|
|
|
|
# LLM_PROVIDER='ollama' |
|
|
# OLLAMA_BASE_PATH='http://host.docker.internal:11434' |
|
|
# OLLAMA_MODEL_PREF='llama2' |
|
|
# OLLAMA_MODEL_TOKEN_LIMIT=4096 |
|
|
# OLLAMA_AUTH_TOKEN='your-ollama-auth-token-here (optional, only for ollama running behind auth - Bearer token)' |
|
|
|
|
|
# LLM_PROVIDER='togetherai' |
|
|
# TOGETHER_AI_API_KEY='my-together-ai-key' |
|
|
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1' |
|
|
|
|
|
# LLM_PROVIDER='mistral' |
|
|
# MISTRAL_API_KEY='example-mistral-ai-api-key' |
|
|
# MISTRAL_MODEL_PREF='mistral-tiny' |
|
|
|
|
|
# LLM_PROVIDER='perplexity' |
|
|
# PERPLEXITY_API_KEY='my-perplexity-key' |
|
|
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct' |
|
|
|
|
|
# LLM_PROVIDER='openrouter' |
|
|
# OPENROUTER_API_KEY='my-openrouter-key' |
|
|
# OPENROUTER_MODEL_PREF='openrouter/auto' |
|
|
|
|
|
# LLM_PROVIDER='huggingface' |
|
|
# HUGGING_FACE_LLM_ENDPOINT=https://example.endpoints.huggingface.cloud
|
|
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx |
|
|
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000 |
|
|
|
|
|
# LLM_PROVIDER='groq' |
|
|
# GROQ_API_KEY=gsk_abcxyz |
|
|
# GROQ_MODEL_PREF=llama3-8b-8192 |
|
|
|
|
|
# LLM_PROVIDER='koboldcpp' |
|
|
# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1' |
|
|
# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S' |
|
|
# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096 |
|
|
|
|
|
# LLM_PROVIDER='textgenwebui' |
|
|
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1' |
|
|
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096 |
|
|
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc' |
|
|
|
|
|
# LLM_PROVIDER='generic-openai' |
|
|
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1' |
|
|
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo' |
|
|
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096 |
|
|
# GENERIC_OPEN_AI_API_KEY=sk-123abc |
|
|
|
|
|
# LLM_PROVIDER='litellm' |
|
|
# LITE_LLM_MODEL_PREF='gpt-3.5-turbo' |
|
|
# LITE_LLM_MODEL_TOKEN_LIMIT=4096 |
|
|
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000' |
|
|
# LITE_LLM_API_KEY='sk-123abc' |
|
|
|
|
|
# LLM_PROVIDER='novita' |
|
|
# NOVITA_LLM_API_KEY='your-novita-api-key-here' # Get one at https://novita.ai/settings/key-management
|
|
# NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1' |
|
|
|
|
|
# LLM_PROVIDER='cometapi' |
|
|
# COMETAPI_LLM_API_KEY='your-cometapi-api-key-here' # Get one at https://api.cometapi.com/console/token
|
|
# COMETAPI_LLM_MODEL_PREF='gpt-5-mini' |
|
|
# COMETAPI_LLM_TIMEOUT_MS=500 # Optional; stream idle timeout in ms (min 500ms) |
|
|
|
|
|
# LLM_PROVIDER='cohere' |
|
|
# COHERE_API_KEY= |
|
|
# COHERE_MODEL_PREF='command-r' |
|
|
|
|
|
# LLM_PROVIDER='bedrock' |
|
|
# AWS_BEDROCK_LLM_ACCESS_KEY_ID= |
|
|
# AWS_BEDROCK_LLM_ACCESS_KEY= |
|
|
# AWS_BEDROCK_LLM_REGION=us-west-2 |
|
|
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0 |
|
|
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191 |
|
|
# AWS_BEDROCK_LLM_CONNECTION_METHOD=iam |
|
|
# AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS=4096 |
|
|
# AWS_BEDROCK_LLM_SESSION_TOKEN= # Only required if CONNECTION_METHOD is 'sessionToken' |
|
|
|
|
|
# LLM_PROVIDER='fireworksai' |
|
|
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key' |
|
|
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct' |
|
|
|
|
|
# LLM_PROVIDER='apipie' |
|
|
# APIPIE_LLM_API_KEY='sk-123abc' |
|
|
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct' |
|
|
|
|
|
# LLM_PROVIDER='xai' |
|
|
# XAI_LLM_API_KEY='xai-your-api-key-here' |
|
|
# XAI_LLM_MODEL_PREF='grok-beta' |
|
|
|
|
|
# LLM_PROVIDER='nvidia-nim' |
|
|
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000' |
|
|
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct' |
|
|
|
|
|
# LLM_PROVIDER='deepseek' |
|
|
# DEEPSEEK_API_KEY='your-deepseek-api-key-here' |
|
|
# DEEPSEEK_MODEL_PREF='deepseek-chat' |
|
|
|
|
|
# LLM_PROVIDER='ppio' |
|
|
# PPIO_API_KEY='your-ppio-api-key-here' |
|
|
# PPIO_MODEL_PREF=deepseek/deepseek-v3/community |
|
|
|
|
|
# LLM_PROVIDER='moonshotai' |
|
|
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here' |
|
|
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k' |
|
|
|
|
|
########################################### |
|
|
######## Embedding API SELECTION ##########
|
|
########################################### |
|
|
# This will be the assumed default embedding selection and model
|
|
# EMBEDDING_ENGINE='native' |
|
|
# EMBEDDING_MODEL_PREF='Xenova/all-MiniLM-L6-v2' |
|
|
|
|
|
# Only used if you are using an LLM that does not natively support embedding (openai or Azure) |
|
|
# EMBEDDING_ENGINE='openai' |
|
|
# OPEN_AI_KEY=sk-xxxx |
|
|
# EMBEDDING_MODEL_PREF='text-embedding-ada-002' |
|
|
|
|
|
# EMBEDDING_ENGINE='azure' |
|
|
# AZURE_OPENAI_ENDPOINT= |
|
|
# AZURE_OPENAI_KEY= |
|
|
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002 |
|
|
|
|
|
# EMBEDDING_ENGINE='localai' |
|
|
# EMBEDDING_BASE_PATH='http://localhost:8080/v1' |
|
|
# EMBEDDING_MODEL_PREF='text-embedding-ada-002' |
|
|
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be |
|
|
|
|
|
# EMBEDDING_ENGINE='ollama' |
|
|
# EMBEDDING_BASE_PATH='http://host.docker.internal:11434' |
|
|
# EMBEDDING_MODEL_PREF='nomic-embed-text:latest' |
|
|
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 |
|
|
|
|
|
# EMBEDDING_ENGINE='lmstudio' |
|
|
# EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1' |
|
|
# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf' |
|
|
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 |
|
|
|
|
|
# EMBEDDING_ENGINE='cohere' |
|
|
# COHERE_API_KEY= |
|
|
# EMBEDDING_MODEL_PREF='embed-english-v3.0' |
|
|
|
|
|
# EMBEDDING_ENGINE='voyageai' |
|
|
# VOYAGEAI_API_KEY= |
|
|
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct' |
|
|
|
|
|
# EMBEDDING_ENGINE='litellm' |
|
|
# EMBEDDING_MODEL_PREF='text-embedding-ada-002' |
|
|
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 |
|
|
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000' |
|
|
# LITE_LLM_API_KEY='sk-123abc' |
|
|
|
|
|
# EMBEDDING_ENGINE='generic-openai' |
|
|
# EMBEDDING_MODEL_PREF='text-embedding-ada-002' |
|
|
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 |
|
|
# EMBEDDING_BASE_PATH='http://127.0.0.1:4000' |
|
|
# GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc' |
|
|
# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500 |
|
|
# GENERIC_OPEN_AI_EMBEDDING_API_DELAY_MS=1000 |
|
|
|
|
|
# EMBEDDING_ENGINE='gemini' |
|
|
# GEMINI_EMBEDDING_API_KEY= |
|
|
# EMBEDDING_MODEL_PREF='text-embedding-004' |
|
|
|
|
|
########################################### |
|
|
######## Vector Database Selection ######## |
|
|
########################################### |
|
|
# Enable all below if you are using vector database: LanceDB. |
|
|
# VECTOR_DB="lancedb" |
|
|
|
|
|
# Enable all below if you are using vector database: PGVector.
|
|
# VECTOR_DB="pgvector" |
|
|
# PGVECTOR_CONNECTION_STRING="postgresql://dbuser:dbuserpass@localhost:5432/yourdb" |
|
|
# PGVECTOR_TABLE_NAME="anythingllm_vectors" # optional, but can be defined |
|
|
|
|
|
# Enable all below if you are using vector database: Chroma. |
|
|
# VECTOR_DB="chroma" |
|
|
# CHROMA_ENDPOINT='http://host.docker.internal:8000' |
|
|
# CHROMA_API_HEADER="X-Api-Key" |
|
|
# CHROMA_API_KEY="sk-123abc" |
|
|
|
|
|
# Enable all below if you are using vector database: Chroma Cloud. |
|
|
# VECTOR_DB="chromacloud" |
|
|
# CHROMACLOUD_API_KEY="ck-your-api-key" |
|
|
# CHROMACLOUD_TENANT= |
|
|
# CHROMACLOUD_DATABASE= |
|
|
|
|
|
# Enable all below if you are using vector database: Pinecone. |
|
|
# VECTOR_DB="pinecone" |
|
|
# PINECONE_API_KEY= |
|
|
# PINECONE_INDEX= |
|
|
|
|
|
# Enable all below if you are using vector database: Weaviate. |
|
|
# VECTOR_DB="weaviate" |
|
|
# WEAVIATE_ENDPOINT="http://localhost:8080" |
|
|
# WEAVIATE_API_KEY= |
|
|
|
|
|
# Enable all below if you are using vector database: Qdrant. |
|
|
# VECTOR_DB="qdrant" |
|
|
# QDRANT_ENDPOINT="http://localhost:6333" |
|
|
# QDRANT_API_KEY= |
|
|
|
|
|
# Enable all below if you are using vector database: Milvus. |
|
|
# VECTOR_DB="milvus" |
|
|
# MILVUS_ADDRESS="http://localhost:19530" |
|
|
# MILVUS_USERNAME= |
|
|
# MILVUS_PASSWORD= |
|
|
|
|
|
# Enable all below if you are using vector database: Zilliz Cloud. |
|
|
# VECTOR_DB="zilliz" |
|
|
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com" |
|
|
# ZILLIZ_API_TOKEN=api-token-here |
|
|
|
|
|
# Enable all below if you are using vector database: Astra DB. |
|
|
# VECTOR_DB="astra" |
|
|
# ASTRA_DB_APPLICATION_TOKEN= |
|
|
# ASTRA_DB_ENDPOINT= |
|
|
|
|
|
########################################### |
|
|
######## Audio Model Selection ############ |
|
|
########################################### |
|
|
# (default) use built-in whisper-small model. |
|
|
# WHISPER_PROVIDER="local" |
|
|
|
|
|
# use openai hosted whisper model. |
|
|
# WHISPER_PROVIDER="openai" |
|
|
# OPEN_AI_KEY=sk-xxxxxxxx |
|
|
|
|
|
########################################### |
|
|
######## TTS/STT Model Selection ########## |
|
|
########################################### |
|
|
# TTS_PROVIDER="native" |
|
|
|
|
|
# TTS_PROVIDER="openai" |
|
|
# TTS_OPEN_AI_KEY=sk-example |
|
|
# TTS_OPEN_AI_VOICE_MODEL=nova |
|
|
|
|
|
# TTS_PROVIDER="generic-openai" |
|
|
# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example |
|
|
# TTS_OPEN_AI_COMPATIBLE_MODEL=tts-1 |
|
|
# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova |
|
|
# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1" |
|
|
|
|
|
# TTS_PROVIDER="elevenlabs" |
|
|
# TTS_ELEVEN_LABS_KEY= |
|
|
# TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel |
|
|
|
|
|
# CLOUD DEPLOYMENT VARIABLES ONLY
|
|
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting. |
|
|
# DISABLE_TELEMETRY="false" |
|
|
|
|
|
########################################### |
|
|
######## PASSWORD COMPLEXITY ############## |
|
|
########################################### |
|
|
# Enforce a password schema for your organization users. |
|
|
# Documentation on how to use: https://github.com/kamronbatman/joi-password-complexity
|
|
# Default is only 8 char minimum |
|
|
# PASSWORDMINCHAR=8 |
|
|
# PASSWORDMAXCHAR=250 |
|
|
# PASSWORDLOWERCASE=1 |
|
|
# PASSWORDUPPERCASE=1 |
|
|
# PASSWORDNUMERIC=1 |
|
|
# PASSWORDSYMBOL=1 |
|
|
# PASSWORDREQUIREMENTS=4 |
|
|
|
|
|
########################################### |
|
|
######## ENABLE HTTPS SERVER ############## |
|
|
########################################### |
|
|
# By enabling this and providing the path/filename for the key and cert, |
|
|
# the server will use HTTPS instead of HTTP. |
|
|
#ENABLE_HTTPS="true" |
|
|
#HTTPS_CERT_PATH="sslcert/cert.pem" |
|
|
#HTTPS_KEY_PATH="sslcert/key.pem" |
|
|
|
|
|
########################################### |
|
|
######## AGENT SERVICE KEYS ############### |
|
|
########################################### |
|
|
|
|
|
#------ SEARCH ENGINES ------- |
|
|
#============================= |
|
|
#------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
|
|
# AGENT_GSE_KEY= |
|
|
# AGENT_GSE_CTX= |
|
|
|
|
|
#------ SearchApi.io ----------- https://www.searchapi.io/
|
|
# AGENT_SEARCHAPI_API_KEY= |
|
|
# AGENT_SEARCHAPI_ENGINE=google |
|
|
|
|
|
#------ Serper.dev ----------- https://serper.dev/
|
|
# AGENT_SERPER_DEV_KEY= |
|
|
|
|
|
#------ Bing Search ----------- https: |
|
|
# AGENT_BING_SEARCH_API_KEY= |
|
|
|
|
|
#------ Serply.io ----------- https: |
|
|
# AGENT_SERPLY_API_KEY= |
|
|
|
|
|
#------ SearXNG ----------- https: |
|
|
# AGENT_SEARXNG_API_URL= |
|
|
|
|
|
#------ Tavily ----------- https://tavily.com/
|
|
# AGENT_TAVILY_API_KEY= |
|
|
|
|
|
#------ Exa Search ----------- https://exa.ai/
|
|
# AGENT_EXA_API_KEY= |
|
|
|
|
|
########################################### |
|
|
######## Other Configurations ############ |
|
|
########################################### |
|
|
|
|
|
# Disable viewing chat history from the UI and frontend APIs. |
|
|
# See https://docs.anythingllm.com/configuration for details.
|
|
# DISABLE_VIEW_CHAT_HISTORY=1 |
|
|
|
|
|
# Enable simple SSO passthrough to pre-authenticate users from a third party service. |
|
|
# See https://docs.anythingllm.com/configuration for details.
|
|
# SIMPLE_SSO_ENABLED=1 |
|
|
# SIMPLE_SSO_NO_LOGIN=1 |
|
|
# SIMPLE_SSO_NO_LOGIN_REDIRECT=https: |
|
|
|
|
|
# Allow scraping of any IP address in collector - must be string "true" to be enabled |
|
|
# See https: |
|
|
# COLLECTOR_ALLOW_ANY_IP="true" |
|
|
|
|
|
# Specify the target languages for when using OCR to parse images and PDFs. |
|
|
# This is a comma separated list of language codes as a string. Unsupported languages will be ignored. |
|
|
# Default is English. See https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html for a list of supported languages.
|
|
# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol
|
|
|
|
|
# Runtime flags for built-in puppeteer Chromium instance
|
|
# This is only required on Linux machines running AnythingLLM via Docker |
|
|
# and do not want to use the --cap-add=SYS_ADMIN docker argument |
|
|
# ANYTHINGLLM_CHROMIUM_ARGS="--no-sandbox,--disable-setuid-sandbox" |