innofacisteven commited on
Commit
3124ba9
·
verified ·
1 Parent(s): 41919df

Upload .env

Browse files
Files changed (1) hide show
  1. .env +377 -0
.env ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### This is a sample .env file
2
+
3
+ ###########################
4
+ ### Server Configuration
5
+ ###########################
6
+ HOST=0.0.0.0
7
+ PORT=9621
8
+ WEBUI_TITLE='My Graph KB'
9
+ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
10
+ # WORKERS=2
11
+ ### gunicorn worker timeout (used as the default LLM request timeout if LLM_TIMEOUT is not set)
12
+ # TIMEOUT=150
13
+ # CORS_ORIGINS=http://localhost:3000,http://localhost:8080
14
+
15
+ ### Optional SSL Configuration
16
+ # SSL=true
17
+ # SSL_CERTFILE=/path/to/cert.pem
18
+ # SSL_KEYFILE=/path/to/key.pem
19
+
20
+ ### Directory Configuration (defaults to current working directory)
21
+ ### Default value is ./inputs and ./rag_storage
22
+ # INPUT_DIR=<absolute_path_for_doc_input_dir>
23
+ # WORKING_DIR=<absolute_path_for_working_dir>
24
+
25
+ ### Tiktoken cache directory (Store cached files in this folder for offline deployment)
26
+ # TIKTOKEN_CACHE_DIR=/app/data/tiktoken
27
+
28
+ ### Ollama Emulating Model and Tag
29
+ # OLLAMA_EMULATING_MODEL_NAME=lightrag
30
+ OLLAMA_EMULATING_MODEL_TAG=latest
31
+
32
+ ### Max nodes return from graph retrieval in webui
33
+ # MAX_GRAPH_NODES=1000
34
+
35
+ ### Logging level
36
+ # LOG_LEVEL=INFO
37
+ # VERBOSE=False
38
+ # LOG_MAX_BYTES=10485760
39
+ # LOG_BACKUP_COUNT=5
40
+ ### Logfile location (defaults to current working directory)
41
+ # LOG_DIR=/path/to/log/directory
42
+
43
+ #####################################
44
+ ### Login and API-Key Configuration
45
+ #####################################
46
+ # AUTH_ACCOUNTS='admin:admin123,user1:pass456'
47
+ # TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
48
+ # TOKEN_EXPIRE_HOURS=48
49
+ # GUEST_TOKEN_EXPIRE_HOURS=24
50
+ # JWT_ALGORITHM=HS256
51
+
52
+ ### API-Key to access LightRAG Server API
53
+ # LIGHTRAG_API_KEY=your-secure-api-key-here
54
+ # WHITELIST_PATHS=/health,/api/*
55
+
56
+ ######################################################################################
57
+ ### Query Configuration
58
+ ###
59
+ ### How to control the context length sent to LLM:
60
+ ### MAX_ENTITY_TOKENS + MAX_RELATION_TOKENS < MAX_TOTAL_TOKENS
61
+ ### Chunk_Tokens = MAX_TOTAL_TOKENS - Actual_Entity_Tokens - Actual_Relation_Tokens
62
+ ######################################################################################
63
+ # LLM response cache for query (Not valid for streaming response)
64
+ ENABLE_LLM_CACHE=true
65
+ # COSINE_THRESHOLD=0.2
66
+ ### Number of entities or relations retrieved from KG
67
+ # TOP_K=40
68
+ ### Maximum number of chunks for naive vector search
69
+ # CHUNK_TOP_K=20
70
+ ### control the actual entities sent to the LLM
71
+ # MAX_ENTITY_TOKENS=6000
72
+ ### control the actual relations sent to the LLM
73
+ # MAX_RELATION_TOKENS=8000
74
+ ### control the maximum tokens sent to the LLM (including entities, relations and chunks)
75
+ # MAX_TOTAL_TOKENS=30000
76
+
77
+ ### maximum number of related chunks per source entity or relation
78
+ ### The chunk picker uses this value to determine the total number of chunks selected from the KG (knowledge graph)
79
+ ### Higher values increase re-ranking time
80
+ # RELATED_CHUNK_NUMBER=5
81
+
82
+ ### chunk selection strategies
83
+ ### VECTOR: Pick KG chunks by vector similarity, delivering chunks to the LLM that align more closely with naive retrieval
84
+ ### WEIGHT: Pick KG chunks by entity and chunk weight, delivering chunks more solely related to the KG to the LLM
85
+ ### If reranking is enabled, the impact of chunk selection strategies will be diminished.
86
+ # KG_CHUNK_PICK_METHOD=VECTOR
87
+
88
+ #########################################################
89
+ ### Reranking configuration
90
+ ### RERANK_BINDING type: null, cohere, jina, aliyun
91
+ ### For rerank model deployed by vLLM use cohere binding
92
+ #########################################################
93
+ RERANK_BINDING=null
94
+ ### Enable rerank by default in query params when RERANK_BINDING is not null
95
+ # RERANK_BY_DEFAULT=True
96
+ ### rerank score chunk filter(set to 0.0 to keep all chunks, 0.6 or above if LLM is not strong enough)
97
+ # MIN_RERANK_SCORE=0.0
98
+
99
+ ### For local deployment with vLLM
100
+ # RERANK_MODEL=BAAI/bge-reranker-v2-m3
101
+ # RERANK_BINDING_HOST=http://localhost:8000/v1/rerank
102
+ # RERANK_BINDING_API_KEY=your_rerank_api_key_here
103
+
104
+ ### Default value for Cohere AI
105
+ # RERANK_MODEL=rerank-v3.5
106
+ # RERANK_BINDING_HOST=https://api.cohere.com/v2/rerank
107
+ # RERANK_BINDING_API_KEY=your_rerank_api_key_here
108
+
109
+ ### Default value for Jina AI
110
+ # RERANK_MODEL=jina-reranker-v2-base-multilingual
111
+ # RERANK_BINDING_HOST=https://api.jina.ai/v1/rerank
112
+ # RERANK_BINDING_API_KEY=your_rerank_api_key_here
113
+
114
+ ### Default value for Aliyun
115
+ # RERANK_MODEL=gte-rerank-v2
116
+ # RERANK_BINDING_HOST=https://dashscope.aliyuncs.com/api/v1/services/rerank/text-rerank/text-rerank
117
+ # RERANK_BINDING_API_KEY=your_rerank_api_key_here
118
+
119
+ ########################################
120
+ ### Document processing configuration
121
+ ########################################
122
+ ENABLE_LLM_CACHE_FOR_EXTRACT=true
123
+
124
+ ### Document processing output language: English, Chinese, French, German ...
125
+ SUMMARY_LANGUAGE=English
126
+
127
+ ### Entity types that the LLM will attempt to recognize
128
+ # ENTITY_TYPES='["Person", "Creature", "Organization", "Location", "Event", "Concept", "Method", "Content", "Data", "Artifact", "NaturalObject"]'
129
+
130
+ ### Chunk size for document splitting, 500~1500 is recommended
131
+ # CHUNK_SIZE=1200
132
+ # CHUNK_OVERLAP_SIZE=100
133
+
134
+ ### Number of summary segments or tokens to trigger LLM summary on entity/relation merge (at least 3 is recommended)
135
+ # FORCE_LLM_SUMMARY_ON_MERGE=8
136
+ ### Max description token size to trigger LLM summary
137
+ # SUMMARY_MAX_TOKENS=1200
138
+ ### Recommended LLM summary output length in tokens
139
+ # SUMMARY_LENGTH_RECOMMENDED=600
140
+ ### Maximum context size sent to LLM for description summary
141
+ # SUMMARY_CONTEXT_SIZE=12000
142
+
143
+ ###############################
144
+ ### Concurrency Configuration
145
+ ###############################
146
+ ### Max concurrency requests of LLM (for both query and document processing)
147
+ MAX_ASYNC=4
148
+ ### Number of parallel processing documents(between 2~10, MAX_ASYNC/3 is recommended)
149
+ MAX_PARALLEL_INSERT=2
150
+ ### Max concurrency requests for Embedding
151
+ # EMBEDDING_FUNC_MAX_ASYNC=8
152
+ ### Number of chunks sent to Embedding in a single request
153
+ # EMBEDDING_BATCH_NUM=10
154
+
155
+ ###########################################################
156
+ ### LLM Configuration
157
+ ### LLM_BINDING type: openai, ollama, lollms, azure_openai, aws_bedrock
158
+ ###########################################################
159
+ ### LLM request timeout setting for all LLMs (0 means no timeout for Ollama)
160
+ # LLM_TIMEOUT=180
161
+
162
+ LLM_BINDING=openai
163
+ LLM_MODEL=gpt-4o
164
+ LLM_BINDING_HOST=https://api.openai.com/v1
165
+ LLM_BINDING_API_KEY=your_api_key
166
+
167
+ ### Optional for Azure
168
+ # AZURE_OPENAI_API_VERSION=2024-08-01-preview
169
+ # AZURE_OPENAI_DEPLOYMENT=gpt-4o
170
+
171
+ ### Openrouter example
172
+ # LLM_MODEL=google/gemini-2.5-flash
173
+ # LLM_BINDING_HOST=https://openrouter.ai/api/v1
174
+ # LLM_BINDING_API_KEY=your_api_key
175
+ # LLM_BINDING=openai
176
+
177
+ ### OpenAI Compatible API Specific Parameters
178
+ ### Increased temperature values may mitigate infinite inference loops in certain LLM, such as Qwen3-30B.
179
+ # OPENAI_LLM_TEMPERATURE=0.9
180
+ ### Set the max_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s)
181
+ ### Typically, max_tokens does not include prompt content, though some models, such as Gemini Models, are exceptions
182
+ ### For vLLM/SGLang deployed models, or most of OpenAI compatible API provider
183
+ # OPENAI_LLM_MAX_TOKENS=9000
184
+ ### For OpenAI o1-mini or newer models
185
+ OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
186
+
187
+ #### OpenAI's new API utilizes max_completion_tokens instead of max_tokens
188
+ # OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
189
+
190
+ ### use the following command to see all supported options for OpenAI, azure_openai or OpenRouter
191
+ ### lightrag-server --llm-binding openai --help
192
+ ### OpenAI Specific Parameters
193
+ # OPENAI_LLM_REASONING_EFFORT=minimal
194
+ ### OpenRouter Specific Parameters
195
+ # OPENAI_LLM_EXTRA_BODY='{"reasoning": {"enabled": false}}'
196
+ ### Qwen3 Specific Parameters deploy by vLLM
197
+ # OPENAI_LLM_EXTRA_BODY='{"chat_template_kwargs": {"enable_thinking": false}}'
198
+
199
+ ### use the following command to see all supported options for Ollama LLM
200
+ ### lightrag-server --llm-binding ollama --help
201
+ ### Ollama Server Specific Parameters
202
+ ### OLLAMA_LLM_NUM_CTX must be provided, and should be at least MAX_TOTAL_TOKENS + 2000
203
+ OLLAMA_LLM_NUM_CTX=32768
204
+ ### Set the max_output_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s)
205
+ # OLLAMA_LLM_NUM_PREDICT=9000
206
+ ### Stop sequences for Ollama LLM
207
+ # OLLAMA_LLM_STOP='["</s>", "<|EOT|>"]'
208
+
209
+ ### Bedrock Specific Parameters
210
+ # BEDROCK_LLM_TEMPERATURE=1.0
211
+
212
+ ####################################################################################
213
+ ### Embedding Configuration (Should not be changed after the first file processed)
214
+ ### EMBEDDING_BINDING: ollama, openai, azure_openai, jina, lollms, aws_bedrock
215
+ ####################################################################################
216
+ # EMBEDDING_TIMEOUT=30
217
+ EMBEDDING_BINDING=ollama
218
+ EMBEDDING_MODEL=bge-m3:latest
219
+ EMBEDDING_DIM=1024
220
+ EMBEDDING_BINDING_API_KEY=your_api_key
221
+ # If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
222
+ EMBEDDING_BINDING_HOST=http://localhost:11434
223
+
224
+ ### OpenAI compatible (VoyageAI embedding openai compatible)
225
+ # EMBEDDING_BINDING=openai
226
+ # EMBEDDING_MODEL=text-embedding-3-large
227
+ # EMBEDDING_DIM=3072
228
+ # EMBEDDING_BINDING_HOST=https://api.openai.com/v1
229
+ # EMBEDDING_BINDING_API_KEY=your_api_key
230
+
231
+ ### Optional for Azure
232
+ # AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
233
+ # AZURE_EMBEDDING_API_VERSION=2023-05-15
234
+ # AZURE_EMBEDDING_ENDPOINT=your_endpoint
235
+ # AZURE_EMBEDDING_API_KEY=your_api_key
236
+
237
+ ### Jina AI Embedding
238
+ # EMBEDDING_BINDING=jina
239
+ # EMBEDDING_BINDING_HOST=https://api.jina.ai/v1/embeddings
240
+ # EMBEDDING_MODEL=jina-embeddings-v4
241
+ # EMBEDDING_DIM=2048
242
+ # EMBEDDING_BINDING_API_KEY=your_api_key
243
+
244
+ ### Optional for Ollama embedding
245
+ OLLAMA_EMBEDDING_NUM_CTX=8192
246
+ ### use the following command to see all supported options for Ollama embedding
247
+ ### lightrag-server --embedding-binding ollama --help
248
+
249
+ ####################################################################
250
+ ### WORKSPACE sets workspace name for all storage types
251
+ ### for the purpose of isolating data from LightRAG instances.
252
+ ### Valid workspace name constraints: a-z, A-Z, 0-9, and _
253
+ ####################################################################
254
+ # WORKSPACE=space1
255
+
256
+ ############################
257
+ ### Data storage selection
258
+ ############################
259
+ ### Default storage (Recommended for small scale deployment)
260
+ # LIGHTRAG_KV_STORAGE=JsonKVStorage
261
+ # LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
262
+ # LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
263
+ # LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
264
+
265
+ ### Redis Storage (Recommended for production deployment)
266
+ # LIGHTRAG_KV_STORAGE=RedisKVStorage
267
+ # LIGHTRAG_DOC_STATUS_STORAGE=RedisDocStatusStorage
268
+
269
+ ### Vector Storage (Recommended for production deployment)
270
+ # LIGHTRAG_VECTOR_STORAGE=MilvusVectorDBStorage
271
+ # LIGHTRAG_VECTOR_STORAGE=QdrantVectorDBStorage
272
+ # LIGHTRAG_VECTOR_STORAGE=FaissVectorDBStorage
273
+
274
+ ### Graph Storage (Recommended for production deployment)
275
+ # LIGHTRAG_GRAPH_STORAGE=Neo4JStorage
276
+ # LIGHTRAG_GRAPH_STORAGE=MemgraphStorage
277
+
278
+ ### PostgreSQL
279
+ # LIGHTRAG_KV_STORAGE=PGKVStorage
280
+ # LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
281
+ # LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
282
+ # LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
283
+
284
+ ### MongoDB (Vector storage only available on Atlas Cloud)
285
+ # LIGHTRAG_KV_STORAGE=MongoKVStorage
286
+ # LIGHTRAG_DOC_STATUS_STORAGE=MongoDocStatusStorage
287
+ # LIGHTRAG_GRAPH_STORAGE=MongoGraphStorage
288
+ # LIGHTRAG_VECTOR_STORAGE=MongoVectorDBStorage
289
+
290
+ ### PostgreSQL Configuration
291
+ POSTGRES_HOST=localhost
292
+ POSTGRES_PORT=5432
293
+ POSTGRES_USER=your_username
294
+ POSTGRES_PASSWORD='your_password'
295
+ POSTGRES_DATABASE=your_database
296
+ POSTGRES_MAX_CONNECTIONS=12
297
+ # POSTGRES_WORKSPACE=forced_workspace_name
298
+
299
+ ### PostgreSQL Vector Storage Configuration
300
+ ### Vector storage type: HNSW, IVFFlat
301
+ POSTGRES_VECTOR_INDEX_TYPE=HNSW
302
+ POSTGRES_HNSW_M=16
303
+ POSTGRES_HNSW_EF=200
304
+ POSTGRES_IVFFLAT_LISTS=100
305
+
306
+ ### PostgreSQL Connection Retry Configuration (Network Robustness)
307
+ ### Number of retry attempts (1-10, default: 3)
308
+ ### Initial retry backoff in seconds (0.1-5.0, default: 0.5)
309
+ ### Maximum retry backoff in seconds (backoff-60.0, default: 5.0)
310
+ ### Connection pool close timeout in seconds (1.0-30.0, default: 5.0)
311
+ # POSTGRES_CONNECTION_RETRIES=3
312
+ # POSTGRES_CONNECTION_RETRY_BACKOFF=0.5
313
+ # POSTGRES_CONNECTION_RETRY_BACKOFF_MAX=5.0
314
+ # POSTGRES_POOL_CLOSE_TIMEOUT=5.0
315
+
316
+ ### PostgreSQL SSL Configuration (Optional)
317
+ # POSTGRES_SSL_MODE=require
318
+ # POSTGRES_SSL_CERT=/path/to/client-cert.pem
319
+ # POSTGRES_SSL_KEY=/path/to/client-key.pem
320
+ # POSTGRES_SSL_ROOT_CERT=/path/to/ca-cert.pem
321
+ # POSTGRES_SSL_CRL=/path/to/crl.pem
322
+
323
+ ### PostgreSQL Server Settings (for Supabase Supavisor)
324
+ # Use this to pass extra options to the PostgreSQL connection string.
325
+ # For Supabase, you might need to set it like this:
326
+ # POSTGRES_SERVER_SETTINGS="options=reference%3D[project-ref]"
327
+
328
+ # Default is 100; set to 0 to disable
329
+ # POSTGRES_STATEMENT_CACHE_SIZE=100
330
+
331
+ ### Neo4j Configuration
332
+ NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
333
+ NEO4J_USERNAME=neo4j
334
+ NEO4J_PASSWORD='your_password'
335
+ NEO4J_DATABASE=neo4j
336
+ NEO4J_MAX_CONNECTION_POOL_SIZE=100
337
+ NEO4J_CONNECTION_TIMEOUT=30
338
+ NEO4J_CONNECTION_ACQUISITION_TIMEOUT=30
339
+ NEO4J_MAX_TRANSACTION_RETRY_TIME=30
340
+ NEO4J_MAX_CONNECTION_LIFETIME=300
341
+ NEO4J_LIVENESS_CHECK_TIMEOUT=30
342
+ NEO4J_KEEP_ALIVE=true
343
+ # NEO4J_WORKSPACE=forced_workspace_name
344
+
345
+ ### MongoDB Configuration
346
+ MONGO_URI=mongodb://root:root@localhost:27017/
347
+ #MONGO_URI=mongodb+srv://xxxx
348
+ MONGO_DATABASE=LightRAG
349
+ # MONGODB_WORKSPACE=forced_workspace_name
350
+
351
+ ### Milvus Configuration
352
+ MILVUS_URI=http://localhost:19530
353
+ MILVUS_DB_NAME=lightrag
354
+ # MILVUS_USER=root
355
+ # MILVUS_PASSWORD=your_password
356
+ # MILVUS_TOKEN=your_token
357
+ # MILVUS_WORKSPACE=forced_workspace_name
358
+
359
+ ### Qdrant
360
+ QDRANT_URL=http://localhost:6333
361
+ # QDRANT_API_KEY=your-api-key
362
+ # QDRANT_WORKSPACE=forced_workspace_name
363
+
364
+ ### Redis
365
+ REDIS_URI=redis://localhost:6379
366
+ REDIS_SOCKET_TIMEOUT=30
367
+ REDIS_CONNECT_TIMEOUT=10
368
+ REDIS_MAX_CONNECTIONS=100
369
+ REDIS_RETRY_ATTEMPTS=3
370
+ # REDIS_WORKSPACE=forced_workspace_name
371
+
372
+ ### Memgraph Configuration
373
+ MEMGRAPH_URI=bolt://localhost:7687
374
+ MEMGRAPH_USERNAME=
375
+ MEMGRAPH_PASSWORD=
376
+ MEMGRAPH_DATABASE=memgraph
377
+ # MEMGRAPH_WORKSPACE=forced_workspace_name