innofacisteven committed on
Commit
0e77712
·
verified ·
1 Parent(s): 13816d2

Update .env

Browse files
Files changed (1) hide show
  1. .env +7 -7
.env CHANGED
@@ -43,14 +43,14 @@ OLLAMA_EMULATING_MODEL_TAG=latest
43
  #####################################
44
  ### Login and API-Key Configuration
45
  #####################################
46
- # AUTH_ACCOUNTS='admin:admin123,user1:pass456'
47
  # TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
48
  # TOKEN_EXPIRE_HOURS=48
49
  # GUEST_TOKEN_EXPIRE_HOURS=24
50
  # JWT_ALGORITHM=HS256
51
 
52
  ### API-Key to access LightRAG Server API
53
- # LIGHTRAG_API_KEY=your-secure-api-key-here
54
  # WHITELIST_PATHS=/health,/api/*
55
 
56
  ######################################################################################
@@ -144,13 +144,13 @@ SUMMARY_LANGUAGE=English
144
  ### Concurrency Configuration
145
  ###############################
146
  ### Max concurrent requests to the LLM (for both query and document processing)
147
- MAX_ASYNC=4
148
  ### Number of parallel processing documents (between 2~10, MAX_ASYNC/3 is recommended)
149
- MAX_PARALLEL_INSERT=2
150
  ### Max concurrent requests for Embedding
151
- # EMBEDDING_FUNC_MAX_ASYNC=8
152
  ### Number of chunks sent to Embedding in a single request
153
- # EMBEDDING_BATCH_NUM=10
154
 
155
  ###########################################################
156
  ### LLM Configuration
@@ -160,7 +160,7 @@ MAX_PARALLEL_INSERT=2
160
  # LLM_TIMEOUT=180
161
 
162
  LLM_BINDING=openai
163
- LLM_MODEL=gpt-4o
164
  LLM_BINDING_HOST=https://api.openai.com/v1
165
  LLM_BINDING_API_KEY=${LLM_BINDING_API_KEY}
166
 
 
43
  #####################################
44
  ### Login and API-Key Configuration
45
  #####################################
46
+ AUTH_ACCOUNTS=${AUTH_ACCOUNTS}
47
  # TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
48
  # TOKEN_EXPIRE_HOURS=48
49
  # GUEST_TOKEN_EXPIRE_HOURS=24
50
  # JWT_ALGORITHM=HS256
51
 
52
  ### API-Key to access LightRAG Server API
53
+ LIGHTRAG_API_KEY=${LIGHTRAG_API_KEY}
54
  # WHITELIST_PATHS=/health,/api/*
55
 
56
  ######################################################################################
 
144
  ### Concurrency Configuration
145
  ###############################
146
  ### Max concurrent requests to the LLM (for both query and document processing)
147
+ MAX_ASYNC=12
148
  ### Number of parallel processing documents (between 2~10, MAX_ASYNC/3 is recommended)
149
+ MAX_PARALLEL_INSERT=3
150
  ### Max concurrent requests for Embedding
151
+ EMBEDDING_FUNC_MAX_ASYNC=24
152
  ### Number of chunks sent to Embedding in a single request
153
+ EMBEDDING_BATCH_NUM=100
154
 
155
  ###########################################################
156
  ### LLM Configuration
 
160
  # LLM_TIMEOUT=180
161
 
162
  LLM_BINDING=openai
163
+ LLM_MODEL=gpt-4.1-nano
164
  LLM_BINDING_HOST=https://api.openai.com/v1
165
  LLM_BINDING_API_KEY=${LLM_BINDING_API_KEY}
166