helloya20 commited on
Commit
f0743f4
·
verified ·
1 Parent(s): 0960ca8

Upload 2345 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .devcontainer/Dockerfile +5 -0
  2. .devcontainer/devcontainer.json +18 -0
  3. .devcontainer/docker-compose.yml +63 -0
  4. .dockerignore +17 -0
  5. .env.example +791 -0
  6. .gitattributes +8 -0
  7. .github/CODE_OF_CONDUCT.md +132 -0
  8. .github/CONTRIBUTING.md +168 -0
  9. .github/FUNDING.yml +13 -0
  10. .github/ISSUE_TEMPLATE/BUG-REPORT.yml +96 -0
  11. .github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml +49 -0
  12. .github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml +42 -0
  13. .github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml +33 -0
  14. .github/SECURITY.md +63 -0
  15. .github/configuration-release.json +60 -0
  16. .github/configuration-unreleased.json +68 -0
  17. .github/playwright.yml +72 -0
  18. .github/pull_request_template.md +41 -0
  19. .github/workflows/a11y.yml +26 -0
  20. .github/workflows/backend-review.yml +74 -0
  21. .github/workflows/build.yml +38 -0
  22. .github/workflows/cache-integration-tests.yml +89 -0
  23. .github/workflows/client.yml +64 -0
  24. .github/workflows/data-provider.yml +47 -0
  25. .github/workflows/data-schemas.yml +64 -0
  26. .github/workflows/deploy-dev.yml +46 -0
  27. .github/workflows/deploy.yml +38 -0
  28. .github/workflows/dev-branch-images.yml +72 -0
  29. .github/workflows/dev-images.yml +72 -0
  30. .github/workflows/dev-staging-images.yml +66 -0
  31. .github/workflows/eslint-ci.yml +59 -0
  32. .github/workflows/frontend-review.yml +56 -0
  33. .github/workflows/generate_embeddings.yml +20 -0
  34. .github/workflows/helmcharts.yml +75 -0
  35. .github/workflows/i18n-unused-keys.yml +149 -0
  36. .github/workflows/locize-i18n-sync.yml +72 -0
  37. .github/workflows/main-image-workflow.yml +69 -0
  38. .github/workflows/tag-images.yml +67 -0
  39. .github/workflows/unused-packages.yml +244 -0
  40. .gitignore +171 -0
  41. .husky/lint-staged.config.js +4 -0
  42. .husky/pre-commit +2 -0
  43. .prettierrc +19 -0
  44. .vscode/launch.json +18 -0
  45. CHANGELOG.md +236 -0
  46. Dockerfile +57 -0
  47. Dockerfile.multi +82 -0
  48. LICENSE +21 -0
  49. README.md +214 -7
  50. api/app/clients/AnthropicClient.js +991 -0
.devcontainer/Dockerfile ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ FROM node:18-bullseye
2
+
3
+ RUN useradd -m -s /bin/bash vscode
4
+ RUN mkdir -p /workspaces && chown -R vscode:vscode /workspaces
5
+ WORKDIR /workspaces
.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dockerComposeFile": "docker-compose.yml",
3
+ "service": "app",
4
+ "workspaceFolder": "/workspaces",
5
+ "customizations": {
6
+ "vscode": {
7
+ "extensions": [],
8
+ "settings": {
9
+ "terminal.integrated.profiles.linux": {
10
+ "bash": null
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "postCreateCommand": "",
16
+ "features": { "ghcr.io/devcontainers/features/git:1": {} },
17
+ "remoteUser": "vscode"
18
+ }
.devcontainer/docker-compose.yml ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ app:
3
+ build:
4
+ context: ..
5
+ dockerfile: .devcontainer/Dockerfile
6
+ # restart: always
7
+ links:
8
+ - mongodb
9
+ - meilisearch
10
+ # ports:
11
+ # - 3080:3080 # Change it to 9000:3080 to use nginx
12
+ extra_hosts: # if you are running APIs on docker you need access to, you will need to uncomment this line and next
13
+ - "host.docker.internal:host-gateway"
14
+
15
+ volumes:
16
+ # This is where VS Code should expect to find your project's source code and the value of "workspaceFolder" in .devcontainer/devcontainer.json
17
+ - ..:/workspaces:cached
18
+ # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details.
19
+ # - /var/run/docker.sock:/var/run/docker.sock
20
+ environment:
21
+ - HOST=0.0.0.0
22
+ - MONGO_URI=mongodb://mongodb:27017/LibreChat
23
+ # - CHATGPT_REVERSE_PROXY=http://host.docker.internal:8080/api/conversation # if you are hosting your own chatgpt reverse proxy with docker
24
+ # - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1/chat/completions # if you are hosting your own chatgpt reverse proxy with docker
25
+ - MEILI_HOST=http://meilisearch:7700
26
+
27
+ # Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
28
+ # network_mode: service:another-service
29
+
30
+ # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
31
+ # (Adding the "ports" property to this file will not forward from a Codespace.)
32
+
33
+ # Use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
34
+ user: vscode
35
+
36
+ # Overrides default command so things don't shut down after the process ends.
37
+ command: /bin/sh -c "while sleep 1000; do :; done"
38
+
39
+ mongodb:
40
+ container_name: chat-mongodb
41
+ expose:
42
+ - 27017
43
+ # ports:
44
+ # - 27018:27017
45
+ image: mongo
46
+ # restart: always
47
+ volumes:
48
+ - ./data-node:/data/db
49
+ command: mongod --noauth
50
+ meilisearch:
51
+ container_name: chat-meilisearch
52
+ image: getmeili/meilisearch:v1.5
53
+ # restart: always
54
+ expose:
55
+ - 7700
56
+ # Uncomment this to access meilisearch from outside docker
57
+ # ports:
58
+ # - 7700:7700 # if exposing these ports, make sure your master key is not the default value
59
+ environment:
60
+ - MEILI_NO_ANALYTICS=true
61
+ - MEILI_MASTER_KEY=5c71cf56d672d009e36070b5bc5e47b743535ae55c818ae3b735bb6ebfb4ba63
62
+ volumes:
63
+ - ./meili_data_v1.5:/meili_data
.dockerignore ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ **/.circleci
2
+ **/.editorconfig
3
+ **/.dockerignore
4
+ **/.git
5
+ **/.DS_Store
6
+ **/.vscode
7
+ **/node_modules
8
+
9
+ # Specific patterns to ignore
10
+ data-node
11
+ meili_data*
12
+ librechat*
13
+ Dockerfile*
14
+ docs
15
+
16
+ # Ignore all hidden files
17
+ .*
.env.example ADDED
@@ -0,0 +1,791 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #=====================================================================#
2
+ # LibreChat Configuration #
3
+ #=====================================================================#
4
+ # Please refer to the reference documentation for assistance #
5
+ # with configuring your LibreChat environment. #
6
+ # #
7
+ # https://www.librechat.ai/docs/configuration/dotenv #
8
+ #=====================================================================#
9
+
10
+ #==================================================#
11
+ # Server Configuration #
12
+ #==================================================#
13
+
14
+ HOST=localhost
15
+ PORT=3080
16
+
17
+ MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
18
+ #The maximum number of connections in the connection pool.
19
+ MONGO_MAX_POOL_SIZE=
20
+ #The minimum number of connections in the connection pool.
21
+ MONGO_MIN_POOL_SIZE=
22
+ #The maximum number of connections that may be in the process of being established concurrently by the connection pool.
23
+ MONGO_MAX_CONNECTING=
24
+ #The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed.
25
+ MONGO_MAX_IDLE_TIME_MS=
26
+ #The maximum time in milliseconds that a thread can wait for a connection to become available.
27
+ MONGO_WAIT_QUEUE_TIMEOUT_MS=
28
+ # Set to false to disable automatic index creation for all models associated with this connection.
29
+ MONGO_AUTO_INDEX=
30
+ # Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection.
31
+ MONGO_AUTO_CREATE=
32
+
33
+ DOMAIN_CLIENT=http://localhost:3080
34
+ DOMAIN_SERVER=http://localhost:3080
35
+
36
+ NO_INDEX=true
37
+ # Use the address that is at most n number of hops away from the Express application.
38
+ # req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
39
+ # A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
40
+ # Defaulted to 1.
41
+ TRUST_PROXY=1
42
+
43
+ # Minimum password length for user authentication
44
+ # Default: 8
45
+ # Note: When using LDAP authentication, you may want to set this to 1
46
+ # to bypass local password validation, as LDAP servers handle their own
47
+ # password policies.
48
+ # MIN_PASSWORD_LENGTH=8
49
+
50
+ #===============#
51
+ # JSON Logging #
52
+ #===============#
53
+
54
+ # Use when process console logs in cloud deployment like GCP/AWS
55
+ CONSOLE_JSON=false
56
+
57
+ #===============#
58
+ # Debug Logging #
59
+ #===============#
60
+
61
+ DEBUG_LOGGING=true
62
+ DEBUG_CONSOLE=false
63
+
64
+ #=============#
65
+ # Permissions #
66
+ #=============#
67
+
68
+ # UID=1000
69
+ # GID=1000
70
+
71
+ #===============#
72
+ # Configuration #
73
+ #===============#
74
+ # Use an absolute path, a relative path, or a URL
75
+
76
+ # CONFIG_PATH="/alternative/path/to/librechat.yaml"
77
+
78
+ #===================================================#
79
+ # Endpoints #
80
+ #===================================================#
81
+
82
+ # ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic
83
+
84
+ PROXY=
85
+
86
+ #===================================#
87
+ # Known Endpoints - librechat.yaml #
88
+ #===================================#
89
+ # https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints
90
+
91
+ # ANYSCALE_API_KEY=
92
+ # APIPIE_API_KEY=
93
+ # COHERE_API_KEY=
94
+ # DEEPSEEK_API_KEY=
95
+ # DATABRICKS_API_KEY=
96
+ # FIREWORKS_API_KEY=
97
+ # GROQ_API_KEY=
98
+ # HUGGINGFACE_TOKEN=
99
+ # MISTRAL_API_KEY=
100
+ # OPENROUTER_KEY=
101
+ # PERPLEXITY_API_KEY=
102
+ # SHUTTLEAI_API_KEY=
103
+ # TOGETHERAI_API_KEY=
104
+ # UNIFY_API_KEY=
105
+ # XAI_API_KEY=
106
+
107
+ #============#
108
+ # Anthropic #
109
+ #============#
110
+
111
+ ANTHROPIC_API_KEY=user_provided
112
+ # ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
113
+ # ANTHROPIC_REVERSE_PROXY=
114
+
115
+ #============#
116
+ # Azure #
117
+ #============#
118
+
119
+ # Note: these variables are DEPRECATED
120
+ # Use the `librechat.yaml` configuration for `azureOpenAI` instead
121
+ # You may also continue to use them if you opt out of using the `librechat.yaml` configuration
122
+
123
+ # AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
124
+ # AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
125
+ # AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
126
+ # AZURE_API_KEY= # Deprecated
127
+ # AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
128
+ # AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
129
+ # AZURE_OPENAI_API_VERSION= # Deprecated
130
+ # AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
131
+ # AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
132
+ # PLUGINS_USE_AZURE="true" # Deprecated
133
+
134
+ #=================#
135
+ # AWS Bedrock #
136
+ #=================#
137
+
138
+ # BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
139
+ # BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
140
+ # BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
141
+ # BEDROCK_AWS_SESSION_TOKEN=someSessionToken
142
+
143
+ # Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
144
+ # BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
145
+
146
+ # See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
147
+
148
+ # Notes on specific models:
149
+ # The following models are not supported due to not supporting streaming:
150
+ # ai21.j2-mid-v1
151
+
152
+ # The following models are not supported due to not supporting conversation history:
153
+ # ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
154
+
155
+ #============#
156
+ # Google #
157
+ #============#
158
+
159
+ GOOGLE_KEY=user_provided
160
+
161
+ # GOOGLE_REVERSE_PROXY=
162
+ # Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
163
+ # GOOGLE_AUTH_HEADER=true
164
+
165
+ # Gemini API (AI Studio)
166
+ # GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
167
+
168
+ # Vertex AI
169
+ # GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
170
+
171
+ # GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
172
+
173
+ # GOOGLE_LOC=us-central1
174
+
175
+ # Google Safety Settings
176
+ # NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
177
+ #
178
+ # For Vertex AI:
179
+ # To use the BLOCK_NONE setting, you need either:
180
+ # (a) Access through an allowlist via your Google account team, or
181
+ # (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
182
+ #
183
+ # For Gemini API (AI Studio):
184
+ # BLOCK_NONE is available by default, no special account requirements.
185
+ #
186
+ # Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
187
+ #
188
+ # GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
189
+ # GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
190
+ # GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
191
+ # GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
192
+ # GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
193
+
194
+ #============#
195
+ # OpenAI #
196
+ #============#
197
+
198
+ OPENAI_API_KEY=user_provided
199
+ # OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini
200
+
201
+ DEBUG_OPENAI=false
202
+
203
+ # TITLE_CONVO=false
204
+ # OPENAI_TITLE_MODEL=gpt-4o-mini
205
+
206
+ # OPENAI_SUMMARIZE=true
207
+ # OPENAI_SUMMARY_MODEL=gpt-4o-mini
208
+
209
+ # OPENAI_FORCE_PROMPT=true
210
+
211
+ # OPENAI_REVERSE_PROXY=
212
+
213
+ # OPENAI_ORGANIZATION=
214
+
215
+ #====================#
216
+ # Assistants API #
217
+ #====================#
218
+
219
+ ASSISTANTS_API_KEY=user_provided
220
+ # ASSISTANTS_BASE_URL=
221
+ # ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
222
+
223
+ #==========================#
224
+ # Azure Assistants API #
225
+ #==========================#
226
+
227
+ # Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
228
+ # The models for Azure Assistants are also determined by your Azure OpenAI configuration.
229
+
230
+ # More info, including how to enable use of Assistants with Azure here:
231
+ # https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
232
+
233
+ #============#
234
+ # Plugins #
235
+ #============#
236
+
237
+ # PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
238
+
239
+ DEBUG_PLUGINS=true
240
+
241
+ CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
242
+ CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
243
+
244
+ # Azure AI Search
245
+ #-----------------
246
+ AZURE_AI_SEARCH_SERVICE_ENDPOINT=
247
+ AZURE_AI_SEARCH_INDEX_NAME=
248
+ AZURE_AI_SEARCH_API_KEY=
249
+
250
+ AZURE_AI_SEARCH_API_VERSION=
251
+ AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
252
+ AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
253
+ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
254
+
255
+ # OpenAI Image Tools Customization
256
+ #----------------
257
+ # IMAGE_GEN_OAI_API_KEY= # Create or reuse OpenAI API key for image generation tool
258
+ # IMAGE_GEN_OAI_BASEURL= # Custom OpenAI base URL for image generation tool
259
+ # IMAGE_GEN_OAI_AZURE_API_VERSION= # Custom Azure OpenAI deployments
260
+ # IMAGE_GEN_OAI_DESCRIPTION=
261
+ # IMAGE_GEN_OAI_DESCRIPTION_WITH_FILES=Custom description for image generation tool when files are present
262
+ # IMAGE_GEN_OAI_DESCRIPTION_NO_FILES=Custom description for image generation tool when no files are present
263
+ # IMAGE_EDIT_OAI_DESCRIPTION=Custom description for image editing tool
264
+ # IMAGE_GEN_OAI_PROMPT_DESCRIPTION=Custom prompt description for image generation tool
265
+ # IMAGE_EDIT_OAI_PROMPT_DESCRIPTION=Custom prompt description for image editing tool
266
+
267
+ # DALL·E
268
+ #----------------
269
+ # DALLE_API_KEY=
270
+ # DALLE3_API_KEY=
271
+ # DALLE2_API_KEY=
272
+ # DALLE3_SYSTEM_PROMPT=
273
+ # DALLE2_SYSTEM_PROMPT=
274
+ # DALLE_REVERSE_PROXY=
275
+ # DALLE3_BASEURL=
276
+ # DALLE2_BASEURL=
277
+
278
+ # DALL·E (via Azure OpenAI)
279
+ # Note: requires some of the variables above to be set
280
+ #----------------
281
+ # DALLE3_AZURE_API_VERSION=
282
+ # DALLE2_AZURE_API_VERSION=
283
+
284
+ # Flux
285
+ #-----------------
286
+ FLUX_API_BASE_URL=https://api.us1.bfl.ai
287
+ # FLUX_API_BASE_URL = 'https://api.bfl.ml';
288
+
289
+ # Get your API key at https://api.us1.bfl.ai/auth/profile
290
+ # FLUX_API_KEY=
291
+
292
+ # Google
293
+ #-----------------
294
+ GOOGLE_SEARCH_API_KEY=
295
+ GOOGLE_CSE_ID=
296
+
297
+ # YOUTUBE
298
+ #-----------------
299
+ YOUTUBE_API_KEY=
300
+
301
+ # Stable Diffusion
302
+ #-----------------
303
+ SD_WEBUI_URL=http://host.docker.internal:7860
304
+
305
+ # Tavily
306
+ #-----------------
307
+ TAVILY_API_KEY=
308
+
309
+ # Traversaal
310
+ #-----------------
311
+ TRAVERSAAL_API_KEY=
312
+
313
+ # WolframAlpha
314
+ #-----------------
315
+ WOLFRAM_APP_ID=
316
+
317
+ # Zapier
318
+ #-----------------
319
+ ZAPIER_NLA_API_KEY=
320
+
321
+ #==================================================#
322
+ # Search #
323
+ #==================================================#
324
+
325
+ SEARCH=true
326
+ MEILI_NO_ANALYTICS=true
327
+ MEILI_HOST=http://0.0.0.0:7700
328
+ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
329
+
330
+ # Optional: Disable indexing, useful in a multi-node setup
331
+ # where only one instance should perform an index sync.
332
+ # MEILI_NO_SYNC=true
333
+
334
+ #==================================================#
335
+ # Speech to Text & Text to Speech #
336
+ #==================================================#
337
+
338
+ STT_API_KEY=
339
+ TTS_API_KEY=
340
+
341
+ #==================================================#
342
+ # RAG #
343
+ #==================================================#
344
+ # More info: https://www.librechat.ai/docs/configuration/rag_api
345
+
346
+ # RAG_OPENAI_BASEURL=
347
+ # RAG_OPENAI_API_KEY=
348
+ # RAG_USE_FULL_CONTEXT=
349
+ # EMBEDDINGS_PROVIDER=openai
350
+ # EMBEDDINGS_MODEL=text-embedding-3-small
351
+
352
+ #===================================================#
353
+ # User System #
354
+ #===================================================#
355
+
356
+ #========================#
357
+ # Moderation #
358
+ #========================#
359
+
360
+ OPENAI_MODERATION=false
361
+ OPENAI_MODERATION_API_KEY=
362
+ # OPENAI_MODERATION_REVERSE_PROXY=
363
+
364
+ BAN_VIOLATIONS=true
365
+ BAN_DURATION=1000 * 60 * 60 * 2
366
+ BAN_INTERVAL=20
367
+
368
+ LOGIN_VIOLATION_SCORE=1
369
+ REGISTRATION_VIOLATION_SCORE=1
370
+ CONCURRENT_VIOLATION_SCORE=1
371
+ MESSAGE_VIOLATION_SCORE=1
372
+ NON_BROWSER_VIOLATION_SCORE=20
373
+ TTS_VIOLATION_SCORE=0
374
+ STT_VIOLATION_SCORE=0
375
+ FORK_VIOLATION_SCORE=0
376
+ IMPORT_VIOLATION_SCORE=0
377
+ FILE_UPLOAD_VIOLATION_SCORE=0
378
+
379
+ LOGIN_MAX=7
380
+ LOGIN_WINDOW=5
381
+ REGISTER_MAX=5
382
+ REGISTER_WINDOW=60
383
+
384
+ LIMIT_CONCURRENT_MESSAGES=true
385
+ CONCURRENT_MESSAGE_MAX=2
386
+
387
+ LIMIT_MESSAGE_IP=true
388
+ MESSAGE_IP_MAX=40
389
+ MESSAGE_IP_WINDOW=1
390
+
391
+ LIMIT_MESSAGE_USER=false
392
+ MESSAGE_USER_MAX=40
393
+ MESSAGE_USER_WINDOW=1
394
+
395
+ ILLEGAL_MODEL_REQ_SCORE=5
396
+
397
+ #========================#
398
+ # Balance #
399
+ #========================#
400
+
401
+ # CHECK_BALANCE=false
402
+ # START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
403
+
404
+ #========================#
405
+ # Registration and Login #
406
+ #========================#
407
+
408
+ ALLOW_EMAIL_LOGIN=true
409
+ ALLOW_REGISTRATION=true
410
+ ALLOW_SOCIAL_LOGIN=false
411
+ ALLOW_SOCIAL_REGISTRATION=false
412
+ ALLOW_PASSWORD_RESET=false
413
+ # ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
414
+ ALLOW_UNVERIFIED_EMAIL_LOGIN=true
415
+
416
+ SESSION_EXPIRY=1000 * 60 * 15
417
+ REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
418
+
419
+ JWT_SECRET=16f8c0ef4a5d391b26034086c628469d3f9f497f08163ab9b40137092f2909ef
420
+ JWT_REFRESH_SECRET=eaa5191f2914e30b9387fd84e254e4ba6fc51b4654968a9b0803b456a54b8418
421
+
422
+ # Discord
423
+ DISCORD_CLIENT_ID=
424
+ DISCORD_CLIENT_SECRET=
425
+ DISCORD_CALLBACK_URL=/oauth/discord/callback
426
+
427
+ # Facebook
428
+ FACEBOOK_CLIENT_ID=
429
+ FACEBOOK_CLIENT_SECRET=
430
+ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
431
+
432
+ # GitHub
433
+ GITHUB_CLIENT_ID=
434
+ GITHUB_CLIENT_SECRET=
435
+ GITHUB_CALLBACK_URL=/oauth/github/callback
436
+ # GitHub Enterprise
437
+ # GITHUB_ENTERPRISE_BASE_URL=
438
+ # GITHUB_ENTERPRISE_USER_AGENT=
439
+
440
+ # Google
441
+ GOOGLE_CLIENT_ID=
442
+ GOOGLE_CLIENT_SECRET=
443
+ GOOGLE_CALLBACK_URL=/oauth/google/callback
444
+
445
+ # Apple
446
+ APPLE_CLIENT_ID=
447
+ APPLE_TEAM_ID=
448
+ APPLE_KEY_ID=
449
+ APPLE_PRIVATE_KEY_PATH=
450
+ APPLE_CALLBACK_URL=/oauth/apple/callback
451
+
452
+ # OpenID
453
+ OPENID_CLIENT_ID=
454
+ OPENID_CLIENT_SECRET=
455
+ OPENID_ISSUER=
456
+ OPENID_SESSION_SECRET=
457
+ OPENID_SCOPE="openid profile email"
458
+ OPENID_CALLBACK_URL=/oauth/openid/callback
459
+ OPENID_REQUIRED_ROLE=
460
+ OPENID_REQUIRED_ROLE_TOKEN_KIND=
461
+ OPENID_REQUIRED_ROLE_PARAMETER_PATH=
462
+ OPENID_ADMIN_ROLE=
463
+ OPENID_ADMIN_ROLE_PARAMETER_PATH=
464
+ OPENID_ADMIN_ROLE_TOKEN_KIND=
465
+ # Set to determine which user info property returned from OpenID Provider to store as the User's username
466
+ OPENID_USERNAME_CLAIM=
467
+ # Set to determine which user info property returned from OpenID Provider to store as the User's name
468
+ OPENID_NAME_CLAIM=
469
+ # Optional audience parameter for OpenID authorization requests
470
+ OPENID_AUDIENCE=
471
+
472
+ OPENID_BUTTON_LABEL=
473
+ OPENID_IMAGE_URL=
474
+ # Set to true to automatically redirect to the OpenID provider when a user visits the login page
475
+ # This will bypass the login form completely for users, only use this if OpenID is your only authentication method
476
+ OPENID_AUTO_REDIRECT=false
477
+ # Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
478
+ OPENID_USE_PKCE=false
479
+ #Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
480
+ OPENID_REUSE_TOKENS=
481
+ #By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
482
+ #If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
483
+ #Default is true.
484
+ OPENID_JWKS_URL_CACHE_ENABLED=
485
+ OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
486
+ #Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
487
+ OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
488
+ OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
489
+ # Set to true to use the OpenID Connect end session endpoint for logout
490
+ OPENID_USE_END_SESSION_ENDPOINT=
491
+
492
+ #========================#
493
+ # SharePoint Integration #
494
+ #========================#
495
+ # Requires Entra ID (OpenID) authentication to be configured
496
+
497
+ # Enable SharePoint file picker in chat and agent panels
498
+ # ENABLE_SHAREPOINT_FILEPICKER=true
499
+
500
+ # SharePoint tenant base URL (e.g., https://yourtenant.sharepoint.com)
501
+ # SHAREPOINT_BASE_URL=https://yourtenant.sharepoint.com
502
+
503
+ # Microsoft Graph API And SharePoint scopes for file picker
504
+ # SHAREPOINT_PICKER_SHAREPOINT_SCOPE=https://yourtenant.sharepoint.com/AllSites.Read
505
+ # SHAREPOINT_PICKER_GRAPH_SCOPE=Files.Read.All
506
+ #========================#
507
+
508
+ # SAML
509
+ # Note: If OpenID is enabled, SAML authentication will be automatically disabled.
510
+ SAML_ENTRY_POINT=
511
+ SAML_ISSUER=
512
+ SAML_CERT=
513
+ SAML_CALLBACK_URL=/oauth/saml/callback
514
+ SAML_SESSION_SECRET=
515
+
516
+ # Attribute mappings (optional)
517
+ SAML_EMAIL_CLAIM=
518
+ SAML_USERNAME_CLAIM=
519
+ SAML_GIVEN_NAME_CLAIM=
520
+ SAML_FAMILY_NAME_CLAIM=
521
+ SAML_PICTURE_CLAIM=
522
+ SAML_NAME_CLAIM=
523
+
524
+ # Login button settings (optional)
525
+ SAML_BUTTON_LABEL=
526
+ SAML_IMAGE_URL=
527
+
528
+ # Whether the SAML Response should be signed.
529
+ # - If "true", the entire `SAML Response` will be signed.
530
+ # - If "false" or unset, only the `SAML Assertion` will be signed (default behavior).
531
+ # SAML_USE_AUTHN_RESPONSE_SIGNED=
532
+
533
+
534
+ #===============================================#
535
+ # Microsoft Graph API / Entra ID Integration #
536
+ #===============================================#
537
+
538
+ # Enable Entra ID people search integration in permissions/sharing system
539
+ # When enabled, the people picker will search both local database and Entra ID
540
+ USE_ENTRA_ID_FOR_PEOPLE_SEARCH=false
541
+
542
+ # When enabled, entra id groups owners will be considered as members of the group
543
+ ENTRA_ID_INCLUDE_OWNERS_AS_MEMBERS=false
544
+
545
+ # Microsoft Graph API scopes needed for people/group search
546
+ # Default scopes provide access to user profiles and group memberships
547
+ OPENID_GRAPH_SCOPES=User.Read,People.Read,GroupMember.Read.All
548
+
549
+ # LDAP
550
+ LDAP_URL=
551
+ LDAP_BIND_DN=
552
+ LDAP_BIND_CREDENTIALS=
553
+ LDAP_USER_SEARCH_BASE=
554
+ #LDAP_SEARCH_FILTER="mail="
555
+ LDAP_CA_CERT_PATH=
556
+ # LDAP_TLS_REJECT_UNAUTHORIZED=
557
+ # LDAP_STARTTLS=
558
+ # LDAP_LOGIN_USES_USERNAME=true
559
+ # LDAP_ID=
560
+ # LDAP_USERNAME=
561
+ # LDAP_EMAIL=
562
+ # LDAP_FULL_NAME=
563
+
564
+ #========================#
565
+ # Email Password Reset #
566
+ #========================#
567
+
568
+ EMAIL_SERVICE=
569
+ EMAIL_HOST=
570
+ EMAIL_PORT=25
571
+ EMAIL_ENCRYPTION=
572
+ EMAIL_ENCRYPTION_HOSTNAME=
573
+ EMAIL_ALLOW_SELFSIGNED=
574
+ EMAIL_USERNAME=
575
+ EMAIL_PASSWORD=
576
+ EMAIL_FROM_NAME=
577
+ EMAIL_FROM=noreply@librechat.ai
578
+
579
+ #========================#
580
+ # Mailgun API #
581
+ #========================#
582
+
583
+ # MAILGUN_API_KEY=your-mailgun-api-key
584
+ # MAILGUN_DOMAIN=mg.yourdomain.com
585
+ # EMAIL_FROM=noreply@yourdomain.com
586
+ # EMAIL_FROM_NAME="LibreChat"
587
+
588
+ # # Optional: For EU region
589
+ # MAILGUN_HOST=https://api.eu.mailgun.net
590
+
591
+ #========================#
592
+ # Firebase CDN #
593
+ #========================#
594
+
595
+ FIREBASE_API_KEY=
596
+ FIREBASE_AUTH_DOMAIN=
597
+ FIREBASE_PROJECT_ID=
598
+ FIREBASE_STORAGE_BUCKET=
599
+ FIREBASE_MESSAGING_SENDER_ID=
600
+ FIREBASE_APP_ID=
601
+
602
+ #========================#
603
+ # S3 AWS Bucket #
604
+ #========================#
605
+
606
+ AWS_ENDPOINT_URL=
607
+ AWS_ACCESS_KEY_ID=
608
+ AWS_SECRET_ACCESS_KEY=
609
+ AWS_REGION=
610
+ AWS_BUCKET_NAME=
611
+
612
+ #========================#
613
+ # Azure Blob Storage #
614
+ #========================#
615
+
616
+ AZURE_STORAGE_CONNECTION_STRING=
617
+ AZURE_STORAGE_PUBLIC_ACCESS=false
618
+ AZURE_CONTAINER_NAME=files
619
+
620
+ #========================#
621
+ # Shared Links #
622
+ #========================#
623
+
624
+ ALLOW_SHARED_LINKS=true
625
+ ALLOW_SHARED_LINKS_PUBLIC=true
626
+
627
+ #==============================#
628
+ # Static File Cache Control #
629
+ #==============================#
630
+
631
+ # Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
632
+ # NODE_ENV must be set to production for these to take effect
633
+ # STATIC_CACHE_MAX_AGE=172800
634
+ # STATIC_CACHE_S_MAX_AGE=86400
635
+
636
+ # If you have another service in front of your LibreChat doing compression, disable express based compression here
637
+ # DISABLE_COMPRESSION=true
638
+
639
+ # If you have gzipped version of uploaded image images in the same folder, this will enable gzip scan and serving of these images
640
+ # Note: The images folder will be scanned on startup and a map is kept in memory. Be careful with a large number of images.
641
+ # ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true
642
+
643
+ #===================================================#
644
+ # UI #
645
+ #===================================================#
646
+
647
+ APP_TITLE=LibreChat
648
+ # CUSTOM_FOOTER="My custom footer"
649
+ HELP_AND_FAQ_URL=https://librechat.ai
650
+
651
+ # SHOW_BIRTHDAY_ICON=true
652
+
653
+ # Google tag manager id
654
+ #ANALYTICS_GTM_ID=user provided google tag manager id
655
+
656
+ # limit conversation file imports to a certain number of bytes in size to avoid the container
657
+ # maxing out memory limitations by uncommenting this line and supplying a file size in bytes
658
+ # such as the below example of 250 MiB
659
+ # CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000
660
+
661
+
662
+ #===============#
663
+ # REDIS Options #
664
+ #===============#
665
+
666
+ # Enable Redis for caching and session storage
667
+ # USE_REDIS=true
668
+
669
+ # Single Redis instance
670
+ # REDIS_URI=redis://127.0.0.1:6379
671
+
672
+ # Redis cluster (multiple nodes)
673
+ # REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
674
+
675
+ # Redis with TLS/SSL encryption and CA certificate
676
+ # REDIS_URI=rediss://127.0.0.1:6380
677
+ # REDIS_CA=/path/to/ca-cert.pem
678
+
679
+ # Elasticache may need to use an alternate dnsLookup for TLS connections. see "Special Note: Aws Elasticache Clusters with TLS" on this webpage: https://www.npmjs.com/package/ioredis
680
+ # Enable alternative dnsLookup for redis
681
+ # REDIS_USE_ALTERNATIVE_DNS_LOOKUP=true
682
+
683
+ # Redis authentication (if required)
684
+ # REDIS_USERNAME=your_redis_username
685
+ # REDIS_PASSWORD=your_redis_password
686
+
687
+ # Redis key prefix configuration
688
+ # Use environment variable name for dynamic prefix (recommended for cloud deployments)
689
+ # REDIS_KEY_PREFIX_VAR=K_REVISION
690
+ # Or use static prefix directly
691
+ # REDIS_KEY_PREFIX=librechat
692
+
693
+ # Redis connection limits
694
+ # REDIS_MAX_LISTENERS=40
695
+
696
+ # Redis ping interval in seconds (0 = disabled, >0 = enabled)
697
+ # When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive
698
+ # When unset or 0, no pinging is performed (recommended for most use cases)
699
+ # REDIS_PING_INTERVAL=300
700
+
701
+ # Force specific cache namespaces to use in-memory storage even when Redis is enabled
702
+ # Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
703
+ # FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES
704
+
705
+ # Leader Election Configuration (for multi-instance deployments with Redis)
706
+ # Duration in seconds that the leader lease is valid before it expires (default: 25)
707
+ # LEADER_LEASE_DURATION=25
708
+ # Interval in seconds at which the leader renews its lease (default: 10)
709
+ # LEADER_RENEW_INTERVAL=10
710
+ # Maximum number of retry attempts when renewing the lease fails (default: 3)
711
+ # LEADER_RENEW_ATTEMPTS=3
712
+ # Delay in seconds between retry attempts when renewing the lease (default: 0.5)
713
+ # LEADER_RENEW_RETRY_DELAY=0.5
714
+
715
+ #==================================================#
716
+ # Others #
717
+ #==================================================#
718
+ # You should leave the following commented out #
719
+
720
+ # NODE_ENV=
721
+
722
+ # E2E_USER_EMAIL=
723
+ # E2E_USER_PASSWORD=
724
+
725
+ #=====================================================#
726
+ # Cache Headers #
727
+ #=====================================================#
728
+ # Headers that control caching of the index.html #
729
+ # Default configuration prevents caching to ensure #
730
+ # users always get the latest version. Customize #
731
+ # only if you understand caching implications. #
732
+
733
+ # INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate
734
+ # INDEX_PRAGMA=no-cache
735
+ # INDEX_EXPIRES=0
736
+
737
+ # no-cache: Forces validation with server before using cached version
738
+ # no-store: Prevents storing the response entirely
739
+ # must-revalidate: Prevents using stale content when offline
740
+
741
+ #=====================================================#
742
+ # OpenWeather #
743
+ #=====================================================#
744
+ OPENWEATHER_API_KEY=
745
+
746
+ #====================================#
747
+ # LibreChat Code Interpreter API #
748
+ #====================================#
749
+
750
+ # https://code.librechat.ai
751
+ # LIBRECHAT_CODE_API_KEY=your-key
752
+
753
+ #======================#
754
+ # Web Search #
755
+ #======================#
756
+
757
+ # Note: All of the following variable names can be customized.
758
+ # Omit values to allow user to provide them.
759
+
760
+ # For more information on configuration values, see:
761
+ # https://librechat.ai/docs/features/web_search
762
+
763
+ # Search Provider (Required)
764
+ # SERPER_API_KEY=your_serper_api_key
765
+
766
+ # Scraper (Required)
767
+ # FIRECRAWL_API_KEY=your_firecrawl_api_key
768
+ # Optional: Custom Firecrawl API URL
769
+ # FIRECRAWL_API_URL=your_firecrawl_api_url
770
+
771
+ # Reranker (Required)
772
+ # JINA_API_KEY=your_jina_api_key
773
+ # or
774
+ # COHERE_API_KEY=your_cohere_api_key
775
+
776
+ #======================#
777
+ # MCP Configuration #
778
+ #======================#
779
+
780
+ # Treat 401/403 responses as OAuth requirement when no oauth metadata found
781
+ # MCP_OAUTH_ON_AUTH_ERROR=true
782
+
783
+ # Timeout for OAuth detection requests in milliseconds
784
+ # MCP_OAUTH_DETECTION_TIMEOUT=5000
785
+
786
+ # Cache connection status checks for this many milliseconds to avoid expensive verification
787
+ # MCP_CONNECTION_CHECK_TTL=60000
788
+
789
+ # Skip code challenge method validation (e.g., for AWS Cognito that supports S256 but doesn't advertise it)
790
+ # When set to true, forces S256 code challenge even if not advertised in .well-known/openid-configuration
791
+ # MCP_SKIP_CODE_CHALLENGE_CHECK=false
.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ client/public/assets/fireworks.png filter=lfs diff=lfs merge=lfs -text
37
+ client/public/assets/shuttleai.png filter=lfs diff=lfs merge=lfs -text
38
+ client/public/fonts/Inter-Bold.woff2 filter=lfs diff=lfs merge=lfs -text
39
+ client/public/fonts/Inter-BoldItalic.woff2 filter=lfs diff=lfs merge=lfs -text
40
+ client/public/fonts/Inter-Italic.woff2 filter=lfs diff=lfs merge=lfs -text
41
+ client/public/fonts/Inter-Regular.woff2 filter=lfs diff=lfs merge=lfs -text
42
+ client/public/fonts/Inter-SemiBold.woff2 filter=lfs diff=lfs merge=lfs -text
43
+ client/public/fonts/Inter-SemiBoldItalic.woff2 filter=lfs diff=lfs merge=lfs -text
.github/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, religion, or sexual identity
10
+ and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the
26
+ overall community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or
31
+ advances of any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email
35
+ address, without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported to the community leaders responsible for enforcement here on GitHub or
63
+ on the official [Discord Server](https://discord.librechat.ai).
64
+ All complaints will be reviewed and investigated promptly and fairly.
65
+
66
+ All community leaders are obligated to respect the privacy and security of the
67
+ reporter of any incident.
68
+
69
+ ## Enforcement Guidelines
70
+
71
+ Community leaders will follow these Community Impact Guidelines in determining
72
+ the consequences for any action they deem in violation of this Code of Conduct:
73
+
74
+ ### 1. Correction
75
+
76
+ **Community Impact**: Use of inappropriate language or other behavior deemed
77
+ unprofessional or unwelcome in the community.
78
+
79
+ **Consequence**: A private, written warning from community leaders, providing
80
+ clarity around the nature of the violation and an explanation of why the
81
+ behavior was inappropriate. A public apology may be requested.
82
+
83
+ ### 2. Warning
84
+
85
+ **Community Impact**: A violation through a single incident or series
86
+ of actions.
87
+
88
+ **Consequence**: A warning with consequences for continued behavior. No
89
+ interaction with the people involved, including unsolicited interaction with
90
+ those enforcing the Code of Conduct, for a specified period of time. This
91
+ includes avoiding interactions in community spaces as well as external channels
92
+ like social media. Violating these terms may lead to a temporary or
93
+ permanent ban.
94
+
95
+ ### 3. Temporary Ban
96
+
97
+ **Community Impact**: A serious violation of community standards, including
98
+ sustained inappropriate behavior.
99
+
100
+ **Consequence**: A temporary ban from any sort of interaction or public
101
+ communication with the community for a specified period of time. No public or
102
+ private interaction with the people involved, including unsolicited interaction
103
+ with those enforcing the Code of Conduct, is allowed during this period.
104
+ Violating these terms may lead to a permanent ban.
105
+
106
+ ### 4. Permanent Ban
107
+
108
+ **Community Impact**: Demonstrating a pattern of violation of community
109
+ standards, including sustained inappropriate behavior, harassment of an
110
+ individual, or aggression toward or disparagement of classes of individuals.
111
+
112
+ **Consequence**: A permanent ban from any sort of public interaction within
113
+ the community.
114
+
115
+ ## Attribution
116
+
117
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118
+ version 2.0, available at
119
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120
+
121
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
122
+ enforcement ladder](https://github.com/mozilla/diversity).
123
+
124
+ [homepage]: https://www.contributor-covenant.org
125
+
126
+ For answers to common questions about this code of conduct, see the FAQ at
127
+ https://www.contributor-covenant.org/faq. Translations are available at
128
+ https://www.contributor-covenant.org/translations.
129
+
130
+ ---
131
+
132
+ ## [Go Back to ReadMe](../README.md)
.github/CONTRIBUTING.md ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributor Guidelines
2
+
3
+ Thank you to all the contributors who have helped make this project possible! We welcome various types of contributions, such as bug reports, documentation improvements, feature requests, and code contributions.
4
+
5
+ ## Contributing Guidelines
6
+
7
+ If the feature you would like to contribute has not already received prior approval from the project maintainers (i.e., the feature is currently on the [roadmap](https://github.com/users/danny-avila/projects/2)), please submit a request in the [Feature Requests & Suggestions category](https://github.com/danny-avila/LibreChat/discussions/new?category=feature-requests-suggestions) of the discussions board before beginning work on it. The requests should include specific implementation details, including areas of the application that will be affected by the change (including designs if applicable), and any other relevant information that might be required for a speedy review. However, proposals are not required for small changes, bug fixes, or documentation improvements. Small changes and bug fixes should be tied to an [issue](https://github.com/danny-avila/LibreChat/issues) and included in the corresponding pull request for tracking purposes.
8
+
9
+ Please note that a pull request involving a feature that has not been reviewed and approved by the project maintainers may be rejected. We appreciate your understanding and cooperation.
10
+
11
+ If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.librechat.ai), where you can engage with other contributors and seek guidance from the community.
12
+
13
+ ## Our Standards
14
+
15
+ We strive to maintain a positive and inclusive environment within our project community. We expect all contributors to adhere to the following standards:
16
+
17
+ - Using welcoming and inclusive language.
18
+ - Being respectful of differing viewpoints and experiences.
19
+ - Gracefully accepting constructive criticism.
20
+ - Focusing on what is best for the community.
21
+ - Showing empathy towards other community members.
22
+
23
+ Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that do not align with these standards.
24
+
25
+ ## To contribute to this project, please adhere to the following guidelines:
26
+
27
+ ## 1. Development Setup
28
+
29
+ 1. Use Node.js 20.x.
30
+ 2. Install typescript globally: `npm i -g typescript`.
31
+ 3. Run `npm ci` to install dependencies.
32
+ 4. Build the data provider: `npm run build:data-provider`.
33
+ 5. Build data schemas: `npm run build:data-schemas`.
34
+ 6. Build API methods: `npm run build:api`.
35
+ 7. Setup and run unit tests:
36
+ - Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
37
+ - Run backend unit tests: `npm run test:api`.
38
+ - Run frontend unit tests: `npm run test:client`.
39
+ 8. Setup and run integration tests:
40
+ - Build client: `cd client && npm run build`.
41
+ - Create `.env`: `cp .env.example .env`.
42
+ - Install [MongoDB Community Edition](https://www.mongodb.com/docs/manual/administration/install-community/), ensure that `mongosh` connects to your local instance.
43
+ - Run: `npm install playwright`, then `npx playwright install`.
44
+ - Copy `config.local`: `cp e2e/config.local.example.ts e2e/config.local.ts`.
45
+ - Copy `librechat.yaml`: `cp librechat.example.yaml librechat.yaml`.
46
+ - Run: `npm run e2e`.
47
+
48
+ ## 2. Development Notes
49
+
50
+ 1. Before starting work, make sure your main branch has the latest commits with `npm run update`.
51
+ 2. Run linting command to find errors: `npm run lint`. Alternatively, ensure husky pre-commit checks are functioning.
52
+ 3. After your changes, reinstall packages in your current branch using `npm run reinstall` and ensure everything still works.
53
+ - Restart the ESLint server ("ESLint: Restart ESLint Server" in VS Code command bar) and your IDE after reinstalling or updating.
54
+ 4. Clear web app localStorage and cookies before and after changes.
55
+ 5. For frontend changes, compile typescript before and after changes to check for introduced errors: `cd client && npm run build`.
56
+ 6. Run backend unit tests: `npm run test:api`.
57
+ 7. Run frontend unit tests: `npm run test:client`.
58
+ 8. Run integration tests: `npm run e2e`.
59
+
60
+ ## 3. Git Workflow
61
+
62
+ We utilize a GitFlow workflow to manage changes to this project's codebase. Follow these general steps when contributing code:
63
+
64
+ 1. Fork the repository and create a new branch with a descriptive slash-based name (e.g., `new/feature/x`).
65
+ 2. Implement your changes and ensure that all tests pass.
66
+ 3. Commit your changes using conventional commit messages with GitFlow flags. Begin the commit message with a tag indicating the change type, such as "feat" (new feature), "fix" (bug fix), "docs" (documentation), or "refactor" (code refactoring), followed by a brief summary of the changes (e.g., `feat: Add new feature X to the project`).
67
+ 4. Submit a pull request with a clear and concise description of your changes and the reasons behind them.
68
+ 5. We will review your pull request, provide feedback as needed, and eventually merge the approved changes into the main branch.
69
+
70
+ ## 4. Commit Message Format
71
+
72
+ We follow the [semantic format](https://gist.github.com/joshbuchea/6f47e86d2510bce28f8e7f42ae84c716) for commit messages.
73
+
74
+ ### Example
75
+
76
+ ```
77
+ feat: add hat wobble
78
+ ^--^ ^------------^
79
+ | |
80
+ | +-> Summary in present tense.
81
+ |
82
+ +-------> Type: chore, docs, feat, fix, refactor, style, or test.
83
+ ```
84
+
85
+ ### Commit Guidelines
86
+ - Do your best to reduce the number of commits, organizing them as much as possible. Look into [squashing commits](https://www.freecodecamp.org/news/git-squash-commits/) in order to keep a neat history.
87
+ - For those that care about maximizing commits for stats, adhere to the above as I 'squash and merge' an unorganized and/or unformatted commit history, which reduces the number of your commits to 1:
88
+ ```
89
+ * Update Br.tsx
90
+
91
+ * Update Es.tsx
92
+
93
+ * Update Br.tsx
94
+ ```
95
+
96
+
97
+ ## 5. Pull Request Process
98
+
99
+ When submitting a pull request, please follow these guidelines:
100
+
101
+ - Ensure that any installation or build dependencies are removed before the end of the layer when doing a build.
102
+ - Update the README.md with details of changes to the interface, including new environment variables, exposed ports, useful file locations, and container parameters.
103
+ - Increase the version numbers in any example files and the README.md to reflect the new version that the pull request represents. We use [SemVer](http://semver.org/) for versioning.
104
+
105
+ Ensure that your changes meet the following criteria:
106
+
107
+ - All tests pass as highlighted [above](#2-development-notes).
108
+ - The code is well-formatted and adheres to our coding standards.
109
+ - The commit history is clean and easy to follow. You can use `git rebase` or `git merge --squash` to clean your commit history before submitting the pull request.
110
+ - The pull request description clearly outlines the changes and the reasons behind them. Be sure to include the steps to test the pull request.
111
+
112
+ ## 6. Naming Conventions
113
+
114
+ Apply the following naming conventions to branches, labels, and other Git-related entities:
115
+
116
+ - **Branch names:** Descriptive and slash-based (e.g., `new/feature/x`).
117
+ - **Labels:** Descriptive and kebab case (e.g., `bug-fix`).
118
+ - **JS/TS:** Directories and file names: Descriptive and camelCase. First letter uppercased for React files (e.g., `helperFunction.ts, ReactComponent.tsx`).
119
+ - **Docs:** Directories and file names: Descriptive and snake_case (e.g., `config_files.md`).
120
+
121
+ ## 7. TypeScript Conversion
122
+
123
+ 1. **Original State**: The project was initially developed entirely in JavaScript (JS).
124
+
125
+ 2. **Frontend Transition**:
126
+ - We are in the process of transitioning the frontend from JS to TypeScript (TS).
127
+ - The transition is nearing completion.
128
+ - This conversion is feasible due to React's capability to intermix JS and TS prior to code compilation. It's standard practice to compile/bundle the code in such scenarios.
129
+
130
+ 3. **Backend Considerations**:
131
+ - Transitioning the backend to TypeScript would be a more intricate process, especially for an established Express.js server.
132
+
133
+ - **Options for Transition**:
134
+ - **Single Phase Overhaul**: This involves converting the entire backend to TypeScript in one go. It's the most straightforward approach but can be disruptive, especially for larger codebases.
135
+
136
+ - **Incremental Transition**: Convert parts of the backend progressively. This can be done by:
137
+ - Maintaining a separate directory for TypeScript files.
138
+ - Gradually migrating and testing individual modules or routes.
139
+ - Using a build tool like `tsc` to compile TypeScript files independently until the entire transition is complete.
140
+
141
+ - **Compilation Considerations**:
142
+ - Introducing a compilation step for the server is an option. This would involve using tools like `ts-node` for development and `tsc` for production builds.
143
+ - However, this is not a conventional approach for Express.js servers and could introduce added complexity, especially in terms of build and deployment processes.
144
+
145
+ - **Current Stance**: At present, this backend transition is of lower priority and might not be pursued.
146
+
147
+ ## 8. Module Import Conventions
148
+
149
+ - `npm` packages first,
150
+ - from longest line (top) to shortest (bottom)
151
+
152
+ - Followed by typescript types (pertains to data-provider and client workspaces)
153
+ - longest line (top) to shortest (bottom)
154
+ - types from package come first
155
+
156
+ - Lastly, local imports
157
+ - longest line (top) to shortest (bottom)
158
+ - imports with alias `~` treated the same as relative import with respect to line length
159
+
160
+ **Note:** ESLint will automatically enforce these import conventions when you run `npm run lint --fix` or through pre-commit hooks.
161
+
162
+ ---
163
+
164
+ Please ensure that you adapt this summary to fit the specific context and nuances of your project.
165
+
166
+ ---
167
+
168
+ ## [Go Back to ReadMe](../README.md)
.github/FUNDING.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # These are supported funding model platforms
2
+
3
+ github: [danny-avila]
4
+ patreon: # Replace with a single Patreon username
5
+ open_collective: # Replace with a single Open Collective username
6
+ ko_fi: # Replace with a single Ko-fi username
7
+ tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8
+ community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9
+ liberapay: # Replace with a single Liberapay username
10
+ issuehunt: # Replace with a single IssueHunt username
11
+ otechie: # Replace with a single Otechie username
12
+ lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
13
+ custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.github/ISSUE_TEMPLATE/BUG-REPORT.yml ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug Report
2
+ description: File a bug report
3
+ title: "[Bug]: "
4
+ labels: ["🐛 bug"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thanks for taking the time to fill out this bug report!
10
+
11
+ Before submitting, please:
12
+ - Search existing [Issues and Discussions](https://github.com/danny-avila/LibreChat/discussions) to see if your bug has already been reported
13
+ - Use [Discussions](https://github.com/danny-avila/LibreChat/discussions) instead of Issues for:
14
+ - General inquiries
15
+ - Help with setup
16
+ - Questions about whether you're experiencing a bug
17
+ - type: textarea
18
+ id: what-happened
19
+ attributes:
20
+ label: What happened?
21
+ description: Also tell us, what did you expect to happen?
22
+ placeholder: Please give as many details as possible
23
+ validations:
24
+ required: true
25
+ - type: textarea
26
+ id: version-info
27
+ attributes:
28
+ label: Version Information
29
+ description: |
30
+ If using Docker, please run and provide the output of:
31
+ ```bash
32
+ docker images | grep librechat
33
+ ```
34
+
35
+ If running from source, please run and provide the output of:
36
+ ```bash
37
+ git rev-parse HEAD
38
+ ```
39
+ placeholder: Paste the output here
40
+ validations:
41
+ required: true
42
+ - type: textarea
43
+ id: steps-to-reproduce
44
+ attributes:
45
+ label: Steps to Reproduce
46
+ description: Please list the steps needed to reproduce the issue.
47
+ placeholder: "1. Step 1\n2. Step 2\n3. Step 3"
48
+ validations:
49
+ required: true
50
+ - type: dropdown
51
+ id: browsers
52
+ attributes:
53
+ label: What browsers are you seeing the problem on?
54
+ multiple: true
55
+ options:
56
+ - Firefox
57
+ - Chrome
58
+ - Safari
59
+ - Microsoft Edge
60
+ - Mobile (iOS)
61
+ - Mobile (Android)
62
+ - type: textarea
63
+ id: logs
64
+ attributes:
65
+ label: Relevant log output
66
+ description: |
67
+ Please paste relevant logs that were created when reproducing the error.
68
+
69
+ Log locations:
70
+ - Docker: Project root directory ./logs
71
+ - npm: ./api/logs
72
+
73
+ There are two types of logs that can help diagnose the issue:
74
+ - debug logs (debug-YYYY-MM-DD.log)
75
+ - error logs (error-YYYY-MM-DD.log)
76
+
77
+ Error logs contain exact stack traces and are especially helpful, but both can provide valuable information.
78
+ Please only include the relevant portions of logs that correspond to when you reproduced the error.
79
+
80
+ For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here.
81
+ render: shell
82
+ validations:
83
+ required: true
84
+ - type: textarea
85
+ id: screenshots
86
+ attributes:
87
+ label: Screenshots
88
+ description: If applicable, add screenshots to help explain your problem. You can drag and drop, paste images directly here or link to them.
89
+ - type: checkboxes
90
+ id: terms
91
+ attributes:
92
+ label: Code of Conduct
93
+ description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
94
+ options:
95
+ - label: I agree to follow this project's Code of Conduct
96
+ required: true
.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Feature Request
2
+ description: File a feature request
3
+ title: "[Enhancement]: "
4
+ labels: ["✨ enhancement"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for taking the time to fill this out!
10
+ - type: textarea
11
+ id: what
12
+ attributes:
13
+ label: What features would you like to see added?
14
+ description: Please provide as many details as possible.
15
+ placeholder: Please provide as many details as possible.
16
+ validations:
17
+ required: true
18
+ - type: textarea
19
+ id: details
20
+ attributes:
21
+ label: More details
22
+ description: Please provide additional details if needed.
23
+ placeholder: Please provide additional details if needed.
24
+ validations:
25
+ required: true
26
+ - type: dropdown
27
+ id: subject
28
+ attributes:
29
+ label: Which components are impacted by your request?
30
+ multiple: true
31
+ options:
32
+ - General
33
+ - UI
34
+ - Endpoints
35
+ - Plugins
36
+ - Other
37
+ - type: textarea
38
+ id: screenshots
39
+ attributes:
40
+ label: Pictures
41
+ description: If relevant, please include images to help clarify your request. You can drag and drop images directly here, paste them, or provide a link to them.
42
+ - type: checkboxes
43
+ id: terms
44
+ attributes:
45
+ label: Code of Conduct
46
+ description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
47
+ options:
48
+ - label: I agree to follow this project's Code of Conduct
49
+ required: true
.github/ISSUE_TEMPLATE/LOCIZE_TRANSLATION_ACCESS_REQUEST.yml ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Locize Translation Access Request
2
+ description: Request access to an additional language in Locize for LibreChat translations.
3
+ title: "Locize Access Request: "
4
+ labels: ["🌍 i18n", "🔑 access request"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for your interest in contributing to LibreChat translations!
10
+ Please fill out the form below to request access to an additional language in **Locize**.
11
+
12
+ **🔗 Available Languages:** [View the list here](https://www.librechat.ai/docs/translation)
13
+
14
+ **📌 Note:** Ensure that the requested language is supported before submitting your request.
15
+ - type: input
16
+ id: account_name
17
+ attributes:
18
+ label: Locize Account Name
19
+ description: Please provide your Locize account name (e.g., John Doe).
20
+ placeholder: e.g., John Doe
21
+ validations:
22
+ required: true
23
+ - type: input
24
+ id: language_requested
25
+ attributes:
26
+ label: Language Code (ISO 639-1)
27
+ description: |
28
+ Enter the **ISO 639-1** language code for the language you want to translate into.
29
+ Example: `es` for Spanish, `zh-Hant` for Traditional Chinese.
30
+
31
+ **🔗 Reference:** [Available Languages](https://www.librechat.ai/docs/translation)
32
+ placeholder: e.g., es
33
+ validations:
34
+ required: true
35
+ - type: checkboxes
36
+ id: agreement
37
+ attributes:
38
+ label: Agreement
39
+ description: By submitting this request, you confirm that you will contribute responsibly and adhere to the project guidelines.
40
+ options:
41
+ - label: I agree to use my access solely for contributing to LibreChat translations.
42
+ required: true
.github/ISSUE_TEMPLATE/NEW-LANGUAGE-REQUEST.yml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: New Language Request
2
+ description: Request to add a new language for LibreChat translations.
3
+ title: "New Language Request: "
4
+ labels: ["✨ enhancement", "🌍 i18n"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for taking the time to submit a new language request! Please fill out the following details so we can review your request.
10
+ - type: input
11
+ id: language_name
12
+ attributes:
13
+ label: Language Name
14
+ description: Please provide the full name of the language (e.g., Spanish, Mandarin).
15
+ placeholder: e.g., Spanish
16
+ validations:
17
+ required: true
18
+ - type: input
19
+ id: iso_code
20
+ attributes:
21
+ label: ISO 639-1 Code
22
+ description: Please provide the ISO 639-1 code for the language (e.g., es for Spanish). You can refer to [this list](https://www.w3schools.com/tags/ref_language_codes.asp) for valid codes.
23
+ placeholder: e.g., es
24
+ validations:
25
+ required: true
26
+ - type: checkboxes
27
+ id: terms
28
+ attributes:
29
+ label: Code of Conduct
30
+ description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md).
31
+ options:
32
+ - label: I agree to follow this project's Code of Conduct
33
+ required: true
.github/SECURITY.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Security Policy
2
+
3
+ At LibreChat, we prioritize the security of our project and value the contributions of security researchers in helping us improve the security of our codebase. If you discover a security vulnerability within our project, we appreciate your responsible disclosure. Please follow the guidelines below to report any vulnerabilities to us:
4
+
5
+ **Note: Only report sensitive vulnerability details via the appropriate private communication channels mentioned below. Public channels, such as GitHub issues and Discord, should be used for initiating contact and establishing private communication channels.**
6
+
7
+ ## Communication Channels
8
+
9
+ When reporting a security vulnerability, you have the following options to reach out to us:
10
+
11
+ - **Option 1: GitHub Security Advisory System**: We encourage you to use GitHub's Security Advisory system to report any security vulnerabilities you find. This allows us to receive vulnerability reports directly through GitHub. For more information on how to submit a security advisory report, please refer to the [GitHub Security Advisories documentation](https://docs.github.com/en/code-security/getting-started-with-security-vulnerability-alerts/about-github-security-advisories).
12
+
13
+ - **Option 2: GitHub Issues**: You can initiate first contact via GitHub Issues. However, please note that initial contact through GitHub Issues should not include any sensitive details.
14
+
15
+ - **Option 3: Discord Server**: You can join our [Discord community](https://discord.librechat.ai) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
16
+
17
+ _After the initial contact, we will establish a private communication channel for further discussion._
18
+
19
+ ### When submitting a vulnerability report, please provide us with the following information:
20
+
21
+ - A clear description of the vulnerability, including steps to reproduce it.
22
+ - The version(s) of the project affected by the vulnerability.
23
+ - Any additional information that may be useful for understanding and addressing the issue.
24
+
25
+ We strive to acknowledge vulnerability reports within 72 hours and will keep you informed of the progress towards resolution.
26
+
27
+ ## Security Updates and Patching
28
+
29
+ We are committed to maintaining the security of our open-source project, LibreChat, and promptly addressing any identified vulnerabilities. To ensure the security of our project, we adhere to the following practices:
30
+
31
+ - We prioritize security updates for the current major release of our software.
32
+ - We actively monitor the GitHub Security Advisory system and the `#issues` channel on Discord for any vulnerability reports.
33
+ - We promptly review and validate reported vulnerabilities and take appropriate actions to address them.
34
+ - We release security patches and updates in a timely manner to mitigate any identified vulnerabilities.
35
+
36
+ Please note that as a security-conscious community, we may not always disclose detailed information about security issues until we have determined that doing so would not put our users or the project at risk. We appreciate your understanding and cooperation in these matters.
37
+
38
+ ## Scope
39
+
40
+ This security policy applies to the following GitHub repository:
41
+
42
+ - Repository: [LibreChat](https://github.librechat.ai)
43
+
44
+ ## Contact
45
+
46
+ If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.librechat.ai) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
47
+
48
+ ## Acknowledgments
49
+
50
+ We would like to express our gratitude to the security researchers and community members who help us improve the security of our project. Your contributions are invaluable, and we sincerely appreciate your efforts.
51
+
52
+ ## Bug Bounty Program
53
+
54
+ We currently do not have a bug bounty program in place. However, we welcome and appreciate any
55
+ security-related contributions through pull requests (PRs) that address vulnerabilities in our codebase. We believe in the power of collaboration to improve the security of our project and invite you to join us in making it more robust.
57
+
58
+ **Reference**
59
+ - https://cheatsheetseries.owasp.org/cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html
60
+
61
+ ---
62
+
63
+ ## [Go Back to ReadMe](../README.md)
.github/configuration-release.json ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "categories": [
3
+ {
4
+ "title": "### ✨ New Features",
5
+ "labels": ["feat"]
6
+ },
7
+ {
8
+ "title": "### 🌍 Internationalization",
9
+ "labels": ["i18n"]
10
+ },
11
+ {
12
+ "title": "### 👐 Accessibility",
13
+ "labels": ["a11y"]
14
+ },
15
+ {
16
+ "title": "### 🔧 Fixes",
17
+ "labels": ["Fix", "fix"]
18
+ },
19
+ {
20
+ "title": "### ⚙️ Other Changes",
21
+ "labels": ["ci", "style", "docs", "refactor", "chore"]
22
+ }
23
+ ],
24
+ "ignore_labels": [
25
+ "🔁 duplicate",
26
+ "📊 analytics",
27
+ "🌱 good first issue",
28
+ "🔍 investigation",
29
+ "🙏 help wanted",
30
+ "❌ invalid",
31
+ "❓ question",
32
+ "🚫 wontfix",
33
+ "🚀 release",
34
+ "version"
35
+ ],
36
+ "base_branches": ["main"],
37
+ "sort": {
38
+ "order": "ASC",
39
+ "on_property": "mergedAt"
40
+ },
41
+ "label_extractor": [
42
+ {
43
+ "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
44
+ "target": "$1",
45
+ "flags": "i",
46
+ "on_property": "title",
47
+ "method": "match"
48
+ },
49
+ {
50
+ "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
51
+ "target": "version",
52
+ "flags": "i",
53
+ "on_property": "title",
54
+ "method": "match"
55
+ }
56
+ ],
57
+ "template": "## [#{{TO_TAG}}] - #{{TO_TAG_DATE}}\n\nChanges from #{{FROM_TAG}} to #{{TO_TAG}}.\n\n#{{CHANGELOG}}\n\n[See full release details][release-#{{TO_TAG}}]\n\n[release-#{{TO_TAG}}]: https://github.com/#{{OWNER}}/#{{REPO}}/releases/tag/#{{TO_TAG}}\n\n---",
58
+ "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
59
+ "empty_template": "- no changes"
60
+ }
.github/configuration-unreleased.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "categories": [
3
+ {
4
+ "title": "### ✨ New Features",
5
+ "labels": ["feat"]
6
+ },
7
+ {
8
+ "title": "### 🌍 Internationalization",
9
+ "labels": ["i18n"]
10
+ },
11
+ {
12
+ "title": "### 👐 Accessibility",
13
+ "labels": ["a11y"]
14
+ },
15
+ {
16
+ "title": "### 🔧 Fixes",
17
+ "labels": ["Fix", "fix"]
18
+ },
19
+ {
20
+ "title": "### ⚙️ Other Changes",
21
+ "labels": ["ci", "style", "docs", "refactor", "chore"]
22
+ }
23
+ ],
24
+ "ignore_labels": [
25
+ "🔁 duplicate",
26
+ "📊 analytics",
27
+ "🌱 good first issue",
28
+ "🔍 investigation",
29
+ "🙏 help wanted",
30
+ "❌ invalid",
31
+ "❓ question",
32
+ "🚫 wontfix",
33
+ "🚀 release",
34
+ "version",
35
+ "action"
36
+ ],
37
+ "base_branches": ["main"],
38
+ "sort": {
39
+ "order": "ASC",
40
+ "on_property": "mergedAt"
41
+ },
42
+ "label_extractor": [
43
+ {
44
+ "pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
45
+ "target": "$1",
46
+ "flags": "i",
47
+ "on_property": "title",
48
+ "method": "match"
49
+ },
50
+ {
51
+ "pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
52
+ "target": "version",
53
+ "flags": "i",
54
+ "on_property": "title",
55
+ "method": "match"
56
+ },
57
+ {
58
+ "pattern": "^(?:[^A-Za-z0-9]*)(action)\\b.*",
59
+ "target": "action",
60
+ "flags": "i",
61
+ "on_property": "title",
62
+ "method": "match"
63
+ }
64
+ ],
65
+ "template": "## [Unreleased]\n\n#{{CHANGELOG}}\n\n---",
66
+ "pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
67
+ "empty_template": "- no changes"
68
+ }
.github/playwright.yml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # name: Playwright Tests
2
+ # on:
3
+ # pull_request:
4
+ # branches:
5
+ # - main
6
+ # - dev
7
+ # - release/*
8
+ # paths:
9
+ # - 'api/**'
10
+ # - 'client/**'
11
+ # - 'packages/**'
12
+ # - 'e2e/**'
13
+ # jobs:
14
+ # tests_e2e:
15
+ # name: Run Playwright tests
16
+ # if: github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat'
17
+ # timeout-minutes: 60
18
+ # runs-on: ubuntu-latest
19
+ # env:
20
+ # NODE_ENV: CI
21
+ # CI: true
22
+ # SEARCH: false
23
+ # BINGAI_TOKEN: user_provided
24
+ # CHATGPT_TOKEN: user_provided
25
+ # MONGO_URI: ${{ secrets.MONGO_URI }}
26
+ # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
27
+ # E2E_USER_EMAIL: ${{ secrets.E2E_USER_EMAIL }}
28
+ # E2E_USER_PASSWORD: ${{ secrets.E2E_USER_PASSWORD }}
29
+ # JWT_SECRET: ${{ secrets.JWT_SECRET }}
30
+ # JWT_REFRESH_SECRET: ${{ secrets.JWT_REFRESH_SECRET }}
31
+ # CREDS_KEY: ${{ secrets.CREDS_KEY }}
32
+ # CREDS_IV: ${{ secrets.CREDS_IV }}
33
+ # DOMAIN_CLIENT: ${{ secrets.DOMAIN_CLIENT }}
34
+ # DOMAIN_SERVER: ${{ secrets.DOMAIN_SERVER }}
35
+ # PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: 1 # Skip downloading during npm install
36
+ # PLAYWRIGHT_BROWSERS_PATH: 0 # Places binaries to node_modules/@playwright/test
37
+ # TITLE_CONVO: false
38
+ # steps:
39
+ # - uses: actions/checkout@v4
40
+ # - uses: actions/setup-node@v4
41
+ # with:
42
+ # node-version: 18
43
+ # cache: 'npm'
44
+
45
+ # - name: Install global dependencies
46
+ # run: npm ci
47
+
48
+ # # - name: Remove sharp dependency
49
+ # # run: rm -rf node_modules/sharp
50
+
51
+ # # - name: Install sharp with linux dependencies
52
+ # # run: cd api && SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp
53
+
54
+ # - name: Build Client
55
+ # run: npm run frontend
56
+
57
+ # - name: Install Playwright
58
+ # run: |
59
+ # npx playwright install-deps
60
+ # npm install -D @playwright/test@latest
61
+ # npx playwright install chromium
62
+
63
+ # - name: Run Playwright tests
64
+ # run: npm run e2e:ci
65
+
66
+ # - name: Upload playwright report
67
+ # uses: actions/upload-artifact@v3
68
+ # if: always()
69
+ # with:
70
+ # name: playwright-report
71
+ # path: e2e/playwright-report/
72
+ # retention-days: 30
.github/pull_request_template.md ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pull Request Template
2
+
3
+ ⚠️ Before Submitting a PR, Please Review:
4
+ - Please ensure that you have thoroughly read and understood the [Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) before submitting your Pull Request.
5
+
6
+ ⚠️ Documentation Updates Notice:
7
+ - Kindly note that documentation updates are managed in this repository: [librechat.ai](https://github.com/LibreChat-AI/librechat.ai)
8
+
9
+ ## Summary
10
+
11
+ Please provide a brief summary of your changes and the related issue. Include any motivation and context that is relevant to your changes. If there are any dependencies necessary for your changes, please list them here.
12
+
13
+ ## Change Type
14
+
15
+ Please delete any irrelevant options.
16
+
17
+ - [ ] Bug fix (non-breaking change which fixes an issue)
18
+ - [ ] New feature (non-breaking change which adds functionality)
19
+ - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
20
+ - [ ] This change requires a documentation update
21
+ - [ ] Translation update
22
+
23
+ ## Testing
24
+
25
+ Please describe your test process and include instructions so that we can reproduce your test. If there are any important variables for your testing configuration, list them here.
26
+
27
+ ### **Test Configuration**:
28
+
29
+ ## Checklist
30
+
31
+ Please delete any irrelevant options.
32
+
33
+ - [ ] My code adheres to this project's style guidelines
34
+ - [ ] I have performed a self-review of my own code
35
+ - [ ] I have commented in any complex areas of my code
36
+ - [ ] I have made pertinent documentation changes
37
+ - [ ] My changes do not introduce new warnings
38
+ - [ ] I have written tests demonstrating that my changes are effective or that my feature works
39
+ - [ ] Local unit tests pass with my changes
40
+ - [ ] Any changes dependent on mine have been merged and published in downstream modules.
41
+ - [ ] A pull request for updating the documentation has been submitted.
.github/workflows/a11y.yml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Lint for accessibility issues
2
+
3
+ on:
4
+ pull_request:
5
+ paths:
6
+ - 'client/src/**'
7
+ workflow_dispatch:
8
+ inputs:
9
+ run_workflow:
10
+ description: 'Set to true to run this workflow'
11
+ required: true
12
+ default: 'false'
13
+
14
+ jobs:
15
+ axe-linter:
16
+ runs-on: ubuntu-latest
17
+ if: >
18
+ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat') ||
19
+ (github.event_name == 'workflow_dispatch' && github.event.inputs.run_workflow == 'true')
20
+
21
+ steps:
22
+ - uses: actions/checkout@v4
23
+ - uses: dequelabs/axe-linter-action@v1
24
+ with:
25
+ api_key: ${{ secrets.AXE_LINTER_API_KEY }}
26
+ github_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/backend-review.yml ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Backend Unit Tests
2
+ on:
3
+ pull_request:
4
+ branches:
5
+ - main
6
+ - dev
7
+ - release/*
8
+ paths:
9
+ - 'api/**'
10
+ - 'packages/**'
11
+ jobs:
12
+ tests_Backend:
13
+ name: Run Backend unit tests
14
+ timeout-minutes: 60
15
+ runs-on: ubuntu-latest
16
+ env:
17
+ MONGO_URI: ${{ secrets.MONGO_URI }}
18
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
19
+ JWT_SECRET: ${{ secrets.JWT_SECRET }}
20
+ CREDS_KEY: ${{ secrets.CREDS_KEY }}
21
+ CREDS_IV: ${{ secrets.CREDS_IV }}
22
+ BAN_VIOLATIONS: ${{ secrets.BAN_VIOLATIONS }}
23
+ BAN_DURATION: ${{ secrets.BAN_DURATION }}
24
+ BAN_INTERVAL: ${{ secrets.BAN_INTERVAL }}
25
+ NODE_ENV: CI
26
+ steps:
27
+ - uses: actions/checkout@v4
28
+ - name: Use Node.js 20.x
29
+ uses: actions/setup-node@v4
30
+ with:
31
+ node-version: 20
32
+ cache: 'npm'
33
+
34
+ - name: Install dependencies
35
+ run: npm ci
36
+
37
+ - name: Install Data Provider Package
38
+ run: npm run build:data-provider
39
+
40
+ - name: Install Data Schemas Package
41
+ run: npm run build:data-schemas
42
+
43
+ - name: Install API Package
44
+ run: npm run build:api
45
+
46
+ - name: Create empty auth.json file
47
+ run: |
48
+ mkdir -p api/data
49
+ echo '{}' > api/data/auth.json
50
+
51
+ - name: Check for Circular dependency in rollup
52
+ working-directory: ./packages/data-provider
53
+ run: |
54
+ output=$(npm run rollup:api)
55
+ echo "$output"
56
+ if echo "$output" | grep -q "Circular dependency"; then
57
+ echo "Error: Circular dependency detected!"
58
+ exit 1
59
+ fi
60
+
61
+ - name: Prepare .env.test file
62
+ run: cp api/test/.env.test.example api/test/.env.test
63
+
64
+ - name: Run unit tests
65
+ run: cd api && npm run test:ci
66
+
67
+ - name: Run librechat-data-provider unit tests
68
+ run: cd packages/data-provider && npm run test:ci
69
+
70
+ - name: Run @librechat/data-schemas unit tests
71
+ run: cd packages/data-schemas && npm run test:ci
72
+
73
+ - name: Run @librechat/api unit tests
74
+ run: cd packages/api && npm run test:ci
.github/workflows/build.yml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Linux_Container_Workflow
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ env:
7
+ RUNNER_VERSION: 2.293.0
8
+
9
+ jobs:
10
+ build-and-push:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ # checkout the repo
14
+ - name: 'Checkout GitHub Action'
15
+ uses: actions/checkout@main
16
+
17
+ - name: 'Login via Azure CLI'
18
+ uses: azure/login@v1
19
+ with:
20
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
21
+
22
+ - name: 'Build GitHub Runner container image'
23
+ uses: azure/docker-login@v1
24
+ with:
25
+ login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
26
+ username: ${{ secrets.REGISTRY_USERNAME }}
27
+ password: ${{ secrets.REGISTRY_PASSWORD }}
28
+ - run: |
29
+ docker build --build-arg RUNNER_VERSION=${{ env.RUNNER_VERSION }} -t ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }} .
30
+
31
+ - name: 'Push container image to ACR'
32
+ uses: azure/docker-login@v1
33
+ with:
34
+ login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
35
+ username: ${{ secrets.REGISTRY_USERNAME }}
36
+ password: ${{ secrets.REGISTRY_PASSWORD }}
37
+ - run: |
38
+ docker push ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }}
.github/workflows/cache-integration-tests.yml ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Cache Integration Tests
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ - dev
8
+ - release/*
9
+ paths:
10
+ - 'packages/api/src/cache/**'
11
+ - 'packages/api/src/cluster/**'
12
+ - 'packages/api/src/mcp/**'
13
+ - 'redis-config/**'
14
+ - '.github/workflows/cache-integration-tests.yml'
15
+
16
+ jobs:
17
+ cache_integration_tests:
18
+ name: Integration Tests that use actual Redis Cache
19
+ timeout-minutes: 30
20
+ runs-on: ubuntu-latest
21
+
22
+ steps:
23
+ - name: Checkout repository
24
+ uses: actions/checkout@v4
25
+
26
+ - name: Use Node.js 20.x
27
+ uses: actions/setup-node@v4
28
+ with:
29
+ node-version: 20
30
+ cache: 'npm'
31
+
32
+ - name: Install Redis tools
33
+ run: |
34
+ sudo apt-get update
35
+ sudo apt-get install -y redis-server redis-tools
36
+
37
+ - name: Start Single Redis Instance
38
+ run: |
39
+ redis-server --daemonize yes --port 6379
40
+ sleep 2
41
+ # Verify single Redis is running
42
+ redis-cli -p 6379 ping || exit 1
43
+
44
+ - name: Start Redis Cluster
45
+ working-directory: redis-config
46
+ run: |
47
+ chmod +x start-cluster.sh stop-cluster.sh
48
+ ./start-cluster.sh
49
+ sleep 10
50
+ # Verify cluster is running
51
+ redis-cli -p 7001 cluster info || exit 1
52
+ redis-cli -p 7002 cluster info || exit 1
53
+ redis-cli -p 7003 cluster info || exit 1
54
+
55
+ - name: Install dependencies
56
+ run: npm ci
57
+
58
+ - name: Build packages
59
+ run: |
60
+ npm run build:data-provider
61
+ npm run build:data-schemas
62
+ npm run build:api
63
+
64
+ - name: Run all cache integration tests (Single Redis Node)
65
+ working-directory: packages/api
66
+ env:
67
+ NODE_ENV: test
68
+ USE_REDIS: true
69
+ USE_REDIS_CLUSTER: false
70
+ REDIS_URI: redis://127.0.0.1:6379
71
+ run: npm run test:cache-integration
72
+
73
+ - name: Run all cache integration tests (Redis Cluster)
74
+ working-directory: packages/api
75
+ env:
76
+ NODE_ENV: test
77
+ USE_REDIS: true
78
+ USE_REDIS_CLUSTER: true
79
+ REDIS_URI: redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
80
+ run: npm run test:cache-integration
81
+
82
+ - name: Stop Redis Cluster
83
+ if: always()
84
+ working-directory: redis-config
85
+ run: ./stop-cluster.sh || true
86
+
87
+ - name: Stop Single Redis Instance
88
+ if: always()
89
+ run: redis-cli -p 6379 shutdown || true
.github/workflows/client.yml ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Publish `@librechat/client` to NPM
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - 'packages/client/package.json'
9
+ workflow_dispatch:
10
+ inputs:
11
+ reason:
12
+ description: 'Reason for manual trigger'
13
+ required: false
14
+ default: 'Manual publish requested'
15
+
16
+ permissions:
17
+ id-token: write # Required for OIDC trusted publishing
18
+ contents: read
19
+
20
+ jobs:
21
+ build-and-publish:
22
+ runs-on: ubuntu-latest
23
+ environment: publish # Must match npm trusted publisher config
24
+ steps:
25
+ - uses: actions/checkout@v4
26
+
27
+ - name: Use Node.js
28
+ uses: actions/setup-node@v4
29
+ with:
30
+ node-version: '20.x'
31
+ registry-url: 'https://registry.npmjs.org'
32
+
33
+ - name: Update npm for OIDC support
34
+ run: npm install -g npm@latest # Must be 11.5.1+ for provenance
35
+
36
+ - name: Install client dependencies
37
+ run: cd packages/client && npm ci
38
+
39
+ - name: Build client
40
+ run: cd packages/client && npm run build
41
+
42
+ - name: Check version change
43
+ id: check
44
+ working-directory: packages/client
45
+ run: |
46
+ PACKAGE_VERSION=$(node -p "require('./package.json').version")
47
+ PUBLISHED_VERSION=$(npm view @librechat/client version 2>/dev/null || echo "0.0.0")
48
+ if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
49
+ echo "No version change, skipping publish"
50
+ echo "skip=true" >> $GITHUB_OUTPUT
51
+ else
52
+ echo "Version changed, proceeding with publish"
53
+ echo "skip=false" >> $GITHUB_OUTPUT
54
+ fi
55
+
56
+ - name: Pack package
57
+ if: steps.check.outputs.skip != 'true'
58
+ working-directory: packages/client
59
+ run: npm pack
60
+
61
+ - name: Publish
62
+ if: steps.check.outputs.skip != 'true'
63
+ working-directory: packages/client
64
+ run: npm publish *.tgz --access public --provenance
.github/workflows/data-provider.yml ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Publish `librechat-data-provider` to NPM
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - 'packages/data-provider/package.json'
9
+ workflow_dispatch:
10
+ inputs:
11
+ reason:
12
+ description: 'Reason for manual trigger'
13
+ required: false
14
+ default: 'Manual publish requested'
15
+
16
+ permissions:
17
+ id-token: write # Required for OIDC trusted publishing
18
+ contents: read
19
+
20
+ jobs:
21
+ build:
22
+ runs-on: ubuntu-latest
23
+ steps:
24
+ - uses: actions/checkout@v4
25
+ - uses: actions/setup-node@v4
26
+ with:
27
+ node-version: 20
28
+ - run: cd packages/data-provider && npm ci
29
+ - run: cd packages/data-provider && npm run build
30
+
31
+ publish-npm:
32
+ needs: build
33
+ runs-on: ubuntu-latest
34
+ environment: publish # Must match npm trusted publisher config
35
+ steps:
36
+ - uses: actions/checkout@v4
37
+ - uses: actions/setup-node@v4
38
+ with:
39
+ node-version: 20
40
+ registry-url: 'https://registry.npmjs.org'
41
+
42
+ - name: Update npm for OIDC support
43
+ run: npm install -g npm@latest # Must be 11.5.1+ for provenance
44
+
45
+ - run: cd packages/data-provider && npm ci
46
+ - run: cd packages/data-provider && npm run build
47
+ - run: cd packages/data-provider && npm publish --provenance
.github/workflows/data-schemas.yml ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Publish `@librechat/data-schemas` to NPM
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - 'packages/data-schemas/package.json'
9
+ workflow_dispatch:
10
+ inputs:
11
+ reason:
12
+ description: 'Reason for manual trigger'
13
+ required: false
14
+ default: 'Manual publish requested'
15
+
16
+ permissions:
17
+ id-token: write # Required for OIDC trusted publishing
18
+ contents: read
19
+
20
+ jobs:
21
+ build-and-publish:
22
+ runs-on: ubuntu-latest
23
+ environment: publish # Must match npm trusted publisher config
24
+ steps:
25
+ - uses: actions/checkout@v4
26
+
27
+ - name: Use Node.js
28
+ uses: actions/setup-node@v4
29
+ with:
30
+ node-version: '20.x'
31
+ registry-url: 'https://registry.npmjs.org'
32
+
33
+ - name: Update npm for OIDC support
34
+ run: npm install -g npm@latest # Must be 11.5.1+ for provenance
35
+
36
+ - name: Install dependencies
37
+ run: cd packages/data-schemas && npm ci
38
+
39
+ - name: Build
40
+ run: cd packages/data-schemas && npm run build
41
+
42
+ - name: Check version change
43
+ id: check
44
+ working-directory: packages/data-schemas
45
+ run: |
46
+ PACKAGE_VERSION=$(node -p "require('./package.json').version")
47
+ PUBLISHED_VERSION=$(npm view @librechat/data-schemas version 2>/dev/null || echo "0.0.0")
48
+ if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
49
+ echo "No version change, skipping publish"
50
+ echo "skip=true" >> $GITHUB_OUTPUT
51
+ else
52
+ echo "Version changed, proceeding with publish"
53
+ echo "skip=false" >> $GITHUB_OUTPUT
54
+ fi
55
+
56
+ - name: Pack package
57
+ if: steps.check.outputs.skip != 'true'
58
+ working-directory: packages/data-schemas
59
+ run: npm pack
60
+
61
+ - name: Publish
62
+ if: steps.check.outputs.skip != 'true'
63
+ working-directory: packages/data-schemas
64
+ run: npm publish *.tgz --access public --provenance
.github/workflows/deploy-dev.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Update Test Server
2
+
3
+ on:
4
+ workflow_run:
5
+ workflows: ["Docker Dev Branch Images Build"]
6
+ types:
7
+ - completed
8
+ workflow_dispatch:
9
+
10
+ jobs:
11
+ deploy:
12
+ runs-on: ubuntu-latest
13
+ if: |
14
+ github.repository == 'danny-avila/LibreChat' &&
15
+ (github.event_name == 'workflow_dispatch' ||
16
+ (github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'dev'))
17
+ steps:
18
+ - name: Checkout repository
19
+ uses: actions/checkout@v4
20
+
21
+ - name: Install SSH Key
22
+ uses: shimataro/ssh-key-action@v2
23
+ with:
24
+ key: ${{ secrets.DO_SSH_PRIVATE_KEY }}
25
+ known_hosts: ${{ secrets.DO_KNOWN_HOSTS }}
26
+
27
+ - name: Run update script on DigitalOcean Droplet
28
+ env:
29
+ DO_HOST: ${{ secrets.DO_HOST }}
30
+ DO_USER: ${{ secrets.DO_USER }}
31
+ run: |
32
+ ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF
33
+ sudo -i -u danny bash << 'EEOF'
34
+ cd ~/LibreChat && \
35
+ git fetch origin main && \
36
+ sudo npm run stop:deployed && \
37
+ sudo docker images --format "{{.Repository}}:{{.ID}}" | grep -E "lc-dev|librechat" | cut -d: -f2 | xargs -r sudo docker rmi -f || true && \
38
+ sudo npm run update:deployed && \
39
+ git checkout dev && \
40
+ git pull origin dev && \
41
+ git checkout do-deploy && \
42
+ git rebase dev && \
43
+ sudo npm run start:deployed && \
44
+ echo "Update completed. Application should be running now."
45
+ EEOF
46
+ EOF
.github/workflows/deploy.yml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy_GHRunner_Linux_ACI
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ env:
7
+ RUNNER_VERSION: 2.293.0
8
+ ACI_RESOURCE_GROUP: 'Demo-ACI-GitHub-Runners-RG'
9
+ ACI_NAME: 'gh-runner-linux-01'
10
+ DNS_NAME_LABEL: 'gh-lin-01'
11
+ GH_OWNER: ${{ github.repository_owner }}
12
+ GH_REPOSITORY: 'LibreChat' #Change here to deploy self hosted runner ACI to another repo.
13
+
14
+ jobs:
15
+ deploy-gh-runner-aci:
16
+ runs-on: ubuntu-latest
17
+ steps:
18
+ # checkout the repo
19
+ - name: 'Checkout GitHub Action'
20
+ uses: actions/checkout@v4
21
+
22
+ - name: 'Login via Azure CLI'
23
+ uses: azure/login@v1
24
+ with:
25
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
26
+
27
+ - name: 'Deploy to Azure Container Instances'
28
+ uses: 'azure/aci-deploy@v1'
29
+ with:
30
+ resource-group: ${{ env.ACI_RESOURCE_GROUP }}
31
+ image: ${{ secrets.REGISTRY_LOGIN_SERVER }}/pwd9000-github-runner-lin:${{ env.RUNNER_VERSION }}
32
+ registry-login-server: ${{ secrets.REGISTRY_LOGIN_SERVER }}
33
+ registry-username: ${{ secrets.REGISTRY_USERNAME }}
34
+ registry-password: ${{ secrets.REGISTRY_PASSWORD }}
35
+ name: ${{ env.ACI_NAME }}
36
+ dns-name-label: ${{ env.DNS_NAME_LABEL }}
37
+ environment-variables: GH_TOKEN=${{ secrets.PAT_TOKEN }} GH_OWNER=${{ env.GH_OWNER }} GH_REPOSITORY=${{ env.GH_REPOSITORY }}
38
+ location: 'eastus'
.github/workflows/dev-branch-images.yml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Dev Branch Images Build
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ push:
6
+ branches:
7
+ - dev
8
+ paths:
9
+ - 'api/**'
10
+ - 'client/**'
11
+ - 'packages/**'
12
+
13
+ jobs:
14
+ build:
15
+ runs-on: ubuntu-latest
16
+ strategy:
17
+ matrix:
18
+ include:
19
+ - target: api-build
20
+ file: Dockerfile.multi
21
+ image_name: lc-dev-api
22
+ - target: node
23
+ file: Dockerfile
24
+ image_name: lc-dev
25
+
26
+ steps:
27
+ # Check out the repository
28
+ - name: Checkout
29
+ uses: actions/checkout@v4
30
+
31
+ # Set up QEMU
32
+ - name: Set up QEMU
33
+ uses: docker/setup-qemu-action@v3
34
+
35
+ # Set up Docker Buildx
36
+ - name: Set up Docker Buildx
37
+ uses: docker/setup-buildx-action@v3
38
+
39
+ # Log in to GitHub Container Registry
40
+ - name: Log in to GitHub Container Registry
41
+ uses: docker/login-action@v2
42
+ with:
43
+ registry: ghcr.io
44
+ username: ${{ github.actor }}
45
+ password: ${{ secrets.GITHUB_TOKEN }}
46
+
47
+ # Login to Docker Hub
48
+ - name: Login to Docker Hub
49
+ uses: docker/login-action@v3
50
+ with:
51
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
52
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
53
+
54
+ # Prepare the environment
55
+ - name: Prepare environment
56
+ run: |
57
+ cp .env.example .env
58
+
59
+ # Build and push Docker images for each target
60
+ - name: Build and push Docker images
61
+ uses: docker/build-push-action@v5
62
+ with:
63
+ context: .
64
+ file: ${{ matrix.file }}
65
+ push: true
66
+ tags: |
67
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
68
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
69
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
70
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
71
+ platforms: linux/amd64,linux/arm64
72
+ target: ${{ matrix.target }}
.github/workflows/dev-images.yml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Dev Images Build
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ push:
6
+ branches:
7
+ - main
8
+ paths:
9
+ - 'api/**'
10
+ - 'client/**'
11
+ - 'packages/**'
12
+
13
+ jobs:
14
+ build:
15
+ runs-on: ubuntu-latest
16
+ strategy:
17
+ matrix:
18
+ include:
19
+ - target: api-build
20
+ file: Dockerfile.multi
21
+ image_name: librechat-dev-api
22
+ - target: node
23
+ file: Dockerfile
24
+ image_name: librechat-dev
25
+
26
+ steps:
27
+ # Check out the repository
28
+ - name: Checkout
29
+ uses: actions/checkout@v4
30
+
31
+ # Set up QEMU
32
+ - name: Set up QEMU
33
+ uses: docker/setup-qemu-action@v3
34
+
35
+ # Set up Docker Buildx
36
+ - name: Set up Docker Buildx
37
+ uses: docker/setup-buildx-action@v3
38
+
39
+ # Log in to GitHub Container Registry
40
+ - name: Log in to GitHub Container Registry
41
+ uses: docker/login-action@v2
42
+ with:
43
+ registry: ghcr.io
44
+ username: ${{ github.actor }}
45
+ password: ${{ secrets.GITHUB_TOKEN }}
46
+
47
+ # Login to Docker Hub
48
+ - name: Login to Docker Hub
49
+ uses: docker/login-action@v3
50
+ with:
51
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
52
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
53
+
54
+ # Prepare the environment
55
+ - name: Prepare environment
56
+ run: |
57
+ cp .env.example .env
58
+
59
+ # Build and push Docker images for each target
60
+ - name: Build and push Docker images
61
+ uses: docker/build-push-action@v5
62
+ with:
63
+ context: .
64
+ file: ${{ matrix.file }}
65
+ push: true
66
+ tags: |
67
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
68
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
69
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
70
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
71
+ platforms: linux/amd64,linux/arm64
72
+ target: ${{ matrix.target }}
.github/workflows/dev-staging-images.yml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Dev Staging Images Build
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ build:
8
+ runs-on: ubuntu-latest
9
+ strategy:
10
+ matrix:
11
+ include:
12
+ - target: api-build
13
+ file: Dockerfile.multi
14
+ image_name: lc-dev-staging-api
15
+ - target: node
16
+ file: Dockerfile
17
+ image_name: lc-dev-staging
18
+
19
+ steps:
20
+ # Check out the repository
21
+ - name: Checkout
22
+ uses: actions/checkout@v4
23
+
24
+ # Set up QEMU
25
+ - name: Set up QEMU
26
+ uses: docker/setup-qemu-action@v3
27
+
28
+ # Set up Docker Buildx
29
+ - name: Set up Docker Buildx
30
+ uses: docker/setup-buildx-action@v3
31
+
32
+ # Log in to GitHub Container Registry
33
+ - name: Log in to GitHub Container Registry
34
+ uses: docker/login-action@v3
35
+ with:
36
+ registry: ghcr.io
37
+ username: ${{ github.actor }}
38
+ password: ${{ secrets.GITHUB_TOKEN }}
39
+
40
+ # Login to Docker Hub
41
+ - name: Login to Docker Hub
42
+ uses: docker/login-action@v3
43
+ with:
44
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
45
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
46
+
47
+ # Prepare the environment
48
+ - name: Prepare environment
49
+ run: |
50
+ cp .env.example .env
51
+
52
+ # Build and push Docker images for each target
53
+ - name: Build and push Docker images
54
+ uses: docker/build-push-action@v5
55
+ with:
56
+ context: .
57
+ file: ${{ matrix.file }}
58
+ push: true
59
+ tags: |
60
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
61
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
62
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
63
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
64
+ platforms: linux/amd64,linux/arm64
65
+ target: ${{ matrix.target }}
66
+
.github/workflows/eslint-ci.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: ESLint Code Quality Checks
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ - dev
8
+ - release/*
9
+ paths:
10
+ - 'api/**'
11
+ - 'client/**'
12
+
13
+ jobs:
14
+ eslint_checks:
15
+ name: Run ESLint Linting
16
+ runs-on: ubuntu-latest
17
+ permissions:
18
+ contents: read
19
+ security-events: write
20
+ actions: read
21
+ steps:
22
+ - name: Checkout repository
23
+ uses: actions/checkout@v4
24
+ with:
25
+ fetch-depth: 0
26
+
27
+ - name: Set up Node.js 20.x
28
+ uses: actions/setup-node@v4
29
+ with:
30
+ node-version: 20
31
+ cache: npm
32
+
33
+ - name: Install dependencies
34
+ run: npm ci
35
+
36
+ # Run ESLint on changed files within the api/ and client/ directories.
37
+ - name: Run ESLint on changed files
38
+ run: |
39
+ # Extract the base commit SHA from the pull_request event payload.
40
+ BASE_SHA=$(jq --raw-output .pull_request.base.sha "$GITHUB_EVENT_PATH")
41
+ echo "Base commit SHA: $BASE_SHA"
42
+
43
+ # Get changed files (only JS/TS files in api/ or client/)
44
+ CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB "$BASE_SHA" HEAD | grep -E '^(api|client)/.*\.(js|jsx|ts|tsx)$' || true)
45
+
46
+ # Debug output
47
+ echo "Changed files:"
48
+ echo "$CHANGED_FILES"
49
+
50
+ # Ensure there are files to lint before running ESLint
51
+ if [[ -z "$CHANGED_FILES" ]]; then
52
+ echo "No matching files changed. Skipping ESLint."
53
+ exit 0
54
+ fi
55
+
56
+ # Run ESLint
57
+ npx eslint --no-error-on-unmatched-pattern \
58
+ --config eslint.config.mjs \
59
+ $CHANGED_FILES
.github/workflows/frontend-review.yml ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Frontend Unit Tests
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ - dev
8
+ - release/*
9
+ paths:
10
+ - 'client/**'
11
+ - 'packages/data-provider/**'
12
+
13
+ jobs:
14
+ tests_frontend_ubuntu:
15
+ name: Run frontend unit tests on Ubuntu
16
+ timeout-minutes: 60
17
+ runs-on: ubuntu-latest
18
+ steps:
19
+ - uses: actions/checkout@v4
20
+ - name: Use Node.js 20.x
21
+ uses: actions/setup-node@v4
22
+ with:
23
+ node-version: 20
24
+ cache: 'npm'
25
+
26
+ - name: Install dependencies
27
+ run: npm ci
28
+
29
+ - name: Build Client
30
+ run: npm run frontend:ci
31
+
32
+ - name: Run unit tests
33
+ run: npm run test:ci -- --verbose
34
+ working-directory: client
35
+
36
+ tests_frontend_windows:
37
+ name: Run frontend unit tests on Windows
38
+ timeout-minutes: 60
39
+ runs-on: windows-latest
40
+ steps:
41
+ - uses: actions/checkout@v4
42
+ - name: Use Node.js 20.x
43
+ uses: actions/setup-node@v4
44
+ with:
45
+ node-version: 20
46
+ cache: 'npm'
47
+
48
+ - name: Install dependencies
49
+ run: npm ci
50
+
51
+ - name: Build Client
52
+ run: npm run frontend:ci
53
+
54
+ - name: Run unit tests
55
+ run: npm run test:ci -- --verbose
56
+ working-directory: client
.github/workflows/generate_embeddings.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: 'generate_embeddings'
2
+ on:
3
+ workflow_dispatch:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - 'docs/**'
9
+
10
+ jobs:
11
+ generate:
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - uses: actions/checkout@v4
15
+ - uses: supabase/embeddings-generator@v0.0.5
16
+ with:
17
+ supabase-url: ${{ secrets.SUPABASE_URL }}
18
+ supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }}
19
+ openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }}
20
+ docs-root-path: 'docs'
.github/workflows/helmcharts.yml ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build Helm Charts on Tag
2
+
3
+ # The workflow is triggered when a tag is pushed
4
+ on:
5
+ push:
6
+ tags:
7
+ - "chart-*"
8
+
9
+ jobs:
10
+ release:
11
+ permissions:
12
+ contents: write
13
+ packages: write
14
+ runs-on: ubuntu-latest
15
+ steps:
16
+ - name: Checkout
17
+ uses: actions/checkout@v4
18
+ with:
19
+ fetch-depth: 0
20
+
21
+ - name: Configure Git
22
+ run: |
23
+ git config user.name "$GITHUB_ACTOR"
24
+ git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
25
+
26
+ - name: Install Helm
27
+ uses: azure/setup-helm@v4
28
+ env:
29
+ GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
30
+
31
+ - name: Build Subchart Deps
32
+ run: |
33
+ cd helm/librechat
34
+ helm dependency build
35
+ cd ../librechat-rag-api
36
+ helm dependency build
37
+
38
+ - name: Get Chart Version
39
+ id: chart-version
40
+ run: |
41
+ CHART_VERSION=$(echo "${{ github.ref_name }}" | cut -d'-' -f2)
42
+ echo "CHART_VERSION=${CHART_VERSION}" >> "$GITHUB_OUTPUT"
43
+
44
+ # Log in to GitHub Container Registry
45
+ - name: Log in to GitHub Container Registry
46
+ uses: docker/login-action@v3
47
+ with:
48
+ registry: ghcr.io
49
+ username: ${{ github.actor }}
50
+ password: ${{ secrets.GITHUB_TOKEN }}
51
+
52
+ # Run Helm OCI Charts Releaser
53
+ # This is for the librechat chart
54
+ - name: Release Helm OCI Charts for librechat
55
+ uses: appany/helm-oci-chart-releaser@v0.4.2
56
+ with:
57
+ name: librechat
58
+ repository: ${{ github.actor }}/librechat-chart
59
+ tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
60
+ path: helm/librechat
61
+ registry: ghcr.io
62
+ registry_username: ${{ github.actor }}
63
+ registry_password: ${{ secrets.GITHUB_TOKEN }}
64
+
65
+ # this is for the librechat-rag-api chart
66
+ - name: Release Helm OCI Charts for librechat-rag-api
67
+ uses: appany/helm-oci-chart-releaser@v0.4.2
68
+ with:
69
+ name: librechat-rag-api
70
+ repository: ${{ github.actor }}/librechat-chart
71
+ tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
72
+ path: helm/librechat-rag-api
73
+ registry: ghcr.io
74
+ registry_username: ${{ github.actor }}
75
+ registry_password: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/i18n-unused-keys.yml ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Detect Unused i18next Strings
2
+
3
+ # This workflow checks for unused i18n keys in translation files.
4
+ # It has special handling for:
5
+ # - com_ui_special_var_* keys that are dynamically constructed
6
+ # - com_agents_category_* keys that are stored in the database and used dynamically
7
+
8
+ on:
9
+ pull_request:
10
+ paths:
11
+ - "client/src/**"
12
+ - "api/**"
13
+ - "packages/data-provider/src/**"
14
+ - "packages/client/**"
15
+ - "packages/data-schemas/src/**"
16
+
17
+ jobs:
18
+ detect-unused-i18n-keys:
19
+ runs-on: ubuntu-latest
20
+ permissions:
21
+ pull-requests: write
22
+ steps:
23
+ - name: Checkout repository
24
+ uses: actions/checkout@v4
25
+
26
+ - name: Find unused i18next keys
27
+ id: find-unused
28
+ run: |
29
+ echo "🔍 Scanning for unused i18next keys..."
30
+
31
+ # Define paths
32
+ I18N_FILE="client/src/locales/en/translation.json"
33
+ SOURCE_DIRS=("client/src" "api" "packages/data-provider/src" "packages/client" "packages/data-schemas/src")
34
+
35
+ # Check if translation file exists
36
+ if [[ ! -f "$I18N_FILE" ]]; then
37
+ echo "::error title=Missing i18n File::Translation file not found: $I18N_FILE"
38
+ exit 1
39
+ fi
40
+
41
+ # Extract all keys from the JSON file
42
+ KEYS=$(jq -r 'keys[]' "$I18N_FILE")
43
+
44
+ # Track unused keys
45
+ UNUSED_KEYS=()
46
+
47
+ # Check if each key is used in the source code
48
+ for KEY in $KEYS; do
49
+ FOUND=false
50
+
51
+ # Special case for dynamically constructed special variable keys
52
+ if [[ "$KEY" == com_ui_special_var_* ]]; then
53
+ # Check if TSpecialVarLabel is used in the codebase
54
+ for DIR in "${SOURCE_DIRS[@]}"; do
55
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "TSpecialVarLabel" "$DIR"; then
56
+ FOUND=true
57
+ break
58
+ fi
59
+ done
60
+
61
+ # Also check if the key is directly used somewhere
62
+ if [[ "$FOUND" == false ]]; then
63
+ for DIR in "${SOURCE_DIRS[@]}"; do
64
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
65
+ FOUND=true
66
+ break
67
+ fi
68
+ done
69
+ fi
70
+ # Special case for agent category keys that are dynamically used from database
71
+ elif [[ "$KEY" == com_agents_category_* ]]; then
72
+ # Check if agent category localization is being used
73
+ for DIR in "${SOURCE_DIRS[@]}"; do
74
+ # Check for dynamic category label/description usage
75
+ if grep -r --include=\*.{js,jsx,ts,tsx} -E "category\.(label|description).*startsWith.*['\"]com_" "$DIR" > /dev/null 2>&1 || \
76
+ # Check for the method that defines these keys
77
+ grep -r --include=\*.{js,jsx,ts,tsx} "ensureDefaultCategories" "$DIR" > /dev/null 2>&1 || \
78
+ # Check for direct usage in agentCategory.ts
79
+ grep -r --include=\*.ts -E "label:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1 || \
80
+ grep -r --include=\*.ts -E "description:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1; then
81
+ FOUND=true
82
+ break
83
+ fi
84
+ done
85
+
86
+ # Also check if the key is directly used somewhere
87
+ if [[ "$FOUND" == false ]]; then
88
+ for DIR in "${SOURCE_DIRS[@]}"; do
89
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
90
+ FOUND=true
91
+ break
92
+ fi
93
+ done
94
+ fi
95
+ else
96
+ # Regular check for other keys
97
+ for DIR in "${SOURCE_DIRS[@]}"; do
98
+ if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
99
+ FOUND=true
100
+ break
101
+ fi
102
+ done
103
+ fi
104
+
105
+ if [[ "$FOUND" == false ]]; then
106
+ UNUSED_KEYS+=("$KEY")
107
+ fi
108
+ done
109
+
110
+ # Output results
111
+ if [[ ${#UNUSED_KEYS[@]} -gt 0 ]]; then
112
+ echo "🛑 Found ${#UNUSED_KEYS[@]} unused i18n keys:"
113
+ echo "unused_keys=$(echo "${UNUSED_KEYS[@]}" | jq -R -s -c 'split(" ")')" >> $GITHUB_ENV
114
+ for KEY in "${UNUSED_KEYS[@]}"; do
115
+ echo "::warning title=Unused i18n Key::'$KEY' is defined but not used in the codebase."
116
+ done
117
+ else
118
+ echo "✅ No unused i18n keys detected!"
119
+ echo "unused_keys=[]" >> $GITHUB_ENV
120
+ fi
121
+
122
+ - name: Post verified comment on PR
123
+ if: env.unused_keys != '[]'
124
+ run: |
125
+ PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
126
+
127
+ # Format the unused keys list as checkboxes for easy manual checking.
128
+ FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- [ ] `/;s/$/`/' )
129
+
130
+ COMMENT_BODY=$(cat <<EOF
131
+ ### 🚨 Unused i18next Keys Detected
132
+
133
+ The following translation keys are defined in \`translation.json\` but are **not used** in the codebase:
134
+
135
+ $FILTERED_KEYS
136
+
137
+ ⚠️ **Please remove these unused keys to keep the translation files clean.**
138
+ EOF
139
+ )
140
+
141
+ gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
142
+ -f body="$COMMENT_BODY" \
143
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
144
+ env:
145
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
146
+
147
+ - name: Fail workflow if unused keys found
148
+ if: env.unused_keys != '[]'
149
+ run: exit 1
.github/workflows/locize-i18n-sync.yml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync Locize Translations & Create Translation PR
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ repository_dispatch:
7
+ types: [locize/versionPublished]
8
+
9
+ jobs:
10
+ sync-translations:
11
+ name: Sync Translation Keys with Locize
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - name: Checkout Repository
15
+ uses: actions/checkout@v4
16
+
17
+ - name: Set Up Node.js
18
+ uses: actions/setup-node@v4
19
+ with:
20
+ node-version: 20
21
+
22
+ - name: Install locize CLI
23
+ run: npm install -g locize-cli
24
+
25
+ # Sync translations (Push missing keys & remove deleted ones)
26
+ - name: Sync Locize with Repository
27
+ if: ${{ github.event_name == 'push' }}
28
+ run: |
29
+ cd client/src/locales
30
+ locize sync --api-key ${{ secrets.LOCIZE_API_KEY }} --project-id ${{ secrets.LOCIZE_PROJECT_ID }} --language en
31
+
32
+ # When triggered by repository_dispatch, skip sync step.
33
+ - name: Skip sync step on non-push events
34
+ if: ${{ github.event_name != 'push' }}
35
+ run: echo "Skipping sync as the event is not a push."
36
+
37
+ create-pull-request:
38
+ name: Create Translation PR on Version Published
39
+ runs-on: ubuntu-latest
40
+ needs: sync-translations
41
+ permissions:
42
+ contents: write
43
+ pull-requests: write
44
+ steps:
45
+ # 1. Check out the repository.
46
+ - name: Checkout Repository
47
+ uses: actions/checkout@v4
48
+
49
+ # 2. Download translation files from locize.
50
+ - name: Download Translations from locize
51
+ uses: locize/download@v2
52
+ with:
53
+ project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
54
+ path: "client/src/locales"
55
+
56
+ # 3. Create a Pull Request using built-in functionality.
57
+ - name: Create Pull Request
58
+ uses: peter-evans/create-pull-request@v7
59
+ with:
60
+ token: ${{ secrets.GITHUB_TOKEN }}
61
+ sign-commits: true
62
+ commit-message: "🌍 i18n: Update translation.json with latest translations"
63
+ base: main
64
+ branch: i18n/locize-translation-update
65
+ reviewers: danny-avila
66
+ title: "🌍 i18n: Update translation.json with latest translations"
67
+ body: |
68
+ **Description**:
69
+ - 🎯 **Objective**: Update `translation.json` with the latest translations from locize.
70
+ - 🔍 **Details**: This PR is automatically generated upon receiving a versionPublished event with version "latest". It reflects the newest translations provided by locize.
71
+ - ✅ **Status**: Ready for review.
72
+ labels: "🌍 i18n"
.github/workflows/main-image-workflow.yml ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Compose Build Latest Main Image Tag (Manual Dispatch)
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ build:
8
+ runs-on: ubuntu-latest
9
+ strategy:
10
+ matrix:
11
+ include:
12
+ - target: api-build
13
+ file: Dockerfile.multi
14
+ image_name: librechat-api
15
+ - target: node
16
+ file: Dockerfile
17
+ image_name: librechat
18
+
19
+ steps:
20
+ - name: Checkout
21
+ uses: actions/checkout@v4
22
+
23
+ - name: Fetch tags and set the latest tag
24
+ run: |
25
+ git fetch --tags
26
+ echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV
27
+
28
+ # Set up QEMU
29
+ - name: Set up QEMU
30
+ uses: docker/setup-qemu-action@v3
31
+
32
+ # Set up Docker Buildx
33
+ - name: Set up Docker Buildx
34
+ uses: docker/setup-buildx-action@v3
35
+
36
+ # Log in to GitHub Container Registry
37
+ - name: Log in to GitHub Container Registry
38
+ uses: docker/login-action@v3
39
+ with:
40
+ registry: ghcr.io
41
+ username: ${{ github.actor }}
42
+ password: ${{ secrets.GITHUB_TOKEN }}
43
+
44
+ # Login to Docker Hub
45
+ - name: Login to Docker Hub
46
+ uses: docker/login-action@v3
47
+ with:
48
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
49
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
50
+
51
+ # Prepare the environment
52
+ - name: Prepare environment
53
+ run: |
54
+ cp .env.example .env
55
+
56
+ # Build and push Docker images for each target
57
+ - name: Build and push Docker images
58
+ uses: docker/build-push-action@v5
59
+ with:
60
+ context: .
61
+ file: ${{ matrix.file }}
62
+ push: true
63
+ tags: |
64
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
65
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
66
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
67
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
68
+ platforms: linux/amd64,linux/arm64
69
+ target: ${{ matrix.target }}
.github/workflows/tag-images.yml ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Images Build on Tag
2
+
3
+ on:
4
+ push:
5
+ tags:
6
+ - '*'
7
+
8
+ jobs:
9
+ build:
10
+ runs-on: ubuntu-latest
11
+ strategy:
12
+ matrix:
13
+ include:
14
+ - target: api-build
15
+ file: Dockerfile.multi
16
+ image_name: librechat-api
17
+ - target: node
18
+ file: Dockerfile
19
+ image_name: librechat
20
+
21
+ steps:
22
+ # Check out the repository
23
+ - name: Checkout
24
+ uses: actions/checkout@v4
25
+
26
+ # Set up QEMU
27
+ - name: Set up QEMU
28
+ uses: docker/setup-qemu-action@v3
29
+
30
+ # Set up Docker Buildx
31
+ - name: Set up Docker Buildx
32
+ uses: docker/setup-buildx-action@v3
33
+
34
+ # Log in to GitHub Container Registry
35
+ - name: Log in to GitHub Container Registry
36
+ uses: docker/login-action@v2
37
+ with:
38
+ registry: ghcr.io
39
+ username: ${{ github.actor }}
40
+ password: ${{ secrets.GITHUB_TOKEN }}
41
+
42
+ # Login to Docker Hub
43
+ - name: Login to Docker Hub
44
+ uses: docker/login-action@v3
45
+ with:
46
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
47
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
48
+
49
+ # Prepare the environment
50
+ - name: Prepare environment
51
+ run: |
52
+ cp .env.example .env
53
+
54
+ # Build and push Docker images for each target
55
+ - name: Build and push Docker images
56
+ uses: docker/build-push-action@v5
57
+ with:
58
+ context: .
59
+ file: ${{ matrix.file }}
60
+ push: true
61
+ tags: |
62
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }}
63
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
64
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }}
65
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
66
+ platforms: linux/amd64,linux/arm64
67
+ target: ${{ matrix.target }}
.github/workflows/unused-packages.yml ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Detect Unused NPM Packages
2
+
3
+ on:
4
+ pull_request:
5
+ paths:
6
+ - 'package.json'
7
+ - 'package-lock.json'
8
+ - 'client/**'
9
+ - 'api/**'
10
+ - 'packages/client/**'
11
+
12
+ jobs:
13
+ detect-unused-packages:
14
+ runs-on: ubuntu-latest
15
+ permissions:
16
+ pull-requests: write
17
+
18
+ steps:
19
+ - uses: actions/checkout@v4
20
+
21
+ - name: Use Node.js 20.x
22
+ uses: actions/setup-node@v4
23
+ with:
24
+ node-version: 20
25
+ cache: 'npm'
26
+
27
+ - name: Install depcheck
28
+ run: npm install -g depcheck
29
+
30
+ - name: Validate JSON files
31
+ run: |
32
+ for FILE in package.json client/package.json api/package.json packages/client/package.json; do
33
+ if [[ -f "$FILE" ]]; then
34
+ jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
35
+ fi
36
+ done
37
+
38
+ - name: Extract Dependencies Used in Scripts
39
+ id: extract-used-scripts
40
+ run: |
41
+ extract_deps_from_scripts() {
42
+ local package_file=$1
43
+ if [[ -f "$package_file" ]]; then
44
+ jq -r '.scripts | to_entries[].value' "$package_file" | \
45
+ grep -oE '([a-zA-Z0-9_-]+)' | sort -u > used_scripts.txt
46
+ else
47
+ touch used_scripts.txt
48
+ fi
49
+ }
50
+
51
+ extract_deps_from_scripts "package.json"
52
+ mv used_scripts.txt root_used_deps.txt
53
+
54
+ extract_deps_from_scripts "client/package.json"
55
+ mv used_scripts.txt client_used_deps.txt
56
+
57
+ extract_deps_from_scripts "api/package.json"
58
+ mv used_scripts.txt api_used_deps.txt
59
+
60
+ - name: Extract Dependencies Used in Source Code
61
+ id: extract-used-code
62
+ run: |
63
+ extract_deps_from_code() {
64
+ local folder=$1
65
+ local output_file=$2
66
+ if [[ -d "$folder" ]]; then
67
+ # Extract require() statements
68
+ grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
69
+ sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
70
+
71
+ # Extract ES6 imports - various patterns
72
+ # import x from 'module'
73
+ grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
74
+ sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
75
+
76
+ # import 'module' (side-effect imports)
77
+ grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
78
+ sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
79
+
80
+ # export { x } from 'module' or export * from 'module'
81
+ grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
82
+ sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
83
+
84
+ # import type { x } from 'module' (TypeScript)
85
+ grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
86
+ sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
87
+
88
+ # Remove subpath imports but keep the base package
89
+ # e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
90
+ sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
91
+
92
+ sort -u "$output_file" -o "$output_file"
93
+ else
94
+ touch "$output_file"
95
+ fi
96
+ }
97
+
98
+ extract_deps_from_code "." root_used_code.txt
99
+ extract_deps_from_code "client" client_used_code.txt
100
+ extract_deps_from_code "api" api_used_code.txt
101
+
102
+ # Extract dependencies used by @librechat/client package
103
+ extract_deps_from_code "packages/client" packages_client_used_code.txt
104
+
105
+ - name: Get @librechat/client dependencies
106
+ id: get-librechat-client-deps
107
+ run: |
108
+ if [[ -f "packages/client/package.json" ]]; then
109
+ # Get all dependencies from @librechat/client (dependencies, devDependencies, and peerDependencies)
110
+ DEPS=$(jq -r '.dependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
111
+ DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
112
+ PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
113
+
114
+ # Combine all dependencies
115
+ echo "$DEPS" > librechat_client_deps.txt
116
+ echo "$DEV_DEPS" >> librechat_client_deps.txt
117
+ echo "$PEER_DEPS" >> librechat_client_deps.txt
118
+
119
+ # Also include dependencies that are imported in packages/client
120
+ cat packages_client_used_code.txt >> librechat_client_deps.txt
121
+
122
+ # Remove empty lines and sort
123
+ grep -v '^$' librechat_client_deps.txt | sort -u > temp_deps.txt
124
+ mv temp_deps.txt librechat_client_deps.txt
125
+ else
126
+ touch librechat_client_deps.txt
127
+ fi
128
+
129
+ - name: Extract Workspace Dependencies
130
+ id: extract-workspace-deps
131
+ run: |
132
+ # Function to get dependencies from a workspace package that are used by another package
133
+ get_workspace_package_deps() {
134
+ local package_json=$1
135
+ local output_file=$2
136
+
137
+ # Get all workspace dependencies (starting with @librechat/)
138
+ if [[ -f "$package_json" ]]; then
139
+ local workspace_deps=$(jq -r '.dependencies // {} | to_entries[] | select(.key | startswith("@librechat/")) | .key' "$package_json" 2>/dev/null || echo "")
140
+
141
+ # For each workspace dependency, get its dependencies
142
+ for dep in $workspace_deps; do
143
+ # Convert @librechat/api to packages/api
144
+ local workspace_path=$(echo "$dep" | sed 's/@librechat\//packages\//')
145
+ local workspace_package_json="${workspace_path}/package.json"
146
+
147
+ if [[ -f "$workspace_package_json" ]]; then
148
+ # Extract all dependencies from the workspace package
149
+ jq -r '.dependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
150
+ # Also extract peerDependencies
151
+ jq -r '.peerDependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
152
+ fi
153
+ done
154
+ fi
155
+
156
+ if [[ -f "$output_file" ]]; then
157
+ sort -u "$output_file" -o "$output_file"
158
+ else
159
+ touch "$output_file"
160
+ fi
161
+ }
162
+
163
+ # Get workspace dependencies for each package
164
+ get_workspace_package_deps "package.json" root_workspace_deps.txt
165
+ get_workspace_package_deps "client/package.json" client_workspace_deps.txt
166
+ get_workspace_package_deps "api/package.json" api_workspace_deps.txt
167
+
168
+ - name: Run depcheck for root package.json
169
+ id: check-root
170
+ run: |
171
+ if [[ -f "package.json" ]]; then
172
+ UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
173
+ # Exclude dependencies used in scripts, code, and workspace packages
174
+ UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt root_workspace_deps.txt | sort) || echo "")
175
+ echo "ROOT_UNUSED<<EOF" >> $GITHUB_ENV
176
+ echo "$UNUSED" >> $GITHUB_ENV
177
+ echo "EOF" >> $GITHUB_ENV
178
+ fi
179
+
180
+ - name: Run depcheck for client/package.json
181
+ id: check-client
182
+ run: |
183
+ if [[ -f "client/package.json" ]]; then
184
+ chmod -R 755 client
185
+ cd client
186
+ UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
187
+ # Exclude dependencies used in scripts, code, and workspace packages
188
+ UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
189
+ # Filter out false positives
190
+ UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
191
+ echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
192
+ echo "$UNUSED" >> $GITHUB_ENV
193
+ echo "EOF" >> $GITHUB_ENV
194
+ cd ..
195
+ fi
196
+
197
+ - name: Run depcheck for api/package.json
198
+ id: check-api
199
+ run: |
200
+ if [[ -f "api/package.json" ]]; then
201
+ chmod -R 755 api
202
+ cd api
203
+ UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
204
+ # Exclude dependencies used in scripts, code, and workspace packages
205
+ UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
206
+ echo "API_UNUSED<<EOF" >> $GITHUB_ENV
207
+ echo "$UNUSED" >> $GITHUB_ENV
208
+ echo "EOF" >> $GITHUB_ENV
209
+ cd ..
210
+ fi
211
+
212
+ - name: Post comment on PR if unused dependencies are found
213
+ if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
214
+ run: |
215
+ PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
216
+
217
+ ROOT_LIST=$(echo "$ROOT_UNUSED" | awk '{print "- `" $0 "`"}')
218
+ CLIENT_LIST=$(echo "$CLIENT_UNUSED" | awk '{print "- `" $0 "`"}')
219
+ API_LIST=$(echo "$API_UNUSED" | awk '{print "- `" $0 "`"}')
220
+
221
+ COMMENT_BODY=$(cat <<EOF
222
+ ### 🚨 Unused NPM Packages Detected
223
+
224
+ The following **unused dependencies** were found:
225
+
226
+ $(if [[ ! -z "$ROOT_UNUSED" ]]; then echo "#### 📂 Root \`package.json\`"; echo ""; echo "$ROOT_LIST"; echo ""; fi)
227
+
228
+ $(if [[ ! -z "$CLIENT_UNUSED" ]]; then echo "#### 📂 Client \`client/package.json\`"; echo ""; echo "$CLIENT_LIST"; echo ""; fi)
229
+
230
+ $(if [[ ! -z "$API_UNUSED" ]]; then echo "#### 📂 API \`api/package.json\`"; echo ""; echo "$API_LIST"; echo ""; fi)
231
+
232
+ ⚠️ **Please remove these unused dependencies to keep your project clean.**
233
+ EOF
234
+ )
235
+
236
+ gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
237
+ -f body="$COMMENT_BODY" \
238
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
239
+ env:
240
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
241
+
242
+ - name: Fail workflow if unused dependencies found
243
+ if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
244
+ run: exit 1
.gitignore ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### node etc ###
2
+
3
+ # Logs
4
+ data-node
5
+ meili_data*
6
+ data/
7
+ logs
8
+ *.log
9
+
10
+ # Runtime data
11
+ pids
12
+ *.pid
13
+ *.seed
14
+ .git
15
+
16
+ # CI/CD data
17
+ test-image*
18
+
19
+ # Directory for instrumented libs generated by jscoverage/JSCover
20
+ lib-cov
21
+
22
+ # Coverage directory used by tools like istanbul
23
+ coverage
24
+
25
+ # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
26
+ .grunt
27
+
28
+ # translation services
29
+ config/translations/stores/*
30
+ client/src/localization/languages/*_missing_keys.json
31
+
32
+ # Compiled Dirs (http://nodejs.org/api/addons.html)
33
+ build/
34
+ dist/
35
+ public/main.js
36
+ public/main.js.map
37
+ public/main.js.LICENSE.txt
38
+ client/public/images/
39
+ client/public/main.js
40
+ client/public/main.js.map
41
+ client/public/main.js.LICENSE.txt
42
+
43
+ # Azure Blob Storage Emulator (Azurite)
44
+ __azurite**
45
+ __blobstorage__/**/*
46
+
47
+ # Dependency directories
48
+ # Deployed apps should consider commenting these lines out:
49
+ # see https://npmjs.org/doc/faq.html#Should-I-check-my-node_modules-folder-into-git
50
+ node_modules/
51
+ meili_data/
52
+ api/node_modules/
53
+ client/node_modules/
54
+ bower_components/
55
+ *.d.ts
56
+ !vite-env.d.ts
57
+
58
+ # AI
59
+ .clineignore
60
+ .cursor
61
+ .aider*
62
+
63
+ # Floobits
64
+ .floo
65
+ .floobit
66
+ .floo
67
+ .flooignore
68
+
69
+ #config file
70
+ librechat.yaml
71
+ librechat.yml
72
+
73
+ # Environment
74
+ .npmrc
75
+ .env*
76
+ my.secrets
77
+ !**/.env.example
78
+ !**/.env.test.example
79
+ cache.json
80
+ api/data/
81
+ owner.yml
82
+ archive
83
+ .vscode/settings.json
84
+ src/style - official.css
85
+ /e2e/specs/.test-results/
86
+ /e2e/playwright-report/
87
+ /playwright/.cache/
88
+ .DS_Store
89
+ *.code-workspace
90
+ .idx
91
+ monospace.json
92
+ .idea
93
+ *.iml
94
+ *.pem
95
+ config.local.ts
96
+ **/storageState.json
97
+ junit.xml
98
+ **/.venv/
99
+ **/venv/
100
+
101
+ # docker override file
102
+ docker-compose.override.yaml
103
+ docker-compose.override.yml
104
+
105
+ # meilisearch
106
+ meilisearch
107
+ meilisearch.exe
108
+ data.ms/*
109
+ auth.json
110
+
111
+ /packages/ux-shared/
112
+ /images
113
+
114
+ !client/src/components/Nav/SettingsTabs/Data/
115
+
116
+ # User uploads
117
+ uploads/
118
+
119
+ # owner
120
+ release/
121
+
122
+ # Helm
123
+ helm/librechat/Chart.lock
124
+ helm/**/charts/
125
+ helm/**/.values.yaml
126
+
127
+ !/client/src/@types/i18next.d.ts
128
+
129
+ # SAML Idp cert
130
+ *.cert
131
+
132
+ # AI Assistants
133
+ /.claude/
134
+ /.cursor/
135
+ /.copilot/
136
+ /.aider/
137
+ /.openai/
138
+ /.tabnine/
139
+ /.codeium
140
+ *.local.md
141
+
142
+
143
+ # Removed Windows wrapper files per user request
144
+ hive-mind-prompt-*.txt
145
+
146
+ # Claude Flow generated files
147
+ .claude/settings.local.json
148
+ .mcp.json
149
+ claude-flow.config.json
150
+ .swarm/
151
+ .hive-mind/
152
+ .claude-flow/
153
+ memory/
154
+ coordination/
155
+ memory/claude-flow-data.json
156
+ memory/sessions/*
157
+ !memory/sessions/README.md
158
+ memory/agents/*
159
+ !memory/agents/README.md
160
+ coordination/memory_bank/*
161
+ coordination/subtasks/*
162
+ coordination/orchestration/*
163
+ *.db
164
+ *.db-journal
165
+ *.db-wal
166
+ *.sqlite
167
+ *.sqlite-journal
168
+ *.sqlite-wal
169
+ claude-flow
170
+ # Removed Windows wrapper files per user request
171
+ hive-mind-prompt-*.txt
.husky/lint-staged.config.js ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ module.exports = {
2
+ '*.{js,jsx,ts,tsx}': ['prettier --write', 'eslint --fix', 'eslint'],
3
+ '*.json': ['prettier --write'],
4
+ };
.husky/pre-commit ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [ -n "$CI" ] && exit 0
2
+ npx lint-staged --config ./.husky/lint-staged.config.js
.prettierrc ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "tailwindConfig": "./client/tailwind.config.cjs",
3
+ "printWidth": 100,
4
+ "tabWidth": 2,
5
+ "useTabs": false,
6
+ "semi": true,
7
+ "singleQuote": true,
8
+ "trailingComma": "all",
9
+ "arrowParens": "always",
10
+ "embeddedLanguageFormatting": "auto",
11
+ "insertPragma": false,
12
+ "proseWrap": "preserve",
13
+ "quoteProps": "as-needed",
14
+ "requirePragma": false,
15
+ "rangeStart": 0,
16
+ "endOfLine": "auto",
17
+ "jsxSingleQuote": false,
18
+ "plugins": ["prettier-plugin-tailwindcss"]
19
+ }
.vscode/launch.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "0.2.0",
3
+ "configurations": [
4
+ {
5
+ "type": "node",
6
+ "request": "launch",
7
+ "name": "Launch LibreChat (debug)",
8
+ "skipFiles": ["<node_internals>/**"],
9
+ "program": "${workspaceFolder}/api/server/index.js",
10
+ "env": {
11
+ "NODE_ENV": "production",
12
+ "NODE_TLS_REJECT_UNAUTHORIZED": "0"
13
+ },
14
+ "console": "integratedTerminal",
15
+ "envFile": "${workspaceFolder}/.env"
16
+ }
17
+ ]
18
+ }
CHANGELOG.md ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Changelog
2
+
3
+ All notable changes to this project will be documented in this file.
4
+
5
+
6
+
7
+
8
+
9
+
10
+ ## [Unreleased]
11
+
12
+ ### ✨ New Features
13
+
14
+ - ✨ feat: implement search parameter updates by **@mawburn** in [#7151](https://github.com/danny-avila/LibreChat/pull/7151)
15
+ - 🎏 feat: Add MCP support for Streamable HTTP Transport by **@benverhees** in [#7353](https://github.com/danny-avila/LibreChat/pull/7353)
16
+ - 🔒 feat: Add Content Security Policy using Helmet middleware by **@rubentalstra** in [#7377](https://github.com/danny-avila/LibreChat/pull/7377)
17
+ - ✨ feat: Add Normalization for MCP Server Names by **@danny-avila** in [#7421](https://github.com/danny-avila/LibreChat/pull/7421)
18
+ - 📊 feat: Improve Helm Chart by **@hofq** in [#3638](https://github.com/danny-avila/LibreChat/pull/3638)
19
+ - 🦾 feat: Claude-4 Support by **@danny-avila** in [#7509](https://github.com/danny-avila/LibreChat/pull/7509)
20
+ - 🪨 feat: Bedrock Support for Claude-4 Reasoning by **@danny-avila** in [#7517](https://github.com/danny-avila/LibreChat/pull/7517)
21
+
22
+ ### 🌍 Internationalization
23
+
24
+ - 🌍 i18n: Add `Danish` and `Czech` and `Catalan` localization support by **@rubentalstra** in [#7373](https://github.com/danny-avila/LibreChat/pull/7373)
25
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7375](https://github.com/danny-avila/LibreChat/pull/7375)
26
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7468](https://github.com/danny-avila/LibreChat/pull/7468)
27
+
28
+ ### 🔧 Fixes
29
+
30
+ - 💬 fix: update aria-label for accessibility in ConvoLink component by **@berry-13** in [#7320](https://github.com/danny-avila/LibreChat/pull/7320)
31
+ - 🔑 fix: use `apiKey` instead of `openAIApiKey` in OpenAI-like Config by **@danny-avila** in [#7337](https://github.com/danny-avila/LibreChat/pull/7337)
32
+ - 🔄 fix: update navigation logic in `useFocusChatEffect` to ensure correct search parameters are used by **@mawburn** in [#7340](https://github.com/danny-avila/LibreChat/pull/7340)
33
+ - 🔄 fix: Improve MCP Connection Cleanup by **@danny-avila** in [#7400](https://github.com/danny-avila/LibreChat/pull/7400)
34
+ - 🛡️ fix: Preset and Validation Logic for URL Query Params by **@danny-avila** in [#7407](https://github.com/danny-avila/LibreChat/pull/7407)
35
+ - 🌘 fix: artifact of preview text is illegible in dark mode by **@nhtruong** in [#7405](https://github.com/danny-avila/LibreChat/pull/7405)
36
+ - 🛡️ fix: Temporarily Remove CSP until Configurable by **@danny-avila** in [#7419](https://github.com/danny-avila/LibreChat/pull/7419)
37
+ - 💽 fix: Exclude index page `/` from static cache settings by **@sbruel** in [#7382](https://github.com/danny-avila/LibreChat/pull/7382)
38
+
39
+ ### ⚙️ Other Changes
40
+
41
+ - 📜 docs: CHANGELOG for release v0.7.8 by **@github-actions[bot]** in [#7290](https://github.com/danny-avila/LibreChat/pull/7290)
42
+ - 📦 chore: Update API Package Dependencies by **@danny-avila** in [#7359](https://github.com/danny-avila/LibreChat/pull/7359)
43
+ - 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7321](https://github.com/danny-avila/LibreChat/pull/7321)
44
+ - 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7434](https://github.com/danny-avila/LibreChat/pull/7434)
45
+ - 🛡️ chore: `multer` v2.0.0 for CVE-2025-47935 and CVE-2025-47944 by **@danny-avila** in [#7454](https://github.com/danny-avila/LibreChat/pull/7454)
46
+ - 📂 refactor: Improve `FileAttachment` & File Form Deletion by **@danny-avila** in [#7471](https://github.com/danny-avila/LibreChat/pull/7471)
47
+ - 📊 chore: Remove Old Helm Chart by **@hofq** in [#7512](https://github.com/danny-avila/LibreChat/pull/7512)
48
+ - 🪖 chore: bump helm app version to v0.7.8 by **@austin-barrington** in [#7524](https://github.com/danny-avila/LibreChat/pull/7524)
49
+
50
+
51
+
52
+ ---
53
+ ## [v0.7.8] -
54
+
55
+ Changes from v0.7.8-rc1 to v0.7.8.
56
+
57
+ ### ✨ New Features
58
+
59
+ - ✨ feat: Enhance form submission for touch screens by **@berry-13** in [#7198](https://github.com/danny-avila/LibreChat/pull/7198)
60
+ - 🔍 feat: Additional Tavily API Tool Parameters by **@glowforge-opensource** in [#7232](https://github.com/danny-avila/LibreChat/pull/7232)
61
+ - 🐋 feat: Add python to Dockerfile for increased MCP compatibility by **@technicalpickles** in [#7270](https://github.com/danny-avila/LibreChat/pull/7270)
62
+
63
+ ### 🔧 Fixes
64
+
65
+ - 🔧 fix: Google Gemma Support & OpenAI Reasoning Instructions by **@danny-avila** in [#7196](https://github.com/danny-avila/LibreChat/pull/7196)
66
+ - 🛠️ fix: Conversation Navigation State by **@danny-avila** in [#7210](https://github.com/danny-avila/LibreChat/pull/7210)
67
+ - 🔄 fix: o-Series Model Regex for System Messages by **@danny-avila** in [#7245](https://github.com/danny-avila/LibreChat/pull/7245)
68
+ - 🔖 fix: Custom Headers for Initial MCP SSE Connection by **@danny-avila** in [#7246](https://github.com/danny-avila/LibreChat/pull/7246)
69
+ - 🛡️ fix: Deep Clone `MCPOptions` for User MCP Connections by **@danny-avila** in [#7247](https://github.com/danny-avila/LibreChat/pull/7247)
70
+ - 🔄 fix: URL Param Race Condition and File Draft Persistence by **@danny-avila** in [#7257](https://github.com/danny-avila/LibreChat/pull/7257)
71
+ - 🔄 fix: Assistants Endpoint & Minor Issues by **@danny-avila** in [#7274](https://github.com/danny-avila/LibreChat/pull/7274)
72
+ - 🔄 fix: Ollama Think Tag Edge Case with Tools by **@danny-avila** in [#7275](https://github.com/danny-avila/LibreChat/pull/7275)
73
+
74
+ ### ⚙️ Other Changes
75
+
76
+ - 📜 docs: CHANGELOG for release v0.7.8-rc1 by **@github-actions[bot]** in [#7153](https://github.com/danny-avila/LibreChat/pull/7153)
77
+ - 🔄 refactor: Artifact Visibility Management by **@danny-avila** in [#7181](https://github.com/danny-avila/LibreChat/pull/7181)
78
+ - 📦 chore: Bump Package Security by **@danny-avila** in [#7183](https://github.com/danny-avila/LibreChat/pull/7183)
79
+ - 🌿 refactor: Unmount Fork Popover on Hide for Better Performance by **@danny-avila** in [#7189](https://github.com/danny-avila/LibreChat/pull/7189)
80
+ - 🧰 chore: ESLint configuration to enforce Prettier formatting rules by **@mawburn** in [#7186](https://github.com/danny-avila/LibreChat/pull/7186)
81
+ - 🎨 style: Improve KaTeX Rendering for LaTeX Equations by **@andresgit** in [#7223](https://github.com/danny-avila/LibreChat/pull/7223)
82
+ - 📝 docs: Update `.env.example` Google models by **@marlonka** in [#7254](https://github.com/danny-avila/LibreChat/pull/7254)
83
+ - 💬 refactor: MCP Chat Visibility Option, Google Rates, Remove OpenAPI Plugins by **@danny-avila** in [#7286](https://github.com/danny-avila/LibreChat/pull/7286)
84
+ - 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7214](https://github.com/danny-avila/LibreChat/pull/7214)
85
+
86
+
87
+
88
+ [See full release details][release-v0.7.8]
89
+
90
+ [release-v0.7.8]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8
91
+
92
+ ---
93
+ ## [v0.7.8-rc1] -
94
+
95
+ Changes from v0.7.7 to v0.7.8-rc1.
96
+
97
+ ### ✨ New Features
98
+
99
+ - 🔍 feat: Mistral OCR API / Upload Files as Text by **@danny-avila** in [#6274](https://github.com/danny-avila/LibreChat/pull/6274)
100
+ - 🤖 feat: Support OpenAI Web Search models by **@danny-avila** in [#6313](https://github.com/danny-avila/LibreChat/pull/6313)
101
+ - 🔗 feat: Agent Chain (Mixture-of-Agents) by **@danny-avila** in [#6374](https://github.com/danny-avila/LibreChat/pull/6374)
102
+ - ⌛ feat: `initTimeout` for Slow Starting MCP Servers by **@perweij** in [#6383](https://github.com/danny-avila/LibreChat/pull/6383)
103
+ - 🚀 feat: `S3` Integration for File handling and Image uploads by **@rubentalstra** in [#6142](https://github.com/danny-avila/LibreChat/pull/6142)
104
+ - 🔒feat: Enable OpenID Auto-Redirect by **@leondape** in [#6066](https://github.com/danny-avila/LibreChat/pull/6066)
105
+ - 🚀 feat: Integrate `Azure Blob Storage` for file handling and image uploads by **@rubentalstra** in [#6153](https://github.com/danny-avila/LibreChat/pull/6153)
106
+ - 🚀 feat: Add support for custom `AWS` endpoint in `S3` by **@rubentalstra** in [#6431](https://github.com/danny-avila/LibreChat/pull/6431)
107
+ - 🚀 feat: Add support for LDAP STARTTLS in LDAP authentication by **@rubentalstra** in [#6438](https://github.com/danny-avila/LibreChat/pull/6438)
108
+ - 🚀 feat: Refactor schema exports and update package version to 0.0.4 by **@rubentalstra** in [#6455](https://github.com/danny-avila/LibreChat/pull/6455)
109
+ - 🔼 feat: Add Auto Submit For URL Query Params by **@mjaverto** in [#6440](https://github.com/danny-avila/LibreChat/pull/6440)
110
+ - 🛠 feat: Enhance Redis Integration, Rate Limiters & Log Headers by **@danny-avila** in [#6462](https://github.com/danny-avila/LibreChat/pull/6462)
111
+ - 💵 feat: Add Automatic Balance Refill by **@rubentalstra** in [#6452](https://github.com/danny-avila/LibreChat/pull/6452)
112
+ - 🗣️ feat: add support for gpt-4o-transcribe models by **@berry-13** in [#6483](https://github.com/danny-avila/LibreChat/pull/6483)
113
+ - 🎨 feat: UI Refresh for Enhanced UX by **@berry-13** in [#6346](https://github.com/danny-avila/LibreChat/pull/6346)
114
+ - 🌍 feat: Add support for Hungarian language localization by **@rubentalstra** in [#6508](https://github.com/danny-avila/LibreChat/pull/6508)
115
+ - 🚀 feat: Add Gemini 2.5 Token/Context Values, Increase Max Possible Output to 64k by **@danny-avila** in [#6563](https://github.com/danny-avila/LibreChat/pull/6563)
116
+ - 🚀 feat: Enhance MCP Connections For Multi-User Support by **@danny-avila** in [#6610](https://github.com/danny-avila/LibreChat/pull/6610)
117
+ - 🚀 feat: Enhance S3 URL Expiry with Refresh; fix: S3 File Deletion by **@danny-avila** in [#6647](https://github.com/danny-avila/LibreChat/pull/6647)
118
+ - 🚀 feat: enhance UI components and refactor settings by **@berry-13** in [#6625](https://github.com/danny-avila/LibreChat/pull/6625)
119
+ - 💬 feat: move TemporaryChat to the Header by **@berry-13** in [#6646](https://github.com/danny-avila/LibreChat/pull/6646)
120
+ - 🚀 feat: Use Model Specs + Specific Endpoints, Limit Providers for Agents by **@danny-avila** in [#6650](https://github.com/danny-avila/LibreChat/pull/6650)
121
+ - 🪙 feat: Sync Balance Config on Login by **@danny-avila** in [#6671](https://github.com/danny-avila/LibreChat/pull/6671)
122
+ - 🔦 feat: MCP Support for Non-Agent Endpoints by **@danny-avila** in [#6775](https://github.com/danny-avila/LibreChat/pull/6775)
123
+ - 🗃️ feat: Code Interpreter File Persistence between Sessions by **@danny-avila** in [#6790](https://github.com/danny-avila/LibreChat/pull/6790)
124
+ - 🖥️ feat: Code Interpreter API for Non-Agent Endpoints by **@danny-avila** in [#6803](https://github.com/danny-avila/LibreChat/pull/6803)
125
+ - ⚡ feat: Self-hosted Artifacts Static Bundler URL by **@danny-avila** in [#6827](https://github.com/danny-avila/LibreChat/pull/6827)
126
+ - 🐳 feat: Add Jemalloc and UV to Docker Builds by **@danny-avila** in [#6836](https://github.com/danny-avila/LibreChat/pull/6836)
127
+ - 🤖 feat: GPT-4.1 by **@danny-avila** in [#6880](https://github.com/danny-avila/LibreChat/pull/6880)
128
+ - 👋 feat: remove Edge TTS by **@berry-13** in [#6885](https://github.com/danny-avila/LibreChat/pull/6885)
129
+ - feat: nav optimization by **@berry-13** in [#5785](https://github.com/danny-avila/LibreChat/pull/5785)
130
+ - 🗺️ feat: Add Parameter Location Mapping for OpenAPI actions by **@peeeteeer** in [#6858](https://github.com/danny-avila/LibreChat/pull/6858)
131
+ - 🤖 feat: Support `o4-mini` and `o3` Models by **@danny-avila** in [#6928](https://github.com/danny-avila/LibreChat/pull/6928)
132
+ - 🎨 feat: OpenAI Image Tools (GPT-Image-1) by **@danny-avila** in [#7079](https://github.com/danny-avila/LibreChat/pull/7079)
133
+ - 🗓️ feat: Add Special Variables for Prompts & Agents, Prompt UI Improvements by **@danny-avila** in [#7123](https://github.com/danny-avila/LibreChat/pull/7123)
134
+
135
+ ### 🌍 Internationalization
136
+
137
+ - 🌍 i18n: Add Thai Language Support and Update Translations by **@rubentalstra** in [#6219](https://github.com/danny-avila/LibreChat/pull/6219)
138
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6220](https://github.com/danny-avila/LibreChat/pull/6220)
139
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6240](https://github.com/danny-avila/LibreChat/pull/6240)
140
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6241](https://github.com/danny-avila/LibreChat/pull/6241)
141
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6277](https://github.com/danny-avila/LibreChat/pull/6277)
142
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6414](https://github.com/danny-avila/LibreChat/pull/6414)
143
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6505](https://github.com/danny-avila/LibreChat/pull/6505)
144
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6530](https://github.com/danny-avila/LibreChat/pull/6530)
145
+ - 🌍 i18n: Add Persian Localization Support by **@rubentalstra** in [#6669](https://github.com/danny-avila/LibreChat/pull/6669)
146
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6667](https://github.com/danny-avila/LibreChat/pull/6667)
147
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7126](https://github.com/danny-avila/LibreChat/pull/7126)
148
+ - 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7148](https://github.com/danny-avila/LibreChat/pull/7148)
149
+
150
+ ### 👐 Accessibility
151
+
152
+ - 🎨 a11y: Update Model Spec Description Text by **@berry-13** in [#6294](https://github.com/danny-avila/LibreChat/pull/6294)
153
+ - 🗑️ a11y: Add Accessible Name to Button for File Attachment Removal by **@kangabell** in [#6709](https://github.com/danny-avila/LibreChat/pull/6709)
154
+ - ⌨️ a11y: enhance accessibility & visual consistency by **@berry-13** in [#6866](https://github.com/danny-avila/LibreChat/pull/6866)
155
+ - 🙌 a11y: Searchbar/Conversations List Focus by **@danny-avila** in [#7096](https://github.com/danny-avila/LibreChat/pull/7096)
156
+ - 👐 a11y: Improve Fork and SplitText Accessibility by **@danny-avila** in [#7147](https://github.com/danny-avila/LibreChat/pull/7147)
157
+
158
+ ### 🔧 Fixes
159
+
160
+ - 🐛 fix: Avatar Type Definitions in Agent/Assistant Schemas by **@danny-avila** in [#6235](https://github.com/danny-avila/LibreChat/pull/6235)
161
+ - 🔧 fix: MeiliSearch Field Error and Patch Incorrect Import by #6210 by **@rubentalstra** in [#6245](https://github.com/danny-avila/LibreChat/pull/6245)
162
+ - 🔏 fix: Enhance Two-Factor Authentication by **@rubentalstra** in [#6247](https://github.com/danny-avila/LibreChat/pull/6247)
163
+ - 🐛 fix: Await saveMessage in abortMiddleware to ensure proper execution by **@sh4shii** in [#6248](https://github.com/danny-avila/LibreChat/pull/6248)
164
+ - 🔧 fix: Axios Proxy Usage And Bump `mongoose` by **@danny-avila** in [#6298](https://github.com/danny-avila/LibreChat/pull/6298)
165
+ - 🔧 fix: comment out MCP servers to resolve service run issues by **@KunalScriptz** in [#6316](https://github.com/danny-avila/LibreChat/pull/6316)
166
+ - 🔧 fix: Update Token Calculations and Mapping, MCP `env` Initialization by **@danny-avila** in [#6406](https://github.com/danny-avila/LibreChat/pull/6406)
167
+ - 🐞 fix: Agent "Resend" Message Attachments + Source Icon Styling by **@danny-avila** in [#6408](https://github.com/danny-avila/LibreChat/pull/6408)
168
+ - 🐛 fix: Prevent Crash on Duplicate Message ID by **@Odrec** in [#6392](https://github.com/danny-avila/LibreChat/pull/6392)
169
+ - 🔐 fix: Invalid Key Length in 2FA Encryption by **@rubentalstra** in [#6432](https://github.com/danny-avila/LibreChat/pull/6432)
170
+ - 🏗️ fix: Fix Agents Token Spend Race Conditions, Expand Test Coverage by **@danny-avila** in [#6480](https://github.com/danny-avila/LibreChat/pull/6480)
171
+ - 🔃 fix: Draft Clearing, Claude Titles, Remove Default Vision Max Tokens by **@danny-avila** in [#6501](https://github.com/danny-avila/LibreChat/pull/6501)
172
+ - 🔧 fix: Update username reference to use user.name in greeting display by **@rubentalstra** in [#6534](https://github.com/danny-avila/LibreChat/pull/6534)
173
+ - 🔧 fix: S3 Download Stream with Key Extraction and Blob Storage Encoding for Vision by **@danny-avila** in [#6557](https://github.com/danny-avila/LibreChat/pull/6557)
174
+ - 🔧 fix: Mistral type strictness for `usage` & update token values/windows by **@danny-avila** in [#6562](https://github.com/danny-avila/LibreChat/pull/6562)
175
+ - 🔧 fix: Consolidate Text Parsing and TTS Edge Initialization by **@danny-avila** in [#6582](https://github.com/danny-avila/LibreChat/pull/6582)
176
+ - 🔧 fix: Ensure continuation in image processing on base64 encoding from Blob Storage by **@danny-avila** in [#6619](https://github.com/danny-avila/LibreChat/pull/6619)
177
+ - ✉️ fix: Fallback For User Name In Email Templates by **@danny-avila** in [#6620](https://github.com/danny-avila/LibreChat/pull/6620)
178
+ - 🔧 fix: Azure Blob Integration and File Source References by **@rubentalstra** in [#6575](https://github.com/danny-avila/LibreChat/pull/6575)
179
+ - 🐛 fix: Safeguard against undefined addedEndpoints by **@wipash** in [#6654](https://github.com/danny-avila/LibreChat/pull/6654)
180
+ - 🤖 fix: Gemini 2.5 Vision Support by **@danny-avila** in [#6663](https://github.com/danny-avila/LibreChat/pull/6663)
181
+ - 🔄 fix: Avatar & Error Handling Enhancements by **@danny-avila** in [#6687](https://github.com/danny-avila/LibreChat/pull/6687)
182
+ - 🔧 fix: Chat Middleware, Zod Conversion, Auto-Save and S3 URL Refresh by **@danny-avila** in [#6720](https://github.com/danny-avila/LibreChat/pull/6720)
183
+ - 🔧 fix: Agent Capability Checks & DocumentDB Compatibility for Agent Resource Removal by **@danny-avila** in [#6726](https://github.com/danny-avila/LibreChat/pull/6726)
184
+ - 🔄 fix: Improve audio MIME type detection and handling by **@berry-13** in [#6707](https://github.com/danny-avila/LibreChat/pull/6707)
185
+ - 🪺 fix: Update Role Handling due to New Schema Shape by **@danny-avila** in [#6774](https://github.com/danny-avila/LibreChat/pull/6774)
186
+ - 🗨️ fix: Show ModelSpec Greeting by **@berry-13** in [#6770](https://github.com/danny-avila/LibreChat/pull/6770)
187
+ - 🔧 fix: Keyv and Proxy Issues, and More Memory Optimizations by **@danny-avila** in [#6867](https://github.com/danny-avila/LibreChat/pull/6867)
188
+ - ✨ fix: Implement dynamic text sizing for greeting and name display by **@berry-13** in [#6833](https://github.com/danny-avila/LibreChat/pull/6833)
189
+ - 📝 fix: Mistral OCR Image Support and Azure Agent Titles by **@danny-avila** in [#6901](https://github.com/danny-avila/LibreChat/pull/6901)
190
+ - 📢 fix: Invalid `engineTTS` and Conversation State on Navigation by **@berry-13** in [#6904](https://github.com/danny-avila/LibreChat/pull/6904)
191
+ - 🛠️ fix: Improve Accessibility and Display of Conversation Menu by **@danny-avila** in [#6913](https://github.com/danny-avila/LibreChat/pull/6913)
192
+ - 🔧 fix: Agent Resource Form, Convo Menu Style, Ensure Draft Clears on Submission by **@danny-avila** in [#6925](https://github.com/danny-avila/LibreChat/pull/6925)
193
+ - 🔀 fix: MCP Improvements, Auto-Save Drafts, Artifact Markup by **@danny-avila** in [#7040](https://github.com/danny-avila/LibreChat/pull/7040)
194
+ - 🐋 fix: Improve Deepseek Compatbility by **@danny-avila** in [#7132](https://github.com/danny-avila/LibreChat/pull/7132)
195
+ - 🐙 fix: Add Redis Ping Interval to Prevent Connection Drops by **@peeeteeer** in [#7127](https://github.com/danny-avila/LibreChat/pull/7127)
196
+
197
+ ### ⚙️ Other Changes
198
+
199
+ - 📦 refactor: Move DB Models to `@librechat/data-schemas` by **@rubentalstra** in [#6210](https://github.com/danny-avila/LibreChat/pull/6210)
200
+ - 📦 chore: Patch `axios` to address CVE-2025-27152 by **@danny-avila** in [#6222](https://github.com/danny-avila/LibreChat/pull/6222)
201
+ - ⚠️ refactor: Use Error Content Part Instead Of Throwing Error for Agents by **@danny-avila** in [#6262](https://github.com/danny-avila/LibreChat/pull/6262)
202
+ - 🏃‍♂️ refactor: Improve Agent Run Context & Misc. Changes by **@danny-avila** in [#6448](https://github.com/danny-avila/LibreChat/pull/6448)
203
+ - 📝 docs: librechat.example.yaml by **@ineiti** in [#6442](https://github.com/danny-avila/LibreChat/pull/6442)
204
+ - 🏃‍♂️ refactor: More Agent Context Improvements during Run by **@danny-avila** in [#6477](https://github.com/danny-avila/LibreChat/pull/6477)
205
+ - 🔃 refactor: Allow streaming for `o1` models by **@danny-avila** in [#6509](https://github.com/danny-avila/LibreChat/pull/6509)
206
+ - 🔧 chore: `Vite` Plugin Upgrades & Config Optimizations by **@rubentalstra** in [#6547](https://github.com/danny-avila/LibreChat/pull/6547)
207
+ - 🔧 refactor: Consolidate Logging, Model Selection & Actions Optimizations, Minor Fixes by **@danny-avila** in [#6553](https://github.com/danny-avila/LibreChat/pull/6553)
208
+ - 🎨 style: Address Minor UI Refresh Issues by **@berry-13** in [#6552](https://github.com/danny-avila/LibreChat/pull/6552)
209
+ - 🔧 refactor: Enhance Model & Endpoint Configurations with Global Indicators 🌍 by **@berry-13** in [#6578](https://github.com/danny-avila/LibreChat/pull/6578)
210
+ - 💬 style: Chat UI, Greeting, and Message adjustments by **@berry-13** in [#6612](https://github.com/danny-avila/LibreChat/pull/6612)
211
+ - ⚡ refactor: DocumentDB Compatibility for Balance Updates by **@danny-avila** in [#6673](https://github.com/danny-avila/LibreChat/pull/6673)
212
+ - 🧹 chore: Update ESLint rules for React hooks by **@rubentalstra** in [#6685](https://github.com/danny-avila/LibreChat/pull/6685)
213
+ - 🪙 chore: Update Gemini Pricing by **@RedwindA** in [#6731](https://github.com/danny-avila/LibreChat/pull/6731)
214
+ - 🪺 refactor: Nest Permission fields for Roles by **@rubentalstra** in [#6487](https://github.com/danny-avila/LibreChat/pull/6487)
215
+ - 📦 chore: Update `caniuse-lite` dependency to version 1.0.30001706 by **@rubentalstra** in [#6482](https://github.com/danny-avila/LibreChat/pull/6482)
216
+ - ⚙️ refactor: OAuth Flow Signal, Type Safety, Tool Progress & Updated Packages by **@danny-avila** in [#6752](https://github.com/danny-avila/LibreChat/pull/6752)
217
+ - 📦 chore: bump vite from 6.2.3 to 6.2.5 by **@dependabot[bot]** in [#6745](https://github.com/danny-avila/LibreChat/pull/6745)
218
+ - 💾 chore: Enhance Local Storage Handling and Update MCP SDK by **@danny-avila** in [#6809](https://github.com/danny-avila/LibreChat/pull/6809)
219
+ - 🤖 refactor: Improve Agents Memory Usage, Bump Keyv, Grok 3 by **@danny-avila** in [#6850](https://github.com/danny-avila/LibreChat/pull/6850)
220
+ - 💾 refactor: Enhance Memory In Image Encodings & Client Disposal by **@danny-avila** in [#6852](https://github.com/danny-avila/LibreChat/pull/6852)
221
+ - 🔁 refactor: Token Event Handler and Standardize `maxTokens` Key by **@danny-avila** in [#6886](https://github.com/danny-avila/LibreChat/pull/6886)
222
+ - 🔍 refactor: Search & Message Retrieval by **@berry-13** in [#6903](https://github.com/danny-avila/LibreChat/pull/6903)
223
+ - 🎨 style: standardize dropdown styling & fix z-Index layering by **@berry-13** in [#6939](https://github.com/danny-avila/LibreChat/pull/6939)
224
+ - 📙 docs: CONTRIBUTING.md by **@dblock** in [#6831](https://github.com/danny-avila/LibreChat/pull/6831)
225
+ - 🧭 refactor: Modernize Nav/Header by **@danny-avila** in [#7094](https://github.com/danny-avila/LibreChat/pull/7094)
226
+ - 🪶 refactor: Chat Input Focus for Conversation Navigations & ChatForm Optimizations by **@danny-avila** in [#7100](https://github.com/danny-avila/LibreChat/pull/7100)
227
+ - 🔃 refactor: Streamline Navigation, Message Loading UX by **@danny-avila** in [#7118](https://github.com/danny-avila/LibreChat/pull/7118)
228
+ - 📜 docs: Unreleased changelog by **@github-actions[bot]** in [#6265](https://github.com/danny-avila/LibreChat/pull/6265)
229
+
230
+
231
+
232
+ [See full release details][release-v0.7.8-rc1]
233
+
234
+ [release-v0.7.8-rc1]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8-rc1
235
+
236
+ ---
Dockerfile ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # v0.8.1
2
+
3
+ # Base node image
4
+ FROM node:20-alpine AS node
5
+
6
+ # Install jemalloc
7
+ RUN apk add --no-cache jemalloc
8
+ RUN apk add --no-cache python3 py3-pip uv
9
+
10
+ # Set environment variable to use jemalloc
11
+ ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
12
+
13
+ # Add `uv` for extended MCP support
14
+ COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/
15
+ RUN uv --version
16
+
17
+ RUN mkdir -p /app && chown node:node /app
18
+ WORKDIR /app
19
+
20
+ USER node
21
+
22
+ COPY --chown=node:node package.json package-lock.json ./
23
+ COPY --chown=node:node api/package.json ./api/package.json
24
+ COPY --chown=node:node client/package.json ./client/package.json
25
+ COPY --chown=node:node packages/data-provider/package.json ./packages/data-provider/package.json
26
+ COPY --chown=node:node packages/data-schemas/package.json ./packages/data-schemas/package.json
27
+ COPY --chown=node:node packages/api/package.json ./packages/api/package.json
28
+
29
+ RUN \
30
+ # Allow mounting of these files, which have no default
31
+ touch .env ; \
32
+ # Create directories for the volumes to inherit the correct permissions
33
+ mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
34
+ npm config set fetch-retry-maxtimeout 600000 ; \
35
+ npm config set fetch-retries 5 ; \
36
+ npm config set fetch-retry-mintimeout 15000 ; \
37
+ npm ci --no-audit
38
+
39
+ COPY --chown=node:node . .
40
+
41
+ RUN \
42
+ # React client build
43
+ NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \
44
+ npm prune --production; \
45
+ npm cache clean --force
46
+
47
+ # Node API setup
48
+ EXPOSE 3080
49
+ ENV HOST=0.0.0.0
50
+ CMD ["npm", "run", "backend"]
51
+
52
+ # Optional: for client with nginx routing
53
+ # FROM nginx:stable-alpine AS nginx-client
54
+ # WORKDIR /usr/share/nginx/html
55
+ # COPY --from=node /app/client/dist /usr/share/nginx/html
56
+ # COPY client/nginx.conf /etc/nginx/conf.d/default.conf
57
+ # ENTRYPOINT ["nginx", "-g", "daemon off;"]
Dockerfile.multi ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dockerfile.multi
2
+ # v0.8.1
3
+
4
+ # Base for all builds
5
+ FROM node:20-alpine AS base-min
6
+ # Install jemalloc
7
+ RUN apk add --no-cache jemalloc
8
+ # Set environment variable to use jemalloc
9
+ ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2
10
+ WORKDIR /app
11
+ RUN apk --no-cache add curl
12
+ RUN npm config set fetch-retry-maxtimeout 600000 && \
13
+ npm config set fetch-retries 5 && \
14
+ npm config set fetch-retry-mintimeout 15000
15
+ COPY package*.json ./
16
+ COPY packages/data-provider/package*.json ./packages/data-provider/
17
+ COPY packages/api/package*.json ./packages/api/
18
+ COPY packages/data-schemas/package*.json ./packages/data-schemas/
19
+ COPY packages/client/package*.json ./packages/client/
20
+ COPY client/package*.json ./client/
21
+ COPY api/package*.json ./api/
22
+
23
+ # Install all dependencies for every build
24
+ FROM base-min AS base
25
+ WORKDIR /app
26
+ RUN npm ci
27
+
28
+ # Build `data-provider` package
29
+ FROM base AS data-provider-build
30
+ WORKDIR /app/packages/data-provider
31
+ COPY packages/data-provider ./
32
+ RUN npm run build
33
+
34
+ # Build `data-schemas` package
35
+ FROM base AS data-schemas-build
36
+ WORKDIR /app/packages/data-schemas
37
+ COPY packages/data-schemas ./
38
+ COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
39
+ RUN npm run build
40
+
41
+ # Build `api` package
42
+ FROM base AS api-package-build
43
+ WORKDIR /app/packages/api
44
+ COPY packages/api ./
45
+ COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
46
+ COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist
47
+ RUN npm run build
48
+
49
+ # Build `client` package
50
+ FROM base AS client-package-build
51
+ WORKDIR /app/packages/client
52
+ COPY packages/client ./
53
+ RUN npm run build
54
+
55
+ # Client build
56
+ FROM base AS client-build
57
+ WORKDIR /app/client
58
+ COPY client ./
59
+ COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
60
+ COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
61
+ COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
62
+ ENV NODE_OPTIONS="--max-old-space-size=2048"
63
+ RUN npm run build
64
+
65
+ # API setup (including client dist)
66
+ FROM base-min AS api-build
67
+ # Add `uv` for extended MCP support
68
+ COPY --from=ghcr.io/astral-sh/uv:0.6.13 /uv /uvx /bin/
69
+ RUN uv --version
70
+ WORKDIR /app
71
+ # Install only production deps
72
+ RUN npm ci --omit=dev
73
+ COPY api ./api
74
+ COPY config ./config
75
+ COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
76
+ COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist
77
+ COPY --from=api-package-build /app/packages/api/dist ./packages/api/dist
78
+ COPY --from=client-build /app/client/dist ./client/dist
79
+ WORKDIR /app/api
80
+ EXPOSE 3080
81
+ ENV HOST=0.0.0.0
82
+ CMD ["node", "server/index.js"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 LibreChat
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,10 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
- title: Chat
3
- emoji: 🦀
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
 
 
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <p align="center">
2
+ <a href="https://librechat.ai">
3
+ <img src="client/public/assets/logo.svg" height="256">
4
+ </a>
5
+ <h1 align="center">
6
+ <a href="https://librechat.ai">LibreChat</a>
7
+ </h1>
8
+ </p>
9
+
10
+ <p align="center">
11
+ <a href="https://discord.librechat.ai">
12
+ <img
13
+ src="https://img.shields.io/discord/1086345563026489514?label=&logo=discord&style=for-the-badge&logoWidth=20&logoColor=white&labelColor=000000&color=blueviolet">
14
+ </a>
15
+ <a href="https://www.youtube.com/@LibreChat">
16
+ <img
17
+ src="https://img.shields.io/badge/YOUTUBE-red.svg?style=for-the-badge&logo=youtube&logoColor=white&labelColor=000000&logoWidth=20">
18
+ </a>
19
+ <a href="https://docs.librechat.ai">
20
+ <img
21
+ src="https://img.shields.io/badge/DOCS-blue.svg?style=for-the-badge&logo=read-the-docs&logoColor=white&labelColor=000000&logoWidth=20">
22
+ </a>
23
+ <a aria-label="Sponsors" href="https://github.com/sponsors/danny-avila">
24
+ <img
25
+ src="https://img.shields.io/badge/SPONSORS-brightgreen.svg?style=for-the-badge&logo=github-sponsors&logoColor=white&labelColor=000000&logoWidth=20">
26
+ </a>
27
+ </p>
28
+
29
+ <p align="center">
30
+ <a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
31
+ <img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
32
+ </a>
33
+ <a href="https://zeabur.com/templates/0X2ZY8">
34
+ <img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30"/>
35
+ </a>
36
+ <a href="https://template.cloud.sealos.io/deploy?templateName=librechat">
37
+ <img src="https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg" alt="Deploy on Sealos" height="30">
38
+ </a>
39
+ </p>
40
+
41
+ <p align="center">
42
+ <a href="https://www.librechat.ai/docs/translation">
43
+ <img
44
+ src="https://img.shields.io/badge/dynamic/json.svg?style=for-the-badge&color=2096F3&label=locize&query=%24.translatedPercentage&url=https://api.locize.app/badgedata/4cb2598b-ed4d-469c-9b04-2ed531a8cb45&suffix=%+translated"
45
+ alt="Translation Progress">
46
+ </a>
47
+ </p>
48
+
49
+
50
+ # ✨ Features
51
+
52
+ - 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features
53
+
54
+ - 🤖 **AI Model Selection**:
55
+ - Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Responses API (incl. Azure)
56
+ - [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
57
+ - Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
58
+ - Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,
59
+ - OpenRouter, Helicone, Perplexity, ShuttleAI, DeepSeek, Qwen, and more
60
+
61
+ - 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**:
62
+ - Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran
63
+ - Seamless File Handling: Upload, process, and download files directly
64
+ - No Privacy Concerns: Fully isolated and secure execution
65
+
66
+ - 🔦 **Agents & Tools Integration**:
67
+ - **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**:
68
+ - No-Code Custom Assistants: Build specialized, AI-driven helpers
69
+ - Agent Marketplace: Discover and deploy community-built agents
70
+ - Collaborative Sharing: Share agents with specific users and groups
71
+ - Flexible & Extensible: Use MCP Servers, tools, file search, code execution, and more
72
+ - Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, Google, Vertex AI, Responses API, and more
73
+ - [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
74
+
75
+ - 🔍 **Web Search**:
76
+ - Search the internet and retrieve relevant information to enhance your AI context
77
+ - Combines search providers, content scrapers, and result rerankers for optimal results
78
+ - **Customizable Jina Reranking**: Configure custom Jina API URLs for reranking services
79
+ - **[Learn More →](https://www.librechat.ai/docs/features/web_search)**
80
+
81
+ - 🪄 **Generative UI with Code Artifacts**:
82
+ - [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat
83
+
84
+ - 🎨 **Image Generation & Editing**
85
+ - Text-to-image and image-to-image with [GPT-Image-1](https://www.librechat.ai/docs/features/image_gen#1--openai-image-tools-recommended)
86
+ - Text-to-image with [DALL-E (3/2)](https://www.librechat.ai/docs/features/image_gen#2--dalle-legacy), [Stable Diffusion](https://www.librechat.ai/docs/features/image_gen#3--stable-diffusion-local), [Flux](https://www.librechat.ai/docs/features/image_gen#4--flux), or any [MCP server](https://www.librechat.ai/docs/features/image_gen#5--model-context-protocol-mcp)
87
+ - Produce stunning visuals from prompts or refine existing images with a single instruction
88
+
89
+ - 💾 **Presets & Context Management**:
90
+ - Create, Save, & Share Custom Presets
91
+ - Switch between AI Endpoints and Presets mid-chat
92
+ - Edit, Resubmit, and Continue Messages with Conversation branching
93
+ - Create and share prompts with specific users and groups
94
+ - [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control
95
+
96
+ - 💬 **Multimodal & File Interactions**:
97
+ - Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini 📸
98
+ - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️
99
+
100
+ - 🌎 **Multilingual UI**:
101
+ - English, 中文 (简体), 中文 (繁體), العربية, Deutsch, Español, Français, Italiano
102
+ - Polski, Português (PT), Português (BR), Русский, 日本語, Svenska, 한국어, Tiếng Việt
103
+ - Türkçe, Nederlands, עברית, Català, Čeština, Dansk, Eesti, فارسی
104
+ - Suomi, Magyar, Հայերեն, Bahasa Indonesia, ქართული, Latviešu, ไทย, ئۇيغۇرچە
105
+
106
+ - 🧠 **Reasoning UI**:
107
+ - Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1
108
+
109
+ - 🎨 **Customizable Interface**:
110
+ - Customizable Dropdown & Interface that adapts to both power users and newcomers
111
+
112
+ - 🗣️ **Speech & Audio**:
113
+ - Chat hands-free with Speech-to-Text and Text-to-Speech
114
+ - Automatically send and play Audio
115
+ - Supports OpenAI, Azure OpenAI, and ElevenLabs
116
+
117
+ - 📥 **Import & Export Conversations**:
118
+ - Import Conversations from LibreChat, ChatGPT, Chatbot UI
119
+ - Export conversations as screenshots, markdown, text, json
120
+
121
+ - 🔍 **Search & Discovery**:
122
+ - Search all messages/conversations
123
+
124
+ - 👥 **Multi-User & Secure Access**:
125
+ - Multi-User, Secure Authentication with OAuth2, LDAP, & Email Login Support
126
+ - Built-in Moderation, and Token spend tools
127
+
128
+ - ⚙️ **Configuration & Deployment**:
129
+ - Configure Proxy, Reverse Proxy, Docker, & many Deployment options
130
+ - Use completely local or deploy on the cloud
131
+
132
+ - 📖 **Open-Source & Community**:
133
+ - Completely Open-Source & Built in Public
134
+ - Community-driven development, support, and feedback
135
+
136
+ [For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚
137
+
138
+ ## 🪶 All-In-One AI Conversations with LibreChat
139
+
140
+ LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
141
+
142
+ With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
143
+
144
+ [![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI)
145
+
146
+ Click on the thumbnail to open the video☝️
147
+
148
+ ---
149
+
150
+ ## 🌐 Resources
151
+
152
+ **GitHub Repo:**
153
+ - **RAG API:** [github.com/danny-avila/rag_api](https://github.com/danny-avila/rag_api)
154
+ - **Website:** [github.com/LibreChat-AI/librechat.ai](https://github.com/LibreChat-AI/librechat.ai)
155
+
156
+ **Other:**
157
+ - **Website:** [librechat.ai](https://librechat.ai)
158
+ - **Documentation:** [librechat.ai/docs](https://librechat.ai/docs)
159
+ - **Blog:** [librechat.ai/blog](https://librechat.ai/blog)
160
+
161
  ---
162
+
163
+ ## 📝 Changelog
164
+
165
+ Keep up with the latest updates by visiting the releases page and notes:
166
+ - [Releases](https://github.com/danny-avila/LibreChat/releases)
167
+ - [Changelog](https://www.librechat.ai/changelog)
168
+
169
+ **⚠️ Please consult the [changelog](https://www.librechat.ai/changelog) for breaking changes before updating.**
170
+
171
  ---
172
 
173
+ ## Star History
174
+
175
+ <p align="center">
176
+ <a href="https://star-history.com/#danny-avila/LibreChat&Date">
177
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date&theme=dark" onerror="this.src='https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date'" />
178
+ </a>
179
+ </p>
180
+ <p align="center">
181
+ <a href="https://trendshift.io/repositories/4685" target="_blank" style="padding: 10px;">
182
+ <img src="https://trendshift.io/api/badge/repositories/4685" alt="danny-avila%2FLibreChat | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/>
183
+ </a>
184
+ <a href="https://runacap.com/ross-index/q1-24/" target="_blank" rel="noopener" style="margin-left: 20px;">
185
+ <img style="width: 260px; height: 56px" src="https://runacap.com/wp-content/uploads/2024/04/ROSS_badge_white_Q1_2024.svg" alt="ROSS Index - Fastest Growing Open-Source Startups in Q1 2024 | Runa Capital" width="260" height="56"/>
186
+ </a>
187
+ </p>
188
+
189
+ ---
190
+
191
+ ## ✨ Contributions
192
+
193
+ Contributions, suggestions, bug reports and fixes are welcome!
194
+
195
+ For new features, components, or extensions, please open an issue and discuss before sending a PR.
196
+
197
+ If you'd like to help translate LibreChat into your language, we'd love your contribution! Improving our translations not only makes LibreChat more accessible to users around the world but also enhances the overall user experience. Please check out our [Translation Guide](https://www.librechat.ai/docs/translation).
198
+
199
+ ---
200
+
201
+ ## 💖 This project exists in its current state thanks to all the people who contribute
202
+
203
+ <a href="https://github.com/danny-avila/LibreChat/graphs/contributors">
204
+ <img src="https://contrib.rocks/image?repo=danny-avila/LibreChat" />
205
+ </a>
206
+
207
+ ---
208
+
209
+ ## 🎉 Special Thanks
210
+
211
+ We thank [Locize](https://locize.com) for their translation management tools that support multiple languages in LibreChat.
212
+
213
+ <p align="center">
214
+ <a href="https://locize.com" target="_blank" rel="noopener noreferrer">
215
+ <img src="https://github.com/user-attachments/assets/d6b70894-6064-475e-bb65-92a9e23e0077" alt="Locize Logo" height="50">
216
+ </a>
217
+ </p>
api/app/clients/AnthropicClient.js ADDED
@@ -0,0 +1,991 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ const Anthropic = require('@anthropic-ai/sdk');
2
+ const { logger } = require('@librechat/data-schemas');
3
+ const { HttpsProxyAgent } = require('https-proxy-agent');
4
+ const {
5
+ Constants,
6
+ ErrorTypes,
7
+ EModelEndpoint,
8
+ parseTextParts,
9
+ anthropicSettings,
10
+ getResponseSender,
11
+ validateVisionModel,
12
+ } = require('librechat-data-provider');
13
+ const { sleep, SplitStreamHandler: _Handler, addCacheControl } = require('@librechat/agents');
14
+ const {
15
+ Tokenizer,
16
+ createFetch,
17
+ matchModelName,
18
+ getClaudeHeaders,
19
+ getModelMaxTokens,
20
+ configureReasoning,
21
+ checkPromptCacheSupport,
22
+ getModelMaxOutputTokens,
23
+ createStreamEventHandlers,
24
+ } = require('@librechat/api');
25
+ const {
26
+ truncateText,
27
+ formatMessage,
28
+ titleFunctionPrompt,
29
+ parseParamFromPrompt,
30
+ createContextHandlers,
31
+ } = require('./prompts');
32
+ const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
33
+ const { encodeAndFormat } = require('~/server/services/Files/images/encode');
34
+ const BaseClient = require('./BaseClient');
35
+
36
/* Legacy Completions API role prefixes (pre-Messages API prompt format). */
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
38
+
39
/**
 * Stream handler specialized for Anthropic chunks: pulls text and
 * reasoning ("thinking") deltas out of both Messages API events and
 * legacy Completions API payloads.
 */
class SplitStreamHandler extends _Handler {
  /** Text delta from a Messages API event, or a legacy completion chunk; '' when absent. */
  getDeltaContent(chunk) {
    const text = chunk?.delta?.text ?? chunk?.completion;
    return text || '';
  }

  /** Reasoning ("thinking") delta; '' when absent. */
  getReasoningDelta(chunk) {
    return chunk?.delta?.thinking || '';
  }
}
47
+
48
/**
 * Linear-backoff helper: resolves after `baseDelay * attempts` milliseconds.
 * @param {number} attempts - Current retry attempt count (delay multiplier).
 * @param {number} [baseDelay=1000] - Base delay in milliseconds.
 * @returns {Promise<void>} Resolves once the delay has elapsed.
 */
function delayBeforeRetry(attempts, baseDelay = 1000) {
  const waitMs = baseDelay * attempts;
  return new Promise((done) => {
    setTimeout(done, waitMs);
  });
}
52
+
53
/* Anthropic stream event types that carry token usage data. */
const tokenEventTypes = new Set(['message_start', 'message_delta']);
/* Settings for legacy (pre-3.5) models, e.g. the lower max-output-token default. */
const { legacy } = anthropicSettings;
55
+
56
+ class AnthropicClient extends BaseClient {
57
+ constructor(apiKey, options = {}) {
58
+ super(apiKey, options);
59
+ this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
60
+ this.userLabel = HUMAN_PROMPT;
61
+ this.assistantLabel = AI_PROMPT;
62
+ this.contextStrategy = options.contextStrategy
63
+ ? options.contextStrategy.toLowerCase()
64
+ : 'discard';
65
+ this.setOptions(options);
66
+ /** @type {string | undefined} */
67
+ this.systemMessage;
68
+ /** @type {AnthropicMessageStartEvent| undefined} */
69
+ this.message_start;
70
+ /** @type {AnthropicMessageDeltaEvent| undefined} */
71
+ this.message_delta;
72
+ /** Whether the model is part of the Claude 3 Family
73
+ * @type {boolean} */
74
+ this.isClaudeLatest;
75
+ /** Whether to use Messages API or Completions API
76
+ * @type {boolean} */
77
+ this.useMessages;
78
+ /** Whether or not the model supports Prompt Caching
79
+ * @type {boolean} */
80
+ this.supportsCacheControl;
81
+ /** The key for the usage object's input tokens
82
+ * @type {string} */
83
+ this.inputTokensKey = 'input_tokens';
84
+ /** The key for the usage object's output tokens
85
+ * @type {string} */
86
+ this.outputTokensKey = 'output_tokens';
87
+ /** @type {SplitStreamHandler | undefined} */
88
+ this.streamHandler;
89
+ }
90
+
91
/**
 * Merges/applies client options, resolves model capabilities from the model
 * name, and validates the token budget. Throws when the configured prompt +
 * output tokens cannot fit in the model's context window.
 * @param {object} options - Client options (may include `modelOptions`).
 * @returns {this}
 */
setOptions(options) {
  // On repeat calls, merge into existing options unless a full replacement is requested.
  if (this.options && !this.options.replaceOptions) {
    // nested options aren't spread properly, so we need to do this manually
    this.options.modelOptions = {
      ...this.options.modelOptions,
      ...options.modelOptions,
    };
    delete options.modelOptions;
    // now we can merge options
    this.options = {
      ...this.options,
      ...options,
    };
  } else {
    this.options = options;
  }

  this.modelOptions = Object.assign(
    {
      model: anthropicSettings.model.default,
    },
    this.modelOptions,
    this.options.modelOptions,
  );

  const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
  // Claude 3+ (including the renamed sonnet/opus/haiku 4+ series) per these regexes.
  this.isClaudeLatest =
    /claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
  // Models outside these families get the lower legacy max-output cap below.
  const isLegacyOutput = !(
    /claude-3[-.]5-sonnet/.test(modelMatch) ||
    /claude-3[-.]7/.test(modelMatch) ||
    /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
    /claude-[4-9]/.test(modelMatch)
  );
  this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);

  // Clamp legacy models to the legacy max-output-token default.
  if (
    isLegacyOutput &&
    this.modelOptions.maxOutputTokens &&
    this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
  ) {
    this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
  }

  // Attachments force the Messages API even for models flagged non-latest.
  this.useMessages = this.isClaudeLatest || !!this.options.attachments;

  this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
  // Fire-and-forget: the vision check runs once attachments resolve.
  this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

  this.maxContextTokens =
    this.options.maxContextTokens ??
    getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ??
    100000;
  this.maxResponseTokens =
    this.modelOptions.maxOutputTokens ??
    getModelMaxOutputTokens(
      this.modelOptions.model,
      this.options.endpointType ?? this.options.endpoint,
      this.options.endpointTokenConfig,
    ) ??
    anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
  this.maxPromptTokens =
    this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

  // Validate the token budget up front so misconfiguration fails fast.
  const reservedTokens = this.maxPromptTokens + this.maxResponseTokens;
  if (reservedTokens > this.maxContextTokens) {
    const info = `Total Possible Tokens + Max Output Tokens must be less than or equal to Max Context Tokens: ${this.maxPromptTokens} (total possible output) + ${this.maxResponseTokens} (max output) = ${reservedTokens}/${this.maxContextTokens} (max context)`;
    const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
    logger.warn(info);
    throw new Error(errorMessage);
  } else if (this.maxResponseTokens === this.maxContextTokens) {
    const info = `Max Output Tokens must be less than Max Context Tokens: ${this.maxResponseTokens} (max output) = ${this.maxContextTokens} (max context)`;
    const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
    logger.warn(info);
    throw new Error(errorMessage);
  }

  this.sender =
    this.options.sender ??
    getResponseSender({
      model: this.modelOptions.model,
      endpoint: EModelEndpoint.anthropic,
      modelLabel: this.options.modelLabel,
    });

  this.startToken = '||>';
  this.endToken = '';

  return this;
}
181
+
182
+ /**
183
+ * Get the initialized Anthropic client.
184
+ * @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
185
+ * @returns {Anthropic} The Anthropic client instance.
186
+ */
187
+ getClient(requestOptions) {
188
+ /** @type {Anthropic.ClientOptions} */
189
+ const options = {
190
+ fetch: createFetch({
191
+ directEndpoint: this.options.directEndpoint,
192
+ reverseProxyUrl: this.options.reverseProxyUrl,
193
+ }),
194
+ apiKey: this.apiKey,
195
+ fetchOptions: {},
196
+ };
197
+
198
+ if (this.options.proxy) {
199
+ options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
200
+ }
201
+
202
+ if (this.options.reverseProxyUrl) {
203
+ options.baseURL = this.options.reverseProxyUrl;
204
+ }
205
+
206
+ const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
207
+ if (headers) {
208
+ options.defaultHeaders = headers;
209
+ }
210
+
211
+ return new Anthropic(options);
212
+ }
213
+
214
+ /**
215
+ * Get stream usage as returned by this client's API response.
216
+ * @returns {AnthropicStreamUsage} The stream usage object.
217
+ */
218
+ getStreamUsage() {
219
+ const inputUsage = this.message_start?.message?.usage ?? {};
220
+ const outputUsage = this.message_delta?.usage ?? {};
221
+ return Object.assign({}, inputUsage, outputUsage);
222
+ }
223
+
224
+ /**
225
+ * Calculates the correct token count for the current user message based on the token count map and API usage.
226
+ * Edge case: If the calculation results in a negative value, it returns the original estimate.
227
+ * If revisiting a conversation with a chat history entirely composed of token estimates,
228
+ * the cumulative token count going forward should become more accurate as the conversation progresses.
229
+ * @param {Object} params - The parameters for the calculation.
230
+ * @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
231
+ * @param {string} params.currentMessageId - The ID of the current message to calculate.
232
+ * @param {AnthropicStreamUsage} params.usage - The usage object returned by the API.
233
+ * @returns {number} The correct token count for the current user message.
234
+ */
235
+ calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
236
+ const originalEstimate = tokenCountMap[currentMessageId] || 0;
237
+
238
+ if (!usage || typeof usage.input_tokens !== 'number') {
239
+ return originalEstimate;
240
+ }
241
+
242
+ tokenCountMap[currentMessageId] = 0;
243
+ const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
244
+ const numCount = Number(count);
245
+ return sum + (isNaN(numCount) ? 0 : numCount);
246
+ }, 0);
247
+ const totalInputTokens =
248
+ (usage.input_tokens ?? 0) +
249
+ (usage.cache_creation_input_tokens ?? 0) +
250
+ (usage.cache_read_input_tokens ?? 0);
251
+
252
+ const currentMessageTokens = totalInputTokens - totalTokensFromMap;
253
+ return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
254
+ }
255
+
256
+ /**
257
+ * Get Token Count for LibreChat Message
258
+ * @param {TMessage} responseMessage
259
+ * @returns {number}
260
+ */
261
+ getTokenCountForResponse(responseMessage) {
262
+ return this.getTokenCountForMessage({
263
+ role: 'assistant',
264
+ content: responseMessage.text,
265
+ });
266
+ }
267
+
268
+ /**
269
+ *
270
+ * Checks if the model is a vision model based on request attachments and sets the appropriate options:
271
+ * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
272
+ * - Sets `this.isVisionModel` to `true` if vision request.
273
+ * - Deletes `this.modelOptions.stop` if vision request.
274
+ * @param {MongoFile[]} attachments
275
+ */
276
+ checkVisionRequest(attachments) {
277
+ const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
278
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
279
+
280
+ const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
281
+ if (
282
+ attachments &&
283
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
284
+ visionModelAvailable &&
285
+ !this.isVisionModel
286
+ ) {
287
+ this.modelOptions.model = this.defaultVisionModel;
288
+ this.isVisionModel = true;
289
+ }
290
+ }
291
+
292
+ /**
293
+ * Calculate the token cost in tokens for an image based on its dimensions and detail level.
294
+ *
295
+ * For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
296
+ *
297
+ * @param {Object} image - The image object.
298
+ * @param {number} image.width - The width of the image.
299
+ * @param {number} image.height - The height of the image.
300
+ * @returns {number} The calculated token cost measured by tokens.
301
+ *
302
+ */
303
+ calculateImageTokenCost({ width, height }) {
304
+ return Math.ceil((width * height) / 750);
305
+ }
306
+
307
+ async addImageURLs(message, attachments) {
308
+ const { files, image_urls } = await encodeAndFormat(this.options.req, attachments, {
309
+ endpoint: EModelEndpoint.anthropic,
310
+ });
311
+ message.image_urls = image_urls.length ? image_urls : undefined;
312
+ return files;
313
+ }
314
+
315
+ /**
316
+ * @param {object} params
317
+ * @param {number} params.promptTokens
318
+ * @param {number} params.completionTokens
319
+ * @param {AnthropicStreamUsage} [params.usage]
320
+ * @param {string} [params.model]
321
+ * @param {string} [params.context='message']
322
+ * @returns {Promise<void>}
323
+ */
324
+ async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) {
325
+ if (usage != null && usage?.input_tokens != null) {
326
+ const input = usage.input_tokens ?? 0;
327
+ const write = usage.cache_creation_input_tokens ?? 0;
328
+ const read = usage.cache_read_input_tokens ?? 0;
329
+
330
+ await spendStructuredTokens(
331
+ {
332
+ context,
333
+ user: this.user,
334
+ conversationId: this.conversationId,
335
+ model: model ?? this.modelOptions.model,
336
+ endpointTokenConfig: this.options.endpointTokenConfig,
337
+ },
338
+ {
339
+ promptTokens: { input, write, read },
340
+ completionTokens,
341
+ },
342
+ );
343
+
344
+ return;
345
+ }
346
+
347
+ await spendTokens(
348
+ {
349
+ context,
350
+ user: this.user,
351
+ conversationId: this.conversationId,
352
+ model: model ?? this.modelOptions.model,
353
+ endpointTokenConfig: this.options.endpointTokenConfig,
354
+ },
355
+ { promptTokens, completionTokens },
356
+ );
357
+ }
358
+
359
+ async buildMessages(messages, parentMessageId) {
360
+ const orderedMessages = this.constructor.getMessagesForConversation({
361
+ messages,
362
+ parentMessageId,
363
+ });
364
+
365
+ logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });
366
+
367
+ if (this.options.attachments) {
368
+ const attachments = await this.options.attachments;
369
+ const images = attachments.filter((file) => file.type.includes('image'));
370
+
371
+ if (images.length && !this.isVisionModel) {
372
+ throw new Error('Images are only supported with the Claude 3 family of models');
373
+ }
374
+
375
+ const latestMessage = orderedMessages[orderedMessages.length - 1];
376
+
377
+ if (this.message_file_map) {
378
+ this.message_file_map[latestMessage.messageId] = attachments;
379
+ } else {
380
+ this.message_file_map = {
381
+ [latestMessage.messageId]: attachments,
382
+ };
383
+ }
384
+
385
+ const files = await this.addImageURLs(latestMessage, attachments);
386
+
387
+ this.options.attachments = files;
388
+ }
389
+
390
+ if (this.message_file_map) {
391
+ this.contextHandlers = createContextHandlers(
392
+ this.options.req,
393
+ orderedMessages[orderedMessages.length - 1].text,
394
+ );
395
+ }
396
+
397
+ const formattedMessages = orderedMessages.map((message, i) => {
398
+ const formattedMessage = this.useMessages
399
+ ? formatMessage({
400
+ message,
401
+ endpoint: EModelEndpoint.anthropic,
402
+ })
403
+ : {
404
+ author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
405
+ content: message?.content ?? message.text,
406
+ };
407
+
408
+ const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
409
+ /* If tokens were never counted, or, is a Vision request and the message has files, count again */
410
+ if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
411
+ orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
412
+ }
413
+
414
+ /* If message has files, calculate image token cost */
415
+ if (this.message_file_map && this.message_file_map[message.messageId]) {
416
+ const attachments = this.message_file_map[message.messageId];
417
+ for (const file of attachments) {
418
+ if (file.embedded) {
419
+ this.contextHandlers?.processFile(file);
420
+ continue;
421
+ }
422
+ if (file.metadata?.fileIdentifier) {
423
+ continue;
424
+ }
425
+
426
+ orderedMessages[i].tokenCount += this.calculateImageTokenCost({
427
+ width: file.width,
428
+ height: file.height,
429
+ });
430
+ }
431
+ }
432
+
433
+ formattedMessage.tokenCount = orderedMessages[i].tokenCount;
434
+ return formattedMessage;
435
+ });
436
+
437
+ if (this.contextHandlers) {
438
+ this.augmentedPrompt = await this.contextHandlers.createContext();
439
+ this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
440
+ }
441
+
442
+ let { context: messagesInWindow, remainingContextTokens } =
443
+ await this.getMessagesWithinTokenLimit({ messages: formattedMessages });
444
+
445
+ const tokenCountMap = orderedMessages
446
+ .slice(orderedMessages.length - messagesInWindow.length)
447
+ .reduce((map, message, index) => {
448
+ const { messageId } = message;
449
+ if (!messageId) {
450
+ return map;
451
+ }
452
+
453
+ map[messageId] = orderedMessages[index].tokenCount;
454
+ return map;
455
+ }, {});
456
+
457
+ logger.debug('[AnthropicClient]', {
458
+ messagesInWindow: messagesInWindow.length,
459
+ remainingContextTokens,
460
+ });
461
+
462
+ let lastAuthor = '';
463
+ let groupedMessages = [];
464
+
465
+ for (let i = 0; i < messagesInWindow.length; i++) {
466
+ const message = messagesInWindow[i];
467
+ const author = message.role ?? message.author;
468
+ // If last author is not same as current author, add to new group
469
+ if (lastAuthor !== author) {
470
+ const newMessage = {
471
+ content: [message.content],
472
+ };
473
+
474
+ if (message.role) {
475
+ newMessage.role = message.role;
476
+ } else {
477
+ newMessage.author = message.author;
478
+ }
479
+
480
+ groupedMessages.push(newMessage);
481
+ lastAuthor = author;
482
+ // If same author, append content to the last group
483
+ } else {
484
+ groupedMessages[groupedMessages.length - 1].content.push(message.content);
485
+ }
486
+ }
487
+
488
+ groupedMessages = groupedMessages.map((msg, i) => {
489
+ const isLast = i === groupedMessages.length - 1;
490
+ if (msg.content.length === 1) {
491
+ const content = msg.content[0];
492
+ return {
493
+ ...msg,
494
+ // reason: final assistant content cannot end with trailing whitespace
495
+ content:
496
+ isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
497
+ ? content?.trim()
498
+ : content,
499
+ };
500
+ }
501
+
502
+ if (!this.useMessages && msg.tokenCount) {
503
+ delete msg.tokenCount;
504
+ }
505
+
506
+ return msg;
507
+ });
508
+
509
+ let identityPrefix = '';
510
+ if (this.options.userLabel) {
511
+ identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
512
+ }
513
+
514
+ if (this.options.modelLabel) {
515
+ identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
516
+ }
517
+
518
+ let promptPrefix = (this.options.promptPrefix ?? '').trim();
519
+ if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
520
+ promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
521
+ }
522
+ if (promptPrefix) {
523
+ // If the prompt prefix doesn't end with the end token, add it.
524
+ if (!promptPrefix.endsWith(`${this.endToken}`)) {
525
+ promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
526
+ }
527
+ promptPrefix = `\nContext:\n${promptPrefix}`;
528
+ }
529
+
530
+ if (identityPrefix) {
531
+ promptPrefix = `${identityPrefix}${promptPrefix}`;
532
+ }
533
+
534
+ // Prompt AI to respond, empty if last message was from AI
535
+ let isEdited = lastAuthor === this.assistantLabel;
536
+ const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
537
+ let currentTokenCount =
538
+ isEdited || this.useMessages
539
+ ? this.getTokenCount(promptPrefix)
540
+ : this.getTokenCount(promptSuffix);
541
+
542
+ let promptBody = '';
543
+ const maxTokenCount = this.maxPromptTokens;
544
+
545
+ const context = [];
546
+
547
+ // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
548
+ // Do this within a recursive async function so that it doesn't block the event loop for too long.
549
+ // Also, remove the next message when the message that puts us over the token limit is created by the user.
550
+ // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
551
+ const nextMessage = {
552
+ remove: false,
553
+ tokenCount: 0,
554
+ messageString: '',
555
+ };
556
+
557
+ const buildPromptBody = async () => {
558
+ if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
559
+ const message = groupedMessages.pop();
560
+ const isCreatedByUser = message.author === this.userLabel;
561
+ // Use promptPrefix if message is edited assistant'
562
+ const messagePrefix =
563
+ isCreatedByUser || !isEdited ? message.author : `${promptPrefix}${message.author}`;
564
+ const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
565
+ let newPromptBody = `${messageString}${promptBody}`;
566
+
567
+ context.unshift(message);
568
+
569
+ const tokenCountForMessage = this.getTokenCount(messageString);
570
+ const newTokenCount = currentTokenCount + tokenCountForMessage;
571
+
572
+ if (!isCreatedByUser) {
573
+ nextMessage.messageString = messageString;
574
+ nextMessage.tokenCount = tokenCountForMessage;
575
+ }
576
+
577
+ if (newTokenCount > maxTokenCount) {
578
+ if (!promptBody) {
579
+ // This is the first message, so we can't add it. Just throw an error.
580
+ throw new Error(
581
+ `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
582
+ );
583
+ }
584
+
585
+ // Otherwise, ths message would put us over the token limit, so don't add it.
586
+ // if created by user, remove next message, otherwise remove only this message
587
+ if (isCreatedByUser) {
588
+ nextMessage.remove = true;
589
+ }
590
+
591
+ return false;
592
+ }
593
+ promptBody = newPromptBody;
594
+ currentTokenCount = newTokenCount;
595
+
596
+ // Switch off isEdited after using it for the first time
597
+ if (isEdited) {
598
+ isEdited = false;
599
+ }
600
+
601
+ // wait for next tick to avoid blocking the event loop
602
+ await new Promise((resolve) => setImmediate(resolve));
603
+ return buildPromptBody();
604
+ }
605
+ return true;
606
+ };
607
+
608
+ const messagesPayload = [];
609
+ const buildMessagesPayload = async () => {
610
+ let canContinue = true;
611
+
612
+ if (promptPrefix) {
613
+ this.systemMessage = promptPrefix;
614
+ }
615
+
616
+ while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
617
+ const message = groupedMessages.pop();
618
+
619
+ let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
620
+
621
+ const newTokenCount = currentTokenCount + tokenCountForMessage;
622
+ const exceededMaxCount = newTokenCount > maxTokenCount;
623
+
624
+ if (exceededMaxCount && messagesPayload.length === 0) {
625
+ throw new Error(
626
+ `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
627
+ );
628
+ } else if (exceededMaxCount) {
629
+ canContinue = false;
630
+ break;
631
+ }
632
+
633
+ delete message.tokenCount;
634
+ messagesPayload.unshift(message);
635
+ currentTokenCount = newTokenCount;
636
+
637
+ // Switch off isEdited after using it once
638
+ if (isEdited && message.role === 'assistant') {
639
+ isEdited = false;
640
+ }
641
+
642
+ // Wait for next tick to avoid blocking the event loop
643
+ await new Promise((resolve) => setImmediate(resolve));
644
+ }
645
+ };
646
+
647
+ const processTokens = () => {
648
+ // Add 2 tokens for metadata after all messages have been counted.
649
+ currentTokenCount += 2;
650
+
651
+ // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
652
+ this.modelOptions.maxOutputTokens = Math.min(
653
+ this.maxContextTokens - currentTokenCount,
654
+ this.maxResponseTokens,
655
+ );
656
+ };
657
+
658
+ if (
659
+ /claude-[3-9]/.test(this.modelOptions.model) ||
660
+ /claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
661
+ ) {
662
+ await buildMessagesPayload();
663
+ processTokens();
664
+ return {
665
+ prompt: messagesPayload,
666
+ context: messagesInWindow,
667
+ promptTokens: currentTokenCount,
668
+ tokenCountMap,
669
+ };
670
+ } else {
671
+ await buildPromptBody();
672
+ processTokens();
673
+ }
674
+
675
+ if (nextMessage.remove) {
676
+ promptBody = promptBody.replace(nextMessage.messageString, '');
677
+ currentTokenCount -= nextMessage.tokenCount;
678
+ context.shift();
679
+ }
680
+
681
+ let prompt = `${promptBody}${promptSuffix}`;
682
+
683
+ return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
684
+ }
685
+
686
+ getCompletion() {
687
+ logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)");
688
+ }
689
+
690
+ /**
691
+ * Creates a message or completion response using the Anthropic client.
692
+ * @param {Anthropic} client - The Anthropic client instance.
693
+ * @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
694
+ * @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
695
+ * @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
696
+ */
697
+ async createResponse(client, options, useMessages) {
698
+ return (useMessages ?? this.useMessages)
699
+ ? await client.messages.create(options)
700
+ : await client.completions.create(options);
701
+ }
702
+
703
+ getMessageMapMethod() {
704
+ /**
705
+ * @param {TMessage} msg
706
+ */
707
+ return (msg) => {
708
+ if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
709
+ msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
710
+ } else if (msg.content != null) {
711
+ msg.text = parseTextParts(msg.content, true);
712
+ delete msg.content;
713
+ }
714
+
715
+ return msg;
716
+ };
717
+ }
718
+
719
+ /**
720
+ * @param {string[]} [intermediateReply]
721
+ * @returns {string}
722
+ */
723
+ getStreamText(intermediateReply) {
724
+ if (!this.streamHandler) {
725
+ return intermediateReply?.join('') ?? '';
726
+ }
727
+
728
+ const reasoningText = this.streamHandler.reasoningTokens.join('');
729
+
730
+ const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
731
+
732
+ return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
733
+ }
734
+
735
+ async sendCompletion(payload, { onProgress, abortController }) {
736
+ if (!abortController) {
737
+ abortController = new AbortController();
738
+ }
739
+
740
+ const { signal } = abortController;
741
+
742
+ const modelOptions = { ...this.modelOptions };
743
+ if (typeof onProgress === 'function') {
744
+ modelOptions.stream = true;
745
+ }
746
+
747
+ logger.debug('modelOptions', { modelOptions });
748
+ const metadata = {
749
+ user_id: this.user,
750
+ };
751
+
752
+ const {
753
+ stream,
754
+ model,
755
+ temperature,
756
+ maxOutputTokens,
757
+ stop: stop_sequences,
758
+ topP: top_p,
759
+ topK: top_k,
760
+ } = this.modelOptions;
761
+
762
+ let requestOptions = {
763
+ model,
764
+ stream: stream || true,
765
+ stop_sequences,
766
+ temperature,
767
+ metadata,
768
+ };
769
+
770
+ if (this.useMessages) {
771
+ requestOptions.messages = payload;
772
+ requestOptions.max_tokens =
773
+ maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
774
+ } else {
775
+ requestOptions.prompt = payload;
776
+ requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
777
+ }
778
+
779
+ requestOptions = configureReasoning(requestOptions, {
780
+ thinking: this.options.thinking,
781
+ thinkingBudget: this.options.thinkingBudget,
782
+ });
783
+
784
+ if (!/claude-3[-.]7/.test(model)) {
785
+ requestOptions.top_p = top_p;
786
+ requestOptions.top_k = top_k;
787
+ } else if (requestOptions.thinking == null) {
788
+ requestOptions.topP = top_p;
789
+ requestOptions.topK = top_k;
790
+ }
791
+
792
+ if (this.systemMessage && this.supportsCacheControl === true) {
793
+ requestOptions.system = [
794
+ {
795
+ type: 'text',
796
+ text: this.systemMessage,
797
+ cache_control: { type: 'ephemeral' },
798
+ },
799
+ ];
800
+ } else if (this.systemMessage) {
801
+ requestOptions.system = this.systemMessage;
802
+ }
803
+
804
+ if (this.supportsCacheControl === true && this.useMessages) {
805
+ requestOptions.messages = addCacheControl(requestOptions.messages);
806
+ }
807
+
808
+ logger.debug('[AnthropicClient]', { ...requestOptions });
809
+ const handlers = createStreamEventHandlers(this.options.res);
810
+ this.streamHandler = new SplitStreamHandler({
811
+ accumulate: true,
812
+ runId: this.responseMessageId,
813
+ handlers,
814
+ });
815
+
816
+ let intermediateReply = this.streamHandler.tokens;
817
+
818
+ const maxRetries = 3;
819
+ const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
820
+ async function processResponse() {
821
+ let attempts = 0;
822
+
823
+ while (attempts < maxRetries) {
824
+ let response;
825
+ try {
826
+ const client = this.getClient(requestOptions);
827
+ response = await this.createResponse(client, requestOptions);
828
+
829
+ signal.addEventListener('abort', () => {
830
+ logger.debug('[AnthropicClient] message aborted!');
831
+ if (response.controller?.abort) {
832
+ response.controller.abort();
833
+ }
834
+ });
835
+
836
+ for await (const completion of response) {
837
+ const type = completion?.type ?? '';
838
+ if (tokenEventTypes.has(type)) {
839
+ logger.debug(`[AnthropicClient] ${type}`, completion);
840
+ this[type] = completion;
841
+ }
842
+ this.streamHandler.handle(completion);
843
+ await sleep(streamRate);
844
+ }
845
+
846
+ break;
847
+ } catch (error) {
848
+ attempts += 1;
849
+ logger.warn(
850
+ `User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
851
+ );
852
+
853
+ if (attempts < maxRetries) {
854
+ await delayBeforeRetry(attempts, 350);
855
+ } else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
856
+ return this.getStreamText();
857
+ } else if (intermediateReply.length > 0) {
858
+ return this.getStreamText(intermediateReply);
859
+ } else {
860
+ throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
861
+ }
862
+ } finally {
863
+ signal.removeEventListener('abort', () => {
864
+ logger.debug('[AnthropicClient] message aborted!');
865
+ if (response.controller?.abort) {
866
+ response.controller.abort();
867
+ }
868
+ });
869
+ }
870
+ }
871
+ }
872
+
873
+ await processResponse.bind(this)();
874
+ return this.getStreamText(intermediateReply);
875
+ }
876
+
877
+ getSaveOptions() {
878
+ return {
879
+ maxContextTokens: this.options.maxContextTokens,
880
+ artifacts: this.options.artifacts,
881
+ promptPrefix: this.options.promptPrefix,
882
+ modelLabel: this.options.modelLabel,
883
+ promptCache: this.options.promptCache,
884
+ thinking: this.options.thinking,
885
+ thinkingBudget: this.options.thinkingBudget,
886
+ resendFiles: this.options.resendFiles,
887
+ iconURL: this.options.iconURL,
888
+ greeting: this.options.greeting,
889
+ spec: this.options.spec,
890
+ ...this.modelOptions,
891
+ };
892
+ }
893
+
894
+ getBuildMessagesOptions() {
895
+ logger.debug("AnthropicClient doesn't use getBuildMessagesOptions");
896
+ }
897
+
898
+ getEncoding() {
899
+ return 'cl100k_base';
900
+ }
901
+
902
+ /**
903
+ * Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
904
+ * @param {string} text - The text to get the token count for.
905
+ * @returns {number} The token count of the given text.
906
+ */
907
+ getTokenCount(text) {
908
+ const encoding = this.getEncoding();
909
+ return Tokenizer.getTokenCount(text, encoding);
910
+ }
911
+
912
+ /**
913
+ * Generates a concise title for a conversation based on the user's input text and response.
914
+ * Involves sending a chat completion request with specific instructions for title generation.
915
+ *
916
+ * This function capitlizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
917
+ *
918
+ * @param {Object} params - The parameters for the conversation title generation.
919
+ * @param {string} params.text - The user's input.
920
+ * @param {string} [params.responseText=''] - The AI's immediate response to the user.
921
+ *
922
+ * @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
923
+ * In case of failure, it will return the default title, "New Chat".
924
+ */
925
+ async titleConvo({ text, responseText = '' }) {
926
+ let title = 'New Chat';
927
+ this.message_delta = undefined;
928
+ this.message_start = undefined;
929
+ const convo = `<initial_message>
930
+ ${truncateText(text)}
931
+ </initial_message>
932
+ <response>
933
+ ${JSON.stringify(truncateText(responseText))}
934
+ </response>`;
935
+
936
+ const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
937
+ const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
938
+ const system = titleFunctionPrompt;
939
+
940
+ const titleChatCompletion = async () => {
941
+ const content = `<conversation_context>
942
+ ${convo}
943
+ </conversation_context>
944
+
945
+ Please generate a title for this conversation.`;
946
+
947
+ const titleMessage = { role: 'user', content };
948
+ const requestOptions = {
949
+ model,
950
+ temperature: 0.3,
951
+ max_tokens: 1024,
952
+ system,
953
+ stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
954
+ messages: [titleMessage],
955
+ };
956
+
957
+ try {
958
+ const response = await this.createResponse(
959
+ this.getClient(requestOptions),
960
+ requestOptions,
961
+ true,
962
+ );
963
+ let promptTokens = response?.usage?.input_tokens;
964
+ let completionTokens = response?.usage?.output_tokens;
965
+ if (!promptTokens) {
966
+ promptTokens = this.getTokenCountForMessage(titleMessage);
967
+ promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
968
+ }
969
+ if (!completionTokens) {
970
+ completionTokens = this.getTokenCountForMessage(response.content[0]);
971
+ }
972
+ await this.recordTokenUsage({
973
+ model,
974
+ promptTokens,
975
+ completionTokens,
976
+ context: 'title',
977
+ });
978
+ const text = response.content[0].text;
979
+ title = parseParamFromPrompt(text, 'title');
980
+ } catch (e) {
981
+ logger.error('[AnthropicClient] There was an issue generating the title', e);
982
+ }
983
+ };
984
+
985
+ await titleChatCompletion();
986
+ logger.debug('[AnthropicClient] Convo Title: ' + title);
987
+ return title;
988
+ }
989
+ }
990
+
991
+ module.exports = AnthropicClient;