harl00.github.io committed on
Commit
7edc0ac
·
1 Parent(s): b6bf934

update to librechat.yaml to comment out additional llms

Browse files
Files changed (1) hide show
  1. librechat.yaml +786 -64
librechat.yaml CHANGED
@@ -1,70 +1,621 @@
1
- # Configuration version (required)
2
- version: 1.0.0
3
 
4
- # Cache settings: Set to true to enable caching
5
  cache: true
6
 
7
- # Definition of custom endpoints
 
 
8
  endpoints:
9
  custom:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # Mistral AI API
11
- - name: "Mistral" # Unique name for the endpoint
12
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
13
- # recommended environment variables:
14
  apiKey: "${MISTRAL_API_KEY}"
15
  baseURL: "https://api.mistral.ai/v1"
16
-
17
- # Models configuration
18
  models:
19
- # List of default models to use. At least one value is required.
20
- default: ["mistral-tiny", "mistral-small", "mistral-medium"]
21
- # Fetch option: Set to true to fetch models from API.
22
- fetch: true # Defaults to false.
23
-
24
- # Optional configurations
25
-
26
- # Title Conversation setting
27
- titleConvo: true # Set to true to enable title conversation
28
-
29
- # Title Method: Choose between "completion" or "functions".
30
- titleMethod: "completion" # Defaults to "completion" if omitted.
31
-
32
- # Title Model: Specify the model to use for titles.
33
- titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
34
-
35
- # Summarize setting: Set to true to enable summarization.
36
  summarize: false
37
-
38
- # Summary Model: Specify the model to use if summarization is enabled.
39
- summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
40
-
41
- # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
42
  forcePrompt: false
 
 
43
 
44
- # The label displayed for the AI model in messages.
45
- modelDisplayLabel: "Mistral" # Default is "AI" when not set.
46
-
47
- # Add additional parameters to the request. Default params will be overwritten.
48
- addParams:
49
- safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
50
-
51
- # Drop Default params parameters from the request. See default params in guide linked below.
52
- dropParams: ["stop", "temperature", "top_p"]
53
- # - stop # dropped since it's not recognized by Mistral AI API
54
- # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
55
- # - temperature
56
- # - top_p
57
-
58
- # OpenRouter.ai Example
59
  - name: "OpenRouter"
60
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
61
- # recommended environment variables:
62
- # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
63
  apiKey: "${OPENROUTER_KEY}"
64
  baseURL: "https://openrouter.ai/api/v1"
65
  models:
66
- default: ["nousresearch/nous-capybara-7b:free", "mistralai/mistral-7b-instruct:free", "huggingfaceh4/zephyr-7b-beta:free", "openchat/openchat-7b:free", "gryphe/mythomist-7b:free", "undi95/toppy-m-7b:free", "openrouter/cinematika-7b:free", "openrouter/auto", "nousresearch/nous-capybara-7b", "mistralai/mistral-7b-instruct", "huggingfaceh4/zephyr-7b-beta", "openchat/openchat-7b", "gryphe/mythomist-7b", "openrouter/cinematika-7b", "rwkv/rwkv-5-world-3b", "recursal/rwkv-5-3b-ai-town", "jondurbin/bagel-34b", "jebcarter/psyfighter-13b", "koboldai/psyfighter-13b-2", "neversleep/noromaid-mixtral-8x7b-instruct", "nousresearch/nous-hermes-llama2-13b", "meta-llama/codellama-34b-instruct", "phind/phind-codellama-34b", "intel/neural-chat-7b", "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", "nousresearch/nous-hermes-2-mixtral-8x7b-sft", "haotian-liu/llava-13b", "nousresearch/nous-hermes-2-vision-7b", "meta-llama/llama-2-13b-chat", "gryphe/mythomax-l2-13b", "nousresearch/nous-hermes-llama2-70b", "teknium/openhermes-2-mistral-7b", "teknium/openhermes-2.5-mistral-7b", "undi95/remm-slerp-l2-13b", "undi95/toppy-m-7b", "01-ai/yi-34b-chat", "01-ai/yi-34b", "01-ai/yi-6b", "togethercomputer/stripedhyena-nous-7b", "togethercomputer/stripedhyena-hessian-7b", "mistralai/mixtral-8x7b", "nousresearch/nous-hermes-yi-34b", "open-orca/mistral-7b-openorca", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-16k", "openai/gpt-4-1106-preview", "openai/gpt-4", "openai/gpt-4-32k", "openai/gpt-4-vision-preview", "openai/gpt-3.5-turbo-instruct", "google/palm-2-chat-bison", "google/palm-2-codechat-bison", "google/palm-2-chat-bison-32k", "google/palm-2-codechat-bison-32k", "google/gemini-pro", "google/gemini-pro-vision", "perplexity/pplx-70b-online", "perplexity/pplx-7b-online", "perplexity/pplx-7b-chat", "perplexity/pplx-70b-chat", "meta-llama/llama-2-70b-chat", "nousresearch/nous-capybara-34b", "jondurbin/airoboros-l2-70b", "austism/chronos-hermes-13b", "migtissera/synthia-70b", 
"pygmalionai/mythalion-13b", "undi95/remm-slerp-l2-13b-6k", "xwin-lm/xwin-lm-70b", "gryphe/mythomax-l2-13b-8k", "alpindale/goliath-120b ", "lizpreciatior/lzlv-70b-fp16-hf", "neversleep/noromaid-20b", "mistralai/mixtral-8x7b-instruct", "cognitivecomputations/dolphin-mixtral-8x7b", "anthropic/claude-2", "anthropic/claude-2.0", "anthropic/claude-instant-v1", "mancer/weaver", "mistralai/mistral-tiny", "mistralai/mistral-small", "mistralai/mistral-medium"]
67
- fetch: true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  titleConvo: true
69
  titleModel: "gpt-3.5-turbo"
70
  summarize: false
@@ -72,21 +623,192 @@ endpoints:
72
  forcePrompt: false
73
  modelDisplayLabel: "OpenRouter"
74
 
75
- - name: "Reverse Proxy"
76
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
77
- # recommended environment variables:
78
- # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
79
- apiKey: "user_provided"
80
- baseURL: "user_provided"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  models:
82
- default: ["gpt-3.5-turbo"]
83
- fetch: true
 
 
 
 
 
 
84
  titleConvo: true
85
- titleModel: "gpt-3.5-turbo"
86
  summarize: false
87
- summaryModel: "gpt-3.5-turbo"
88
  forcePrompt: false
89
- modelDisplayLabel: "AI"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
- # See the Custom Configuration Guide for more information:
92
- # https://docs.librechat.ai/install/configuration/custom_config.html
 
1
+ version: 1.1.2
 
2
 
 
3
  cache: true
4
 
5
+ registration:
6
+ socialLogins: ["google"]
7
+
8
  endpoints:
9
  custom:
10
+ # Anyscale
11
+ - name: "Anyscale"
12
+ apiKey: "${ANYSCALE_API_KEY}"
13
+ baseURL: "https://api.endpoints.anyscale.com/v1"
14
+ models:
15
+ default: [
16
+ "meta-llama/Llama-2-7b-chat-hf",
17
+ "meta-llama/Llama-2-13b-chat-hf",
18
+ "meta-llama/Llama-2-70b-chat-hf",
19
+ "codellama/CodeLlama-34b-Instruct-hf",
20
+ "codellama/CodeLlama-70b-Instruct-hf",
21
+ "mistralai/Mistral-7B-Instruct-v0.1",
22
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
23
+ "mlabonne/NeuralHermes-2.5-Mistral-7B",
24
+ "Open-Orca/Mistral-7B-OpenOrca",
25
+ "HuggingFaceH4/zephyr-7b-beta",
26
+ "google/gemma-7b-it"
27
+ ]
28
+ fetch: false
29
+ titleConvo: true
30
+ titleModel: "meta-llama/Llama-2-7b-chat-hf"
31
+ summarize: false
32
+ summaryModel: "meta-llama/Llama-2-7b-chat-hf"
33
+ forcePrompt: false
34
+ modelDisplayLabel: "Anyscale"
35
+
36
+ # APIpie
37
+ # # - name: "APIpie"
38
+ # apiKey: "${APIPIE_API_KEY}"
39
+ # baseURL: "https://apipie.ai/v1/"
40
+ # models:
41
+ # default: [
42
+ # "BioM-ELECTRA-Large-SQuAD2",
43
+ # "Bio_ClinicalBERT",
44
+ # "Bio_Discharge_Summary_BERT",
45
+ # "BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext",
46
+ # "GPT-JT-Moderation-6B",
47
+ # "German-MedBERT",
48
+ # "LLaMA-2-7B-32K",
49
+ # "Llama-2-13b-chat-hf",
50
+ # "Llama-2-13b-hf",
51
+ # "Llama-2-70b-chat-hf",
52
+ # "Llama-2-70b-hf",
53
+ # "Llama-2-7B-32K-Instruct",
54
+ # "Llama-2-7b-chat-hf",
55
+ # "Llama-2-7b-hf",
56
+ # "Meta-Llama-3-70B-Instruct",
57
+ # "Meta-Llama-3-8B-Instruct",
58
+ # "Mistral-7B-Instruct-v0.1",
59
+ # "Mistral-7B-Instruct-v0.2",
60
+ # "Mistral-7B-OpenOrca",
61
+ # "Mixtral-8x22B-Instruct-v0.1",
62
+ # "Mixtral-8x22B-v0.1",
63
+ # "Mixtral-8x7B-Instruct-v0.1",
64
+ # "Mixtral-8x7B-v0.1",
65
+ # "MythoMax-L2-13b",
66
+ # "NexusRaven-V2-13B",
67
+ # "Nous-Hermes-2-Mixtral-8x7B-DPO",
68
+ # "Nous-Hermes-2-Mixtral-8x7B-SFT",
69
+ # "Nous-Hermes-Llama2-13b",
70
+ # "Nous-Hermes-llama-2-7b",
71
+ # "ReMM-SLERP-L2-13B",
72
+ # "RedPajama-INCITE-7B-Base",
73
+ # "RedPajama-INCITE-7B-Chat",
74
+ # "RedPajama-INCITE-7B-Instruct",
75
+ # "RedPajama-INCITE-Base-3B-v1",
76
+ # "RedPajama-INCITE-Chat-3B-v1",
77
+ # "RedPajama-INCITE-Instruct-3B-v1",
78
+ # "SecBERT",
79
+ # "TinyLlama-1.1B-Chat-v1.0",
80
+ # "Toppy-M-7B",
81
+ # "WizardLM-2-7B",
82
+ # "WizardLM-2-8x22B",
83
+ # "Yi-34B-Chat",
84
+ # "airoboros-70b",
85
+ # "airoboros-l2-70b",
86
+ # "albert-base-chinese-cluecorpussmall",
87
+ # "albert-base-v1",
88
+ # "albert-base-v2",
89
+ # "alpaca-7b",
90
+ # "babbage-002",
91
+ # "bert-base",
92
+ # "bert-base-arabertv02",
93
+ # "bert-base-cased",
94
+ # "bert-base-chinese",
95
+ # "bert-base-german-cased",
96
+ # "bert-base-multilingual-cased",
97
+ # "bert-base-multilingual-uncased",
98
+ # "bert-base-portuguese-cased",
99
+ # "bert-base-swedish-cased",
100
+ # "bert-base-uncased",
101
+ # "bert-base-uncased-squad-v1",
102
+ # "bert-large-cased",
103
+ # "bert-large-portuguese-cased",
104
+ # "bert-large-uncased",
105
+ # "bert-large-uncased-whole-word-masking-finetuned-squad",
106
+ # "bert-large-uncased-whole-word-masking-squad2",
107
+ # "biobert-base-cased-v1.2",
108
+ # "chat-bison",
109
+ # "chinese-bert-wwm-ext",
110
+ # "chinese-roberta-wwm-ext",
111
+ # "chronos-hermes-13b",
112
+ # "chronos-hermes-13b-v2",
113
+ # "cinematika-7b",
114
+ # "claude-1",
115
+ # "claude-1.2",
116
+ # "claude-2",
117
+ # "claude-2.0",
118
+ # "claude-2.1",
119
+ # "claude-3-haiku",
120
+ # "claude-3-opus",
121
+ # "claude-3-sonnet",
122
+ # "claude-instant-1",
123
+ # "claude-instant-1.0",
124
+ # "claude-instant-1.1",
125
+ # "claude-instant-1.2",
126
+ # "command",
127
+ # "command-light",
128
+ # "command-light-nightly",
129
+ # "command-light-text-v14",
130
+ # "command-nightly",
131
+ # "command-r",
132
+ # "command-r-plus",
133
+ # "command-r-plus-v1",
134
+ # "command-r-v1",
135
+ # "command-text-v14",
136
+ # "davinci-002",
137
+ # "dbrx-instruct",
138
+ # "deberta-base",
139
+ # "deberta-v2-xlarge",
140
+ # "deberta-v3-base",
141
+ # "deepseek-chat",
142
+ # "distilbert-base-cased-distilled-squad",
143
+ # "distilbert-base-multilingual-cased",
144
+ # "distilbert-base-uncased",
145
+ # "distilbert-base-uncased-distilled-squad",
146
+ # "dolphin-2.5-mixtral-8x7b",
147
+ # "dolphin-2.6-mixtral-8x7b",
148
+ # "dolphin-mixtral-8x7b",
149
+ # "eagle-7b",
150
+ # "fimbulvetr-11b-v2",
151
+ # "firellava-13b",
152
+ # "gemini-flash-1.5",
153
+ # "gemini-pro",
154
+ # "gemini-pro-1.5",
155
+ # "gemini-pro-vision",
156
+ # "gemma-1.1-7b-it",
157
+ # "gemma-7b-it",
158
+ # "goliath-120b",
159
+ # "gpt-3.5-turbo",
160
+ # "gpt-3.5-turbo-0125",
161
+ # "gpt-3.5-turbo-0301",
162
+ # "gpt-3.5-turbo-0613",
163
+ # "gpt-3.5-turbo-1106",
164
+ # "gpt-3.5-turbo-16k",
165
+ # "gpt-3.5-turbo-16k-0613",
166
+ # "gpt-3.5-turbo-instruct",
167
+ # "gpt-3.5-turbo-instruct-0914",
168
+ # "gpt-4",
169
+ # "gpt-4-0125-preview",
170
+ # "gpt-4-0314",
171
+ # "gpt-4-0613",
172
+ # "gpt-4-1106-preview",
173
+ # "gpt-4-1106-vision-preview",
174
+ # "gpt-4-32k",
175
+ # "gpt-4-32k-0314",
176
+ # "gpt-4-turbo",
177
+ # "gpt-4-turbo-2024-04-09",
178
+ # "gpt-4-turbo-preview",
179
+ # "gpt-4-vision-preview",
180
+ # "gpt-4o",
181
+ # "gpt-4o-2024-05-13",
182
+ # "gpt-4o-test-shared",
183
+ # "hermes-2-pro-llama-3-8b",
184
+ # "j2-grande-instruct",
185
+ # "j2-jumbo-instruct",
186
+ # "j2-mid",
187
+ # "j2-mid-v1",
188
+ # "j2-ultra",
189
+ # "j2-ultra-v1",
190
+ # "koelectra-small-v2-distilled-korquad-384",
191
+ # "large-latest",
192
+ # "legal-bert-base-uncased",
193
+ # "legal-bert-small-uncased",
194
+ # "llama-2-13b-chat",
195
+ # "llama-2-70b-chat",
196
+ # "llama-3-70b",
197
+ # "llama-3-70b-instruct",
198
+ # "llama-3-8b",
199
+ # "llama-3-8b-instruct",
200
+ # "llama-3-lumimaid-70b",
201
+ # "llama-3-lumimaid-8b",
202
+ # "llama-3-sonar-large-32k-chat",
203
+ # "llama-3-sonar-large-32k-online",
204
+ # "llama-3-sonar-small-32k-chat",
205
+ # "llama-3-sonar-small-32k-online",
206
+ # "llama-guard-2-8b",
207
+ # "llama2-13b-chat-v1",
208
+ # "llama2-70b-chat-v1",
209
+ # "llama3-70b-instruct-v1",
210
+ # "llama3-8b-instruct-v1",
211
+ # "llava-1.5-7b-hf",
212
+ # "llava-13b",
213
+ # "llava-yi-34b",
214
+ # "lzlv-70b-fp16-hf",
215
+ # "lzlv_70b_fp16_hf",
216
+ # "medium",
217
+ # "midnight-rose-70b",
218
+ # "minilm-uncased-squad2",
219
+ # "mistral-7b-instruct",
220
+ # "mistral-7b-instruct-v0",
221
+ # "mistral-7b-instruct-v0.1",
222
+ # "mistral-7b-instruct-v0.2",
223
+ # "mistral-7b-instruct-v0.3",
224
+ # "mistral-large",
225
+ # "mistral-medium",
226
+ # "mistral-small",
227
+ # "mistral-small-2402-v1",
228
+ # "mistral-tiny",
229
+ # "mixtral-8x22b",
230
+ # "mixtral-8x22b-instruct",
231
+ # "mixtral-8x22b-instruct-preview",
232
+ # "mixtral-8x7b",
233
+ # "mixtral-8x7b-instruct",
234
+ # "mixtral-8x7b-instruct-v0",
235
+ # "mythalion-13b",
236
+ # "mythomax-l2-13b",
237
+ # "mythomist-7b",
238
+ # "neural-chat-7b",
239
+ # "noromaid-20b",
240
+ # "noromaid-mixtral-8x7b-instruct",
241
+ # "nous-capybara-34b",
242
+ # "nous-capybara-7b",
243
+ # "nous-hermes-2-mistral-7b-dpo",
244
+ # "nous-hermes-2-mixtral-8x7b-dpo",
245
+ # "nous-hermes-2-mixtral-8x7b-sft",
246
+ # "nous-hermes-2-vision-7b",
247
+ # "nous-hermes-llama2-13b",
248
+ # "nous-hermes-yi-34b",
249
+ # "olmo-7b-instruct",
250
+ # "olympus-premier-v1",
251
+ # "openchat-3.5-1210",
252
+ # "openchat-7b",
253
+ # "openchat_3.5",
254
+ # "openhermes-2-mistral-7b",
255
+ # "openhermes-2.5-mistral-7b",
256
+ # "palm-2-chat-bison",
257
+ # "palm-2-chat-bison-32k",
258
+ # "phi-2",
259
+ # "phi-3-medium-128k-instruct",
260
+ # "phi-3-mini-128k-instruct",
261
+ # "pplx-70b-chat",
262
+ # "pplx-70b-online",
263
+ # "pplx-7b-chat",
264
+ # "pplx-7b-online",
265
+ # "prot_bert",
266
+ # "prot_bert_bfd",
267
+ # "psyfighter-13b-2",
268
+ # "qwen-110b-chat",
269
+ # "qwen-14b-chat",
270
+ # "qwen-32b-chat",
271
+ # "qwen-4b-chat",
272
+ # "qwen-72b-chat",
273
+ # "qwen-7b-chat",
274
+ # "remm-slerp-l2-13b",
275
+ # "roberta-base-squad2",
276
+ # "roberta-base-squad2-covid",
277
+ # "roberta-large-squad2",
278
+ # "small",
279
+ # "snowflake-arctic-instruct",
280
+ # "soliloquy-l3",
281
+ # "sonar-medium-chat",
282
+ # "sonar-medium-online",
283
+ # "sonar-small-chat",
284
+ # "sonar-small-online",
285
+ # "splade-cocondenser-ensembledistil",
286
+ # "stripedhyena-hessian-7b",
287
+ # "stripedhyena-nous-7b",
288
+ # "synthia-70b",
289
+ # "text-babbage-002",
290
+ # "text-bison",
291
+ # "text-davinci-002",
292
+ # "tiny",
293
+ # "tinyroberta-squad2",
294
+ # "titan-text-express-v1",
295
+ # "titan-text-lite-v1",
296
+ # "titan-text-premier-v1",
297
+ # "titan-tg1-large",
298
+ # "toppy-m-7b",
299
+ # "vicuna-13b-v1.5",
300
+ # "vicuna-7b-v1.5",
301
+ # "weaver",
302
+ # "wizardlm-2-7b",
303
+ # "wizardlm-2-8x22b",
304
+ # "xwin-lm-70b",
305
+ # "yi-34b",
306
+ # "yi-34b-chat",
307
+ # "yi-6b",
308
+ # "zephyr-7b-beta",
309
+ # "zephyr-orpo-141b-A35b-v0.1"
310
+ # ]
311
+ # fetch: false
312
+ # titleConvo: true
313
+ # titleModel: "claude-3-haiku"
314
+ # summarize: false
315
+ # summaryModel: "claude-3-haiku"
316
+ # dropParams: ["stream"]
317
+ # modelDisplayLabel: "APIpie"
318
+ # iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/APIpie.png"
319
+
320
+ #cohere
321
+ # - name: "cohere"
322
+ # apiKey: "${COHERE_API_KEY}"
323
+ # baseURL: "https://api.cohere.ai/v1"
324
+ # models:
325
+ # default: ["command-r","command-r-plus","command-light","command-light-nightly","command","command-nightly"]
326
+ # fetch: false
327
+ # modelDisplayLabel: "cohere"
328
+ # titleModel: "command"
329
+ # dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]
330
+
331
+ # DEEPNIGHT
332
+ # - name: "DEEPNIGHT"
333
+ # apiKey: "sk-free1234"
334
+ # baseURL: "https://aiforcause.deepnight.tech/openai/"
335
+ # models:
336
+ # default: [
337
+ # "gpt-35-turbo",
338
+ # "gpt-35-turbo-16k",
339
+ # "gpt-4-turbo"
340
+ # ]
341
+ # fetch: false
342
+ # titleConvo: true
343
+ # titleModel: "gpt-35-turbo"
344
+ # summarize: false
345
+ # summaryModel: "gpt-35-turbo"
346
+ # forcePrompt: false
347
+ # modelDisplayLabel: "DEEPNIGHT"
348
+ # addParams:
349
+ # stream: True
350
+ # iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/DEEPNIGHT.png"
351
+
352
+ # Fireworks.ai
353
+ # - name: "Fireworks"
354
+ # apiKey: "${FIREWORKS_API_KEY}"
355
+ # baseURL: "https://api.fireworks.ai/inference/v1"
356
+ # models:
357
+ # default: [
358
+ # "accounts/fireworks/models/devashisht-test-v2",
359
+ # "accounts/fireworks/models/dt-fc-rc-v1",
360
+ # "accounts/fireworks/models/firefunction-v1",
361
+ # "accounts/fireworks/models/firellava-13b",
362
+ # "accounts/devashisht-72fdad/models/function-calling-v11",
363
+ # "accounts/fireworks/models/fw-function-call-34b-v0",
364
+ # "accounts/stability/models/japanese-stablelm-instruct-beta-70b",
365
+ # "accounts/stability/models/japanese-stablelm-instruct-gamma-7b",
366
+ # "accounts/fireworks/models/japanese-stable-vlm",
367
+ # "accounts/fireworks/models/llama-v2-13b-chat",
368
+ # "accounts/fireworks/models/llama-v2-13b-code-instruct",
369
+ # "accounts/fireworks/models/llama-v2-34b-code-instruct",
370
+ # "accounts/fireworks/models/llama-v2-70b-chat",
371
+ # "accounts/fireworks/models/llama-v2-70b-code-instruct",
372
+ # "accounts/fireworks/models/llama-v2-7b-chat",
373
+ # "accounts/fireworks/models/llava-v15-13b-fireworks",
374
+ # "accounts/fireworks/models/mistral-7b-instruct-4k",
375
+ # "accounts/dev-e24710/models/mistral-spellbound-format",
376
+ # "accounts/fireworks/models/mixtral-8x7b-instruct",
377
+ # "accounts/fireworks/models/mixtral-8x7b-instruct-hf",
378
+ # "accounts/fireworks/models/new-mixtral-chat",
379
+ # "accounts/fireworks/models/qwen-14b-chat",
380
+ # "accounts/fireworks/models/qwen-1-8b-chat",
381
+ # "accounts/fireworks/models/qwen-72b-chat",
382
+ # "accounts/stability/models/stablelm-zephyr-3b",
383
+ # "accounts/fireworks/models/yi-34b-200k-capybara",
384
+ # ]
385
+ # fetch: false
386
+ # titleConvo: true
387
+ # titleModel: "accounts/fireworks/models/llama-v2-7b-chat"
388
+ # summarize: false
389
+ # summaryModel: "accounts/fireworks/models/llama-v2-7b-chat"
390
+ # forcePrompt: false
391
+ # modelDisplayLabel: "Fireworks"
392
+ # dropParams: ["user"]
393
+
394
+ # groq
395
+ - name: "groq"
396
+ apiKey: "${GROQ_API_KEY}"
397
+ baseURL: "https://api.groq.com/openai/v1/"
398
+ models:
399
+ default: [
400
+ "llama2-70b-4096",
401
+ "llama3-70b-8192",
402
+ "llama3-8b-8192",
403
+ "mixtral-8x7b-32768",
404
+ "gemma-7b-it",
405
+ ]
406
+ fetch: false
407
+ titleConvo: true
408
+ titleModel: "mixtral-8x7b-32768"
409
+ modelDisplayLabel: "groq"
410
+
411
  # Mistral AI API
412
+ - name: "Mistral"
 
 
413
  apiKey: "${MISTRAL_API_KEY}"
414
  baseURL: "https://api.mistral.ai/v1"
 
 
415
  models:
416
+ default: [
417
+ "mistral-tiny",
418
+ "mistral-small",
419
+ "mistral-medium",
420
+ "mistral-large-latest"
421
+ ]
422
+ fetch: false
423
+ titleConvo: true
424
+ titleMethod: "completion"
425
+ titleModel: "mistral-tiny"
 
 
 
 
 
 
 
426
  summarize: false
427
+ summaryModel: "mistral-tiny"
 
 
 
 
428
  forcePrompt: false
429
+ modelDisplayLabel: "Mistral"
430
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
431
 
432
+ # OpenRouter.ai
 
 
 
 
 
 
 
 
 
 
 
 
 
 
433
  - name: "OpenRouter"
 
 
 
434
  apiKey: "${OPENROUTER_KEY}"
435
  baseURL: "https://openrouter.ai/api/v1"
436
  models:
437
+ "default": [
438
+ "openrouter/auto",
439
+ "---FREE---",
440
+ "google/gemma-7b-it:free",
441
+ "gryphe/mythomist-7b:free",
442
+ "huggingfaceh4/zephyr-7b-beta:free",
443
+ "meta-llama/llama-3-8b-instruct:free",
444
+ "microsoft/phi-3-medium-128k-instruct:free",
445
+ "microsoft/phi-3-mini-128k-instruct:free",
446
+ "mistralai/mistral-7b-instruct:free",
447
+ "nousresearch/nous-capybara-7b:free",
448
+ "openchat/openchat-7b:free",
449
+ "openrouter/cinematika-7b:free",
450
+ "undi95/toppy-m-7b:free",
451
+ "---NITRO---",
452
+ "google/gemma-7b-it:nitro",
453
+ "gryphe/mythomax-l2-13b:nitro",
454
+ "meta-llama/llama-2-70b-chat:nitro",
455
+ "meta-llama/llama-3-70b-instruct:nitro",
456
+ "meta-llama/llama-3-8b-instruct:nitro",
457
+ "mistralai/mistral-7b-instruct:nitro",
458
+ "mistralai/mixtral-8x7b-instruct:nitro",
459
+ "undi95/toppy-m-7b:nitro",
460
+ "---BETA---",
461
+ "anthropic/claude-2.0:beta",
462
+ "anthropic/claude-2.1:beta",
463
+ "anthropic/claude-2:beta",
464
+ "anthropic/claude-3-haiku:beta",
465
+ "anthropic/claude-3-opus:beta",
466
+ "anthropic/claude-3-sonnet:beta",
467
+ "anthropic/claude-instant-1:beta",
468
+ "---EXTENDED---",
469
+ "gryphe/mythomax-l2-13b:extended",
470
+ "meta-llama/llama-3-8b-instruct:extended",
471
+ "neversleep/llama-3-lumimaid-8b:extended",
472
+ "undi95/remm-slerp-l2-13b:extended",
473
+ "---01-AI---",
474
+ "01-ai/yi-34b",
475
+ "01-ai/yi-34b-chat",
476
+ "01-ai/yi-6b",
477
+ "---ANTHROPIC---",
478
+ "anthropic/claude-1",
479
+ "anthropic/claude-1.2",
480
+ "anthropic/claude-2",
481
+ "anthropic/claude-2.0",
482
+ "anthropic/claude-2.1",
483
+ "anthropic/claude-3-haiku",
484
+ "anthropic/claude-3-opus",
485
+ "anthropic/claude-3-sonnet",
486
+ "anthropic/claude-instant-1",
487
+ "anthropic/claude-instant-1.0",
488
+ "anthropic/claude-instant-1.1",
489
+ "---COHERE---",
490
+ "cohere/command",
491
+ "cohere/command-r",
492
+ "cohere/command-r-plus",
493
+ "---GOOGLE---",
494
+ "google/gemini-flash-1.5",
495
+ "google/gemini-pro",
496
+ "google/gemini-pro-1.5",
497
+ "google/gemini-pro-vision",
498
+ "google/gemma-7b-it",
499
+ "google/palm-2-chat-bison",
500
+ "google/palm-2-chat-bison-32k",
501
+ "google/palm-2-codechat-bison",
502
+ "google/palm-2-codechat-bison-32k",
503
+ "---META-LLAMA---",
504
+ "meta-llama/codellama-34b-instruct",
505
+ "meta-llama/llama-2-13b-chat",
506
+ "meta-llama/llama-2-70b-chat",
507
+ "meta-llama/llama-3-70b",
508
+ "meta-llama/llama-3-70b-instruct",
509
+ "meta-llama/llama-3-8b",
510
+ "meta-llama/llama-3-8b-instruct",
511
+ "meta-llama/llama-guard-2-8b",
512
+ "---MICROSOFT---",
513
+ "microsoft/phi-3-medium-128k-instruct",
514
+ "microsoft/phi-3-mini-128k-instruct",
515
+ "microsoft/wizardlm-2-7b",
516
+ "microsoft/wizardlm-2-8x22b",
517
+ "---MISTRALAI---",
518
+ "mistralai/mistral-7b-instruct",
519
+ "mistralai/mistral-7b-instruct-v0.1",
520
+ "mistralai/mistral-7b-instruct-v0.2",
521
+ "mistralai/mistral-7b-instruct-v0.3",
522
+ "mistralai/mistral-large",
523
+ "mistralai/mistral-medium",
524
+ "mistralai/mistral-small",
525
+ "mistralai/mistral-tiny",
526
+ "mistralai/mixtral-8x22b",
527
+ "mistralai/mixtral-8x22b-instruct",
528
+ "mistralai/mixtral-8x7b",
529
+ "mistralai/mixtral-8x7b-instruct",
530
+ "---NEVERSLEEP---",
531
+ "neversleep/llama-3-lumimaid-70b",
532
+ "neversleep/llama-3-lumimaid-8b",
533
+ "neversleep/noromaid-20b",
534
+ "neversleep/noromaid-mixtral-8x7b-instruct",
535
+ "---NOUSRESEARCH---",
536
+ "nousresearch/hermes-2-pro-llama-3-8b",
537
+ "nousresearch/nous-capybara-34b",
538
+ "nousresearch/nous-capybara-7b",
539
+ "nousresearch/nous-hermes-2-mistral-7b-dpo",
540
+ "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
541
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft",
542
+ "nousresearch/nous-hermes-2-vision-7b",
543
+ "nousresearch/nous-hermes-llama2-13b",
544
+ "nousresearch/nous-hermes-yi-34b",
545
+ "---OPENAI---",
546
+ "openai/gpt-3.5-turbo",
547
+ "openai/gpt-3.5-turbo-0125",
548
+ "openai/gpt-3.5-turbo-0301",
549
+ "openai/gpt-3.5-turbo-0613",
550
+ "openai/gpt-3.5-turbo-1106",
551
+ "openai/gpt-3.5-turbo-16k",
552
+ "openai/gpt-3.5-turbo-instruct",
553
+ "openai/gpt-4",
554
+ "openai/gpt-4-0314",
555
+ "openai/gpt-4-1106-preview",
556
+ "openai/gpt-4-32k",
557
+ "openai/gpt-4-32k-0314",
558
+ "openai/gpt-4-turbo",
559
+ "openai/gpt-4-turbo-preview",
560
+ "openai/gpt-4-vision-preview",
561
+ "openai/gpt-4o",
562
+ "openai/gpt-4o-2024-05-13",
563
+ "---PERPLEXITY---",
564
+ "perplexity/llama-3-sonar-large-32k-chat",
565
+ "perplexity/llama-3-sonar-large-32k-online",
566
+ "perplexity/llama-3-sonar-small-32k-chat",
567
+ "perplexity/llama-3-sonar-small-32k-online",
568
+ "---QWEN---",
569
+ "qwen/qwen-110b-chat",
570
+ "qwen/qwen-14b-chat",
571
+ "qwen/qwen-32b-chat",
572
+ "qwen/qwen-4b-chat",
573
+ "qwen/qwen-72b-chat",
574
+ "qwen/qwen-7b-chat",
575
+ "---OTHERS---",
576
+ "allenai/olmo-7b-instruct",
577
+ "alpindale/goliath-120b",
578
+ "austism/chronos-hermes-13b",
579
+ "codellama/codellama-70b-instruct",
580
+ "cognitivecomputations/dolphin-mixtral-8x7b",
581
+ "databricks/dbrx-instruct",
582
+ "deepseek/deepseek-chat",
583
+ "deepseek/deepseek-coder",
584
+ "fireworks/firellava-13b",
585
+ "gryphe/mythomax-l2-13b",
586
+ "gryphe/mythomist-7b",
587
+ "huggingfaceh4/zephyr-7b-beta",
588
+ "intel/neural-chat-7b",
589
+ "jebcarter/psyfighter-13b",
590
+ "jondurbin/airoboros-l2-70b",
591
+ "jondurbin/bagel-34b",
592
+ "koboldai/psyfighter-13b-2",
593
+ "liuhaotian/llava-13b",
594
+ "liuhaotian/llava-yi-34b",
595
+ "lizpreciatior/lzlv-70b-fp16-hf",
596
+ "lynn/soliloquy-l3",
597
+ "mancer/weaver",
598
+ "open-orca/mistral-7b-openorca",
599
+ "openchat/openchat-7b",
600
+ "openrouter/cinematika-7b",
601
+ "phind/phind-codellama-34b",
602
+ "pygmalionai/mythalion-13b",
603
+ "recursal/eagle-7b",
604
+ "recursal/rwkv-5-3b-ai-town",
605
+ "rwkv/rwkv-5-world-3b",
606
+ "sao10k/fimbulvetr-11b-v2",
607
+ "snowflake/snowflake-arctic-instruct",
608
+ "sophosympatheia/midnight-rose-70b",
609
+ "teknium/openhermes-2-mistral-7b",
610
+ "teknium/openhermes-2.5-mistral-7b",
611
+ "togethercomputer/stripedhyena-hessian-7b",
612
+ "togethercomputer/stripedhyena-nous-7b",
613
+ "undi95/remm-slerp-l2-13b",
614
+ "undi95/toppy-m-7b",
615
+ "xwin-lm/xwin-lm-70b"
616
+ ]
617
+ fetch: false
618
+ dropParams: ["stop"]
619
  titleConvo: true
620
  titleModel: "gpt-3.5-turbo"
621
  summarize: false
 
623
  forcePrompt: false
624
  modelDisplayLabel: "OpenRouter"
625
 
626
+ # # OpenRouter.ai - Perplexity
627
+ # - name: "OpenRouter-Perplexity"
628
+ # apiKey: "${OPENROUTER_KEY}"
629
+ # baseURL: "https://openrouter.ai/api/v1"
630
+ # models:
631
+ # "default": [
632
+ # "perplexity/pplx-7b-chat",
633
+ # "perplexity/pplx-70b-chat",
634
+ # "perplexity/pplx-7b-online",
635
+ # "perplexity/pplx-70b-online",
636
+ # "perplexity/sonar-medium-online",
637
+ # ]
638
+ # fetch: false
639
+ # titleConvo: true
640
+ # titleModel: "perplexity/pplx-7b-chat"
641
+ # summarize: false
642
+ # summaryModel: "perplexity/pplx-7b-chat"
643
+ # forcePrompt: false
644
+ # dropParams: ["stop"]
645
+ # modelDisplayLabel: "Perplexity"
646
+ # iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/perplexityai.png"
647
+
648
+ - name: "Perplexity"
649
+ apiKey: "${PERPLEXITY_API_KEY}"
650
+ baseURL: "https://api.perplexity.ai/"
651
  models:
652
+ default: [
653
+ "mistral-7b-instruct",
654
+ "sonar-small-chat",
655
+ "sonar-small-online",
656
+ "sonar-medium-chat",
657
+ "sonar-medium-online"
658
+ ]
659
+ fetch: false # fetching list of models is not supported
660
  titleConvo: true
661
+ titleModel: "sonar-medium-chat"
662
  summarize: false
663
+ summaryModel: "sonar-medium-chat"
664
  forcePrompt: false
665
+ dropParams: ["stop", "frequency_penalty"]
666
+ modelDisplayLabel: "Perplexity"
667
+
668
+ # ShuttleAI API
669
+ # - name: "ShuttleAI"
670
+ # apiKey: "${SHUTTLEAI_API_KEY}"
671
+ # baseURL: "https://api.shuttleai.app/v1"
672
+ # models:
673
+ # default: [
674
+ # "shuttle-2-turbo",
675
+ # "shuttle-turbo",
676
+ # "gpt-4o-2024-05-13",
677
+ # "gpt-4o",
678
+ # "im-also-a-good-gpt2-chatbot",
679
+ # "gpt-4-turbo-2024-04-09",
680
+ # "gpt-4-turbo",
681
+ # "gpt-4-0125-preview",
682
+ # "gpt-4-turbo-preview",
683
+ # "gpt-4-1106-preview",
684
+ # "gpt-4-1106-vision-preview",
685
+ # "gpt-4-vision-preview",
686
+ # "gpt-4-0613",
687
+ # "gpt-4",
688
+ # "gpt-4-bing",
689
+ # "gpt-4-turbo-bing",
690
+ # "gpt-4-32k-0613",
691
+ # "gpt-4-32k",
692
+ # "gpt-3.5-turbo-0125",
693
+ # "gpt-3.5-turbo",
694
+ # "gpt-3.5-turbo-1106",
695
+ # "claude-3-opus-20240229",
696
+ # "claude-3-opus",
697
+ # "claude-3-sonnet-20240229",
698
+ # "claude-3-sonnet",
699
+ # "claude-3-haiku-20240307",
700
+ # "claude-3-haiku",
701
+ # "claude-2.1",
702
+ # "claude-2.0",
703
+ # "claude-2",
704
+ # "claude-instant-1.2",
705
+ # "claude-instant-1.1",
706
+ # "claude-instant-1.0",
707
+ # "claude-instant",
708
+ # "meta-llama-3-70b-instruct",
709
+ # "llama-3-70b-instruct",
710
+ # "meta-llama-3-8b-instruct",
711
+ # "llama-3-8b-instruct",
712
+ # "llama-3-sonar-large-32k-online",
713
+ # "llama-3-sonar-small-32k-online",
714
+ # "llama-3-sonar-large-32k-chat",
715
+ # "llama-3-sonar-small-32k-chat",
716
+ # "blackbox",
717
+ # "blackbox-code",
718
+ # "wizardlm-2-8x22b",
719
+ # "wizardlm-2-70b",
720
+ # "dolphin-2.6-mixtral-8x7b",
721
+ # "dolphin-mixtral-8x7b",
722
+ # "mistral-large",
723
+ # "mistral-next",
724
+ # "mistral-medium",
725
+ # "mistral-small",
726
+ # "mistral-tiny",
727
+ # "mixtral-8x7b-instruct-v0.1",
728
+ # "mixtral-8x7b-instruct",
729
+ # "mixtral-8x22b-instruct-v0.1",
730
+ # "mixtral-8x22b-instruct",
731
+ # "mistral-7b-instruct-v0.2",
732
+ # "mistral-7b-instruct-2",
733
+ # "mistral-7b-instruct-v0.1",
734
+ # "mistral-7b-instruct",
735
+ # "nous-hermes-2-mixtral-8x7b",
736
+ # "gemini-1.5-pro-latest",
737
+ # "gemini-1.5-pro",
738
+ # "gemini-1.0-pro-latest",
739
+ # "gemini-1.0-pro",
740
+ # "gemini-pro",
741
+ # "gemini-1.0-pro-vision",
742
+ # "gemini-pro-vision",
743
+ # "lzlv-70b",
744
+ # "figgs-rp",
745
+ # "cinematika-7b"
746
+ # ]
747
+ # fetch: true
748
+ # titleConvo: true
749
+ # titleMethod: "completion"
750
+ # titleModel: "shuttle-2-turbo"
751
+ # summarize: false
752
+ # summaryModel: "shuttle-2-turbo"
753
+ # forcePrompt: false
754
+ # dropParams: ["user", "frequency_penalty", "presence_penalty", "repition_penalty"]
755
+ # modelDisplayLabel: "ShuttleAI"
756
+
757
+ # - name: "together.ai"
758
+ # apiKey: "${TOGETHERAI_API_KEY}"
759
+ # baseURL: "https://api.together.xyz"
760
+ # models:
761
+ # default: [
762
+ # "zero-one-ai/Yi-34B-Chat",
763
+ # "Austism/chronos-hermes-13b",
764
+ # "DiscoResearch/DiscoLM-mixtral-8x7b-v2",
765
+ # "Gryphe/MythoMax-L2-13b",
766
+ # "lmsys/vicuna-13b-v1.5",
767
+ # "lmsys/vicuna-7b-v1.5",
768
+ # "lmsys/vicuna-13b-v1.5-16k",
769
+ # "codellama/CodeLlama-13b-Instruct-hf",
770
+ # "codellama/CodeLlama-34b-Instruct-hf",
771
+ # "codellama/CodeLlama-70b-Instruct-hf",
772
+ # "codellama/CodeLlama-7b-Instruct-hf",
773
+ # "togethercomputer/llama-2-13b-chat",
774
+ # "togethercomputer/llama-2-70b-chat",
775
+ # "togethercomputer/llama-2-7b-chat",
776
+ # "NousResearch/Nous-Capybara-7B-V1p9",
777
+ # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
778
+ # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
779
+ # "NousResearch/Nous-Hermes-Llama2-70b",
780
+ # "NousResearch/Nous-Hermes-llama-2-7b",
781
+ # "NousResearch/Nous-Hermes-Llama2-13b",
782
+ # "NousResearch/Nous-Hermes-2-Yi-34B",
783
+ # "openchat/openchat-3.5-1210",
784
+ # "Open-Orca/Mistral-7B-OpenOrca",
785
+ # "togethercomputer/Qwen-7B-Chat",
786
+ # "snorkelai/Snorkel-Mistral-PairRM-DPO",
787
+ # "togethercomputer/alpaca-7b",
788
+ # "togethercomputer/falcon-40b-instruct",
789
+ # "togethercomputer/falcon-7b-instruct",
790
+ # "togethercomputer/GPT-NeoXT-Chat-Base-20B",
791
+ # "togethercomputer/Llama-2-7B-32K-Instruct",
792
+ # "togethercomputer/Pythia-Chat-Base-7B-v0.16",
793
+ # "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
794
+ # "togethercomputer/RedPajama-INCITE-7B-Chat",
795
+ # "togethercomputer/StripedHyena-Nous-7B",
796
+ # "Undi95/ReMM-SLERP-L2-13B",
797
+ # "Undi95/Toppy-M-7B",
798
+ # "WizardLM/WizardLM-13B-V1.2",
799
+ # "garage-bAInd/Platypus2-70B-instruct",
800
+ # "mistralai/Mistral-7B-Instruct-v0.1",
801
+ # "mistralai/Mistral-7B-Instruct-v0.2",
802
+ # "mistralai/Mixtral-8x7B-Instruct-v0.1",
803
+ # "teknium/OpenHermes-2-Mistral-7B",
804
+ # "teknium/OpenHermes-2p5-Mistral-7B",
805
+ # "upstage/SOLAR-10.7B-Instruct-v1.0"
806
+ # ]
807
+ # fetch: false
808
+ # titleConvo: true
809
+ # titleModel: "togethercomputer/llama-2-7b-chat"
810
+ # summarize: false
811
+ # summaryModel: "togethercomputer/llama-2-7b-chat"
812
+ # forcePrompt: false
813
+ # modelDisplayLabel: "together.ai"
814