Spaces:
Sleeping
support llama3 models
Browse files
app.py
CHANGED
|
@@ -56,9 +56,9 @@ models = {
|
|
| 56 |
"tokens": 32768,
|
| 57 |
"developer": "Mistral",
|
| 58 |
},
|
| 59 |
-
"llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
|
| 60 |
-
"gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
|
| 61 |
"llama3-70b-8192": {"name": "LLaMA3-70b-chat", "tokens": 8192, "developer": "Meta"},
|
|
|
|
|
|
|
| 62 |
}
|
| 63 |
|
| 64 |
# Layout for model selection and max_tokens slider
|
|
|
|
| 56 |
"tokens": 32768,
|
| 57 |
"developer": "Mistral",
|
| 58 |
},
|
|
|
|
|
|
|
| 59 |
"llama3-70b-8192": {"name": "LLaMA3-70b-chat", "tokens": 8192, "developer": "Meta"},
|
| 60 |
+
"llama3-8b-8192": {"name": "LLaMA3-8b-chat", "tokens": 8192, "developer": "Meta"},
|
| 61 |
+
"gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
|
| 62 |
}
|
| 63 |
|
| 64 |
# Layout for model selection and max_tokens slider
|