Update config.json
Browse files — config.json (+3 −1)
config.json
CHANGED
|
@@ -1,10 +1,12 @@
|
|
| 1 |
{
|
| 2 |
"model_name": "slim-sentiment-tool",
|
|
|
|
| 3 |
"quantization": "4Q_K_M GGUF",
|
| 4 |
"model_base": "tiny-llama",
|
|
|
|
| 5 |
"model_type": "llama",
|
| 6 |
"parameters": "1.1 billion",
|
| 7 |
-
"description": "slim-sentiment is a function-calling model, fine-tuned to output structured
|
| 8 |
"prompt_wrapper": "human_bot",
|
| 9 |
"prompt_format": "<human> {context_passage} <classify> sentiment </classify>\n<bot>:",
|
| 10 |
"output_format": "{'sentiment': ['positive']}",
|
|
|
|
| 1 |
{
|
| 2 |
"model_name": "slim-sentiment-tool",
|
| 3 |
+
"model_ft_base": "slim-sentiment",
|
| 4 |
"quantization": "4Q_K_M GGUF",
|
| 5 |
"model_base": "tiny-llama",
|
| 6 |
+
"tokenizer": "llmware/slim-sentiment",
|
| 7 |
"model_type": "llama",
|
| 8 |
"parameters": "1.1 billion",
|
| 9 |
+
"description": "slim-sentiment is a function-calling model, fine-tuned to output structured python dictionaries",
|
| 10 |
"prompt_wrapper": "human_bot",
|
| 11 |
"prompt_format": "<human> {context_passage} <classify> sentiment </classify>\n<bot>:",
|
| 12 |
"output_format": "{'sentiment': ['positive']}",
|