Update README.md
Browse files
README.md
CHANGED
|
@@ -133,11 +133,10 @@ Use the `main` transformers branch or pass `trust_remote_code=True` with a release
|
|
| 133 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 134 |
import torch
|
| 135 |
|
| 136 |
-
model_id = "arcee-ai/Trinity-Large-Preview"
|
| 137 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 138 |
model = AutoModelForCausalLM.from_pretrained(
|
| 139 |
model_id,
|
| 140 |
-
torch_dtype=torch.bfloat16,
|
| 141 |
device_map="auto",
|
| 142 |
trust_remote_code=True
|
| 143 |
)
|
|
@@ -156,9 +155,9 @@ outputs = model.generate(
|
|
| 156 |
input_ids,
|
| 157 |
max_new_tokens=256,
|
| 158 |
do_sample=True,
|
| 159 |
-
temperature=0.
|
| 160 |
top_k=50,
|
| 161 |
-
top_p=0.
|
| 162 |
)
|
| 163 |
|
| 164 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
@@ -170,24 +169,11 @@ print(response)
|
|
| 170 |
Supported in vLLM release 0.11.1+
|
| 171 |
|
| 172 |
```bash
|
| 173 |
-
vllm serve arcee-ai/Trinity-Large-Preview \
|
| 174 |
-
--dtype bfloat16 \
|
| 175 |
--enable-auto-tool-choice \
|
| 176 |
--tool-call-parser hermes
|
| 177 |
```
|
| 178 |
|
| 179 |
-
### llama.cpp
|
| 180 |
-
|
| 181 |
-
Supported in llama.cpp release b7061+
|
| 182 |
-
|
| 183 |
-
```bash
|
| 184 |
-
llama-server -hf arcee-ai/Trinity-Large-Preview-GGUF:q4_k_m
|
| 185 |
-
```
|
| 186 |
-
|
| 187 |
-
### LM Studio
|
| 188 |
-
|
| 189 |
-
Supported in the latest LM Studio runtime. Search for `arcee-ai/Trinity-Large-Preview-GGUF` in Model Search.
|
| 190 |
-
|
| 191 |
### API
|
| 192 |
|
| 193 |
Available on OpenRouter:
|
|
|
|
| 133 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 134 |
import torch
|
| 135 |
|
| 136 |
+
model_id = "arcee-ai/Trinity-Large-Preview-FP8"
|
| 137 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 138 |
model = AutoModelForCausalLM.from_pretrained(
|
| 139 |
model_id,
|
|
|
|
| 140 |
device_map="auto",
|
| 141 |
trust_remote_code=True
|
| 142 |
)
|
|
|
|
| 155 |
input_ids,
|
| 156 |
max_new_tokens=256,
|
| 157 |
do_sample=True,
|
| 158 |
+
temperature=0.8,
|
| 159 |
top_k=50,
|
| 160 |
+
top_p=0.8
|
| 161 |
)
|
| 162 |
|
| 163 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
| 169 |
Supported in vLLM release 0.11.1+
|
| 170 |
|
| 171 |
```bash
|
| 172 |
+
vllm serve arcee-ai/Trinity-Large-Preview-FP8 \
|
|
|
|
| 173 |
--enable-auto-tool-choice \
|
| 174 |
--tool-call-parser hermes
|
| 175 |
```
|
| 176 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 177 |
### API
|
| 178 |
|
| 179 |
Available on OpenRouter:
|