Update main.py
main.py CHANGED
@@ -1,20 +1,9 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from mistral_inference.transformer import Transformer
+from mistral_inference.generate import generate
 
-
-
-model = AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = Transformer.from_folder(mistral_models_path)
+out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
 
-
-# model.to("cpu") # This line can be omitted since it's already on CPU by default
+result = tokenizer.decode(out_tokens[0])
 
-# Encode input tokens
-input_text = "Your input text here"
-tokens = tokenizer(input_text, return_tensors="pt").input_ids
-
-# Generate output
-generated_ids = model.generate(tokens, max_new_tokens=1000, do_sample=True)
-
-# Decode generated tokens
-result = tokenizer.decode(generated_ids[0].tolist(), skip_special_tokens=True)
 
 print(result)
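As committed, the new main.py references mistral_models_path, tokens, and tokenizer without defining them, so it does not run on its own. Below is a minimal end-to-end sketch of the mistral-inference flow the commit appears to target, assuming the model weights and tokenizer file have already been downloaded to a local folder and pairing the script with MistralTokenizer from mistral_common; the folder path, the tokenizer file name (tokenizer.model.v3), and the prompt are illustrative placeholders, not part of the commit.

from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

# Assumption: a local folder holding the consolidated model weights and the
# matching tokenizer file (the file name varies by model release).
mistral_models_path = "mistral_models/7B-Instruct-v0.3"

tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tokenizer.model.v3")
model = Transformer.from_folder(mistral_models_path)

# Build a chat-style prompt and encode it to token ids.
completion_request = ChatCompletionRequest(
    messages=[UserMessage(content="Your input text here")]
)
tokens = tokenizer.encode_chat_completion(completion_request).tokens

# Greedy decoding (temperature=0.0), stopping at the tokenizer's EOS id.
out_tokens, _ = generate(
    [tokens],
    model,
    max_tokens=64,
    temperature=0.0,
    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
)

# Mirrors the decode call in the committed file.
result = tokenizer.decode(out_tokens[0])
print(result)

Note that the commit also changes the decoding behavior, not just the backend: the old script sampled up to 1000 new tokens (do_sample=True), while the new one does short greedy decoding (max_tokens=64, temperature=0.0).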