|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
from huggingface_hub import login |
|
|
# Interactive Hugging Face Hub authentication: prompts for an access token.
# Required here because the Aya-23 weights are gated on the Hub — the
# download below fails without an authenticated session.
login()
|
|
|
|
|
# Gated multilingual chat model; the Hub license must be accepted and
# `login()` completed before the weights can be downloaded.
model_id = "CohereForAI/aya-23-8B"

# Tokenizer carries the chat template consumed by apply_chat_template below.
tokenizer = AutoTokenizer.from_pretrained(model_id)

# torch_dtype="auto" loads the weights in the dtype stored in the checkpoint
# (half precision for this model) instead of upcasting everything to
# float32, roughly halving host memory for an 8B-parameter model.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto")
|
|
|
|
|
|
|
|
# Single-turn conversation; the user message is Russian for
# "Tell me about yourself".
user_turn = {"role": "user", "content": "Расскажи о себе"}
messages = [user_turn]

# Render the model's chat template around the conversation, append the
# assistant-turn generation prompt, and tokenize straight to a PyTorch
# tensor in one call.
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)
|
|
|
|
|
|
|
|
# Sample a completion. NOTE(review): do_sample=True with no fixed seed means
# the output differs run to run; seed torch manually for reproducibility.
gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,  # low temperature: mostly-greedy, mildly varied output
)

# generate() returns prompt + continuation concatenated. Decode only the
# newly generated tokens (everything past the input_ids.shape[-1] prompt
# tokens) and drop special tokens (<BOS_TOKEN> etc.) so the printed text is
# just the model's reply rather than the full templated prompt.
gen_text = tokenizer.decode(
    gen_tokens[0][input_ids.shape[-1]:],
    skip_special_tokens=True,
)

print(gen_text)
|
|
|