---
{}
---

## Usage

The snippet below loads the model, formats a short Russian conversation with the tokenizer's chat template, and generates a reply with greedy decoding.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "datapaf/fvt_ift_rus"

# Load the tokenizer and the model (bfloat16 weights, automatic device placement)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Conversation in Russian: the system prompt says "You are an AI assistant,
# answer the question"; the user message says "Hi! How are you?"
chat = [
    {"role": "system", "content": "Ты AI-помощник, ответь на вопрос"},
    {"role": "user", "content": "Привет! Как дела?"},
]

# Render the conversation with the model's chat template, tokenize it,
# and move the tensors to the model's device
templated = tokenizer.apply_chat_template(chat, tokenize=False)
encoded = tokenizer(templated, return_tensors="pt", add_special_tokens=True)
inputs = {key: tensor.to(model.device) for key, tensor in encoded.items()}

# Greedy decoding with a mild repetition penalty
output = model.generate(
    **inputs,
    max_new_tokens=1024,
    do_sample=False,
    repetition_penalty=1.2,
)

# Decode only the generated part: skip the prompt tokens plus two
# template tokens that precede the assistant's reply
decoded_output = tokenizer.decode(
    output[0][inputs["input_ids"].size(1) + 2:],
    skip_special_tokens=True,
)

print(decoded_output)
```
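
The `+ 2` offset in the decode step assumes this model's chat template emits two extra tokens between the prompt and the assistant's reply; that offset is specific to this checkpoint. If you adapt the snippet to another model, a safer starting point is to slice exactly at the prompt boundary. A minimal sketch, reusing the `tokenizer`, `inputs`, and `output` objects from above:

```python
# Template-agnostic variant: decode everything after the prompt and let
# skip_special_tokens drop any remaining template markers.
prompt_len = inputs["input_ids"].size(1)
response = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)
print(response)
```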