import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the model in bfloat16 with FlashAttention 2 (requires the flash-attn package),
# spreading layers across available devices
model = AutoModelForCausalLM.from_pretrained(
    "Vikhrmodels/Vikhr-7B-instruct_0.4",
    device_map="auto",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained("Vikhrmodels/Vikhr-7B-instruct_0.4")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
prompts = [
    "В чем разница между фруктом и овощем?",  # "What is the difference between a fruit and a vegetable?"
    "Годы жизни Колмогорова?",  # "Kolmogorov's years of life?"
]
def test_inference(prompt):
    # Wrap the user message in the model's chat template and append the generation prompt
    prompt = pipe.tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=False, add_generation_prompt=True)
    print(prompt)
    outputs = pipe(prompt, max_new_tokens=512, do_sample=True, num_beams=1, temperature=0.25,
                   top_k=50, top_p=0.98, eos_token_id=79097)  # 79097: stop token id used in the model card
    # The pipeline echoes the prompt, so return only the newly generated text
    return outputs[0]["generated_text"][len(prompt):].strip()
for prompt in prompts:
    print(f" prompt:\n{prompt}")
    print(f" response:\n{test_inference(prompt)}")
    print("-" * 50)