import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

MODEL_ID = "Kostya27/IIvanIIvanovich"  # 👈 your model on Hugging Face

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,
    device_map="auto"
)

def chat_fn(message, history):
    # Assemble the conversation history into a single prompt
    prompt = ""
    for user, assistant in history:
        prompt += f"<|user|>\n{user}\n<|assistant|>\n{assistant}\n"
    prompt += f"<|user|>\n{message}\n<|assistant|>"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.inference_mode():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )

    # Decode only the newly generated tokens, skipping the prompt
    new_tokens = output_ids[0, inputs["input_ids"].shape[1]:]
    answer = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return answer

interface = gr.ChatInterface(chat_fn, title="IIvanIIvanovich Chatbot")
interface.launch()
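For a quick local check without launching the Gradio UI, chat_fn can be called directly, since it only needs the new message and a list of (user, assistant) pairs. A minimal sketch; the sample question and history below are made up for illustration:

# Minimal local test of chat_fn, bypassing the Gradio interface.
# The history format (a list of (user, assistant) tuples) matches what
# the loop inside chat_fn iterates over; the strings are placeholders.
history = [("Hi, who are you?", "I'm a chatbot.")]
reply = chat_fn("What can you help me with?", history)
print(reply)

On a Hugging Face Space, this file would typically be saved as app.py, with torch, transformers, and gradio listed in requirements.txt.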