import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
# Load the GPT-2 tokenizer and model weights from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Run on GPU when available; switch to inference mode.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Sample a continuation; max_new_tokens bounds the completion length
    # regardless of prompt length (max_length would count the prompt too).
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,
            top_p=0.95,
            top_k=50,
            # GPT-2 has no pad token, so reuse the end-of-text token.
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens so the reply does not echo the prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
# A minimal Gradio UI: one textbox in, one textbox out.
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, placeholder="Talk to the model..."),
    outputs=gr.Textbox(label="Response", lines=10),
    title="GPT-2 Chatbot",
    description="Chat with the GPT-2 model.",
)
if __name__ == "__main__":
    demo.launch()
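# Note: with a stock Gradio install, demo.launch() serves locally
# (default http://127.0.0.1:7860); passing share=True additionally
# creates a temporary public link for sharing the demo.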