import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the fine-tuned DialoGPT checkpoint and its tokenizer
model_id = "MahiH/dialogpt-finetuned-chatbot"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
model.eval()

# Run on the GPU when one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def chat(prompt):
    # Wrap the user message in the Human/Assistant format used at fine-tuning time
    input_text = f"Human: {prompt}\nAssistant: "
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            top_p=0.95,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id,
        )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text generated after the final "Assistant:" marker
    return decoded.split("Assistant:")[-1].strip()

# Create the interface
demo = gr.Interface(fn=chat, inputs="text", outputs="text")

# Enable queuing to support the REST API endpoint
demo.queue()

# Launch (no extra args needed)
demo.launch()
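
# --- Optional: calling the REST API from another process ---
# A minimal sketch, assuming the app above is running at the default
# local address (http://127.0.0.1:7860); swap in the Space URL for a
# hosted deployment. A plain gr.Interface exposes its function on the
# "/predict" endpoint, which the official gradio_client package can call:
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   reply = client.predict("Hello there!", api_name="/predict")
#   print(reply)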