# Hugging Face Space: Persona Bot (DialoGPT) — page-scrape header ("Spaces: Sleeping") removed.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load DialoGPT-medium (fast and lightweight).
# NOTE: the download/load happens once, at import time; first start of the
# process may be slow while the weights are fetched from the Hub.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Simple persona presets: each value is a system-style prefix that is
# prepended to the user's message in chatbot() to steer the reply's tone.
persona_prompts = {
    "Friendly": "You are a kind and friendly chatbot who always tries to brighten someone's day.",
    "Professional": "You are a professional assistant who responds with clarity and precision.",
    "Sarcastic": "You are a sarcastic chatbot who always has a witty, dry reply.",
    "Motivational Coach": "You are a motivational coach who always uplifts and encourages the user.",
}
# Chat function
def chatbot(persona, input_text):
    """Generate one sampled DialoGPT reply conditioned on a persona prefix.

    Args:
        persona: Key into ``persona_prompts``; an unknown key falls back to
            an empty persona prefix rather than raising.
        input_text: The user's message.

    Returns:
        The model's decoded reply, stripped of surrounding whitespace.
    """
    # Prepend the persona description so the model (weakly) adopts its tone.
    prompt = persona_prompts.get(persona, "") + " " + input_text
    input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
    # Inference only — no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            # max_new_tokens budgets the *reply* length. The original
            # max_length=100 counted prompt tokens against the limit, so a
            # long persona+message could leave no room for any answer.
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,  # DialoGPT has no pad token; reuse EOS
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
        )
    # Decode only the newly generated tokens (everything past the prompt).
    response = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return response.strip()
# Gradio UI: persona dropdown + free-text box in, plain text out.
iface = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.Dropdown(label="Choose a Persona", choices=list(persona_prompts.keys())),
        gr.Textbox(label="input_text", placeholder="Ask something..."),
    ],
    outputs="text",
    title="Persona Bot (DialoGPT)",
    description="Choose a simple chatbot persona: Friendly, Professional, Sarcastic, or Motivational Coach.",
)

# Blocks and serves the app; on Spaces this binds to the platform's port.
iface.launch()