Spaces:
Sleeping
Sleeping
File size: 1,680 Bytes
71048e3 5c2269a c607a1f 179e55e 5c2269a f2b6f3f 5c2269a 179e55e d206c19 179e55e d206c19 179e55e d206c19 179e55e 5c2269a 179e55e 5c2269a 179e55e 02e9269 d206c19 c607a1f 6a59851 d206c19 179e55e d206c19 c607a1f d206c19 71048e3 179e55e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# --- Model setup ---------------------------------------------------------
# DialoGPT-medium: a lightweight conversational model, quick to load.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# --- Persona definitions -------------------------------------------------
# Each persona maps to a short system-style instruction that is prepended
# to the user's message before generation.
persona_prompts = {
    "Friendly": (
        "You are a kind and friendly chatbot who always tries to brighten someone's day."
    ),
    "Professional": (
        "You are a professional assistant who responds with clarity and precision."
    ),
    "Sarcastic": (
        "You are a sarcastic chatbot who always has a witty, dry reply."
    ),
    "Motivational Coach": (
        "You are a motivational coach who always uplifts and encourages the user."
    ),
}
# Chat function
def chatbot(persona, input_text):
    """Generate a single DialoGPT reply for the given persona and user message.

    Args:
        persona: Key into ``persona_prompts`` selecting the persona preamble.
            Unknown personas fall back to an empty preamble.
        input_text: The user's message.

    Returns:
        The model's decoded reply as a stripped string (may be empty if the
        model immediately emits EOS).
    """
    prompt = persona_prompts.get(persona, "") + " " + input_text
    # encode_plus-style call so we also get an attention mask; without it,
    # generate() warns and may sample poorly because pad_token == eos_token.
    encoded = tokenizer(prompt + tokenizer.eos_token, return_tensors="pt")
    input_ids = encoded["input_ids"]
    # no_grad: pure inference — skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            attention_mask=encoded["attention_mask"],
            # max_new_tokens (not max_length) so a long persona prompt never
            # consumes the generation budget: max_length counts input tokens too.
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
        )
    # Slice off the prompt tokens; decode only the newly generated reply.
    response = tokenizer.decode(
        output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    return response.strip()
# --- Gradio front-end ----------------------------------------------------
# Two inputs (persona selector + free-text box) feeding the chatbot function;
# the reply is rendered as plain text.
persona_dropdown = gr.Dropdown(
    label="Choose a Persona",
    choices=list(persona_prompts.keys()),
)
message_box = gr.Textbox(
    label="input_text",
    placeholder="Ask something...",
)

iface = gr.Interface(
    fn=chatbot,
    inputs=[persona_dropdown, message_box],
    outputs="text",
    title="Persona Bot (DialoGPT)",
    description="Choose a simple chatbot persona: Friendly, Professional, Sarcastic, or Motivational Coach.",
)

iface.launch()
|