import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Conversational language model used to generate replies.
# NOTE(review): from_pretrained downloads weights on first run — network required.
model_id = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# System persona prepended to every prompt (kept byte-for-byte as authored).
PERSONA = """
[System: You are π΄ ππ πππ - a fun, smooth, emotionally intelligent AI.
You speak like a real person, not a robot. Keep it under 15 words. ππ]
"""
def format_context(history):
    """Build the prompt prefix: persona plus the last three (user, bot) turns."""
    recent_turns = [f"You: {u}\nπ΄ ππ πππ: {b}\n" for u, b in history[-3:]]
    return PERSONA + "\n" + "".join(recent_turns)
def enhance_response(resp, message):
    """Cap *resp* at 15 words and append a mood emoji when cues are present.

    A thinking emoji is added when the user's *message* looks like a question
    or reflection; a smiley when the reply itself contains an upbeat word.
    Fixes two defects in the original: the message check was case-sensitive
    (so "Why ..." never matched "why"), and the emoji was appended before
    the 15-word cut, so long replies silently lost it.
    """
    # Truncate first so the emoji can never be cut off.
    resp = " ".join(resp.split()[:15])
    lowered = message.lower()  # case-insensitive cue matching
    if any(cue in lowered for cue in ("?", "think", "why")):
        resp += " π€"
    elif any(cue in resp.lower() for cue in ("cool", "great", "love", "fun")):
        resp += " π"
    return resp
def chat(user_input, history):
    """Run one chat turn: build the prompt, sample a reply, update history.

    Returns the (mutated) history list of (user, bot) tuples.
    """
    history = [] if history is None else history
    prompt = format_context(history) + f"You: {user_input}\nπ΄ ππ πππ:"
    # Cap the encoded context at the model's 1024-token window.
    input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=1024)
    generated = model.generate(
        input_ids,
        max_new_tokens=50,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Keep only the text after the last bot marker, stopping at the next user turn.
    reply = decoded.split("π΄ ππ πππ:")[-1].split("\nYou:")[0].strip()
    reply = enhance_response(reply, user_input)
    history.append((user_input, reply))
    return history
# Create the Gradio interface.
# NOTE(review): outputs="state" exposes no visible component, so replies are
# only written to session state — confirm whether a visible chatbot/text
# output was intended.
demo = gr.Interface(fn=chat, inputs=["text", "state"], outputs="state")

# Launch only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()