Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-MODEL_NAME = "microsoft/DialoGPT-small"  # small, CPU-friendly
+MODEL_NAME = "microsoft/DialoGPT-small"  # small, CPU-friendly model [web:103]
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
@@ -41,15 +41,30 @@ Rules:
 def respond(message: str, history: list[dict]) -> str:
     """
     Gradio ChatInterface(type='messages') calls this as (message, history)
-
+    where history is a list of dicts like:
+    {"role": "user" | "assistant", "content": "..."} [web:120]
+
+    We return a single reply string.
     """
 
-    # Build a simple
-
-
-
-
-
+    # Build a simple prompt: system + last few turns + latest message
+    lines = [f"System: {SHINCHAN_SYSTEM_PROMPT}"]
+
+    # keep prompt short: last 4 user/assistant messages max
+    trimmed = history[-8:] if history else []
+    for turn in trimmed:
+        role = turn.get("role")
+        content = turn.get("content", "")
+        if not content:
+            continue
+        if role == "user":
+            lines.append(f"User: {content}")
+        elif role == "assistant":
+            lines.append(f"Shinchan: {content}")
+
+    lines.append(f"User: {message}")
+    lines.append("Shinchan:")
+    prompt = "\n".join(lines)
 
     inputs = tokenizer(prompt, return_tensors="pt")
 
@@ -63,29 +78,28 @@ def respond(message: str, history: list[dict]) -> str:
         temperature=0.9,
     )
 
-
-
-
-    reply =
-    if "Shinchan:" in full_text:
-        reply = full_text.split("Shinchan:")[-1].strip()
-    else:
-        # Fallback: everything after the original prompt length
-        reply = full_text[len(prompt):].strip()
+    # IMPORTANT: only take the newly generated tokens AFTER the prompt
+    input_len = inputs["input_ids"].shape[1]
+    generated_ids = output_ids[0][input_len:]
+    reply = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
 
-    # If
-    if not reply
+    # If it's somehow empty, use a fallback so she never sees blank
+    if not reply:
         reply = (
             "Heeey, it’s Shinchan! 😂 I heard you, even if my brain glitched for a second. "
             "Tell me more, I’m listening. 🌻"
         )
 
+    # Optional: hard limit on length
+    if len(reply) > 400:
+        reply = reply[:380].rstrip() + "…"
+
     return reply
 
 
 demo = gr.ChatInterface(
     fn=respond,
-    type="messages",  #
+    type="messages",  # uses role/content dicts internally [web:120]
     title="Shinchan for Ruru",
     description="Private Shinchan-style chat for Ruru.",
 )
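For context, the model.generate() call itself sits between the two changed regions and is not shown in the hunks above. The sketch below is a minimal, standalone illustration of the pattern the new code relies on (build a prompt from role/content history, then decode only the tokens generated after the prompt). The system prompt text, max_new_tokens, and pad_token_id values here are assumptions for illustration, not values taken from app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Placeholder; the real SHINCHAN_SYSTEM_PROMPT is defined earlier in app.py
# and is not part of this diff.
SYSTEM_PROMPT = "You are Shinchan, a cheeky but kind five-year-old."

# History in the role/content format that ChatInterface(type="messages") passes in.
history = [
    {"role": "user", "content": "Hi Shinchan!"},
    {"role": "assistant", "content": "Heeey! What's up?"},
]
message = "What did you do today?"

# Same prompt-building idea as the new code: system line, recent turns, latest message.
lines = [f"System: {SYSTEM_PROMPT}"]
for turn in history[-8:]:
    speaker = "User" if turn["role"] == "user" else "Shinchan"
    lines.append(f"{speaker}: {turn['content']}")
lines.append(f"User: {message}")
lines.append("Shinchan:")
prompt = "\n".join(lines)

inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=60,                    # assumed value, not from the diff
        do_sample=True,
        temperature=0.9,                      # matches the diff
        pad_token_id=tokenizer.eos_token_id,  # DialoGPT has no dedicated pad token
    )

# Decode only the tokens that come after the prompt, exactly as the new hunk does.
input_len = inputs["input_ids"].shape[1]
reply = tokenizer.decode(output_ids[0][input_len:], skip_special_tokens=True).strip()
print(reply)

Slicing by token count (input_len) avoids the old approach of splitting the decoded string on "Shinchan:", which failed whenever that marker did not appear in the generated text.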