tadaGoel commited on
Commit
4791a05
·
verified ·
1 Parent(s): 3f5e2e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -47
app.py CHANGED
@@ -38,60 +38,57 @@ Rules:
38
  """.strip()
39
 
40
 
41
- def build_prompt(message, history):
42
- """
43
- Gradio ChatInterface (type='messages') passes history as a list of
44
- dicts like: {'role': 'user'|'assistant', 'content': '...'}.[web:120]
45
- Turn that into a plain-text prompt for DialoGPT.
46
- """
47
- lines = [f"System: {SHINCHAN_SYSTEM_PROMPT}"]
48
- for turn in history:
49
- role = turn.get("role")
50
- content = turn.get("content", "")
51
- if role == "user":
52
- lines.append(f"User: {content}")
53
- elif role == "assistant":
54
- lines.append(f"Shinchan: {content}")
55
- lines.append(f"User: {message}")
56
- lines.append("Shinchan:")
57
- return "\n".join(lines)
58
-
59
-
60
- def respond(message, history):
61
- """
62
- ChatInterface expects: fn(message:str, history:list[dict]) -> str or dict or list.[web:120]
63
- Do NOT return (reply, history) here – that caused your 'tuple has no attribute get' error.
64
- """
65
- prompt = build_prompt(message, history)
66
- inputs = tokenizer(prompt, return_tensors="pt")
67
-
68
- with torch.no_grad():
69
- output_ids = model.generate(
70
- **inputs,
71
- max_new_tokens=80,
72
- pad_token_id=tokenizer.eos_token_id,
73
- do_sample=True,
74
- top_p=0.9,
75
- temperature=0.9,
76
  )
77
 
78
- full_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
79
 
80
- if "Shinchan:" in full_text:
81
- reply = full_text.split("Shinchan:")[-1].strip()
82
- else:
83
- reply = full_text.strip()
 
 
 
 
 
84
 
85
- # IMPORTANT: return just the reply string
86
- return reply
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
 
89
  demo = gr.ChatInterface(
90
- fn=respond,
91
- type="messages", # explicit; default since Gradio 5 but makes intent clear[web:120]
92
- title="Shinchan for Ruru",
93
- description="Private Shinchan-style chat for Ruru.",
94
  )
95
 
96
  if __name__ == "__main__":
97
- demo.launch()
 
38
  """.strip()
39
 
40
 
41
def respond(message: str, history: list[dict]) -> str:
    """
    Generate a Shinchan-style reply for Gradio ChatInterface(type='messages').

    Args:
        message: The latest user message.
        history: Prior turns as dicts like {'role': 'user'|'assistant',
            'content': '...'} — Gradio's messages format.[web:120]

    Returns:
        Just the reply string (never a tuple — ChatInterface keeps the
        history itself).
    """
    # Rebuild the whole conversation so the model sees prior context.
    # Previously only the latest message was used and `history` was ignored,
    # which made every turn stateless.
    lines = [f"System: {SHINCHAN_SYSTEM_PROMPT}", ""]
    for turn in history:
        role = turn.get("role")
        content = turn.get("content", "")
        if role == "user":
            lines.append(f"User: {content}")
        elif role == "assistant":
            lines.append(f"Shinchan: {content}")
    lines.append(f"User: {message}")
    lines.append("Shinchan:")
    prompt = "\n".join(lines)

    inputs = tokenizer(prompt, return_tensors="pt")
    prompt_len = inputs["input_ids"].shape[-1]

    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=80,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            top_p=0.9,
            temperature=0.9,
        )

    # Decode ONLY the newly generated tokens. Slicing the decoded string by
    # len(prompt) is unreliable: decode(skip_special_tokens=True) does not
    # round-trip the prompt text exactly, so a character-offset slice can cut
    # mid-word. Token-index slicing is exact.
    reply = tokenizer.decode(
        output_ids[0][prompt_len:], skip_special_tokens=True
    ).strip()

    # If sampling rambles into inventing the next user turn, keep only
    # Shinchan's part.
    reply = reply.split("User:")[0].strip()

    # If the model returns empty output or just punctuation, use a safe
    # in-character fallback instead of a blank bubble.
    if not reply or len(reply.replace(".", "").replace("!", "").strip()) < 3:
        reply = (
            "Heeey, it’s Shinchan! 😂 I heard you, even if my brain glitched for a second. "
            "Tell me more, I’m listening. 🌻"
        )

    return reply
84
 
85
 
86
# Wire the chat function into a Gradio chat UI. ChatInterface manages the
# conversation history itself; `respond` only ever returns the reply string.
demo = gr.ChatInterface(
    respond,
    type="messages",  # history arrives as role/content dicts[web:120]
    title="Shinchan for Ruru",
    description="Private Shinchan-style chat for Ruru.",
)

if __name__ == "__main__":
    demo.launch()