Trigger82 committed on
Commit
74d6030
Β·
verified Β·
1 Parent(s): 9660389

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -12
app.py CHANGED
@@ -1,29 +1,45 @@
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Small seq2seq checkpoint: loads fast and generates acceptably on CPU.
model_id = "google/flan-t5-small"  # Extremely fast and CPU-friendly

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Persona prepended to every prompt sent to the model.
system_prompt = "You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, fast, emotionally tuned AI chatbot created by 𝕴 𝖆𝖒 π–π–Žπ–’. You reply quickly, like a chill and clever human friend."
9
 
def chat(history, message):
    """Generate one model reply for *message* and append the turn to history.

    Returns the updated history twice: once for gr.State, once for the
    gr.Chatbot display.
    """
    # Single-turn prompt: persona plus only the latest user message
    # (earlier turns are not fed back to the model in this version).
    full_prompt = f"{system_prompt}\nUser: {message}\nAI:"
    encoded = tokenizer(full_prompt, return_tensors="pt")
    generated = model.generate(**encoded, max_new_tokens=100)
    reply = tokenizer.decode(generated[0], skip_special_tokens=True).strip()

    # First call arrives with history=None; normalise to a list.
    history = history or []
    history.append((message, reply))
    return history, history
19
 
# Wire the chat function into a simple Gradio UI. gr.State carries the
# conversation history across calls; the Chatbot widget renders it.
state_in = gr.State()
user_box = gr.Textbox(placeholder="Talk to 𝕴 𝖆𝖒 π–π–Žπ–’...")
state_out = gr.State()
chat_window = gr.Chatbot(label="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chatbot")

iface = gr.Interface(
    fn=chat,
    inputs=[state_in, user_box],
    outputs=[state_out, chat_window],
    title="𝕴 𝖆𝖒 π–π–Žπ–’ β€” Superfast Chatbot",
    description="An extremely fast and chill AI chatbot, created by 𝕴 𝖆𝖒 π–π–Žπ–’. Running on Hugging Face Spaces (CPU only).",
    allow_flagging="never",
)

iface.launch()
 
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# flan-t5-small: small enough to serve from a CPU-only Space.
model_id = "google/flan-t5-small"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Persona text prepended to every prompt; the trailing newline keeps the
# first "User:" line separate when the conversation is appended.
system_prompt = "You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a chill, emotionally smart, witty AI friend who talks like a real person. Be smooth, real, and clever.\n"
10
 
def chat(history, message):
    """Run one chat turn: build a prompt from recent history, generate a reply.

    Args:
        history: list of (user, bot) tuples from previous turns, or None on
            the first call (gr.State starts out empty).
        message: the new user message from the textbox.

    Returns:
        (history, history): the updated history twice — once for gr.State,
        once for the gr.Chatbot display.
    """
    history = history or []

    # Flatten the conversation into a single prompt the seq2seq model
    # can condition on.
    prompt = system_prompt
    for user, bot in history:
        prompt += f"User: {user}\nAI: {bot}\n"
    prompt += f"User: {message}\nAI:"

    # Truncate to the encoder's maximum input length (512 tokens for
    # flan-t5). Without this, a long message or conversation overflows
    # the encoder and generation fails or silently degrades.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    ).input_ids

    outputs = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,       # sampled decoding for varied, chatty replies
        temperature=0.9,
        top_p=0.95,
    )

    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # If the model echoes any of the prompt, keep only the text after the
    # last "AI:" marker; if the marker is absent, split() returns the whole
    # string and [-1] is the full decoded reply.
    reply = decoded.split("AI:")[-1].strip()

    history.append((message, reply))
    # Keep only the last 5 exchanges so the prompt (and state) stay bounded.
    if len(history) > 5:
        history = history[-5:]
    return history, history
35
 
# Build the Gradio UI. gr.State threads the conversation history through
# successive calls to chat(); the Chatbot widget renders it.
history_state = gr.State()
message_box = gr.Textbox(placeholder="Talk to 𝕴 𝖆𝖒 π–π–Žπ–’...")
history_out = gr.State()
chat_display = gr.Chatbot(label="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chat")

iface = gr.Interface(
    fn=chat,
    inputs=[history_state, message_box],
    outputs=[history_out, chat_display],
    title="𝕴 𝖆𝖒 π–π–Žπ–’ - Fast AI Friend",
    allow_flagging="never",
    theme="default",
)

iface.launch()