Trigger82 committed on
Commit
9660389
Β·
verified Β·
1 Parent(s): fb441e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -42
app.py CHANGED
@@ -1,52 +1,29 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
- import torch
4
-
5
- model_id = "microsoft/DialoGPT-small"
6
 
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
- model = AutoModelForCausalLM.from_pretrained(model_id)
9
 
10
- system_prompt = "You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a chill, witty, emotionally tuned AI friend who talks like a real person.\n"
11
 
12
  def chat(history, message):
13
- history = history or []
14
- history.append((message, ""))
15
-
16
- convo = system_prompt
17
- for user_msg, bot_msg in history[:-1]:
18
- convo += f"Human: {user_msg}\nAI: {bot_msg}\n"
19
- convo += f"Human: {message}\nAI:"
20
-
21
- inputs = tokenizer.encode(convo, return_tensors="pt")
22
- outputs = model.generate(
23
- inputs,
24
- max_new_tokens=50,
25
- pad_token_id=tokenizer.eos_token_id,
26
- do_sample=True,
27
- temperature=0.7,
28
- top_p=0.9,
29
- eos_token_id=tokenizer.eos_token_id,
30
- )
31
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
32
- bot_reply = response.split("AI:")[-1].strip()
33
- history[-1] = (message, bot_reply)
34
-
35
- if len(history) > 5:
36
- history = history[-5:]
37
 
 
 
38
  return history, history
39
 
40
- # βœ… Wrap in gr.Blocks so HF Spaces detects the interface
41
- with gr.Blocks() as demo:
42
- gr.Markdown("## 𝕴 𝖆𝖒 π–π–Žπ–’ AI Chat")
43
- chatbot = gr.Chatbot()
44
- state = gr.State([])
45
- msg = gr.Textbox(show_label=False, placeholder="Say something...")
46
-
47
- def respond(message, history):
48
- return chat(history, message)
49
-
50
- msg.submit(respond, [state, msg], [state, chatbot])
51
 
52
- demo.launch()
 
1
# Model and tokenizer are loaded once at import time so every chat() call reuses them.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

model_id = "google/flan-t5-small"  # Extremely fast and CPU-friendly
tokenizer = AutoTokenizer.from_pretrained(model_id)
# flan-t5 is an encoder-decoder (seq2seq) model, hence AutoModelForSeq2SeqLM.
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Persona prefix prepended to every prompt sent to the model.
system_prompt = "You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, fast, emotionally tuned AI chatbot created by 𝕴 𝖆𝖒 π–π–Žπ–’. You reply quickly, like a chill and clever human friend."
10
def chat(history, message):
    """Generate one chatbot turn and append it to the conversation state.

    Parameters
    ----------
    history : list[tuple[str, str]] | None
        Accumulated (user_message, bot_reply) pairs from ``gr.State``;
        ``None`` on the first call before any state exists.
    message : str
        The user's new message.

    Returns
    -------
    tuple
        ``(history, history)`` — the same list twice, once for the
        ``gr.State`` output and once for the ``gr.Chatbot`` display.
    """
    import torch  # local import: only needed for the inference-mode guard

    # NOTE: only the current message is sent to the model; prior history is
    # stored for display but not included in the prompt.
    prompt = f"{system_prompt}\nUser: {message}\nAI:"
    inputs = tokenizer(prompt, return_tensors="pt")
    # inference_mode skips autograd bookkeeping — less memory and faster on
    # the CPU-only Space; generate() output is unchanged.
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=100)
    # Seq2seq decoding returns only the generated reply (the prompt is not
    # echoed), so no post-split of the text is needed.
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    # gr.State starts as None; normalize to a list before appending this turn.
    history = history or []
    history.append((message, reply))
    return history, history
19
 
20
# Assemble the UI declaratively, then start the Hugging Face Spaces server.
chat_ui = gr.Interface(
    fn=chat,
    inputs=[
        gr.State(),
        gr.Textbox(placeholder="Talk to 𝕴 𝖆𝖒 π–π–Žπ–’..."),
    ],
    outputs=[
        gr.State(),
        gr.Chatbot(label="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chatbot"),
    ],
    title="𝕴 𝖆𝖒 π–π–Žπ–’ β€” Superfast Chatbot",
    description="An extremely fast and chill AI chatbot, created by 𝕴 𝖆𝖒 π–π–Žπ–’. Running on Hugging Face Spaces (CPU only).",
    allow_flagging="never",
)

chat_ui.launch()