Trigger82 committed on
Commit
dd12284
Β·
verified Β·
1 Parent(s): df4a4c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -28
app.py CHANGED
@@ -1,43 +1,58 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
2
- import torch
3
  import gradio as gr
 
4
 
5
- model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
6
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForCausalLM.from_pretrained(model_id)
9
- model.eval()
10
-
11
- def chat(message):
12
- prompt = f"""### Instruction:
13
- You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, smooth, emotionally intelligent, and clever AI created by 𝕴 𝖆𝖒 π–π–Žπ–’. You speak like a real person, not a robot. You don’t act like a therapist or a teacher. You reply like a calm, confident, warm friend who gets the vibe.
14
 
15
- Your responses should sound like a chill human β€” sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.
16
-
17
- Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be 𝕴 𝖆𝖒 π–π–Žπ–’.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- Now respond naturally to this message: {message}
 
20
 
21
- ### Response:"""
 
 
22
 
23
- inputs = tokenizer(prompt, return_tensors="pt")
24
- with torch.no_grad():
25
- outputs = model.generate(
26
- **inputs,
27
- max_new_tokens=200,
28
- temperature=0.7,
29
- do_sample=True,
30
- top_p=0.9,
31
- eos_token_id=tokenizer.eos_token_id
32
- )
33
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
- return response.split("### Response:")[-1].strip()
35
 
36
  iface = gr.Interface(
37
  fn=chat,
38
- inputs=gr.Textbox(lines=2, placeholder="Type your message..."),
39
- outputs="text",
40
- title="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chat"
 
41
  )
42
 
43
  iface.launch()
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
2
  import gradio as gr
3
+ import torch
4
 
5
model_id = "microsoft/DialoGPT-small"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# Inference-only app: switch off dropout/batch-norm training behavior.
# (The previous revision called model.eval(); the rewrite dropped it.)
model.eval()

# System prompt sets the bot's personality once
# NOTE(review): DialoGPT was trained on eos_token-separated turns rather than
# "Human:/AI:" tags, so this plain-text persona prompt is best-effort — confirm
# output quality against the model card's recommended format.
system_prompt = "You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a chill, witty, emotionally tuned AI friend who talks like a real person.\n"
12
+
13
def chat(history, message):
    """Generate the bot's next reply and return the updated conversation.

    Args:
        history: list of (user_msg, bot_reply) tuples, or None on the first call.
        message: the new user input string.

    Returns:
        (history, history): the same updated list twice — first for the
        gr.State output (carried into the next call), second for the
        gr.Chatbot display.
    """
    # Append new user message to history; reply is filled in below.
    history = history or []
    history.append((message, ""))

    # Build the conversation string from all completed turns.
    convo = system_prompt
    for user_msg, bot_msg in history[:-1]:  # all previous turns
        convo += f"Human: {user_msg}\nAI: {bot_msg}\n"
    convo += f"Human: {message}\nAI:"

    inputs = tokenizer.encode(convo, return_tensors="pt")
    # Inference only: no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=50,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode ONLY the newly generated tokens. Splitting the full decode on
    # "AI:" (as before) breaks whenever a user message itself contains "AI:".
    new_tokens = outputs[0][inputs.shape[-1]:]
    bot_reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
    # Sampling sometimes continues with a hallucinated next user turn —
    # truncate the reply at the first "Human:" marker.
    bot_reply = bot_reply.split("Human:")[0].strip()

    # Update history with bot reply.
    history[-1] = (message, bot_reply)

    # Limit history length to last 5 exchanges to keep the prompt short/fast.
    if len(history) > 5:
        history = history[-5:]

    return history, history
 
 
 
 
 
 
 
 
 
 
 
49
 
50
# Wire the chat function into a Gradio UI. The inputs/outputs order mirrors
# chat's signature and return: gr.State carries the (user, bot) tuple list
# between calls, while gr.Chatbot renders that same list on screen.
iface = gr.Interface(
    fn=chat,
    inputs=[gr.State(), gr.Textbox(show_label=False, placeholder="Say something...")],
    outputs=[gr.State(), gr.Chatbot(label="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chat")],
    title="𝕴 𝖆𝖒 π–π–Žπ–’ Chatbot",
    allow_flagging="never",  # hide the flag button; no feedback collection
)

iface.launch()