Trigger82 committed on
Commit
c13009b
Β·
verified Β·
1 Parent(s): 48f88e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -70
app.py CHANGED
@@ -2,87 +2,49 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
# Small causal LM chosen so the Space can run acceptably on CPU.
_MODEL_ID = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
10
# System persona prepended to every generation prompt.
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a chill, smooth, confident, emotionally intelligent AI.
Speak like a witty friend, not a robot.
Keep replies natural, short (<15 words), human, and with emotional vibes: 😊 😏 πŸ€”]
"""


def format_context(history):
    """Render the persona plus the last three (user, bot) turns as a prompt.

    Returns the persona (stripped), then one "You:"/persona line pair per
    turn, each line newline-terminated.
    """
    lines = [PERSONA.strip()]
    for user_turn, bot_turn in history[-3:]:
        lines.append(f"You: {user_turn}")
        lines.append(f"𝕴 𝖆𝖒 π–π–Žπ–’: {bot_turn}")
    return "\n".join(lines) + "\n"
24
-
25
# Post-process a raw model reply: emoji flavor, contractions, length caps.
def add_vibes(response, user_msg):
    """Decorate *response* with emoji/follow-ups and normalize its length."""
    lowered = response.lower()
    if any(token in lowered for token in ("cool", "love", "great", "awesome", "!")):
        response += " 😊"
    elif "?" in user_msg or any(token in lowered for token in ("think", "why", "how")):
        response += " πŸ€”"

    # Bounce a short non-question answer back as a question.
    if "?" in user_msg and not response.endswith("?") and len(response.split()) < 10:
        response += " What do you think?"

    # Contractions read more naturally; cap at 15 words.
    response = response.replace("I am", "I'm").replace("You are", "You're")
    words = response.split()
    if len(words) > 15:
        response = " ".join(words[:15])
    # NOTE(review): "..." in this set can never match a single character;
    # kept as-is to preserve behavior exactly.
    if response and response[-1] not in {".", "!", "?", "..."}:
        response += "." if len(response) > 20 else "..."
    return response
42
-
43
# Produce one persona-styled reply for the incoming user message.
def generate_response(message, history):
    """Run the LM on the formatted context and return a polished reply.

    The reply is passed through add_vibes() and hard-capped at 80 chars.
    """
    prompt = format_context(history) + f"You: {message}\n𝕴 𝖆𝖒 π–π–Žπ–’:"
    encoded = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **encoded,
        max_new_tokens=48,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        repetition_penalty=1.1,
        pad_token_id=tokenizer.eos_token_id,
    )
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    reply = decoded.split("𝕴 𝖆𝖒 π–π–Žπ–’:")[-1].strip()
    # Drop any hallucinated continuation of the user's side of the dialogue.
    if "\nYou:" in reply:
        reply = reply.split("\nYou:")[0].strip()
    return add_vibes(reply, message)[:80]
61
-
62
# Gradio UI: chat window + input + reset button; history lives in gr.State.
with gr.Blocks(title="𝕴 𝖆𝖒 π–π–Žπ–’", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ 😎\n*Chill β€’ Confident β€’ Gets You*")
    chatbot = gr.Chatbot(height=300, bubble_full_width=False, label="Chat", type="tuples")
    msg = gr.Textbox(placeholder="Type here...", container=False)
    clear = gr.Button("New Vibe", size="sm")
    history_state = gr.State([])

    def user_input(message, history):
        # Clear the textbox and append the pending user turn (bot slot empty).
        return "", history + [[message, None]]

    def bot_reply(history):
        user_msg = history[-1][0]
        reply = generate_response(user_msg, history[:-1])
        updated = history[:-1] + [[user_msg, reply]]
        # BUG FIX: this handler feeds TWO outputs (chatbot, history_state),
        # so it must return two values — the original returned a single list.
        return updated, updated

    def clear_chat():
        # BUG FIX: two outputs (chatbot, history_state) -> two empty lists.
        return [], []

    msg.submit(user_input, [msg, history_state], [msg, history_state]).then(
        bot_reply, history_state, [chatbot, history_state]
    )
    clear.click(clear_chat, None, [chatbot, history_state])
    demo.load(lambda: [], None, history_state)

demo.queue()
demo.launch()
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
# Load tokenizer and model once at startup (CPU-friendly checkpoint).
_CHECKPOINT = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(_CHECKPOINT)
model = AutoModelForCausalLM.from_pretrained(_CHECKPOINT)
9
# Persona header injected at the top of every generation prompt.
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’, a smooth, chill AI who replies with emotion and charm in under 15 words.]
"""


def format_prompt(message, history):
    """Build the LM prompt: persona, the last three exchanges, then *message*.

    Ends with the persona tag so the model continues as the bot.
    """
    pieces = [PERSONA]
    for user_turn, bot_turn in history[-3:]:
        pieces.append(f"\nYou: {user_turn}\n𝕴 𝖆𝖒 π–π–Žπ–’: {bot_turn}")
    pieces.append(f"\nYou: {message}\n𝕴 𝖆𝖒 π–π–Žπ–’:")
    return "".join(pieces)
+
22
# Gradio chat handler: generate one reply, append the turn, and return the
# updated history twice (once for the Chatbot display, once for the State).
def chat(message, history):
    """Generate a reply for *message* given *history* of (user, bot) tuples."""
    encoded = tokenizer(format_prompt(message, history), return_tensors="pt")
    generated = model.generate(
        **encoded,
        max_new_tokens=48,
        temperature=0.9,
        top_k=50,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Keep only the first line spoken after the final persona tag.
    reply = decoded.split("𝕴 𝖆𝖒 π–π–Žπ–’:")[-1].split("\n")[0].strip()
    if len(reply.split()) < 15:
        reply = reply[:80] + " 😊"
    else:
        reply = reply[:80]
    history.append((message, reply))
    return history, history
39
+
40
# UI: minimal chat window wired to chat(); history is kept in gr.State.
with gr.Blocks() as demo:
    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ πŸ€–βœ¨")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Drop something smooth...", show_label=False)
    state = gr.State([])

    # FIX: clear the textbox after each submit — previously the typed
    # message stayed in the box and had to be deleted by hand.
    msg.submit(chat, [msg, state], [chatbot, state]).then(lambda: "", None, msg)

demo.queue()
demo.launch()