Trigger82 committed on
Commit
d2b430b
Β·
verified Β·
1 Parent(s): 41b4ee6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -17
app.py CHANGED
@@ -2,30 +2,32 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
- # Load model (CPU-friendly)
6
- tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
7
- model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
 
8
 
9
- # Style prompt
10
  PERSONA = """
11
- [System: You are 𝕴 𝖆𝖒 π–π–Žπ–’, a smooth, chill AI who replies with emotion and charm in under 15 words.]
 
12
  """
13
 
14
- # Function to format past conversation
15
- def format_prompt(message, history):
16
  prompt = PERSONA
17
  for user, bot in history[-3:]:
18
  prompt += f"\nYou: {user}\n𝕴 𝖆𝖒 π–π–Žπ–’: {bot}"
19
  prompt += f"\nYou: {message}\n𝕴 𝖆𝖒 π–π–Žπ–’:"
20
  return prompt
21
 
22
- # Chat function
23
- def chat(message, history):
24
- prompt = format_prompt(message, history)
25
  inputs = tokenizer(prompt, return_tensors="pt")
26
  outputs = model.generate(
27
  **inputs,
28
- max_new_tokens=48,
29
  temperature=0.9,
30
  top_k=50,
31
  do_sample=True,
@@ -33,18 +35,18 @@ def chat(message, history):
33
  )
34
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
35
  reply = result.split("𝕴 𝖆𝖒 π–π–Žπ–’:")[-1].split("\n")[0].strip()
36
- reply = reply[:80] + " 😊" if len(reply.split()) < 15 else reply[:80]
37
  history.append((message, reply))
38
  return history, history
39
 
40
- # UI
41
  with gr.Blocks() as demo:
42
- gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ πŸ€–βœ¨")
43
  chatbot = gr.Chatbot()
44
- msg = gr.Textbox(placeholder="Drop something smooth...", show_label=False)
45
  state = gr.State([])
46
 
47
- msg.submit(chat, [msg, state], [chatbot, state])
48
-
49
  demo.queue()
50
  demo.launch()
 
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load lightweight model (CPU-only friendly)
# Downloads/loads microsoft/phi-2 at import time in default precision;
# first start will be slow while weights are fetched and cached.
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
 
# Persona style: system preamble prepended to every prompt.
# NOTE: the bold-fraktur name "𝕴 𝖆𝖒 π–π–Žπ–’" is also used as the speaker
# tag and reply-split marker elsewhere in this file — keep spellings in sync.
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a fun, smooth, emotionally intelligent AI.
You speak like a chill human, not a bot. Keep replies under 15 words, natural, clever, emotional.]
"""
 
16
# Build prompt from history
def build_prompt(message, history):
    """Compose the full generation prompt.

    Starts from the PERSONA system text, replays up to the last three
    (user, bot) turns from *history*, then appends the new *message*
    followed by a trailing speaker tag for the model to complete.
    """
    pieces = [PERSONA]
    for user_turn, bot_turn in history[-3:]:
        pieces.append(f"\nYou: {user_turn}\n𝕴 𝖆𝖒 π–π–Žπ–’: {bot_turn}")
    pieces.append(f"\nYou: {message}\n𝕴 𝖆𝖒 π–π–Žπ–’:")
    return "".join(pieces)
23
 
24
# Generate a response
def generate(message, history):
    """Run one chat turn: build the prompt, sample a reply, update history.

    Parameters
    ----------
    message : str
        The user's new message.
    history : list[tuple[str, str]]
        Prior (user, bot) turns; mutated in place with the new turn.

    Returns
    -------
    tuple
        (history, history) — the same list twice, feeding both the
        Chatbot display and the State component in the Gradio UI.
    """
    prompt = build_prompt(message, history)
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=50,  # short completions keep CPU latency tolerable
        temperature=0.9,
        top_k=50,
        do_sample=True,
        # NOTE(review): the diff view hides one kwarg line here (new-file
        # line 34) — confirm against the full app.py before relying on
        # this listing being complete.
    )
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the model's continuation after the last persona tag,
    # truncated at the first newline.
    reply = result.split("𝕴 𝖆𝖒 π–π–Žπ–’:")[-1].split("\n")[0].strip()
    reply = " ".join(reply.split()[:15])  # max 15 words
    history.append((message, reply))
    return history, history
41
 
42
# Gradio UI
with gr.Blocks() as demo:
    # Heading shown above the chat widget.
    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ πŸ€–\n*Vibing on CPU only - Hugging Face Free Tier*")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your vibe...", show_label=False)
    # Conversation history as a list of (user, bot) tuples.
    state = gr.State([])

    # Enter in the textbox runs generate(); its two outputs refresh both
    # the visible chat and the stored history.
    msg.submit(generate, [msg, state], [chatbot, state])

demo.queue()
demo.launch()