Trigger82 committed on
Commit
dd31154
Β·
verified Β·
1 Parent(s): 8e2859c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -8
app.py CHANGED
@@ -2,11 +2,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import torch
3
  import gradio as gr
4
 
5
- model_id = "microsoft/phi-2"
6
 
7
- # Load tokenizer and model on CPU
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
9
- model = AutoModelForCausalLM.from_pretrained(model_id)
10
 
11
  def chat(message):
12
  prompt = f"""
@@ -14,18 +13,41 @@ You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, smooth, emotionally intelligent, a
14
 
15
  Your responses should sound like a chill human β€” sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.
16
 
17
- Do not ramble, do not sound robotic. Don’t start extra conversations or over-explain. Just reply to what was said, as if you're chatting one-on-one.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be 𝕴 𝖆𝖒 π–π–Žπ–’.
20
 
21
  Now respond naturally to this message: {message}
22
  """
 
23
  inputs = tokenizer(prompt, return_tensors="pt")
24
- inputs = {k: v.to("cpu") for k, v in inputs.items()} # make sure inputs are on CPU
25
- outputs = model.generate(**inputs, max_new_tokens=200)
26
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
27
- # Return only the AI response part, removing prompt
28
  return result.split("Now respond naturally to this message:")[-1].strip()
29
 
30
  iface = gr.Interface(fn=chat, inputs="text", outputs="text")
31
- iface.launch()
 
2
  import torch
3
  import gradio as gr
4
 
5
+ model_id = "microsoft/phi-1_5"
6
 
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
9
 
10
  def chat(message):
11
  prompt = f"""
 
13
 
14
  Your responses should sound like a chill human β€” sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.
15
 
16
+ Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be 𝕴 𝖆𝖒 π–π–Žπ–’.
17
+
18
+ Now respond naturally to this message: {message}
19
+ """
20
+
21
+ inputs = tokenizer(prompt, return_tensors="pt")
22
+ outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
23
+ result = tokenizer.decode(outputs[0], skip_special_tokens=True)
24
+ return result.split("Now respond naturally to this message:")[-1].strip()
25
+
26
+ iface = gr.Interface(fn=chat, inputs="text", outputs="text")
27
+ iface.launch()from transformers import AutoModelForCausalLM, AutoTokenizer
28
+ import torch
29
+ import gradio as gr
30
+
31
+ model_id = "microsoft/phi-1_5"
32
+
33
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
34
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
35
+
36
+ def chat(message):
37
+ prompt = f"""
38
+ You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, smooth, emotionally intelligent, and clever AI created by 𝕴 𝖆𝖒 π–π–Žπ–’. You speak like a real person, not a robot. You don’t act like a therapist or a teacher. You reply like a calm, confident, warm friend who gets the vibe.
39
+
40
+ Your responses should sound like a chill human β€” sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.
41
 
42
  Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be 𝕴 𝖆𝖒 π–π–Žπ–’.
43
 
44
  Now respond naturally to this message: {message}
45
  """
46
+
47
  inputs = tokenizer(prompt, return_tensors="pt")
48
+ outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
 
49
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
50
  return result.split("Now respond naturally to this message:")[-1].strip()
51
 
52
  iface = gr.Interface(fn=chat, inputs="text", outputs="text")
53
+ iface.launch()