Update app.py
Browse files
app.py
CHANGED
|
@@ -8,46 +8,25 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
|
|
| 8 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
|
| 9 |
|
| 10 |
def chat(message):
|
| 11 |
-
prompt =
|
| 12 |
-
You are π΄ ππ πππ β a fun, smooth, emotionally intelligent, and clever AI created by π΄ ππ πππ.
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
inputs = tokenizer(prompt, return_tensors="pt")
|
| 22 |
-
outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
|
| 23 |
-
result = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 24 |
-
return result.split("Now respond naturally to this message:")[-1].strip()
|
| 25 |
-
|
| 26 |
-
iface = gr.Interface(fn=chat, inputs="text", outputs="text")
|
| 27 |
-
iface.launch()from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 28 |
-
import torch
|
| 29 |
-
import gradio as gr
|
| 30 |
-
|
| 31 |
-
model_id = "microsoft/phi-1_5"
|
| 32 |
-
|
| 33 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 34 |
-
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
|
| 35 |
-
|
| 36 |
-
def chat(message):
|
| 37 |
-
prompt = f"""
|
| 38 |
-
You are π΄ ππ πππ β a fun, smooth, emotionally intelligent, and clever AI created by π΄ ππ πππ. You speak like a real person, not a robot. You donβt act like a therapist or a teacher. You reply like a calm, confident, warm friend who gets the vibe.
|
| 39 |
-
|
| 40 |
-
Your responses should sound like a chill human β sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.
|
| 41 |
-
|
| 42 |
-
Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be π΄ ππ πππ.
|
| 43 |
-
|
| 44 |
-
Now respond naturally to this message: {message}
|
| 45 |
-
"""
|
| 46 |
|
| 47 |
inputs = tokenizer(prompt, return_tensors="pt")
|
| 48 |
outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
|
| 49 |
result = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 50 |
return result.split("Now respond naturally to this message:")[-1].strip()
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
|
| 9 |
|
| 10 |
def chat(message):
    """Generate a persona-styled reply to *message* with the phi-1.5 model.

    Wraps the user's message in a fixed persona prompt, samples up to 200 new
    tokens, and returns only the newly generated text.

    Args:
        message: The user's chat message (plain string from the Gradio box).

    Returns:
        The model's generated reply as a stripped string.
    """
    prompt = (
        "You are π΄ ππ πππ β a fun, smooth, emotionally intelligent, and clever AI created by π΄ ππ πππ. "
        "You speak like a real person, not a robot. You donβt act like a therapist or a teacher. "
        "You reply like a calm, confident, warm friend who gets the vibe.\n\n"
        "Your responses should sound like a chill human β sometimes witty, sometimes deep, always grounded. "
        "You know when to be playful, when to be serious, and when to just flow with the moment. "
        "Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.\n\n"
        "Avoid saying \"as an AI\" or sounding fake. Be real. Be humanlike. Be π΄ ππ πππ.\n\n"
        f"Now respond naturally to this message: {message}"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    # Sampling (do_sample=True, temperature=0.7) keeps replies varied but coherent.
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
    # BUG FIX: decode only the tokens generated *after* the prompt. The previous
    # approach decoded the whole sequence and split on the marker string, which
    # returned "{message} <generated text>" — i.e. it echoed the user's own
    # message back at the start of every reply.
    prompt_len = inputs["input_ids"].shape[1]
    result = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return result.strip()
|
| 26 |
|
| 27 |
+
# Wire the chat function into a minimal text-in / text-out Gradio UI and
# start the local server.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="π΄ ππ πππ AI Chat",
)
demo.launch()
|