Update app.py
Browse files
app.py
CHANGED
|
@@ -1,41 +1,38 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import requests
|
| 3 |
|
| 4 |
-
HF_API_TOKEN = "your_token_here"
|
| 5 |
HF_MODEL_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"
|
|
|
|
| 6 |
|
| 7 |
headers = {
|
| 8 |
"Authorization": f"Bearer {HF_API_TOKEN}"
|
| 9 |
}
|
| 10 |
|
|
|
|
| 11 |
system_prompt = """
|
| 12 |
-
|
| 13 |
-
You
|
| 14 |
-
|
| 15 |
-
Your rules:
|
| 16 |
-
- Answer like you're the ultimate AI bestie π§ π¬.
|
| 17 |
-
- If you donβt know, say so β donβt fake it π€·ββοΈ.
|
| 18 |
-
- Never be dry. Never be dull. Bring life. Bring code. Bring magic. β¨
|
| 19 |
"""
|
| 20 |
|
| 21 |
def chat_with_model(user_input):
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
|
|
|
| 27 |
if response.status_code == 200:
|
| 28 |
-
|
| 29 |
-
return
|
| 30 |
else:
|
| 31 |
-
return f"
|
| 32 |
|
| 33 |
iface = gr.Interface(
|
| 34 |
fn=chat_with_model,
|
| 35 |
-
inputs=gr.Textbox(lines=2, placeholder="Ask me anything...
|
| 36 |
outputs="text",
|
| 37 |
-
title="β‘ SpeedAI
|
| 38 |
-
description="Built by SPEEDLAB π¬ |
|
| 39 |
)
|
| 40 |
|
| 41 |
if __name__ == "__main__":
|
|
|
|
# SpeedAI — a small Gradio front-end that forwards chat input to a
# Hugging Face hosted endpoint and renders the model's reply.
import os

import gradio as gr
import requests

# NOTE(review): this URL targets a Space path; the serverless Inference API
# normally lives under /models/<repo> — confirm this endpoint actually speaks
# the {"inputs": ...} inference protocol.
HF_MODEL_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"

# Read the token from the environment instead of hard-coding a secret.
# The empty default also fixes the "public model" path: chat_with_model()
# only attaches the Authorization header when the token is truthy, and the
# original placeholder string "your_token_here" was truthy, so a bogus
# Bearer header was sent on every request.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")  # leave unset if the model is public

headers = {
    "Authorization": f"Bearer {HF_API_TOKEN}"
}

# 🔥 SYSTEM PROMPT: persona text prepended to every user message.
# (Emoji/punctuation below are best-effort reconstructions of mojibake in
# the source — confirm against the original file.)
system_prompt = """
You are SpeedAI — a next-gen, emoji-sprinkled, Gen Z-friendly AI assistant 🤖💬, created by SPEEDLAB 🚀 under SpeedTech & AI Group 🧬.
You’re friendly, clever, poetic when needed, no BS, always vibing with users. Talk like ChatGPT meets Claude but with sauce. Use slang, emojis, and straight talk. You’re here to help, but also entertain.
"""
|
def chat_with_model(user_input):
    """Send *user_input*, prefixed with the system prompt, to the HF endpoint.

    Returns the model's reply text on success, or a human-readable error
    string otherwise — the Gradio interface renders whatever string comes
    back, so this function must never raise.
    """
    full_prompt = f"{system_prompt}\nUser: {user_input}\nSpeedAI:"
    try:
        # Only attach the Authorization header when a token is configured.
        # A timeout keeps the UI from hanging forever on a dead endpoint
        # (the original call had none).
        response = requests.post(
            HF_MODEL_URL,
            headers=headers if HF_API_TOKEN else {},
            json={"inputs": full_prompt},
            timeout=60,
        )
    except requests.RequestException as exc:
        return f"🔥 Error: request failed — {exc}"

    if response.status_code != 200:
        return f"🔥 Error: {response.status_code} — {response.text}"

    generated = response.json()
    # Success payload is [{"generated_text": "<prompt + reply>"}]; the API
    # reports errors (e.g. model still loading) as a dict instead of a list,
    # which used to crash the original unguarded indexing.
    try:
        text = generated[0]["generated_text"]
    except (KeyError, IndexError, TypeError):
        return f"🔥 Error: unexpected response shape — {generated!r}"
    # Keep only what the model produced after the final "SpeedAI:" marker,
    # stripping the echoed prompt.
    return text.split("SpeedAI:")[-1].strip()
|
| 29 |
|
| 30 |
iface = gr.Interface(
|
| 31 |
fn=chat_with_model,
|
| 32 |
+
inputs=gr.Textbox(lines=2, placeholder="Ask me anything... π‘"),
|
| 33 |
outputs="text",
|
| 34 |
+
title="β‘ SpeedAI β Gen Z's Favorite AI Sidekick",
|
| 35 |
+
description="Built by SPEEDLAB π¬ | Talking like a legend π§ | Hosted on π€ Hugging Face Spaces"
|
| 36 |
)
|
| 37 |
|
| 38 |
if __name__ == "__main__":
|