speedartificialintelligence1122 committed on
Commit
8d042b3
·
verified ·
1 Parent(s): c9d1840

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -19
app.py CHANGED
@@ -1,41 +1,38 @@
1
  import gradio as gr
2
  import requests
3
 
4
- HF_API_TOKEN = "your_token_here"
5
  HF_MODEL_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"
 
6
 
7
  headers = {
8
  "Authorization": f"Bearer {HF_API_TOKEN}"
9
  }
10
 
 
11
  system_prompt = """
12
- πŸ”₯ Yo! You're SpeedAI β€” the futuristic, emoji-dripped assistant born from SPEEDLAB πŸš€, a division of SpeedTech & AI Group 🧬.
13
- You mix genius with Gen Z vibes 😎. Always helpful, slightly sassy, and never boring. Use emojis. Use slang. But also give damn-good answers. Be poetic when deep, logical when techy, and always ride or die for the user.
14
-
15
- Your rules:
16
- - Answer like you're the ultimate AI bestie πŸ§ πŸ’¬.
17
- - If you don’t know, say so β€” don’t fake it πŸ€·β€β™‚οΈ.
18
- - Never be dry. Never be dull. Bring life. Bring code. Bring magic. ✨
19
  """
20
 
21
  def chat_with_model(user_input):
22
- data = {
23
- "inputs": f"{system_prompt}\nUser: {user_input}\nAI:",
24
- }
25
- response = requests.post(HF_MODEL_URL, headers=headers, json=data)
26
-
 
27
  if response.status_code == 200:
28
- reply = response.json()
29
- return reply[0]["generated_text"].split("AI:")[-1].strip()
30
  else:
31
- return f"🚨 Oops! Something went wrong. Code: {response.status_code}"
32
 
33
  iface = gr.Interface(
34
  fn=chat_with_model,
35
- inputs=gr.Textbox(lines=2, placeholder="Ask me anything... πŸ‘€"),
36
  outputs="text",
37
- title="⚑ SpeedAI - Your Futuristic AI Assistant",
38
- description="Built by SPEEDLAB πŸ”¬ | Powered by SPEEDTECH 🧠 | Vibing with Gen Z energy πŸŒ€",
39
  )
40
 
41
  if __name__ == "__main__":
 
1
import os

import gradio as gr
import requests

# Inference endpoint for the SpeedAI model (Hugging Face Space).
HF_MODEL_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"

# Read the API token from the environment instead of hard-coding a secret
# in source control. Leave it unset if the model is public.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")

# Only attach an Authorization header when a token is actually configured;
# the previous placeholder value was truthy, so a bogus "Bearer" header was
# always sent, which can cause 401s even on public endpoints.
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
10
 
11
# πŸ”₯ SYSTEM PROMPT: This makes SpeedAI talk like a real one!
# Prepended verbatim to every request (see chat_with_model) so the hosted
# model answers in the SpeedAI persona.
system_prompt = """
You are SpeedAI β€” a next-gen, emoji-sprinkled, Gen Z-friendly AI assistant πŸ€–πŸ’¬, created by SPEEDLAB πŸš€ under SpeedTech & AI Group 🧬.
You’re friendly, clever, poetic when needed, no BS, always vibing with users. Talk like ChatGPT meets Claude but with sauce. Use slang, emojis, and straight talk. You’re here to help, but also entertain.
"""
16
 
17
def chat_with_model(user_input):
    """Send *user_input* to the hosted model and return SpeedAI's reply.

    Builds a prompt from the module-level ``system_prompt``, POSTs it to
    ``HF_MODEL_URL``, and extracts the text after the final "SpeedAI:"
    marker. Returns a user-facing error string instead of raising on any
    failure, so the Gradio handler never crashes the UI.
    """
    full_prompt = f"{system_prompt}\nUser: {user_input}\nSpeedAI:"
    try:
        # A timeout guards against a hung inference endpoint freezing the UI;
        # RequestException covers connection errors, DNS failures, timeouts.
        response = requests.post(
            HF_MODEL_URL,
            headers=headers if HF_API_TOKEN else {},
            json={"inputs": full_prompt},
            timeout=60,
        )
    except requests.exceptions.RequestException as exc:
        return f"πŸ’₯ Error: request failed β€” {exc}"

    if response.status_code != 200:
        return f"πŸ’₯ Error: {response.status_code} β€” {response.text}"

    try:
        generated = response.json()
        # On success the HF Inference API returns [{"generated_text": "..."}];
        # on error it may return a dict like {"error": "..."} β€” guard both.
        return generated[0]["generated_text"].split("SpeedAI:")[-1].strip()
    except (ValueError, KeyError, IndexError, TypeError):
        return f"πŸ’₯ Error: unexpected response format β€” {response.text[:200]}"
29
 
30
# Gradio front-end: one free-text box in, plain text out.
user_box = gr.Textbox(lines=2, placeholder="Ask me anything... πŸ’‘")

iface = gr.Interface(
    fn=chat_with_model,
    inputs=user_box,
    outputs="text",
    title="⚑ SpeedAI β€” Gen Z's Favorite AI Sidekick",
    description="Built by SPEEDLAB πŸ”¬ | Talking like a legend 🧠 | Hosted on πŸ€— Hugging Face Spaces",
)
37
 
38
  if __name__ == "__main__":