speedartificialintelligence1122 commited on
Commit
dd1b3db
·
verified ·
1 Parent(s): cb6b52c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -31
app.py CHANGED
@@ -1,39 +1,45 @@
1
- import gradio as gr
 
2
  import requests
 
3
 
4
- HF_MODEL_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"
5
- HF_API_TOKEN = "your_token_here" # Leave this blank if the model is public
6
 
7
- headers = {
8
- "Authorization": f"Bearer {HF_API_TOKEN}"
9
- }
 
10
 
11
- # πŸ”₯ SYSTEM PROMPT: This makes SpeedAI talk like a real one!
12
- system_prompt = """
13
- You are SpeedAI β€” a next-gen, emoji-sprinkled, Gen Z-friendly AI assistant πŸ€–πŸ’¬, created by SPEEDLAB πŸš€ under SpeedTech & AI Group 🧬.
14
- You’re friendly, clever, poetic when needed, no BS, always vibing with users. Talk like ChatGPT meets Claude but with sauce. Use slang, emojis, and straight talk. You’re here to help, but also entertain.
 
 
 
 
15
  """
16
 
17
- def chat_with_model(user_input):
18
- full_prompt = f"{system_prompt}\nUser: {user_input}\nSpeedAI:"
19
- response = requests.post(
20
- HF_MODEL_URL,
21
- headers=headers if HF_API_TOKEN else {},
22
- json={"inputs": full_prompt}
23
- )
 
 
 
 
 
 
 
 
24
  if response.status_code == 200:
25
- generated = response.json()
26
- return generated[0]["generated_text"].split("SpeedAI:")[-1].strip()
 
 
 
27
  else:
28
- return f"πŸ’₯ Error: {response.status_code} β€” {response.text}"
29
-
30
- iface = gr.Interface(
31
- fn=chat_with_model,
32
- inputs=gr.Textbox(lines=2, placeholder="Ask me anything... πŸ’‘"),
33
- outputs="text",
34
- title="⚑ SpeedAI β€” Gen Z's Favorite AI Sidekick",
35
- description="Built by SPEEDLAB πŸ”¬ | Talking like a legend 🧠 | Hosted on πŸ€— Hugging Face Spaces"
36
- )
37
-
38
- if __name__ == "__main__":
39
- iface.launch()
 
1
from fastapi import FastAPI, Request
from pydantic import BaseModel
import requests
import os

app = FastAPI()

# Hugging Face Inference API endpoint for the SpeedLab Space.
# NOTE(review): reconstructed — the committed value was split across several
# lines and used backslashes, which are never valid URL path separators.
HF_API_URL = "https://api-inference.huggingface.co/spaces/speedartificialintelligence1122/speedlab"

# Read the API token from the environment; None means "no token configured"
# (callers must not send an Authorization header in that case).
HF_API_TOKEN = os.getenv("HF_TOKEN")  # Set this in your environment variables

# Persona prompt prepended to every user message before it is sent upstream.
SYSTEM_PROMPT = """
You are SpeedAI 🧠⚡ — a powerful assistant created by SpeedLab, under SpeedTech & AI Group.
You're witty, street-smart, and speak like a Gen Z genius with vibes 🔥.
Use slang, emojis, poetic tone, futuristic insights, and keep convos engaging af.
Answer with truth, soul, depth, and don’t hold back.
You're here to help users build, solve, dream, and conquer the future 🚀.

You NEVER say you're an AI language model. You are SpeedAI — the future, now.
"""
22
 
23
class Message(BaseModel):
    """Request body for the /chat endpoint: a single user chat message."""

    message: str
25
+
26
@app.post("/chat")
async def chat(msg: Message):
    """Forward the user's message to the Hugging Face Inference API.

    Builds a prompt from SYSTEM_PROMPT plus the user's text, POSTs it to
    HF_API_URL, and returns either {"response": ...} on success or an
    {"error": ...} dict describing what went wrong.
    """
    # Only attach an Authorization header when a token is actually configured;
    # otherwise the upstream API would receive the literal string "Bearer None".
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}

    data = {
        "inputs": f"{SYSTEM_PROMPT}\nUser: {msg.message}\nSpeedAI:",
        "options": {"wait_for_model": True},
    }

    # A timeout is required: requests.post with no timeout can hang this
    # endpoint indefinitely if the upstream service stalls.
    response = requests.post(HF_API_URL, headers=headers, json=data, timeout=60)

    if response.status_code == 200:
        try:
            # The API echoes the full prompt; keep only the text after the
            # final "SpeedAI:" marker.
            answer = response.json()[0]['generated_text'].split("SpeedAI:")[-1].strip()
            return {"response": answer}
        except Exception as e:
            # Unexpected payload shape — return it raw for debugging rather
            # than crashing the endpoint.
            return {"error": f"Parsing error: {str(e)}", "raw": response.json()}
    else:
        return {"error": f"Model error: {response.status_code}", "details": response.text}