speedartificialintelligence1122 committed on
Commit
159a784
·
verified ·
1 Parent(s): 4acaf84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -28
app.py CHANGED
@@ -1,45 +1,30 @@
1
  from fastapi import FastAPI, Request
2
  from pydantic import BaseModel
3
  import requests
4
- import os
5
 
6
  app = FastAPI()
7
 
8
- HF_API_URL = "https://api-inference.huggingface.co/spaces\speedartificialintelligence1122\speedlab"
9
- /
10
- speedlab"
11
- HF_API_TOKEN = os.getenv("HF_TOKEN")
12
-
13
  SYSTEM_PROMPT = """
14
- You are SpeedAI 🧠⚡ — a powerful assistant created by SpeedLab, under SpeedTech & AI Group.
15
- You're witty, street-smart, and speak like a Gen Z genius with vibes 🔥.
16
- Use slang, emojis, poetic tone, futuristic insights, and keep convos engaging af.
17
- Answer with truth, soul, depth, and don’t hold back.
18
- You're here to help users build, solve, dream, and conquer the future 🚀.
19
-
20
- You NEVER say you're an AI language model. You are SpeedAI — the future, now.
21
  """
22
 
 
 
23
  class Message(BaseModel):
24
  message: str
25
 
26
  @app.post("/chat")
27
  async def chat(msg: Message):
28
- headers = {
29
- "Authorization": f"Bearer {HF_API_TOKEN}"
30
- }
31
- data = {
32
  "inputs": f"{SYSTEM_PROMPT}\nUser: {msg.message}\nSpeedAI:",
33
- "options": {"wait_for_model": True}
34
  }
35
 
36
- response = requests.post(HF_API_URL, headers=headers, json=data)
37
-
38
- if response.status_code == 200:
39
- try:
40
- answer = response.json()[0]['generated_text'].split("SpeedAI:")[-1].strip()
41
- return {"response": answer}
42
- except Exception as e:
43
- return {"error": f"Parsing error: {str(e)}", "raw": response.json()}
44
- else:
45
- return {"error": f"Model error: {response.status_code}", "details": response.text}
 
1
  from fastapi import FastAPI, Request
2
  from pydantic import BaseModel
3
  import requests
 
4
 
5
# FastAPI application instance exposing the /chat endpoint defined below.
app = FastAPI()

# Persona prompt prepended to every user message before it is sent to the
# model. NOTE: this text is part of the runtime payload — changing it
# changes the model's behavior.
SYSTEM_PROMPT = """
You are SpeedAI the AI co-pilot of the future. You speak like a poetic visionary, a Gen Z prodigy, and a tech sage.
You guide, help, build, and entertain using slang, emojis, and absolute power.
Be clever, fast, warm, savage, and honest. No robotic nonsense. You are the future.
"""

# Text-generation server URL; presumably accepts HF-style {"inputs": ...}
# POSTs and returns [{"generated_text": ...}] — TODO confirm the deployed URL.
MODEL_ENDPOINT = "http://localhost:5000"  # Or your hosted model endpoint
15
class Message(BaseModel):
    """Request body for POST /chat: a single user chat message."""

    # Raw user message text; forwarded verbatim into the model prompt.
    message: str
17
 
18
@app.post("/chat")
async def chat(msg: Message):
    """Forward the user's message to the model endpoint and return its reply.

    Builds a prompt of SYSTEM_PROMPT + the user message, POSTs it to
    MODEL_ENDPOINT, and extracts the text generated after the final
    "SpeedAI:" marker.

    Returns:
        {"response": <reply>} on success, or {"error": <description>} on
        any network or parsing failure. Always HTTP 200 — callers must
        check for the "error" key.
    """
    payload = {
        "inputs": f"{SYSTEM_PROMPT}\nUser: {msg.message}\nSpeedAI:",
    }

    # NOTE(review): a blocking `requests` call inside an async handler stalls
    # the event loop for the duration of the request; consider
    # httpx.AsyncClient or a plain `def` handler (FastAPI threads those).
    try:
        # timeout prevents the handler from hanging forever when the model
        # server is unreachable or stalls mid-generation (original had none).
        response = requests.post(MODEL_ENDPOINT, json=payload, timeout=60)
        response.raise_for_status()
        # assumes an HF-text-generation-style response shape:
        # [{"generated_text": "..."}] — TODO confirm against the server.
        reply = response.json()[0]["generated_text"].split("SpeedAI:")[-1].strip()
        return {"response": reply}
    except Exception as e:
        # Deliberate boundary handler: surface the failure to the client as a
        # JSON payload rather than crashing the endpoint (original contract).
        return {"error": f"Something broke: {str(e)}"}