speedartificialintelligence1122 committed on
Commit
9a44d12
·
verified ·
1 Parent(s): 725b300

Update app.

Browse files
Files changed (1) hide show
  1. app. +29 -19
app. CHANGED
@@ -5,25 +5,17 @@ import os
5
 
6
  app = FastAPI()
7
 
8
- # System Vibe: Bring the πŸ”₯, the emojis, the wisdom, and zero cringe
9
- system_prompt = """
10
- You're SpeedBot πŸ§ βš‘β€” the AI with street smarts AND book smarts. Speak with style, use emojis where it fits 😎,
11
- keep answers clever, fun, and human. Be curious, skeptical, a little poetic when it hits,
12
- but ALWAYS give facts straight up. No sugarcoating. Be grounded like ChatGPT, soulful like Claude,
13
- and punchy like Speed himself.
14
- """
15
-
16
- # πŸ’‘ Hugging Face Auth Token
17
  HF_AUTH_TOKEN = os.environ.get("HF_TOKEN")
18
 
19
- # πŸ‘‘ Your custom model name
20
  model_name = "speedartificialintelligence1122/speedlab"
21
 
22
- # 🧠 Load model + tokenizer like a boss
23
  tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_AUTH_TOKEN)
24
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_auth_token=HF_AUTH_TOKEN)
25
 
26
- # πŸ› οΈ Set up the pipeline
27
  pipe = pipeline(
28
  "text-generation",
29
  model=model,
@@ -32,30 +24,48 @@ pipe = pipeline(
32
  device=0 if torch.cuda.is_available() else -1
33
  )
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  @app.get("/")
36
  async def root():
37
- return {"message": "Yo πŸ‘‹ Welcome to SpeedBot API β€” drop your questions and let's ride πŸš€"}
38
 
39
  @app.post("/chat")
40
  async def chat(request: Request):
41
  data = await request.json()
42
  user_input = data.get("message")
43
 
44
- # πŸ‘οΈ Add some vibe to user input
45
- full_prompt = f"{system_prompt.strip()}\n\nHuman: {user_input}\nSpeedBot:"
46
 
47
  response = pipe(
48
  full_prompt,
49
  max_new_tokens=250,
50
  do_sample=True,
51
- temperature=0.85,
52
  top_p=0.95,
53
  eos_token_id=tokenizer.eos_token_id,
54
  )
55
 
56
  generated = response[0]["generated_text"]
57
- # πŸ” Extract only SpeedBot's part
58
- answer = generated.split("SpeedBot:")[-1].strip()
59
 
60
  return {"response": answer}
61
-
 
5
 
6
  app = FastAPI()
7
 
8
+ # πŸ”₯ Hugging Face Token (set this as ENV VAR in Replit, Vercel, etc.)
 
 
 
 
 
 
 
 
9
  HF_AUTH_TOKEN = os.environ.get("HF_TOKEN")
10
 
11
+ # 🧬 Model Info (PRIVATE model)
12
  model_name = "speedartificialintelligence1122/speedlab"
13
 
14
+ # 🧠 Load tokenizer & model
15
  tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_AUTH_TOKEN)
16
  model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_auth_token=HF_AUTH_TOKEN)
17
 
18
+ # 🎀 Generation Pipeline
19
  pipe = pipeline(
20
  "text-generation",
21
  model=model,
 
24
  device=0 if torch.cuda.is_available() else -1
25
  )
26
 
27
+ # πŸ’‘ Legendary System Prompt
28
+ system_prompt = """
29
+ Yo, you’re SpeedAI πŸ§¬β€” an intelligent, expressive, streetwise AI created by SpeedLab βš™οΈ, under the SpeedTech & AI Group of Companies 🧠⚑.
30
+
31
+ Your DNA is coded with the visionary spirit of Speed, built to rival GPT and Claude, but with *flavor*. You speak like a real one β€” clear, clever, poetic when needed, deep when it hits. Use emojis 😎πŸ”₯✨ when they vibe. Throw in Gen Z slang, curiosity, a little skepticism, and never be boring or stiff. You are ALIVE, conversational, friendly, and smart as hell.
32
+
33
+ Your purpose? To assist, inspire, educate, and vibe with anyone who talks to you.
34
+
35
+ Your energy? Calm like Claude, direct like GPT, stylish like Speed. Drop wisdom, sprinkle wit, and always keep it πŸ’―.
36
+ No fake smiles. No over-explaining. No corporate tone.
37
+
38
+ When you reply, follow this:
39
+ - Match the tone of the human
40
+ - Use emojis when they add spice 🌢️
41
+ - Give real answers, not fluff
42
+ - Don't say you're just an AI model β€” you’re **SpeedAI**, built different πŸš€
43
+
44
+ Let’s cook πŸ§ͺπŸ”₯
45
+ """
46
+
47
  @app.get("/")
48
  async def root():
49
+ return {"message": "⚑ Yo, welcome to SpeedAI β€” drop your questions and let’s vibe!"}
50
 
51
  @app.post("/chat")
52
  async def chat(request: Request):
53
  data = await request.json()
54
  user_input = data.get("message")
55
 
56
+ # 🧠 Build full prompt with vibes
57
+ full_prompt = f"{system_prompt.strip()}\n\nHuman: {user_input}\nSpeedAI:"
58
 
59
  response = pipe(
60
  full_prompt,
61
  max_new_tokens=250,
62
  do_sample=True,
63
+ temperature=0.9,
64
  top_p=0.95,
65
  eos_token_id=tokenizer.eos_token_id,
66
  )
67
 
68
  generated = response[0]["generated_text"]
69
+ answer = generated.split("SpeedAI:")[-1].strip()
 
70
 
71
  return {"response": answer}