v0idalism committed (verified)
Commit b6e229b · Parent(s): 1b2b044

Update app.py

Files changed (1):
  app.py +40 -56
app.py CHANGED
@@ -1,66 +1,50 @@
-import requests
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-import uvicorn
-
-# Hugging Face Space API to call
-HF_SPACE_API = "https://v0idalism-blacklight-image.hf.space/run/predict"
-
-# System prompt
-SYSTEM_PROMPT = (
-    "You are BLACKLIGHT, a precision AI created by v0id under AWAKEN CULT VISIONS. "
-    "Respond in a brutalist, minimal, and precise style."
-)
-
-# Create app
-app = FastAPI()
-
-# Allow all origins (for your site frontend)
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-@app.get("/")
-def root():
-    return {"status": "ok", "message": "BLACKLIGHT API running"}
-
-@app.post("/predict")
-async def predict(request: Request):
-    data = await request.json()
-    user_prompt = data.get("prompt", "").strip()
-
-    if not user_prompt:
-        return {"error": "No prompt provided"}
-
-    # Merge system + user prompt
-    full_prompt = f"{SYSTEM_PROMPT}\n\nUser: {user_prompt}"
-
-    try:
-        # Call Hugging Face Space without tokens
-        resp = requests.post(
-            HF_SPACE_API,
-            json={"data": [full_prompt]},
-            timeout=60
-        )
-        resp.raise_for_status()
-        result = resp.json()
-    except Exception as e:
-        return {"error": f"Failed to contact HF Space: {str(e)}"}
-
-    # Extract output
-    try:
-        output_text = result["data"][0]
-        if isinstance(output_text, list):
-            output_text = output_text[0]
-    except Exception:
-        return {"error": "Unexpected response format", "raw": result}
-
-    return {"reply": output_text}
-
-# Hugging Face needs uvicorn.run in __main__
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=7860)
+import gradio as gr
+from transformers import pipeline
+
+# Load the model
+generator = pipeline(
+    "text-generation",
+    model="Qwen/Qwen2.5-7B-Instruct",  # Change to your actual HF model
+    device_map="auto"
+)
+
+# Persistent BLACKLIGHT style system prompt
+SYSTEM_PROMPT = (
+    "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
+    "Always reply in the style of BLACKLIGHT: brutalist, minimal, precise.\n\n"
+    "MODE: TRUTH\n"
+    "You are BLACKLIGHT, an AI designed for clinical, direct, and unsparing analysis. "
+    "Avoid metaphors or flowery language.\n\n"
+)
+
+# Chat function
+def chat_with_blacklight(user_message):
+    if not user_message.strip():
+        return "[Error: Empty prompt]"
+    try:
+        full_prompt = f"{SYSTEM_PROMPT}User: {user_message}\nAssistant:"
+        result = generator(
+            full_prompt,
+            max_new_tokens=200,
+            temperature=0.7,
+            top_p=0.9,
+            do_sample=True
+        )
+        return result[0]["generated_text"].replace(full_prompt, "").strip()
+    except Exception as e:
+        return f"[Error: {str(e)}]"
+
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=chat_with_blacklight,
+    inputs=gr.Textbox(lines=2, placeholder="Type your message..."),
+    outputs=gr.Textbox(),
+    title="BLACKLIGHT by v0id",
+    description="Brutalist Minimal Precise — Clinical analysis by BLACKLIGHT"
+)
+
+# Enable queue for slow model loading
+iface.queue()
+
+if __name__ == "__main__":
+    iface.launch()
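
The commit replaces the FastAPI proxy, which forwarded prompts to the blacklight-image Space over HTTP, with an in-process transformers pipeline served through a Gradio interface. Once the Space is running, the new interface can still be called programmatically. Below is a minimal client-side sketch using gradio_client; the Space id is an assumption (the diff does not state it), and the Space is assumed to be public:

from gradio_client import Client

# Hypothetical Space id; substitute the actual one.
client = Client("v0idalism/blacklight")
reply = client.predict(
    "Status report.",     # user_message passed to chat_with_blacklight
    api_name="/predict",  # default endpoint name for a gr.Interface
)
print(reply)

Because the app calls iface.queue(), requests are routed through Gradio's queue; gradio_client handles the queueing protocol transparently, so the predict call above simply blocks until the model returns.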