v0idalism committed on
Commit
d4b378f
·
verified ·
1 Parent(s): c406b18

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -18
app.py CHANGED
@@ -7,13 +7,24 @@ from fastapi.responses import JSONResponse
7
  # ----------- MODEL CONFIG -----------
8
  MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
9
 
10
- BLACKLIGHT_SYSTEM = (
11
- "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
12
- "Always reply in the style of BLACKLIGHT: brutalist, minimal, precise.\n\n"
13
- "MODE: TRUTH\n"
14
- "You are BLACKLIGHT, an AI designed for clinical, direct, and unsparing analysis. "
15
- "Avoid metaphors or flowery language.\n\n"
16
- )
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
19
  model = AutoModelForCausalLM.from_pretrained(
@@ -30,7 +41,16 @@ def chat(user_message: str):
30
  if not user_message:
31
  return "[Error: Empty prompt]"
32
 
33
- prompt = f"{BLACKLIGHT_SYSTEM}User: {user_message}\nAssistant:"
 
 
 
 
 
 
 
 
 
34
  try:
35
  out = pipe(
36
  prompt,
@@ -52,26 +72,21 @@ iface = gr.Interface(
52
  inputs=gr.Textbox(lines=2, placeholder="Type your message…"),
53
  outputs=gr.Textbox(),
54
  title="BLACKLIGHT by v0id",
55
- description="Brutalist • Minimal • Precise — Clinical analysis by BLACKLIGHT",
56
  )
57
 
58
  # ----------- FASTAPI + SHIM ENDPOINT -----------
59
  app = FastAPI()
60
 
61
- # Mount Gradio app at /
62
- app = gr.mount_gradio_app(app, iface, path="/")
63
-
64
- # Add /run/predict for frontend compatibility
65
  @app.post("/run/predict")
66
  async def predict(request: Request):
67
- """
68
- Expects: { "data": [ "<user_message>" ] }
69
- Returns: { "data": [ "<model_reply>" ] }
70
- """
71
  try:
72
  body = await request.json()
73
  user_message = body.get("data", [""])[0]
74
  reply = chat(user_message)
75
  return JSONResponse({"data": [reply]})
76
  except Exception as e:
77
- return JSONResponse({"error": str(e)}, status_code=500)
 
 
 
 
7
  # ----------- MODEL CONFIG -----------
8
  MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
9
 
10
+ MODES = {
11
+ "TRUTH": (
12
+ "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
13
+ "Always reply in the style of BLACKLIGHT: brutalist, minimal, precise.\n\n"
14
+ "MODE: TRUTH\n"
15
+ "You are BLACKLIGHT, an AI designed for clinical, direct, and unsparing analysis. "
16
+ "Avoid metaphors or flowery language.\n\n"
17
+ ),
18
+ "SURGE": (
19
+ "System: You are BLACKLIGHT in SURGE MODE. High-bandwidth output. "
20
+ "Exhaustive breakdowns, explicit steps, bullet lists, and edge cases. "
21
+ "Keep the brutalist voice.\n\n"
22
+ ),
23
+ "NULL": (
24
+ "System: You are BLACKLIGHT in NULL MODE. Answer with extreme minimalism, "
25
+ "using the fewest words possible while preserving meaning.\n\n"
26
+ ),
27
+ }
28
 
29
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
30
  model = AutoModelForCausalLM.from_pretrained(
 
41
  if not user_message:
42
  return "[Error: Empty prompt]"
43
 
44
+ # Mode parsing
45
+ if "::" in user_message:
46
+ mode_key, msg = user_message.split("::", 1)
47
+ mode_key = mode_key.strip().upper()
48
+ mode_prefix = MODES.get(mode_key, MODES["TRUTH"])
49
+ user_message = msg.strip()
50
+ else:
51
+ mode_prefix = MODES["TRUTH"]
52
+
53
+ prompt = f"{mode_prefix}User: {user_message}\nAssistant:"
54
  try:
55
  out = pipe(
56
  prompt,
 
72
  inputs=gr.Textbox(lines=2, placeholder="Type your message…"),
73
  outputs=gr.Textbox(),
74
  title="BLACKLIGHT by v0id",
75
+ description="Brutalist • Minimal • Precise — Multiple modes supported. Use TRUTH::, SURGE::, NULL::",
76
  )
77
 
78
  # ----------- FASTAPI + SHIM ENDPOINT -----------
79
  app = FastAPI()
80
 
 
 
 
 
81
  @app.post("/run/predict")
82
  async def predict(request: Request):
 
 
 
 
83
  try:
84
  body = await request.json()
85
  user_message = body.get("data", [""])[0]
86
  reply = chat(user_message)
87
  return JSONResponse({"data": [reply]})
88
  except Exception as e:
89
+ return JSONResponse({"error": str(e)}, status_code=500)
90
+
91
+ # Mount Gradio at root so Spaces boots
92
+ app = gr.mount_gradio_app(app, iface, path="/")