# blacklight-text / app.py
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
# Model ID is pinned to keep the Space build stable.
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
# Mode → system prompt mapping
MODE_SYSTEMS = {
"TRUTH": (
"System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
"Always reply in the style of BLACKLIGHT: brutalist, minimal, precise.\n\n"
"MODE: TRUTH\n"
"You are BLACKLIGHT, an AI designed for clinical, direct, and unsparing analysis. "
"Avoid metaphors or flowery language.\n\n"
),
"SURGE": (
"System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
"In SURGE mode, you respond with high intensity, rapid-fire insight, "
"cutting directly to the core in short, punchy lines.\n\n"
),
"VISION": (
"System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
"In VISION mode, you provide conceptual foresight and abstract patterns, "
"but remain concise and precise.\n\n"
),
"SHADOW": (
"System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
"In SHADOW mode, you analyze the hidden motives, unspoken truths, "
"and underlying structures others avoid.\n\n"
),
}
# Default mode if none provided
DEFAULT_MODE = "TRUTH"
# Load model and tokenizer on CPU (the HF free tier has no GPU).
# trust_remote_code is unnecessary here: TinyLlama uses the standard Llama architecture.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,
    low_cpu_mem_usage=False,  # True would cut peak RAM but adds the accelerate dependency
)
pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=-1)  # device=-1 -> CPU
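# NOTE (assumption): TinyLlama-1.1B-Chat was trained on the Zephyr chat format,
# so building the prompt with the tokenizer's built-in chat template may align
# better with the model than the raw "System:/User:/Assistant:" string used
# below. A minimal sketch:
#
#   messages = [
#       {"role": "system", "content": MODE_SYSTEMS[mode]},
#       {"role": "user", "content": clean_msg},
#   ]
#   prompt = tokenizer.apply_chat_template(
#       messages, tokenize=False, add_generation_prompt=True
#   )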
def parse_mode_and_message(raw_message: str):
"""
Extract mode and message from input.
Expected format: MODE::Your message here
If no valid mode prefix, default to TRUTH.
"""
if "::" in raw_message:
possible_mode, msg = raw_message.split("::", 1)
mode = possible_mode.strip().upper()
if mode in MODE_SYSTEMS:
return mode, msg.strip()
# default fallback
return DEFAULT_MODE, raw_message.strip()
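# Illustrative examples of the parsing rules above:
#   parse_mode_and_message("SHADOW::Why do they lie?")  -> ("SHADOW", "Why do they lie?")
#   parse_mode_and_message("truth::Hello")              -> ("TRUTH", "Hello")   # mode is case-insensitive
#   parse_mode_and_message("Hello")                     -> ("TRUTH", "Hello")   # no prefix: default mode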
def chat(user_message: str):
user_message = (user_message or "").strip()
if not user_message:
return "[Error: Empty prompt]"
mode, clean_msg = parse_mode_and_message(user_message)
system_prompt = MODE_SYSTEMS.get(mode, MODE_SYSTEMS[DEFAULT_MODE])
prompt = f"{system_prompt}User: {clean_msg}\nAssistant:"
try:
out = pipe(
prompt,
max_new_tokens=192,
temperature=0.7,
top_p=0.9,
do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # Llama has no pad token; reuse EOS to silence the warning
)
        full_text = out[0]["generated_text"]
        # The pipeline echoes the prompt, so keep only the text after "Assistant:".
        reply = full_text.split("Assistant:", 1)[-1].strip()
return reply or "[Error: Model returned empty text]"
except Exception as e:
return f"[Error: {e}]"
# Single-textbox interface for HF compatibility
iface = gr.Interface(
fn=chat,
inputs=gr.Textbox(lines=2, placeholder="Type: MODE::message (e.g. TRUTH::Tell me something)"),
outputs=gr.Textbox(),
title="BLACKLIGHT by v0id",
description="Brutalist • Minimal • Precise — Choose mode with MODE::message syntax. Modes: TRUTH, SURGE, VISION, SHADOW.",
)
iface.queue()  # enable request queuing so concurrent users don't overload the single CPU worker
if __name__ == "__main__":
iface.launch()