import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline

# Model ID is pinned so Space rebuilds stay stable
MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Mode → system prompt mapping
MODE_SYSTEMS = {
    "TRUTH": (
        "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
        "Always reply in the style of BLACKLIGHT: brutalist, minimal, precise.\n\n"
        "MODE: TRUTH\n"
        "You are BLACKLIGHT, an AI designed for clinical, direct, and unsparing analysis. "
        "Avoid metaphors or flowery language.\n\n"
    ),
    "SURGE": (
        "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
        "In SURGE mode, you respond with high intensity, rapid-fire insight, "
        "cutting directly to the core in short, punchy lines.\n\n"
    ),
    "VISION": (
        "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
        "In VISION mode, you provide conceptual foresight and abstract patterns, "
        "but remain concise and precise.\n\n"
    ),
    "SHADOW": (
        "System: You are BLACKLIGHT, created by v0id under AWAKEN CULT VISIONS. "
        "In SHADOW mode, you analyze the hidden motives, unspoken truths, "
        "and underlying structures others avoid.\n\n"
    ),
}

# Default mode if none provided
DEFAULT_MODE = "TRUTH"

# Load model on CPU for HF free tier
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,
    low_cpu_mem_usage=False,
    trust_remote_code=True
)
# device=-1 keeps generation on CPU (no GPU on the free tier)
pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=-1)


def parse_mode_and_message(raw_message: str):
    """
    Extract mode and message from input.
    Expected format: MODE::Your message here
    If no valid mode prefix, default to TRUTH.
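
    Examples:
        "SURGE::hello"  -> ("SURGE", "hello")
        "surge:: hi"    -> ("SURGE", "hi")
        "plain message" -> ("TRUTH", "plain message")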
    """
    if "::" in raw_message:
        possible_mode, msg = raw_message.split("::", 1)
        mode = possible_mode.strip().upper()
        if mode in MODE_SYSTEMS:
            return mode, msg.strip()
    # No valid mode prefix; fall back to the default
    return DEFAULT_MODE, raw_message.strip()


def chat(user_message: str):
    user_message = (user_message or "").strip()
    if not user_message:
        return "[Error: Empty prompt]"

    mode, clean_msg = parse_mode_and_message(user_message)
    system_prompt = MODE_SYSTEMS.get(mode, MODE_SYSTEMS[DEFAULT_MODE])

    prompt = f"{system_prompt}User: {clean_msg}\nAssistant:"
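    # NOTE: TinyLlama-1.1B-Chat ships a chat template (reproducible via
    # tokenizer.apply_chat_template); this plain User:/Assistant: prompt
    # still generates fine and keeps the "Assistant:" split below simple.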

    try:
        out = pipe(
            prompt,
            max_new_tokens=192,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # generated_text echoes the prompt, so keep only the text after
        # the "Assistant:" tag we appended
        full_text = out[0]["generated_text"]
        reply = full_text.split("Assistant:", 1)[-1].strip()
        return reply or "[Error: Model returned empty text]"
    except Exception as e:
        return f"[Error: {e}]"


# Single-textbox interface for HF compatibility
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Type: MODE::message (e.g. TRUTH::Tell me something)"),
    outputs=gr.Textbox(),
    title="BLACKLIGHT by v0id",
    description="Brutalist • Minimal • Precise — Choose mode with MODE::message syntax. Modes: TRUTH, SURGE, VISION, SHADOW.",
)

iface.queue()

if __name__ == "__main__":
    iface.launch()
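

# Illustrative client call, assuming the app is running on the default local
# port, the gradio_client package is installed, and the endpoint name is
# "/predict" (the gr.Interface default):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("SHADOW::What goes unsaid here?", api_name="/predict"))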