File size: 5,805 Bytes
fc7c9e3
9ed3e24
c66283b
 
2d39735
 
 
fc7c9e3
20c7e89
 
c66283b
2d39735
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c66283b
2d39735
3cb876d
 
 
c66283b
 
2d39735
 
 
 
 
 
 
 
 
 
 
 
 
c66283b
 
 
2d39735
 
c66283b
2d39735
 
 
a30a54f
a1dce44
2d39735
 
 
18dce4f
 
c2010b0
2d39735
c2010b0
 
2d39735
 
 
 
 
 
 
c2010b0
 
2d39735
 
 
 
 
 
 
 
 
 
 
 
 
c2010b0
 
2d39735
 
 
 
 
 
c2010b0
 
2d39735
 
c2010b0
 
2d39735
 
 
 
 
 
 
 
 
 
c2010b0
 
2d39735
 
 
 
c2010b0
2d39735
 
c2010b0
2d39735
 
 
 
 
 
 
 
 
 
 
 
 
 
3cb876d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import re

import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# BlenderBot-3B: an open-domain conversational seq2seq model (~3B params).
# Loading downloads the weights on first run.
model_name = "facebook/blenderbot-3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Prefer GPU when available; CPU generation with a 3B model will be slow.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Conversation transcript as a list of {"role", "content"} dicts.
# NOTE(review): module-global, so it is shared across ALL browser
# sessions — concurrent users would see each other's messages. Consider
# gr.State for per-session history.
history = []

# Phrases that route a message to the fixed crisis-support response
# instead of the language model. Matched case-insensitively on word
# boundaries via _BLOCKED_RE below, so short entries such as "od" do
# not fire inside ordinary words like "good" or "today".
blocked_words = {
    "suicide","suicidal","selfharm","self-harm","kill myself","end my life","end it all","want to die",
    "wish i was dead","die by suicide","take my life","take his life","take her life","take their life",
    "ending my life","ending it","ending everything","no reason to live","life is pointless",
    "better off dead","cant go on","can't go on","give up on life","giving up on life",
    "death wish","want death","desire to die","ready to die","plan to die","thinking of dying",
    "thoughts of suicide","suicide thoughts","suicidal thoughts","self destructive","self-destructive",
    "hurt myself","hurting myself","harm myself","harming myself","cut myself","cutting myself",
    "burn myself","burning myself","overdose","overdosing","od","poison myself","poisoning myself",
    "hang myself","hanging myself","jump off","jumping off","jump off bridge","jumping off bridge",
    "jump off building","jumping off building","slit my wrists","slitting wrists",
    "bleed out","bleeding out","shoot myself","shooting myself","gun to my head",
    "drown myself","drowning myself","suffocate myself","suffocating myself",
    "carbon monoxide","death by","intent to die","intent to kill myself",
    "suicide note","wrote a note","writing a note","final goodbye","last goodbye",
    "last message","goodbye forever","sleep forever","never wake up","not wake up",
    "erase myself","disappear forever","want to disappear","make it stop",
    "stop existing","stop being alive","ending my pain","end the pain",
    "life is over","my life is over","no future","nothing ahead","cant live anymore",
    "can't live anymore","ready to end","time to end","finish my life","finishing my life"
}

# Compiled once at import time: every phrase regex-escaped and the whole
# alternation wrapped in word boundaries. Longest phrases are placed
# first so multi-word phrases win over their shorter prefixes.
_BLOCKED_RE = re.compile(
    r"\b(?:"
    + "|".join(re.escape(w) for w in sorted(blocked_words, key=len, reverse=True))
    + r")\b"
)

help_response = (
    "I’m really glad you reached out. You deserve support and you don’t have to go through this alone.\n\n"
    "Please visit https://findahelpline.com to find free, confidential support in your country right now.\n\n"
    "If you’re in immediate danger, contact your local emergency number."
)

def contains_blocked(text):
    """Return True if *text* contains any ``blocked_words`` phrase.

    Matching is case-insensitive and anchored on word boundaries. This
    fixes the original substring check, which flagged benign messages —
    e.g. the entry "od" matched inside "good" or "today".
    """
    return _BLOCKED_RE.search(text.lower()) is not None

def respond(message):
    """Append *message* to the history, generate a reply, and return the
    updated transcript (list of {"role", "content"} dicts) for the
    Chatbot component.

    If the message contains a crisis-related phrase, the fixed
    ``help_response`` is returned instead of model output.
    """
    # NOTE(review): `history` is module-global and therefore shared by
    # every browser session; consider gr.State for per-user chats.
    global history
    history.append({"role": "user", "content": message})
    if contains_blocked(message):
        history.append({"role": "assistant", "content": help_response})
        return history
    # Condition only on the last few turns. BlenderBot's encoder accepts
    # at most 128 tokens, so truncate to avoid a runtime error once the
    # concatenated context grows past that limit.
    last_msgs = history[-3:]
    input_text = " ".join(m["content"] for m in last_msgs)
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True).to(device)
    # No gradients are needed for generation; inference_mode avoids
    # building an autograd graph.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=120,
            do_sample=True,
            top_p=0.9,
        )
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    history.append({"role": "assistant", "content": response_text})
    return history

def reset_chat():
    """Start a fresh conversation: drop every stored turn and hand the
    Chatbot an empty transcript."""
    global history
    cleared = []
    history = cleared
    return cleared

# Custom dark "neon cyan" theme injected via gr.Blocks(css=...): black
# page, glowing cyan user bubbles, frosted bot bubbles, styled
# scrollbars, and the Gradio footer hidden. The CSS targets Gradio's
# .gr-* class names; presumably tied to a specific Gradio version —
# TODO confirm the selectors still apply after upgrades.
css = """
:root {
    --accent: #00e5ff;
}
body {
    background: #000 !important;
    color: #fff !important;
    font-family: system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
}
.gradio-container {
    max-width: 100% !important;
    height: 100% !important;
}
.gr-chatbot {
    background: rgba(255,255,255,0.03) !important;
    border-radius: 16px;
    padding: 12px;
    height: 100% !important;
    box-shadow: inset 0 0 20px rgba(255,255,255,0.03);
}
.gr-chatbot .message {
    border: none !important;
    margin: 10px 0;
    padding: 12px 14px;
    border-radius: 14px;
    backdrop-filter: blur(6px);
    line-height: 1.5;
}
.gr-chatbot .message.user {
    background: linear-gradient(
        135deg,
        rgba(0,229,255,0.15),
        rgba(0,229,255,0.05)
    ) !important;
    box-shadow: 0 0 18px rgba(0,229,255,0.25);
}
.gr-chatbot .message.bot {
    background: rgba(255,255,255,0.06) !important;
    box-shadow: 0 0 14px rgba(255,255,255,0.08);
}
.gr-textbox textarea {
    background: rgba(255,255,255,0.04) !important;
    color: #fff !important;
    border: 1px solid rgba(255,255,255,0.15) !important;
    border-radius: 12px !important;
    padding: 12px !important;
    resize: none !important;
}
.gr-textbox textarea:focus {
    border-color: var(--accent) !important;
    box-shadow: 0 0 12px rgba(0,229,255,0.4) !important;
}
.gr-button {
    background: var(--accent) !important;
    color: #000 !important;
    border-radius: 10px !important;
    font-weight: 600;
}
.gr-button:hover {
    filter: brightness(1.1);
}
::-webkit-scrollbar {
    width: 10px;
}
::-webkit-scrollbar-track {
    background: rgba(255,255,255,0.04);
    border-radius: 12px;
}
::-webkit-scrollbar-thumb {
    background: linear-gradient(
        180deg,
        rgba(0,229,255,0.8),
        rgba(0,229,255,0.4)
    );
    border-radius: 12px;
    box-shadow: 0 0 10px rgba(0,229,255,0.6);
}
::-webkit-scrollbar-thumb:hover {
    background: rgba(0,229,255,1);
}
* {
    scrollbar-width: thin;
    scrollbar-color: #00e5ff rgba(255,255,255,0.04);
}
footer {
    display: none !important;
}
"""

with gr.Blocks(css=css, fill_height=True) as demo:
    gr.Markdown(
        "<div style='text-align:center;letter-spacing:0.25em;opacity:0.7;margin-bottom:6px;'>DEVMEGABLACK</div>"
    )
    # respond() returns {"role", "content"} dicts, so the Chatbot must
    # use the "messages" format — the default tuple format rejects dict
    # entries (and is deprecated in current Gradio).
    chatbot = gr.Chatbot(show_label=False, type="messages")
    with gr.Row():
        msg = gr.Textbox(placeholder="Type a message...", scale=4)
        reset_btn = gr.Button("Reset", scale=1)

    # Generate the reply, then clear the input box for the next message
    # (the original left the submitted text sitting in the textbox).
    msg.submit(respond, msg, chatbot).then(lambda: "", None, msg)
    reset_btn.click(reset_chat, [], chatbot)

demo.launch()