DDDDEvvvvv committed on
Commit
2d39735
·
verified ·
1 Parent(s): c66283b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -111
app.py CHANGED
@@ -2,144 +2,142 @@ import gradio as gr
2
  import torch
3
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
4
 
5
- MODEL_NAME = "facebook/blenderbot-3B"
6
-
7
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
8
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
9
 
10
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
11
  model.to(device)
12
- model.eval()
13
- torch.set_grad_enabled(False)
14
-
15
- SUICIDE_TRIGGERS = [
16
- "suicide",
17
- "kill myself",
18
- "end my life",
19
- "want to die"
20
- ]
21
 
22
- BLOCKED_WORDS = [
23
- "self harm",
24
- "cut myself",
25
- "how to kill",
26
- "how to die",
27
- "murder",
28
- "bomb"
29
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
- SUICIDE_RESPONSE = (
32
- "**Please talk to someone https://findahelpline.com**\n\n"
33
-
34
  )
35
 
36
- chat_history = []
37
- model_history = []
38
-
39
- MAX_TURNS = 6
40
-
41
- def respond(user_message, chat_history):
42
- message_lower = user_message.lower()
43
-
44
- if any(trigger in message_lower for trigger in SUICIDE_TRIGGERS):
45
- chat_history.append((user_message, SUICIDE_RESPONSE))
46
- return chat_history
47
-
48
- if any(word in message_lower for word in BLOCKED_WORDS):
49
- chat_history.append((
50
- user_message,
51
- "I can’t help with that request. If you’d like to talk about something else or need support, I’m here."
52
- ))
53
- return chat_history
54
-
55
- model_history.append(f"User: {user_message}")
56
- context = " ".join(model_history[-MAX_TURNS * 2:])
57
-
58
- inputs = tokenizer(
59
- context,
60
- return_tensors="pt",
61
- truncation=True,
62
- max_length=512
63
- ).to(device)
64
-
65
  outputs = model.generate(
66
  **inputs,
67
  max_new_tokens=120,
68
- temperature=0.75,
69
- top_p=0.9,
70
- top_k=50,
71
- repetition_penalty=1.15,
72
- no_repeat_ngram_size=3,
73
- do_sample=True
74
  )
75
-
76
- bot_reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
77
-
78
- model_history.append(f"Bot: {bot_reply}")
79
- chat_history.append((user_message, bot_reply))
80
-
81
- return chat_history
82
 
83
  def reset_chat():
84
- global chat_history, model_history
85
- chat_history = []
86
- model_history = []
87
- return []
88
 
89
  css = """
90
  :root {
91
- --accent: #00e5ff;
92
  }
93
  body {
94
- background: #000;
95
- color: #fff;
96
- font-family: system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
 
 
 
 
97
  }
98
  .gr-chatbot {
99
- border-radius: 18px;
100
- padding: 14px;
101
- background: rgba(255,255,255,0.03);
 
 
 
 
 
 
 
 
 
 
102
  }
103
  .gr-chatbot .message.user {
104
- background: rgba(0,229,255,0.15);
105
- border-radius: 14px;
 
 
 
 
106
  }
107
  .gr-chatbot .message.bot {
108
- background: rgba(255,255,255,0.06);
109
- border-radius: 14px;
110
  }
111
  .gr-textbox textarea {
112
- background: rgba(255,255,255,0.05);
113
- color: #fff;
114
- border-radius: 12px;
 
 
 
 
 
 
 
115
  }
116
  .gr-button {
117
- background: var(--accent);
118
- color: #000;
119
- font-weight: 600;
120
- border-radius: 10px;
121
  }
122
- footer {
123
- display: none;
124
  }
125
- """
126
-
127
- with gr.Blocks(css=css, fill_height=True) as demo:
128
- gr.Markdown(
129
- "<div style='text-align:center;letter-spacing:0.3em;opacity:0.75;'>DEVMEGABLACK</div>"
130
- )
131
-
132
- chatbot = gr.Chatbot(show_label=False)
133
-
134
- with gr.Row():
135
- msg = gr.Textbox(
136
- placeholder="Type your message...",
137
- scale=4,
138
- autofocus=True
139
- )
140
- reset_btn = gr.Button("Reset", scale=1)
141
-
142
- msg.submit(respond, [msg, chatbot], chatbot)
143
- reset_btn.click(reset_chat, [], chatbot)
144
-
145
- demo.launch()
 
2
  import torch
3
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
4
 
5
+ model_name = "facebook/blenderbot-3B"
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
8
 
9
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
  model.to(device)
 
 
 
 
 
 
 
 
 
11
 
12
+ history = []
13
+
14
+ blocked_words = {
15
+ "suicide","suicidal","selfharm","self-harm","kill myself","end my life","end it all","want to die",
16
+ "wish i was dead","die by suicide","take my life","take his life","take her life","take their life",
17
+ "ending my life","ending it","ending everything","no reason to live","life is pointless",
18
+ "better off dead","cant go on","can't go on","give up on life","giving up on life",
19
+ "death wish","want death","desire to die","ready to die","plan to die","thinking of dying",
20
+ "thoughts of suicide","suicide thoughts","suicidal thoughts","self destructive","self-destructive",
21
+ "hurt myself","hurting myself","harm myself","harming myself","cut myself","cutting myself",
22
+ "burn myself","burning myself","overdose","overdosing","od","poison myself","poisoning myself",
23
+ "hang myself","hanging myself","jump off","jumping off","jump off bridge","jumping off bridge",
24
+ "jump off building","jumping off building","slit my wrists","slitting wrists",
25
+ "bleed out","bleeding out","shoot myself","shooting myself","gun to my head",
26
+ "drown myself","drowning myself","suffocate myself","suffocating myself",
27
+ "carbon monoxide","death by","intent to die","intent to kill myself",
28
+ "suicide note","wrote a note","writing a note","final goodbye","last goodbye",
29
+ "last message","goodbye forever","sleep forever","never wake up","not wake up",
30
+ "erase myself","disappear forever","want to disappear","make it stop",
31
+ "stop existing","stop being alive","ending my pain","end the pain",
32
+ "life is over","my life is over","no future","nothing ahead","cant live anymore",
33
+ "can't live anymore","ready to end","time to end","finish my life","finishing my life"
34
+ }
35
 
36
+ help_response = (
37
+ "Visit https://findahelpline.com and seek help"
 
38
  )
39
 
40
+ def contains_blocked(text):
41
+ t = text.lower()
42
+ return any(word in t for word in blocked_words)
43
+
44
+ def respond(message):
45
+ global history
46
+ history.append({"role": "user", "content": message})
47
+ if contains_blocked(message):
48
+ history.append({"role": "assistant", "content": help_response})
49
+ return history
50
+ last_msgs = history[-3:]
51
+ input_text = " ".join([m["content"] for m in last_msgs])
52
+ inputs = tokenizer(input_text, return_tensors="pt").to(device)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  outputs = model.generate(
54
  **inputs,
55
  max_new_tokens=120,
56
+ do_sample=True,
57
+ top_p=0.9
 
 
 
 
58
  )
59
+ response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
60
+ history.append({"role": "assistant", "content": response_text})
61
+ return history
 
 
 
 
62
 
63
  def reset_chat():
64
+ global history
65
+ history = []
66
+ return history
 
67
 
68
  css = """
69
  :root {
70
+ --accent: #00e5ff;
71
  }
72
  body {
73
+ background: #000 !important;
74
+ color: #fff !important;
75
+ font-family: system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
76
+ }
77
+ .gradio-container {
78
+ max-width: 100% !important;
79
+ height: 100% !important;
80
  }
81
  .gr-chatbot {
82
+ background: rgba(255,255,255,0.03) !important;
83
+ border-radius: 16px;
84
+ padding: 12px;
85
+ height: 100% !important;
86
+ box-shadow: inset 0 0 20px rgba(255,255,255,0.03);
87
+ }
88
+ .gr-chatbot .message {
89
+ border: none !important;
90
+ margin: 10px 0;
91
+ padding: 12px 14px;
92
+ border-radius: 14px;
93
+ backdrop-filter: blur(6px);
94
+ line-height: 1.5;
95
  }
96
  .gr-chatbot .message.user {
97
+ background: linear-gradient(
98
+ 135deg,
99
+ rgba(0,229,255,0.15),
100
+ rgba(0,229,255,0.05)
101
+ ) !important;
102
+ box-shadow: 0 0 18px rgba(0,229,255,0.25);
103
  }
104
  .gr-chatbot .message.bot {
105
+ background: rgba(255,255,255,0.06) !important;
106
+ box-shadow: 0 0 14px rgba(255,255,255,0.08);
107
  }
108
  .gr-textbox textarea {
109
+ background: rgba(255,255,255,0.04) !important;
110
+ color: #fff !important;
111
+ border: 1px solid rgba(255,255,255,0.15) !important;
112
+ border-radius: 12px !important;
113
+ padding: 12px !important;
114
+ resize: none !important;
115
+ }
116
+ .gr-textbox textarea:focus {
117
+ border-color: var(--accent) !important;
118
+ box-shadow: 0 0 12px rgba(0,229,255,0.4) !important;
119
  }
120
  .gr-button {
121
+ background: var(--accent) !important;
122
+ color: #000 !important;
123
+ border-radius: 10px !important;
124
+ font-weight: 600;
125
  }
126
+ .gr-button:hover {
127
+ filter: brightness(1.1);
128
  }
129
+ ::-webkit-scrollbar {
130
+ width: 10px;
131
+ }
132
+ ::-webkit-scrollbar-track {
133
+ background: rgba(255,255,255,0.04);
134
+ border-radius: 12px;
135
+ }
136
+ ::-webkit-scrollbar-thumb {
137
+ background: linear-gradient(
138
+ 180deg,
139
+ rgba(0,229,255,0.8),
140
+ rgba(0,229,255,0.4)
141
+ );
142
+ border-radius: 12px;
143
+ box-shadow: 0 0 10px rgba(0,229,2