JohanBeytell committed on
Commit
d3049bb
·
verified ·
1 Parent(s): c3dcfd6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -0
app.py CHANGED
@@ -8,6 +8,7 @@ import pandas as pd
8
  import tempfile
9
  import time
10
  import os
 
11
 
12
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
13
  SEED = 1337
@@ -87,6 +88,14 @@ def generate_names(prompt, temperature, top_k, count, retries, seed, randomize_s
87
  random.seed(seed)
88
 
89
  prompt = prompt.strip()
 
 
 
 
 
 
 
 
90
  if not prompt:
91
  raise gr.Error("Prompt cannot be empty.")
92
  if len(prompt) > 64:
@@ -100,6 +109,13 @@ def generate_names(prompt, temperature, top_k, count, retries, seed, randomize_s
100
  for _ in range(count):
101
  for attempt in range(retries):
102
  name, t = sample_once(prompt, temperature=temperature, top_k=top_k)
 
 
 
 
 
 
 
103
  retry_count += 1
104
  if len(name) >= 3:
105
  results.append({"Generated Name": name, "Time (s)": f"{t:.2f}"})
 
8
  import tempfile
9
  import time
10
  import os
11
+ from valx import detect_profanity, detect_hate_speech
12
 
13
  DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
14
  SEED = 1337
 
88
  random.seed(seed)
89
 
90
  prompt = prompt.strip()
91
+ promptx = prompt.lower()
92
+ if detect_profanity([promptx], language='All'):
93
+ gr.Warning("Profanity detected in the prompt, using the default prompt.")
94
+ prompt = 'a kind king'
95
+ elif (hate_speech_result := detect_hate_speech(promptx)) and hate_speech_result[0] in ['Hate Speech', 'Offensive Speech']:
96
+ gr.Warning('Harmful speech detected in the prompt, using default prompt.')
97
+ prompt = 'a kind king'
98
+
99
  if not prompt:
100
  raise gr.Error("Prompt cannot be empty.")
101
  if len(prompt) > 64:
 
109
  for _ in range(count):
110
  for attempt in range(retries):
111
  name, t = sample_once(prompt, temperature=temperature, top_k=top_k)
112
+ namex = name.strip().lower()
113
+ if detect_profanity([namex], language='All'):
114
+ gr.Warning("Profanity detected in the generated name, flagging...")
115
+ rejected.append(name + " (Profanity Detected)")
116
+ elif (hate_speech_result := detect_hate_speech(namex)) and hate_speech_result[0] in ['Hate Speech', 'Offensive Speech']:
117
+ gr.Warning('Harmful speech detected in the generated name, flagging...')
118
+ rejected.append(name + " (Harmful Speech Detected)")
119
  retry_count += 1
120
  if len(name) >= 3:
121
  results.append({"Generated Name": name, "Time (s)": f"{t:.2f}"})