Update app.py

app.py CHANGED
@@ -116,11 +116,11 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
         if toxicity_score > threshold:
             print("threshold exceeded!! Launch intervention")
-
+            output_col = intervention_output(intervention)
         else:
             intervene = " "
 
-        return toxicity_score, classification_output, transcribed_text,
+        return toxicity_score, classification_output, transcribed_text, output_col
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         threshold = slider_logic(slider)
@@ -157,16 +157,17 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         if toxicity_score > threshold:
             print("threshold exceeded!! Launch intervention")
     return classify_anxiety
+
 def intervention_output(intervene):
     if intervene == "Audio File":
         print("audio updated")
-        return { out_aud : gr.update(value="calm.wav", visible=True, autoplay=True)
+        return { output_col : gr.update(visible=True), out_aud : gr.update(value="calm.wav", visible=True, autoplay=True)}
     elif intervene == "Therapy App":
         print("therapy app updated")
-        return { out_img : gr.update(value="hrv-breathing.gif", visible=True)}
+        return { output_col : gr.update(visible=True), out_img : gr.update(value="hrv-breathing.gif", visible=True)}
     elif intervene == "Text Message":
         phrase = positive_affirmations()
-        return { out_text : gr.update(visible=True, value=phrase)
+        return { output_col : gr.update(visible=True), out_text : gr.update(value=phrase, visible=True)}
     else:
         return " "
 
@@ -181,7 +182,7 @@ def positive_affirmations():
     return selected_affirm
 
 with gr.Blocks() as iface:
-
+    show_state = gr.State([])
     with gr.Column():
         anxiety_class = gr.Radio(["racism", "LGBTQ+ hate", "sexually explicit", "misophonia"])
         explit_preference = gr.Radio(choices=["N-Word", "B-Word", "All Explitives"], label="Words to omit from general anxiety classes", info="certain words may be acceptable within certain contexts for given groups of people, and some people may be unbothered by expletives broadly speaking.")
@@ -195,10 +196,10 @@ with gr.Blocks() as iface:
     with gr.Column():
         out_val = gr.Textbox()
         out_class = gr.Textbox()
-
-        out_text = gr.Textbox(visible=False)
-        out_img = gr.Image(value="hrv-breathing.gif", visible=False)
-        out_aud = gr.Audio(value="calm.wav", visible=False)
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text,
+        with gr.Column(visible=False) as output_col:
+            out_text = gr.Textbox(visible=False)
+            out_img = gr.Image(value="hrv-breathing.gif", visible=False)
+            out_aud = gr.Audio(value="calm.wav", visible=False)
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, output_col])
 
 iface.launch()
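The change above wires a new hidden gr.Column (output_col) around the intervention widgets and reveals it from intervention_output() by returning a dict of gr.update() calls. Below is a minimal, self-contained sketch of that pattern, assuming Gradio 3.x: the component names mirror the diff, but the handler body and radio choices are illustrative, and the Space's media files (calm.wav, hrv-breathing.gif) are left out so the sketch runs on its own.

import gradio as gr

def intervention_output(intervene):
    # Returning a dict keyed by component updates only the components named;
    # every other output listed in `outputs` is left untouched.
    if intervene == "Audio File":
        return {output_col: gr.update(visible=True), out_aud: gr.update(visible=True)}
    elif intervene == "Therapy App":
        return {output_col: gr.update(visible=True), out_img: gr.update(visible=True)}
    elif intervene == "Text Message":
        return {output_col: gr.update(visible=True),
                out_text: gr.update(value="You've got this.", visible=True)}
    return {output_col: gr.update(visible=False)}  # nothing selected: stay hidden

with gr.Blocks() as demo:
    intervention_type = gr.Radio(["Audio File", "Therapy App", "Text Message"],
                                 label="Intervention")
    submit_btn = gr.Button("Submit")
    # The child components must be indented under the `with` to live inside the column.
    with gr.Column(visible=False) as output_col:
        out_text = gr.Textbox(visible=False)
        out_img = gr.Image(visible=False)
        out_aud = gr.Audio(visible=False)
    # Every component the handler might update has to appear in `outputs`.
    submit_btn.click(fn=intervention_output, inputs=intervention_type,
                     outputs=[output_col, out_text, out_img, out_aud])

demo.launch()

One caveat on the committed code: a Gradio handler can return either positional values for every output or a single dict of updates, but not a mix. classify_toxicity now returns the intervention_output() dict as the fourth element of a plain tuple, which Gradio will pass to the column as a value rather than unpack, so folding everything into one flat dict is the safer shape.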