Spaces:
Runtime error
Rob Caamano committed: Update app.py
app.py CHANGED
@@ -5,10 +5,23 @@ from transformers import (
     TFAutoModelForSequenceClassification as AutoModelForSequenceClassification,
 )

-st.title("
+st.title("Classifier")

 demo = """Your words are like poison. They seep into my mind and make me feel worthless."""
-text = st.text_area("Input text", demo, height=
+text = st.text_area("Input text", demo, height=250)
+
+demo_options = {
+    "non-toxic": "Had a wonderful weekend at the park. Enjoyed the beautiful weather!",
+    "toxic": "WIP",
+    "severe_toxic": "WIP",
+    "obscene": "I don't give a fuck about your opinion",
+    "threat": "WIP",
+    "insult": "Are you always this incompetent?",
+    "identity_hate": "WIP",
+}
+
+selected_demo = st.selectbox("Demos", options=list(demo_options.keys()))
+text = st.text_area("Input text", demo_options[selected_demo], height=250)

 submit = False
 model_name = ""
@@ -39,13 +52,13 @@ if submit:

     if results['toxic'] >= 0.5:
         result_df = pd.DataFrame({
-            'Toxic':
+            'Toxic': 'Yes',
             'Toxicity Class': [max_class],
             'Probability': [probability]
         })
     else:
         result_df = pd.DataFrame({
-            'Toxic':
+            'Toxic': 'No',
             'Toxicity Class': 'This text is not toxic',
         })

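For context, the second hunk consumes names this diff never defines: `results`, `max_class`, and `probability`. Below is a minimal sketch of how a Streamlit toxicity app might produce them, assuming a TensorFlow multi-label model over the six Jigsaw toxicity labels that the `demo_options` keys mirror. `MODEL_NAME`, `LABELS`, and the overall layout are illustrative assumptions, not the Space's actual code.

```python
import pandas as pd
import streamlit as st
import tensorflow as tf
from transformers import (
    AutoTokenizer,
    TFAutoModelForSequenceClassification as AutoModelForSequenceClassification,
)

# Placeholder checkpoint; the real model id is not visible in this diff.
MODEL_NAME = "your-username/toxic-comment-model"
LABELS = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]

@st.cache_resource
def load_model(name: str):
    """Load the tokenizer and TF classification model once per session."""
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSequenceClassification.from_pretrained(name)
    return tokenizer, model

st.title("Classifier")
text = st.text_area("Input text", "Your words are like poison.", height=250)

if st.button("Submit"):
    tokenizer, model = load_model(MODEL_NAME)
    inputs = tokenizer(text, return_tensors="tf", truncation=True)
    logits = model(**inputs).logits[0]
    # Multi-label head: an independent sigmoid per label, not a softmax.
    probs = tf.math.sigmoid(logits).numpy()
    results = dict(zip(LABELS, probs))

    # The names the second hunk consumes.
    max_class = max(results, key=results.get)
    probability = results[max_class]

    if results['toxic'] >= 0.5:
        result_df = pd.DataFrame({
            'Toxic': 'Yes',  # scalar broadcasts against the list-valued columns
            'Toxicity Class': [max_class],
            'Probability': [probability],
        })
    else:
        # As committed, this branch passes only bare scalars, and pd.DataFrame
        # raises "If using all scalar values, you must pass an index";
        # wrapping the values in lists avoids that.
        result_df = pd.DataFrame({
            'Toxic': ['No'],
            'Toxicity Class': ['This text is not toxic'],
        })

    st.table(result_df)
```

One detail worth flagging: in the committed else branch both values are plain strings, so `pd.DataFrame` raises a `ValueError` as soon as a non-toxic input is scored, which is a plausible source of the "Runtime error" status shown at the top of this page.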