Spaces: Runtime error
Rob Caamano committed: App.py New Model
app.py CHANGED
@@ -9,13 +9,12 @@ st.title("Detecting Toxic Tweets")
 
 demo = """Your words are like poison. They seep into my mind and make me feel worthless."""
 
-text = st.text_area("Input
+text = st.text_area("Input Text", demo, height=250)
 
 model_options = {
     "DistilBERT Base Uncased (SST-2)": "distilbert-base-uncased-finetuned-sst-2-english",
-    "Fine-tuned Toxicity Model": "RobCaamano/
-    "Fine-tuned Toxicity Model
-    "Model 3.0": "RobCaamano/toxicity_RObert2"
+    "Fine-tuned Toxicity Model": "RobCaamano/toxicity",
+    "Fine-tuned Toxicity Model - Optimized": "RobCaamano/toxicity_optimized",
 }
 selected_model = st.selectbox("Select Model", options=list(model_options.keys()))
 
@@ -24,7 +23,7 @@ mod_name = model_options[selected_model]
 tokenizer = AutoTokenizer.from_pretrained(mod_name)
 model = AutoModelForSequenceClassification.from_pretrained(mod_name)
 
-if selected_model in ["Fine-tuned Toxicity Model", "Fine-tuned Toxicity Model
+if selected_model in ["Fine-tuned Toxicity Model", "Fine-tuned Toxicity Model - Optimized"]:
     toxicity_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
     model.config.id2label = {i: toxicity_classes[i] for i in range(model.config.num_labels)}
 
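The override above matters because transformers reads model.config.id2label when mapping output indices to class names. A minimal sketch of how the remapped labels are presumably consumed when scoring the input, reusing the tokenizer, model, and text defined in this file; applying a per-class sigmoid (multi-label, Jigsaw-style) is an assumption about the fine-tuned head, not something shown in this diff:

import torch

# Tokenize the user's input and run a forward pass.
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, num_labels)

# Assumption: multi-label head, so sigmoid per class rather than softmax.
probs = torch.sigmoid(logits)[0]
top = int(probs.argmax())
label = model.config.id2label[top]  # e.g. "toxic", thanks to the override
probability = float(probs[top])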
@@ -46,11 +45,11 @@ if st.button("Submit", type="primary"):
 column_name = "Prediction"
 
 if probability < 0.1:
-    st.write("This
+    st.write("This text is not toxic.")
 
 df = pd.DataFrame(
     {
-        "
+        "Text (portion)": [tweet_portion],
         column_name: [label],
         "Probability": [probability],
     }
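tweet_portion is not defined in the hunks shown; it presumably holds a truncated preview of the submitted text. A hedged sketch of how the table is likely assembled and rendered; the 50-character cutoff and the st.dataframe call are assumptions, not code from this commit:

import pandas as pd
import streamlit as st

MAX_CHARS = 50  # hypothetical cutoff; the real value is not in this diff
tweet_portion = text[:MAX_CHARS] + ("..." if len(text) > MAX_CHARS else "")

df = pd.DataFrame(
    {
        "Text (portion)": [tweet_portion],
        column_name: [label],
        "Probability": [probability],
    }
)
st.dataframe(df)  # assumed display call; only the frame literal appears above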
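For a quick check outside Streamlit, the fine-tuned checkpoint named in model_options can be exercised directly with the standard transformers pipeline. Whether the hosted RobCaamano/toxicity config already carries readable label names is an assumption; the app sets id2label by hand, so a bare pipeline may return generic LABEL_i names instead:

from transformers import pipeline

# Score the demo string with the fine-tuned model; top_k=None returns
# a score for every class instead of only the best one.
clf = pipeline("text-classification", model="RobCaamano/toxicity", top_k=None)
scores = clf("Your words are like poison. They seep into my mind and make me feel worthless.")
print(scores[0])  # list of {"label": ..., "score": ...} dicts, one per class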