OatNapat committed on
Commit
82968da
·
1 Parent(s): f8a06c1

Upload 3 files

Browse files
Files changed (3) hide show
  1. README (1).md +12 -0
  2. app (3).py +149 -0
  3. requirements (3).txt +4 -0
README (1).md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Sentiment Analysis App
3
+ emoji: 🌖
4
+ colorFrom: blue
5
+ colorTo: blue
6
+ sdk: streamlit
7
+ sdk_version: 1.17.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app (3).py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
4
+
5
# Helper to fetch a fine-tuned checkpoint from the Hugging Face hub.

def load_finetune_model(model_name):
    """Return a (tokenizer, model) pair for the given hub checkpoint name."""
    return (
        AutoTokenizer.from_pretrained(model_name),
        AutoModelForSequenceClassification.from_pretrained(model_name),
    )
11
def load_model(model_name):
    """Build a sentiment-analysis pipeline backed by the named hub checkpoint."""
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForSequenceClassification.from_pretrained(model_name)
    return pipeline("sentiment-analysis", tokenizer=tok, model=mdl)
17
+
18
# ---- Streamlit UI: page header and text input ----
st.title("Multi-label Toxicity Detection App")
st.write("Enter a text and select the fine-tuned model to get the toxicity analysis.")

# Text box pre-populated with an example so the demo works out of the box.
default_text = "You might be the most stupid person in the world."
text = st.text_input("Enter your text:", value=default_text)

# Map the raw pipeline labels of the toxicity model to readable category names.
category = {
    'LABEL_0': 'toxic',
    'LABEL_1': 'severe_toxic',
    'LABEL_2': 'obscene',
    'LABEL_3': 'threat',
    'LABEL_4': 'insult',
    'LABEL_5': 'identity_hate',
}
27
+
28
# ---- Model selection ----
# Each entry maps a hub model id to metadata shown to the user. Only the
# toxicity model lacks a "labels" key; the sentiment models list theirs.
model_options = {
    "Olivernyu/finetuned_bert_base_uncased": {
        "description": "This model detects different types of toxicity like threats, obscenity, insults, and identity-based hate in text. The table is prepopulated with some data, the table will be displayed once you hit analyze.",
    },
    "distilbert-base-uncased-finetuned-sst-2-english": {
        "labels": ["NEGATIVE", "POSITIVE"],
        "description": "This model classifies text into positive or negative sentiment. It is based on DistilBERT and fine-tuned on the Stanford Sentiment Treebank (SST-2) dataset.",
    },
    "textattack/bert-base-uncased-SST-2": {
        "labels": ["LABEL_0", "LABEL_1"],
        "description": "This model classifies text into positive(LABEL_1) or negative(LABEL_0) sentiment. It is based on BERT and fine-tuned on the Stanford Sentiment Treebank (SST-2) dataset.",
    },
    "cardiffnlp/twitter-roberta-base-sentiment": {
        "labels": ["LABEL_0", "LABEL_1", "LABEL_2"],
        "description": "This model classifies tweets into negative (LABEL_0), neutral(LABEL_1), or positive(LABEL_2) sentiment. It is based on RoBERTa and fine-tuned on a large dataset of tweets.",
    },
}

# Selectbox iterates the dict, so options appear in insertion order.
selected_model = st.selectbox("Choose a fine-tuned model:", model_options)

st.write("### Model Information")
st.write(f"**Description:** {model_options[selected_model]['description']}")
51
+
52
# Seed rows shown in the results table before the user runs any analysis.
# Each record maps column name -> scalar cell value (previously every text/label
# cell was wrapped in a single-element list, which only worked because
# pd.DataFrame broadcast the scalars — flattened here for clarity).
initial_table_data = [
    {'Text (portion)': "who's speaking? \n you goddamn cocksucker you know ",
     'Toxicity class 1': 'obscene',
     'Class 1 probability': 0.7282997369766235,
     'Toxicity class 2': 'toxic',
     'Class 2 probability': 0.2139672487974167},
    {'Text (portion)': '::Here is another source: Melissa Sue Halverson (2',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.24484945833683014,
     'Toxicity class 2': 'obscene',
     'Class 2 probability': 0.1627064049243927},
    {'Text (portion)': ', 8 November 2007 (UTC) \n\n All I can say is, havin',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.7277262806892395,
     'Toxicity class 2': 'obscene',
     'Class 2 probability': 0.2502792477607727},
    {'Text (portion)': '::::I only see that at birth two persons are given',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.2711867094039917,
     'Toxicity class 2': 'insult',
     'Class 2 probability': 0.15477754175662994},
    {'Text (portion)': "* There you have it: one man's Barnstar is another",
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.5408656001091003,
     'Toxicity class 2': 'insult',
     'Class 2 probability': 0.12563346326351166},
    {'Text (portion)': '" \n\n == Fact == \n\n Could just be abit of trivial f',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.35239243507385254,
     'Toxicity class 2': 'obscene',
     'Class 2 probability': 0.1686778962612152},
    {'Text (portion)': 'HE IS A GHAY ASS FUCKER@@!!',
     'Toxicity class 1': 'obscene',
     'Class 1 probability': 0.7819343209266663,
     'Toxicity class 2': 'toxic',
     'Class 2 probability': 0.16951803863048553},
    {'Text (portion)': "I'VE SEEN YOUR CRIMES AGAINST CHILDREN AND I'M ASH",
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.8491994738578796,
     'Toxicity class 2': 'threat',
     'Class 2 probability': 0.04749392718076706},
    {'Text (portion)': ':While with a lot of that essay says, general time',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.282654732465744,
     'Toxicity class 2': 'obscene',
     'Class 2 probability': 0.15901680290699005},
    {'Text (portion)': '== Help == \n\n Please members of wiki, help me. My ',
     'Toxicity class 1': 'toxic',
     'Class 1 probability': 0.3118911385536194,
     'Toxicity class 2': 'obscene',
     'Class 2 probability': 0.16506287455558777},
]

# Build the seed DataFrame in one call. The original appended row-by-row with
# pd.concat inside a loop, which is quadratic and deprecated pandas style.
initial_table_df = pd.DataFrame(
    initial_table_data,
    columns=["Text (portion)", "Toxicity class 1", "Class 1 probability",
             "Toxicity class 2", "Class 2 probability"],
)
105
# ---- Analysis ----
# Persist the results table across Streamlit reruns via session state.
if "table" not in st.session_state:
    st.session_state['table'] = initial_table_df

if st.button("Analyze"):
    if not text:
        st.write("Please enter a text.")
    else:
        with st.spinner("Analyzing toxicity..."):
            if selected_model == "Olivernyu/finetuned_bert_base_uncased":
                toxicity_detector = load_model(selected_model)
                # top_k=2: the pipeline returns the two highest-scoring labels.
                outputs = toxicity_detector(text, top_k=2)
                # Translate raw LABEL_n ids to readable toxicity category names.
                results = [(category[item['label']], item['score']) for item in outputs]

                # One new row: truncated input text plus the top-2 classes and
                # their scores. All values are single-element lists so the
                # DataFrame columns are built consistently (the original mixed
                # lists and bare scalars, and used pointless f-prefixes on
                # literal column names).
                table_data = {
                    "Text (portion)": [text[:50]],
                    "Toxicity class 1": [results[0][0]],
                    "Class 1 probability": [results[0][1]],
                    "Toxicity class 2": [results[1][0]],
                    "Class 2 probability": [results[1][1]],
                }
                # Prepend so the newest result appears at the top of the table.
                st.session_state['table'] = pd.concat(
                    [pd.DataFrame(table_data), st.session_state['table']],
                    ignore_index=True,
                )
                st.table(st.session_state['table'])
            else:
                # Binary/ternary sentiment models: show label and confidence.
                sentiment_pipeline = load_model(selected_model)
                result = sentiment_pipeline(text)
                st.write(f"Sentiment: {result[0]['label']} (confidence: {result[0]['score']:.2f})")
                # Celebrate confident positives; call out confident negatives.
                if result[0]['label'] in ['POSITIVE', 'LABEL_1'] and result[0]['score'] > 0.9:
                    st.balloons()
                elif result[0]['label'] in ['NEGATIVE', 'LABEL_0'] and result[0]['score'] > 0.9:
                    st.error("Hater detected.")
else:
    st.write("Enter a text and click 'Analyze' to perform toxicity analysis.")
requirements (3).txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit
2
+ torch
3
+ transformers
4
+ pandas