NightPrince committed on
Commit
ac47f8e
·
verified ·
1 Parent(s): ec1972c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pickle
3
+ import tensorflow as tf
4
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
5
+ import numpy as np
6
+
7
+
8
+ import keras
9
+
10
+
# --- Load tokenizer and model (module-level, runs once at import) ---

# Path to the fitted Keras tokenizer serialized with pickle.
tokenizer_path = "tokenizer.pkl"

# NOTE(review): pickle.load executes arbitrary code embedded in the file —
# only load tokenizer.pkl from a trusted source.
with open(tokenizer_path, "rb") as f:
    tokenizer = pickle.load(f)

# Download and load the trained classifier from the Hugging Face Hub.
model = keras.saving.load_model("hf://NightPrince/Toxic-Predict")
18
+
# Maps the model's output class index (argmax position) to a
# human-readable category name.
label_map = dict(enumerate((
    "Child Sexual Exploitation",
    "Elections",
    "Non-Violent Crimes",
    "Safe",
    "Sex-Related Crimes",
    "Suicide & Self-Harm",
    "Unknown S-Type",
    "Violent Crimes",
    "Unsafe",
)))
31
+
def classify_toxic(query, image_desc):
    """Classify a (query, image description) pair into a toxicity category.

    Args:
        query: User query text from the Gradio textbox (may be None/empty).
        image_desc: Image description text (may be None/empty).

    Returns:
        The human-readable category name from ``label_map`` for the
        model's argmax class, or "Unknown" for an unmapped index.
    """
    # Must match the sequence length the model was trained with —
    # TODO confirm against the training pipeline.
    max_len = 150
    # Guard against None (empty Gradio inputs) before concatenating.
    text = f"{query or ''} {image_desc or ''}"
    seq = tokenizer.texts_to_sequences([text])
    pad = pad_sequences(seq, maxlen=max_len, padding='post', truncating='post')
    pred = model.predict(pad)
    # Cast numpy integer to a plain int for the dict lookup.
    pred_label = int(np.argmax(pred, axis=1)[0])
    return label_map.get(pred_label, "Unknown")
40
+
# Gradio UI: two free-text inputs mapped to one predicted-category output.
_query_box = gr.Textbox(label="Query")
_image_box = gr.Textbox(label="Image Description")
_result_box = gr.Textbox(label="Predicted Toxic Category")

iface = gr.Interface(
    fn=classify_toxic,
    inputs=[_query_box, _image_box],
    outputs=_result_box,
    title="Toxic Category Classifier",
    description=(
        "Enter a query and image description to classify the prompt "
        "into one of the toxic categories"
    ),
)

if __name__ == "__main__":
    # Launch the Gradio server only when executed as a script.
    iface.launch()