# Toxic-Predict / app.py
# (Hugging Face Space header removed — repo: NightPrince/Toxic-Predict, commit 05f7045)
import gradio as gr
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import keras
from tensorflow.keras.preprocessing.text import tokenizer_from_json
import json
# --- Load inference artifacts once, at import time ---

# Rebuild the fitted Keras tokenizer from its JSON export.
with open("tokenizer.json", "r", encoding="utf-8") as tok_file:
    tokenizer = tokenizer_from_json(tok_file.read())

# Restore the trained classifier from the local HDF5 checkpoint.
model = tf.keras.models.load_model("toxic_classifier.h5")
# Label map
# Softmax index -> human-readable category name, in the order the
# classifier's output head was trained on.
label_map = dict(enumerate((
    "Child Sexual Exploitation",
    "Elections",
    "Non-Violent Crimes",
    "Safe",
    "Sex-Related Crimes",
    "Suicide & Self-Harm",
    "Unknown S-Type",
    "Violent Crimes",
    "Unsafe",
)))
# Inference function
def classify_toxic(query, image_desc):
    """Classify a (query, image-description) pair into a toxic category.

    Args:
        query: User text prompt. ``None`` (e.g. a cleared Gradio textbox)
            is treated as an empty string.
        image_desc: Text description of the accompanying image. ``None``
            is likewise treated as empty.

    Returns:
        The human-readable category name for the model's argmax class,
        or "Unknown" if the predicted index is not in ``label_map``.
    """
    max_len = 150  # must match the sequence length used at training time
    # Gradio passes None for a cleared textbox; guard the concatenation.
    text = f"{query or ''} {image_desc or ''}".strip()
    seq = tokenizer.texts_to_sequences([text])
    pad = pad_sequences(seq, maxlen=max_len, padding='post', truncating='post')
    # verbose=0: suppress the per-call Keras progress bar in a serving loop.
    pred = model.predict(pad, verbose=0)
    pred_label = int(np.argmax(pred, axis=1)[0])  # np.int64 -> plain int key
    return label_map.get(pred_label, "Unknown")
# Gradio interface
# --- Gradio UI wiring ---
# Two free-text inputs feed the classifier; the predicted category name
# comes back as a single read-only textbox.
_input_widgets = [
    gr.Textbox(label="Query"),
    gr.Textbox(label="Image Description"),
]
_output_widget = gr.Textbox(label="Predicted Toxic Category")

iface = gr.Interface(
    fn=classify_toxic,
    inputs=_input_widgets,
    outputs=_output_widget,
    title="Toxic Category Classifier",
    description="Enter a query and image description to classify the prompt into one of the toxic categories",
)

# Launch only when executed as a script (imports stay side-effect free).
if __name__ == "__main__":
    iface.launch()