# Toxicity classifier Gradio app — by SHAH-MEER (commit b8b5c4e, "Update app.py", verified).
import gradio as gr
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import TextVectorization
# Maximum vocabulary size for the TextVectorization layer.
MAX_FEATURES = 200000
# Fixed token-sequence length fed to the model (inputs are padded/truncated to this).
OUTPUT_SEQUENCE_LENGTH = 1800
class ToxicityClassifier:
    """Multi-label toxicity classifier wrapping a Keras model and a TextVectorization layer."""

    # Output labels, in the order of the model's six output units.
    LABELS = ("toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate")

    def __init__(self, model_path='toxicity.h5'):
        """Initialize the toxicity classifier with model and vectorizer.

        Args:
            model_path: path to the saved Keras model file.

        Raises:
            RuntimeError: if the model or vectorizer cannot be initialized.
        """
        try:
            self.model = tf.keras.models.load_model(model_path)
            self.vectorizer = self._initialize_vectorizer()
            print("βœ… Model and vectorizer loaded successfully!")
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"Failed to initialize classifier: {str(e)}") from e

    def _initialize_vectorizer(self):
        """Initialize and adapt text vectorizer (dummy adapt for demo).

        NOTE(review): adapting on a single dummy string leaves the vectorizer
        with an essentially empty vocabulary, so real predictions will be
        meaningless. The vocabulary from training should be restored instead
        (e.g. vectorizer.set_vocabulary(...)) — TODO confirm against the
        training pipeline.
        """
        vectorizer = TextVectorization(
            max_tokens=MAX_FEATURES,
            output_sequence_length=OUTPUT_SEQUENCE_LENGTH,
            output_mode='int'
        )
        vectorizer.adapt(["dummy text"])
        return vectorizer

    def predict_toxicity(self, text):
        """Predict toxicity probabilities for input text.

        Args:
            text: raw input string to classify.

        Returns:
            dict mapping each label in LABELS to a float probability,
            or None if prediction fails (callers must handle None).
        """
        try:
            # Vectorize the single input and take the first (only) prediction row.
            vectorized_text = self.vectorizer([text])
            predictions = self.model.predict(vectorized_text, verbose=0)[0]
            # Pair outputs with labels instead of repeating hard-coded indices.
            return {label: float(score) for label, score in zip(self.LABELS, predictions)}
        except Exception as e:
            # Best-effort: log and signal failure with None rather than crashing the UI.
            print(f"Prediction error: {e}")
            return None
def create_interface():
    """Build and return the Gradio Blocks app wrapping a ToxicityClassifier."""
    classifier = ToxicityClassifier()

    def classify_text(text):
        """Render per-label scores as emoji-marked percentage lines, one per label."""
        scores = classifier.predict_toxicity(text)
        if scores is None:
            return "Error processing your request. Please try again."
        lines = []
        for name, value in scores.items():
            marker = "πŸ”΄" if value > 0.5 else "🟒"
            lines.append(f"{marker} {name.replace('_', ' ').title()}: {value*100:.1f}%")
        return "\n".join(lines)

    # Custom CSS for better UI
    css = """
    .gradio-container { max-width: 600px !important; }
    .label { font-weight: bold; }
    .warning { color: #ff4d4d; }
    """

    # Assemble the UI: header, input row, results group, examples, wiring.
    with gr.Blocks(css=css, title="Toxicity Classifier",theme = gr.themes.Soft()) as app:
        gr.Markdown("""
        # 🚨 Toxicity Classifier
        Enter text to analyze for toxic content. The model evaluates:
        - Toxicity
        - Severe Toxicity
        - Obscenity
        - Threats
        - Insults
        - Identity Hate
        """)

        with gr.Row():
            text_input = gr.Textbox(
                placeholder="Enter text to analyze...",
                label="Input Text",
                lines=3,
                max_lines=6
            )
            submit_btn = gr.Button("Analyze", variant="primary")

        with gr.Group():
            gr.Markdown("### Results")
            output = gr.Textbox(label="Analysis", interactive=False)

        gr.Examples(
            examples=[
                "You're an idiot!",
                "I hate you"
            ],
            inputs=text_input,
            label="Try these examples"
        )

        # Route button clicks through the formatter above.
        submit_btn.click(
            fn=classify_text,
            inputs=text_input,
            outputs=output
        )

    return app
if __name__ == "__main__":
    demo = create_interface()
    # Bind to all interfaces so the app is reachable inside containers/Spaces;
    # share=True additionally requests a public Gradio tunnel link.
    demo.launch(server_name="0.0.0.0", share=True, favicon_path=None)