# app.py — Gradio demo: harmful-comment detection for Sinhala and Singlish feedback
import gradio as gr
import pickle
# --- Model loading -----------------------------------------------------------
# NOTE(review): pickle.load executes arbitrary code on untrusted files — only
# ship pickles produced by this project. Consider safer formats (joblib with
# care, ONNX, etc.) if the artifacts ever come from outside.
def _load_pickled_model(path):
    """Deserialize and return a pickled classifier stored at *path*."""
    with open(path, "rb") as handle:
        return pickle.load(handle)

model_singlish = _load_pickled_model('singlish_comment_classifier_v3.pkl')
model_sinhala = _load_pickled_model('sinhala_comment_classifier_v1.pkl')
def predict_sinhala(text):
    """Classify *text* with the Sinhala model.

    The model's label 1 is treated as benign; anything else is flagged.
    Returns the string "No Harmful" or "Harmful".
    """
    label = model_sinhala.predict([text])[0]
    return "No Harmful" if label == 1 else "Harmful"
def predict_singlish(text):
    """Classify *text* with the Singlish model.

    The model's label 1 is treated as benign; anything else is flagged.
    Returns the string "No Harmful" or "Harmful".
    """
    label = model_singlish.predict([text])[0]
    return "No Harmful" if label == 1 else "Harmful"
interface = gr.Interface(
fn=lambda text, model_choice: predict_singlish(text) if model_choice == "Singlish" else predict_sinhala(text),
inputs=[
gr.Textbox(
label="Enter Text",
placeholder="Type a comment to analyze here...",
lines=3,
elem_id="input_text"
),
gr.Radio(
choices=["Singlish", "Sinhala"],
label="Choose Language",
info="Select which language model to use for the analysis.",
elem_id="radio_choice"
),
],
outputs="text",
title="Sinhala and Singlish Harmful Comment Detection for Anonymous Feedback",
description="Use these 2 models to detect harmful content in anonymous messages shared through feedback links about someone. Designed for Sinhala and Singlish (one model for each lang), the models help identify offensive or hurtful comments about individuals",
theme="huggingface",
css="""
.gradio-container {
background-color: #f4f4f9;
padding: 20px;
border-radius: 8px;
box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.1);
}
.gradio-input {
border: 1px solid #ddd;
padding: 10px;
border-radius: 6px;
}
.gradio-button {
background-color: #4CAF50;
color: white;
font-weight: bold;
border-radius: 6px;
padding: 12px 24px;
}
.gradio-button:hover {
background-color: #45a049;
}
h1 {
color: #333;
}
.prose p {
color: #555;
}
""",
allow_flagging="never"
)
interface.launch(share=True)