Update app.py
Browse files
app.py
CHANGED
|
@@ -5,31 +5,30 @@ import easyocr
|
|
| 5 |
from PIL import Image
|
| 6 |
import numpy as np
|
| 7 |
|
| 8 |
-
# Sarcasm
|
| 9 |
SARCASM_MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"
|
| 10 |
sarcasm_labels = ["not sarcastic", "sarcastic"]
|
| 11 |
-
|
| 12 |
sarcasm_tokenizer = AutoTokenizer.from_pretrained(SARCASM_MODEL_NAME)
|
| 13 |
sarcasm_model = AutoModelForSequenceClassification.from_pretrained(SARCASM_MODEL_NAME)
|
| 14 |
|
| 15 |
-
# Hate
|
| 16 |
HATE_MODEL_NAME = "your-username/deberta-hate-speech-custom"
|
| 17 |
hate_labels = [
|
| 18 |
-
"
|
| 19 |
"harassment",
|
| 20 |
-
"
|
|
|
|
| 21 |
"sexism",
|
| 22 |
-
"
|
| 23 |
"not_hate"
|
| 24 |
]
|
| 25 |
-
|
| 26 |
hate_tokenizer = AutoTokenizer.from_pretrained(HATE_MODEL_NAME)
|
| 27 |
hate_model = AutoModelForSequenceClassification.from_pretrained(HATE_MODEL_NAME)
|
| 28 |
|
| 29 |
-
# OCR
|
| 30 |
reader = easyocr.Reader(['en'], gpu=False)
|
| 31 |
|
| 32 |
-
def
|
| 33 |
if isinstance(image, Image.Image):
|
| 34 |
image = np.array(image)
|
| 35 |
result = reader.readtext(image, detail=0)
|
|
@@ -53,41 +52,38 @@ def classify_hate(text):
|
|
| 53 |
confidence = float(probs[0][pred])
|
| 54 |
return hate_labels[pred], confidence
|
| 55 |
|
| 56 |
-
def
|
| 57 |
-
input_text = ""
|
| 58 |
if image is not None:
|
| 59 |
-
input_text =
|
| 60 |
if not input_text.strip():
|
| 61 |
-
return "No text found in image.", None
|
| 62 |
elif text and text.strip():
|
| 63 |
input_text = text
|
| 64 |
else:
|
| 65 |
-
return "Please provide
|
| 66 |
|
| 67 |
sarcasm_label, sarcasm_conf = detect_sarcasm(input_text)
|
| 68 |
if sarcasm_label == "sarcastic":
|
| 69 |
-
return f"Text
|
| 70 |
|
| 71 |
hate_label, hate_conf = classify_hate(input_text)
|
| 72 |
return (
|
| 73 |
-
f"Input
|
| 74 |
-
hate_label
|
| 75 |
-
sarcasm_label
|
| 76 |
)
|
| 77 |
|
| 78 |
iface = gr.Interface(
|
| 79 |
-
fn=
|
| 80 |
inputs=[
|
| 81 |
gr.Image(type="pil", label="Upload Screenshot (optional)"),
|
| 82 |
gr.Textbox(lines=3, placeholder="Or, type/paste text here")
|
| 83 |
],
|
| 84 |
outputs=[
|
| 85 |
-
gr.Textbox(label="
|
| 86 |
-
gr.Label(num_top_classes=len(hate_labels), label="Hate Speech Class")
|
| 87 |
-
gr.Label(num_top_classes=2, label="Sarcasm")
|
| 88 |
],
|
| 89 |
-
title="
|
| 90 |
-
description="
|
| 91 |
)
|
| 92 |
|
| 93 |
if __name__ == "__main__":
|
|
|
|
| 5 |
from PIL import Image
|
| 6 |
import numpy as np
|
| 7 |
|
| 8 |
+
# --- Model & OCR setup (runs once at import time) ---

# Sarcasm Detection Model (public)
# NOTE(review): this checkpoint is an EMOTION classifier; a binary
# ["not sarcastic", "sarcastic"] label list likely does not match its
# output head — confirm against the model card or swap in a real
# sarcasm-detection checkpoint.
SARCASM_MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"
sarcasm_labels = ["not sarcastic", "sarcastic"]

sarcasm_tokenizer = AutoTokenizer.from_pretrained(SARCASM_MODEL_NAME)
sarcasm_model = AutoModelForSequenceClassification.from_pretrained(SARCASM_MODEL_NAME)

# Hate Speech Model - DeBERTa fine-tuned (replace with your own model)
# NOTE(review): placeholder repo id — must be replaced with a real model
# whose label order matches hate_labels below.
HATE_MODEL_NAME = "your-username/deberta-hate-speech-custom"
hate_labels = [
    "abusive_language",
    "harassment",
    "threat",
    "racism",
    "sexism",
    "religious_hate",
    "not_hate",
]

hate_tokenizer = AutoTokenizer.from_pretrained(HATE_MODEL_NAME)
hate_model = AutoModelForSequenceClassification.from_pretrained(HATE_MODEL_NAME)

# OCR (English only, CPU — gpu=False keeps it runnable on Spaces free tier)
reader = easyocr.Reader(['en'], gpu=False)
|
| 30 |
|
| 31 |
+
def extract_text(image):
|
| 32 |
if isinstance(image, Image.Image):
|
| 33 |
image = np.array(image)
|
| 34 |
result = reader.readtext(image, detail=0)
|
|
|
|
| 52 |
confidence = float(probs[0][pred])
|
| 53 |
return hate_labels[pred], confidence
|
| 54 |
|
| 55 |
+
def pipeline(image=None, text=None):
    """End-to-end check: OCR (if an image is given), sarcasm detection,
    then hate-speech classification.

    Args:
        image: optional PIL image (screenshot) to run OCR on.
        text: optional raw text; used when no image is given, or as a
            fallback when OCR finds no readable text in the image.

    Returns:
        (message, label): a human-readable result string plus the
        predicted hate-speech label for the gr.Label output (or the
        sarcasm label / None on early exits).
    """
    input_text = ""
    if image is not None:
        input_text = extract_text(image)

    if not input_text.strip():
        # Fix: previously an image with no readable text aborted even when
        # the user had ALSO typed text — now typed text is the fallback.
        if text and text.strip():
            input_text = text
        elif image is not None:
            return "No text found in image.", None
        else:
            return "Please provide text or upload an image.", None

    sarcasm_label, sarcasm_conf = detect_sarcasm(input_text)
    if sarcasm_label == "sarcastic":
        # Sarcastic text is reported as-is; hate classification is skipped.
        return (
            f"Text detected as SARCASTIC (Confidence: {sarcasm_conf:.2f})."
            " Hate speech classification skipped.",
            sarcasm_label,
        )

    hate_label, hate_conf = classify_hate(input_text)
    return (
        f"Input Text: {input_text}\n"
        f"Hate Speech Category: {hate_label} (Confidence: {hate_conf:.2f})\n"
        f"Sarcasm: {sarcasm_label} (Confidence: {sarcasm_conf:.2f})",
        hate_label,
    )
|
| 74 |
|
| 75 |
# Gradio UI: accepts an optional screenshot OR pasted text; displays a
# textual summary plus the predicted hate-speech class.
iface = gr.Interface(
    fn=pipeline,
    inputs=[
        gr.Image(type="pil", label="Upload Screenshot (optional)"),
        gr.Textbox(lines=3, placeholder="Or, type/paste text here"),
    ],
    outputs=[
        gr.Textbox(label="Result"),
        gr.Label(num_top_classes=len(hate_labels), label="Hate Speech Class"),
    ],
    title="Cyber Bully Detection System",
    description="Detects sarcasm first; if no sarcasm detected, classifies hate speech into abusive language, harassment, threat, racism, sexism, religious hate, or no hate.",
)
|
| 88 |
|
| 89 |
if __name__ == "__main__":
|