Update app.py
app.py
CHANGED
@@ -6,15 +6,25 @@ import gradio as gr
 # Import your local files
 import database, schemas, moderator
 
-# Initialize Database
+# 1. Initialize Database
 database.init_db()
 
 # --- DASHBOARD LOGIC ---
 def update_dashboard(user, text):
-    #
+    # EMOJI ANALYSIS: Quick check for known aggressive emojis
+    bad_emojis = ["🔪", "😡", "👊", "🖕", "🔫", "🤮"]
+    emoji_flag = any(e in text for e in bad_emojis)
+
+    # AI ANALYSIS
     analysis = moderator.moderator.analyze(text)
 
-    #
+    # If emoji found, boost toxicity
+    if emoji_flag and not analysis["is_toxic"]:
+        analysis["is_toxic"] = True
+        analysis["reason"] = "Aggressive Emoji Detected"
+        analysis["score"] = max(analysis["score"], 0.85)
+
+    # Save to Database
     db = database.SessionLocal()
     db_comment = database.Comment(
         video_id=1, user=user, text=text,
@@ -25,76 +35,90 @@ def update_dashboard(user, text):
     db.add(db_comment)
     db.commit()
 
-    #
+    # Calculate Global Metrics
     all_comments = db.query(database.Comment).all()
     total_count = len(all_comments)
     toxic_comments = [c for c in all_comments if c.is_toxic]
     toxic_count = len(toxic_comments)
     safe_count = total_count - toxic_count
 
-    # Safety Score
+    # FIX: Safety Score as a raw number (0-100)
     safety_score = (safe_count / total_count * 100) if total_count > 0 else 100
 
-    # Toxicity Classification breakdown
+    # Toxicity Classification breakdown for the Label component
     reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
     for c in toxic_comments:
         if c.flagged_reason in reasons:
             reasons[c.flagged_reason] += 1
 
-    # Format for
-    # Normalized for the Label component
+    # Format for visual bars (Confidence score style)
     chart_data = {k: v / toxic_count if toxic_count > 0 else 0 for k, v in reasons.items()}
 
-    #
+    # Hall of Shame Data (Last 5 toxic)
     shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
     shame_data = [[c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason] for c in shame_query]
 
     db.close()
 
-
+    status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 Safe"
+
+    # FIX: Return raw numbers for gr.Number to avoid rounding errors
     return (
-
-
-
-
-        chart_data,
-        shame_data
+        int(safety_score),   # Safety Score %
+        int(total_count),    # Total Signals
+        int(toxic_count),    # Threats Detected
+        status_label,        # Alert Level
+        chart_data,          # Classification Bars
+        shame_data           # History Table
     )
 
+def clear_db():
+    db = database.SessionLocal()
+    db.query(database.Comment).delete()
+    db.commit()
+    db.close()
+    return 100, 0, 0, "🟢 NORMAL", {}, []
+
 # --- UI LAYOUT ---
 with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
-    gr.
+    with gr.Row():
+        gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
+        clear_btn = gr.Button("🗑️ Clear Database", variant="stop", size="sm")
 
-    #
+    # KPI Row
     with gr.Row():
         total_signals = gr.Number(label="Total Signals", value=0, precision=0)
         threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
-
+        safety_score = gr.Number(label="✅ Safety Score (%)", value=100, precision=0)
 
     with gr.Row():
-        #
+        # Main Interface
         with gr.Column(scale=2):
             gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
             with gr.Group():
                 user_input = gr.Textbox(label="User", value="Guest")
-                msg_input = gr.Textbox(label="Message Inference", placeholder="
+                msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
                 submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
 
-        #
+        # Classification & Alerts
         with gr.Column(scale=1):
             gr.Markdown("### Toxicity Classification")
             donut_chart = gr.Label(num_top_classes=4, label="Distribution")
-            current_alert = gr.Label(label="Threat Level")
+            current_alert = gr.Label(label="Current Threat Level")
 
-    # Bottom Row: History
     gr.Markdown("### 💀 Strategic Insights & History")
     shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)
 
-    # Event
+    # Event Triggers
     submit_btn.click(
         update_dashboard,
         inputs=[user_input, msg_input],
-        outputs=[
+        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table]
+    )
+
+    clear_btn.click(
+        clear_db,
+        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table]
    )
 
 if __name__ == "__main__":
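Two details in the second hunk are easy to miss. First, the six-element tuples returned by update_dashboard and clear_db must stay aligned, position by position, with the outputs list wired to both buttons. Second, chart_data divides each reason count by toxic_count because gr.Label renders a dict of label-to-confidence values in [0, 1]; a quick worked example with invented counts:

reasons = {"Identity Hate": 0, "Insult": 2, "Online Harassment": 1, "Threat": 1}
toxic_count = 4
chart_data = {k: v / toxic_count if toxic_count > 0 else 0 for k, v in reasons.items()}
# -> {"Identity Hate": 0.0, "Insult": 0.5, "Online Harassment": 0.25, "Threat": 0.25}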
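app.py also leans on a local database module that this commit does not touch: it calls database.init_db(), opens sessions with database.SessionLocal(), and reads and writes database.Comment rows through the columns referenced in the diff (video_id, user, text, is_toxic, toxicity_score, flagged_reason). A plausible SQLAlchemy sketch, inferred from those call sites rather than from the actual file:

# database.py -- hypothetical reconstruction; the real module may differ
from sqlalchemy import create_engine, Column, Integer, String, Boolean, Float
from sqlalchemy.orm import declarative_base, sessionmaker

engine = create_engine("sqlite:///comments.db")  # assumed backing store
SessionLocal = sessionmaker(bind=engine)
Base = declarative_base()

class Comment(Base):
    __tablename__ = "comments"
    id = Column(Integer, primary_key=True)  # update_dashboard orders by id desc
    video_id = Column(Integer)
    user = Column(String)
    text = Column(String)
    is_toxic = Column(Boolean, default=False)
    toxicity_score = Column(Float, default=0.0)
    flagged_reason = Column(String, nullable=True)

def init_db():
    Base.metadata.create_all(bind=engine)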