Update app.py
app.py CHANGED
@@ -1,8 +1,6 @@
 import os
 import uvicorn
 import pandas as pd
-from fastapi import FastAPI, Depends
-from fastapi.middleware.cors import CORSMiddleware
 import gradio as gr
 
 # Import your local files
@@ -11,22 +9,12 @@ import database, schemas, moderator
 # Initialize Database
 database.init_db()
 
-# ---
-def moderate_and_update(user, text):
-    #
-    bad_emojis = ["🔪", "😡", "👊", "🖕", "🔫", "🤮"]
-    emoji_flag = any(e in text for e in bad_emojis)
-
-    # AI ANALYSIS
+# --- DASHBOARD LOGIC ---
+def update_dashboard(user, text):
+    # 1. Run AI Analysis
     analysis = moderator.moderator.analyze(text)
 
-    #
-    if emoji_flag and not analysis["is_toxic"]:
-        analysis["is_toxic"] = True
-        analysis["reason"] = "Aggressive Emoji Detected"
-        analysis["score"] = max(analysis["score"], 0.85)
-
-    # Save to Database
+    # 2. Save to DB
     db = database.SessionLocal()
     db_comment = database.Comment(
         video_id=1, user=user, text=text,
@@ -36,54 +24,77 @@ def moderate_and_update(user, text):
     )
     db.add(db_comment)
     db.commit()
-    db.close()
 
-    #
-
+    # 3. Calculate Global Metrics
+    all_comments = db.query(database.Comment).all()
+    total_count = len(all_comments)
+    toxic_comments = [c for c in all_comments if c.is_toxic]
+    toxic_count = len(toxic_comments)
+    safe_count = total_count - toxic_count
+
+    # Safety Score calculation: (Safe / Total) * 100
+    safety_score = (safe_count / total_count * 100) if total_count > 0 else 100
+
+    # Toxicity Classification breakdown
+    reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
+    for c in toxic_comments:
+        if c.flagged_reason in reasons:
+            reasons[c.flagged_reason] += 1
+
+    # Format for Donut/Label chart
+    # Normalized for the Label component
+    chart_data = {k: v / toxic_count if toxic_count > 0 else 0 for k, v in reasons.items()}
+
+    # Get Hall of Shame Data
     shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
     shame_data = [[c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason] for c in shame_query]
 
-    # Get Stats for Chart
-    total_safe = db.query(database.Comment).filter(database.Comment.is_toxic == False).count()
-    total_toxic = db.query(database.Comment).filter(database.Comment.is_toxic == True).count()
-    stats_df = pd.DataFrame({"Status": ["Safe ✅", "Toxic 🚫"], "Count": [total_safe, total_toxic]})
     db.close()
 
     status = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 Safe"
-
-
-
+    return (
+        f"{int(safety_score)}%",  # Safety Score
+        f"{total_count}",         # Total Signals
+        f"{toxic_count}",         # Threats Detected
+        status,                   # Current Status
+        chart_data,               # Donut Chart Data
+        shame_data                # History Table
+    )
 
 # --- UI LAYOUT ---
-with gr.Blocks(theme=gr.themes.
-    gr.Markdown("# 🛡️
+with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
+    gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
 
-
-
-
-
-
-    gr.HTML('<iframe width="100%" height="400" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
-    with gr.Column(scale=1):
-        user_input = gr.Textbox(label="Username", value="Guest")
-        msg_input = gr.Textbox(label="Type a comment...", placeholder="Try a mean emoji like 🔪 or text!")
-        submit_btn = gr.Button("Post Comment", variant="primary")
-        status_box = gr.Label(label="Current Moderation Status")
-        output_feed = gr.Textbox(label="Last Action", interactive=False)
+    # Top Row: KPI Cards
+    with gr.Row():
+        total_signals = gr.Number(label="Total Signals", value=0, precision=0)
+        threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
+        safety_score_gauge = gr.Label(label="✅ Safety Score")
 
-
-
+    with gr.Row():
+        # Left: Live Input & Video
+        with gr.Column(scale=2):
+            gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
+            with gr.Group():
+                user_input = gr.Textbox(label="User", value="Guest")
+                msg_input = gr.Textbox(label="Message Inference", placeholder="Type to test...")
+                submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
+
+        # Right: Classification Chart
+        with gr.Column(scale=1):
+            gr.Markdown("### Toxicity Classification")
+            donut_chart = gr.Label(num_top_classes=4, label="Distribution")
+            current_alert = gr.Label(label="Threat Level")
 
-
-
-
-    stats_plot = gr.BarPlot(x="Status", y="Count", title="Community Health Overview", color="Status", color_map={"Safe ✅": "green", "Toxic 🚫": "red"})
+    # Bottom Row: History
+    gr.Markdown("### 💀 Strategic Insights & History")
+    shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)
 
-    # Event
+    # Event Wiring
     submit_btn.click(
-        moderate_and_update,
+        update_dashboard,
         inputs=[user_input, msg_input],
-        outputs=[
+        outputs=[safety_score_gauge, total_signals, threats_found, current_alert, donut_chart, shame_table]
     )
 
 if __name__ == "__main__":
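A note on the new safety score: the inline conditional guards the division, so a fresh database with zero comments reports 100% instead of raising ZeroDivisionError. A small worked example of the formula as committed (the counts below are illustrative):

# Worked example of: (safe_count / total_count * 100) if total_count > 0 else 100
safe_count, total_count = 18, 20
print(f"{int((safe_count / total_count * 100) if total_count > 0 else 100)}%")  # 90%
safe_count, total_count = 0, 0
print(f"{int((safe_count / total_count * 100) if total_count > 0 else 100)}%")  # 100%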
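Similarly, `chart_data` is normalized because `gr.Label` renders a mapping of class names to confidences in the 0-1 range; raw counts would display as misleading percentages. A standalone sketch of the committed comprehension, with illustrative counts:

# Sketch: normalize reason counts for gr.Label (values are made up).
reasons = {"Identity Hate": 1, "Insult": 3, "Online Harassment": 2, "Threat": 2}
toxic_count = sum(reasons.values())  # 8
chart_data = {k: v / toxic_count if toxic_count > 0 else 0 for k, v in reasons.items()}
print(chart_data)  # {'Identity Hate': 0.125, 'Insult': 0.375, 'Online Harassment': 0.25, 'Threat': 0.25}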
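Finally, the event wiring relies on positional mapping: the six values returned by `update_dashboard` update, in order, the six components listed in `outputs`. A minimal runnable sketch of the same pattern (function and component names here are hypothetical, not taken from app.py):

import gradio as gr

def analyze(text):
    # The two return values map positionally onto `outputs` below.
    words = len(text.split())
    return f"{words} words", "🟢 Safe" if words < 50 else "🔴 Review"

with gr.Blocks() as demo:
    msg = gr.Textbox(label="Message")
    btn = gr.Button("Analyze")
    length_box = gr.Label(label="Length")
    verdict_box = gr.Label(label="Verdict")
    btn.click(analyze, inputs=[msg], outputs=[length_box, verdict_box])

if __name__ == "__main__":
    demo.launch()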