Shubhi324 committed on
Commit
f0f0758
·
verified ·
1 Parent(s): 56682d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -37
app.py CHANGED
@@ -2,29 +2,20 @@ import os
2
  import uvicorn
3
  import pandas as pd
4
  import gradio as gr
 
5
 
6
  # Import your local files
7
  import database, schemas, moderator
8
 
9
- # 1. Initialize Database
10
  database.init_db()
11
 
12
  # --- DASHBOARD LOGIC ---
13
  def update_dashboard(user, text):
14
- # EMOJI ANALYSIS: Quick check for known aggressive emojis
15
- bad_emojis = ["🔪", "😡", "👊", "🖕", "🔫", "🤮"]
16
- emoji_flag = any(e in text for e in bad_emojis)
17
-
18
- # AI ANALYSIS
19
  analysis = moderator.moderator.analyze(text)
20
 
21
- # If emoji found, boost toxicity
22
- if emoji_flag and not analysis["is_toxic"]:
23
- analysis["is_toxic"] = True
24
- analysis["reason"] = "Aggressive Emoji Detected"
25
- analysis["score"] = max(analysis["score"], 0.85)
26
-
27
- # Save to Database
28
  db = database.SessionLocal()
29
  db_comment = database.Comment(
30
  video_id=1, user=user, text=text,
@@ -35,24 +26,40 @@ def update_dashboard(user, text):
35
  db.add(db_comment)
36
  db.commit()
37
 
38
- # Calculate Global Metrics
39
  all_comments = db.query(database.Comment).all()
40
  total_count = len(all_comments)
41
  toxic_comments = [c for c in all_comments if c.is_toxic]
42
  toxic_count = len(toxic_comments)
43
  safe_count = total_count - toxic_count
44
 
45
- # FIX: Safety Score as a raw number (0-100)
46
- safety_score = (safe_count / total_count * 100) if total_count > 0 else 100
47
 
48
- # Toxicity Classification breakdown for the Label component
49
  reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
50
  for c in toxic_comments:
51
- if c.flagged_reason in reasons:
52
- reasons[c.flagged_reason] += 1
53
-
54
- # Format for visual bars (Confidence score style)
55
- chart_data = {k: v / toxic_count if toxic_count > 0 else 0 for k, v in reasons.items()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  # Hall of Shame Data (Last 5 toxic)
58
  shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
@@ -60,16 +67,15 @@ def update_dashboard(user, text):
60
 
61
  db.close()
62
 
63
- status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 Safe"
64
 
65
- # FIX: Return raw numbers for gr.Number to avoid rounding errors
66
  return (
67
- int(safety_score), # Safety Score %
68
- int(total_count), # Total Signals
69
- int(toxic_count), # Threats Detected
70
- status_label, # Alert Level
71
- chart_data, # Classification Bars
72
- shame_data # History Table
73
  )
74
 
75
  def clear_db():
@@ -77,7 +83,8 @@ def clear_db():
77
  db.query(database.Comment).delete()
78
  db.commit()
79
  db.close()
80
- return 100, 0, 0, "🟢 NORMAL", {}, []
 
81
 
82
  # --- UI LAYOUT ---
83
  with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
@@ -85,14 +92,12 @@ with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as dem
85
  gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
86
  clear_btn = gr.Button("🗑️ Clear Database", variant="stop", size="sm")
87
 
88
- # KPI Row
89
  with gr.Row():
90
  total_signals = gr.Number(label="Total Signals", value=0, precision=0)
91
  threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
92
  safety_score = gr.Number(label="✅ Safety Score (%)", value=100, precision=0)
93
 
94
  with gr.Row():
95
- # Main Interface
96
  with gr.Column(scale=2):
97
  gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
98
  with gr.Group():
@@ -100,16 +105,14 @@ with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as dem
100
  msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
101
  submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
102
 
103
- # Classification & Alerts
104
  with gr.Column(scale=1):
105
- gr.Markdown("### Toxicity Classification")
106
- donut_chart = gr.Label(num_top_classes=4, label="Distribution")
107
  current_alert = gr.Label(label="Current Threat Level")
108
 
109
  gr.Markdown("### 💀 Strategic Insights & History")
110
  shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)
111
 
112
- # Event Triggers
113
  submit_btn.click(
114
  update_dashboard,
115
  inputs=[user_input, msg_input],
 
2
  import uvicorn
3
  import pandas as pd
4
  import gradio as gr
5
+ import plotly.graph_objects as go
6
 
7
  # Import your local files
8
  import database, schemas, moderator
9
 
10
+ # Initialize Database
11
  database.init_db()
12
 
13
  # --- DASHBOARD LOGIC ---
14
  def update_dashboard(user, text):
15
+ # 1. Run AI Analysis
 
 
 
 
16
  analysis = moderator.moderator.analyze(text)
17
 
18
+ # 2. Save to DB
 
 
 
 
 
 
19
  db = database.SessionLocal()
20
  db_comment = database.Comment(
21
  video_id=1, user=user, text=text,
 
26
  db.add(db_comment)
27
  db.commit()
28
 
29
+ # 3. Calculate Global Metrics
30
  all_comments = db.query(database.Comment).all()
31
  total_count = len(all_comments)
32
  toxic_comments = [c for c in all_comments if c.is_toxic]
33
  toxic_count = len(toxic_comments)
34
  safe_count = total_count - toxic_count
35
 
36
+ # Safety Score calculation
37
+ safety_score = int((safe_count / total_count * 100)) if total_count > 0 else 100
38
 
39
+ # 4. FIXED CHART LOGIC: Ensure data is never zero if toxic comments exist
40
  reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
41
  for c in toxic_comments:
42
+ # If the AI reason isn't in our list, default to Online Harassment so it shows on chart
43
+ cat = c.flagged_reason if c.flagged_reason in reasons else "Online Harassment"
44
+ reasons[cat] += 1
45
+
46
+ # CREATE REAL DONUT CHART
47
+ colors = ['#7B68EE', '#FF4500', '#1E90FF', '#FFA500']
48
+ fig = go.Figure(data=[go.Pie(
49
+ labels=list(reasons.keys()),
50
+ values=list(reasons.values()),
51
+ hole=.6,
52
+ marker_colors=colors,
53
+ textinfo='label+percent'
54
+ )])
55
+ fig.update_layout(
56
+ showlegend=True,
57
+ margin=dict(t=10, b=10, l=10, r=10),
58
+ height=300,
59
+ paper_bgcolor='rgba(0,0,0,0)',
60
+ plot_bgcolor='rgba(0,0,0,0)',
61
+ font=dict(color="white") if os.getenv("GRADIO_THEME") == "dark" else dict(color="black")
62
+ )
63
 
64
  # Hall of Shame Data (Last 5 toxic)
65
  shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
 
67
 
68
  db.close()
69
 
70
+ status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 NORMAL"
71
 
 
72
  return (
73
+ safety_score, # gr.Number
74
+ total_count, # gr.Number
75
+ toxic_count, # gr.Number
76
+ status_label, # gr.Label
77
+ fig, # gr.Plot (Donut Chart)
78
+ shame_data # gr.Dataframe
79
  )
80
 
81
  def clear_db():
 
83
  db.query(database.Comment).delete()
84
  db.commit()
85
  db.close()
86
+ empty_fig = go.Figure(data=[go.Pie(labels=['No Data'], values=[1], hole=.6)])
87
+ return 100, 0, 0, "🟢 NORMAL", empty_fig, []
88
 
89
  # --- UI LAYOUT ---
90
  with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
 
92
  gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
93
  clear_btn = gr.Button("🗑️ Clear Database", variant="stop", size="sm")
94
 
 
95
  with gr.Row():
96
  total_signals = gr.Number(label="Total Signals", value=0, precision=0)
97
  threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
98
  safety_score = gr.Number(label="✅ Safety Score (%)", value=100, precision=0)
99
 
100
  with gr.Row():
 
101
  with gr.Column(scale=2):
102
  gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
103
  with gr.Group():
 
105
  msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
106
  submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
107
 
 
108
  with gr.Column(scale=1):
109
+ gr.Markdown("### 📊 Toxicity Classification")
110
+ donut_chart = gr.Plot(label="Toxicity Distribution") # Changed to gr.Plot
111
  current_alert = gr.Label(label="Current Threat Level")
112
 
113
  gr.Markdown("### 💀 Strategic Insights & History")
114
  shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)
115
 
 
116
  submit_btn.click(
117
  update_dashboard,
118
  inputs=[user_input, msg_input],