Shubhi324 committed
Commit: efc1bd4 (verified)
Parent: f0f0758

Update app.py

Files changed (1): app.py (+34, −25)
app.py CHANGED
@@ -3,6 +3,7 @@ import uvicorn
 import pandas as pd
 import gradio as gr
 import plotly.graph_objects as go
+from datetime import datetime
 
 # Import your local files
 import database, schemas, moderator
@@ -33,17 +34,15 @@ def update_dashboard(user, text):
     toxic_count = len(toxic_comments)
     safe_count = total_count - toxic_count
 
-    # Safety Score calculation
     safety_score = int((safe_count / total_count * 100)) if total_count > 0 else 100
 
-    # 4. FIXED CHART LOGIC: Ensure data is never zero if toxic comments exist
+    # 4. Chart Logic
     reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
     for c in toxic_comments:
-        # If the AI reason isn't in our list, default to Online Harassment so it shows on chart
         cat = c.flagged_reason if c.flagged_reason in reasons else "Online Harassment"
         reasons[cat] += 1
 
-    # CREATE REAL DONUT CHART
+    # Create Donut Chart
     colors = ['#7B68EE', '#FF4500', '#1E90FF', '#FFA500']
     fig = go.Figure(data=[go.Pie(
         labels=list(reasons.keys()),
@@ -52,16 +51,30 @@ def update_dashboard(user, text):
         marker_colors=colors,
         textinfo='label+percent'
     )])
-    fig.update_layout(
-        showlegend=True,
-        margin=dict(t=10, b=10, l=10, r=10),
-        height=300,
-        paper_bgcolor='rgba(0,0,0,0)',
-        plot_bgcolor='rgba(0,0,0,0)',
-        font=dict(color="white") if os.getenv("GRADIO_THEME") == "dark" else dict(color="black")
-    )
+    fig.update_layout(showlegend=True, margin=dict(t=10, b=10, l=10, r=10), height=300,
+                      paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
+
+    # 5. Create the "Visual Block" HTML (Your requested feature)
+    current_date = datetime.now().strftime("%m/%d/%Y")
+    if analysis["is_toxic"]:
+        block_html = f"""
+        <div style="background-color: #ffebee; border-radius: 10px; padding: 15px; border-left: 5px solid #f44336; margin-top: 20px;">
+            <div style="display: flex; justify-content: space-between; align-items: center;">
+                <strong style="color: #333;">👤 {user}</strong>
+                <span style="color: #888; font-size: 0.8em;">{current_date}</span>
+            </div>
+            <p style="color: #d32f2f; font-weight: bold; margin: 10px 0;">⚠️ Comment flagged as Toxic (Confidence: {analysis['score']*100:.1f}%)</p>
+            <p style="color: #f44336; font-size: 0.9em;">AI Reason: Classified as {analysis['reason'].upper()} with confidence {analysis['score']:.2f}</p>
+        </div>
+        """
+    else:
+        block_html = f"""
+        <div style="background-color: #e8f5e9; border-radius: 10px; padding: 15px; border-left: 5px solid #4caf50; margin-top: 20px;">
+            <strong style="color: #333;">👤 {user}</strong>
+            <p style="color: #2e7d32; margin: 10px 0;">✅ Comment allowed: {text}</p>
+        </div>
+        """
 
-    # Hall of Shame Data (Last 5 toxic)
     shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
     shame_data = [[c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason] for c in shame_query]
 
@@ -69,14 +82,7 @@ def update_dashboard(user, text):
 
     status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 NORMAL"
 
-    return (
-        safety_score,  # gr.Number
-        total_count,   # gr.Number
-        toxic_count,   # gr.Number
-        status_label,  # gr.Label
-        fig,           # gr.Plot (Donut Chart)
-        shame_data     # gr.Dataframe
-    )
+    return safety_score, total_count, toxic_count, status_label, fig, shame_data, block_html
 
 def clear_db():
     db = database.SessionLocal()
@@ -84,7 +90,7 @@ def clear_db():
     db.commit()
     db.close()
     empty_fig = go.Figure(data=[go.Pie(labels=['No Data'], values=[1], hole=.6)])
-    return 100, 0, 0, "🟢 NORMAL", empty_fig, []
+    return 100, 0, 0, "🟢 NORMAL", empty_fig, [], ""
 
 # --- UI LAYOUT ---
 with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
@@ -104,10 +110,13 @@ with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as dem
             user_input = gr.Textbox(label="User", value="Guest")
             msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
             submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
+
+            # This is where the red block notification will appear
+            moderation_alert = gr.HTML()
 
         with gr.Column(scale=1):
             gr.Markdown("### 📊 Toxicity Classification")
-            donut_chart = gr.Plot(label="Toxicity Distribution")  # Changed to gr.Plot
+            donut_chart = gr.Plot(label="Toxicity Distribution")
             current_alert = gr.Label(label="Current Threat Level")
 
     gr.Markdown("### 💀 Strategic Insights & History")
@@ -116,12 +125,12 @@ with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as dem
     submit_btn.click(
         update_dashboard,
        inputs=[user_input, msg_input],
-        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table]
+        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )
 
     clear_btn.click(
        clear_db,
-        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table]
+        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )
 
 if __name__ == "__main__":
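
For reference, the chart code after this commit boils down to the standalone sketch below (Plotly only). It is illustrative, not the Space's exact code: the counts are made up, and the values=... and hole=... arguments are assumptions, since those go.Pie() lines sit in unchanged context outside the hunk (clear_db()'s empty_fig suggests hole=.6).

    import plotly.graph_objects as go

    # Illustrative counts; the real app tallies flagged_reason values from the DB.
    reasons = {"Identity Hate": 2, "Insult": 5, "Online Harassment": 3, "Threat": 1}
    colors = ['#7B68EE', '#FF4500', '#1E90FF', '#FFA500']

    fig = go.Figure(data=[go.Pie(
        labels=list(reasons.keys()),
        values=list(reasons.values()),  # assumed: unchanged context in the real file
        hole=0.6,                       # assumed from clear_db()'s empty_fig
        marker_colors=colors,
        textinfo='label+percent'
    )])
    # The commit's consolidated layout: transparent backgrounds, fixed height,
    # and no theme-dependent font (the os.getenv("GRADIO_THEME") branch is gone).
    fig.update_layout(showlegend=True, margin=dict(t=10, b=10, l=10, r=10), height=300,
                      paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
    fig.show()

Dropping the font=... conditional also removes the os.getenv() lookup from the layout call, so the chart no longer changes label colour with the Gradio theme.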
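The headline change is the "visual block": update_dashboard now returns an HTML string as a seventh value, and a new gr.HTML() component (moderation_alert) renders it, so both click handlers append it to outputs. Below is a minimal, self-contained sketch of that wiring. The analyze() stub is hypothetical and stands in for the Space's moderator call, and html.escape() is an extra precaution the commit's f-strings do not take (they interpolate user and text into the HTML directly).

    import html
    from datetime import datetime

    import gradio as gr

    def analyze(user, text):
        # Hypothetical stand-in for the Space's moderator analysis.
        is_toxic = "hate" in text.lower()
        date = datetime.now().strftime("%m/%d/%Y")
        safe_user, safe_text = html.escape(user), html.escape(text)
        if is_toxic:
            # Red card, same idea as the commit's toxic branch (styling trimmed).
            return (f'<div style="border-left: 5px solid #f44336; padding: 15px;">'
                    f'<strong>👤 {safe_user}</strong> <span>{date}</span>'
                    f'<p>⚠️ Comment flagged as Toxic</p></div>')
        # Green card, matching the commit's "Comment allowed" branch.
        return (f'<div style="border-left: 5px solid #4caf50; padding: 15px;">'
                f'<strong>👤 {safe_user}</strong>'
                f'<p>✅ Comment allowed: {safe_text}</p></div>')

    with gr.Blocks() as demo:
        user_in = gr.Textbox(label="User", value="Guest")
        msg_in = gr.Textbox(label="Message")
        btn = gr.Button("Analyze")
        alert = gr.HTML()  # renders the returned HTML string, like moderation_alert
        btn.click(analyze, inputs=[user_in, msg_in], outputs=[alert])

    if __name__ == "__main__":
        demo.launch()

Gradio matches a callback's return values to its outputs list positionally, which is why moderation_alert is appended to both outputs lists above and why clear_db() now returns a trailing "" to blank the panel on reset.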