File size: 5,749 Bytes
767b02b
f767df6
1e9fff8
533b555
f0f0758
efc1bd4
f767df6
 
1e9fff8
f767df6
f0f0758
f767df6
 
51c6011
 
f0f0758
1e9fff8
 
f0f0758
f767df6
 
1e9fff8
 
 
 
f767df6
 
 
 
f0f0758
51c6011
 
 
 
 
 
f0f0758
51c6011
efc1bd4
51c6011
 
f0f0758
 
 
efc1bd4
f0f0758
 
 
 
 
 
 
 
efc1bd4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51c6011
1e9fff8
 
 
 
 
f0f0758
4df52b5
efc1bd4
f767df6
4df52b5
 
 
 
 
f0f0758
efc1bd4
4df52b5
1e9fff8
51c6011
4df52b5
 
 
1e9fff8
51c6011
 
 
4df52b5
1e9fff8
51c6011
 
 
 
 
4df52b5
51c6011
efc1bd4
 
 
51c6011
 
f0f0758
efc1bd4
4df52b5
1e9fff8
51c6011
 
1e9fff8
 
51c6011
1e9fff8
efc1bd4
4df52b5
 
 
 
efc1bd4
1e9fff8
533b555
f767df6
767b02b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import os
import uvicorn
import pandas as pd
import gradio as gr
import plotly.graph_objects as go
from datetime import datetime
# NOTE(review): os, uvicorn, and pandas are not referenced anywhere in this
# file — confirm no other chunk uses them before removing.

# Import your local files
import database, schemas, moderator

# Initialize Database
# Creates tables on startup so the first request never hits a missing schema.
database.init_db()

# --- DASHBOARD LOGIC ---
# --- DASHBOARD LOGIC ---
def _build_donut(reasons):
    """Return a transparent donut chart of per-reason toxic counts."""
    colors = ['#7B68EE', '#FF4500', '#1E90FF', '#FFA500']
    fig = go.Figure(data=[go.Pie(
        labels=list(reasons.keys()),
        values=list(reasons.values()),
        hole=.6,
        marker_colors=colors,
        textinfo='label+percent'
    )])
    fig.update_layout(showlegend=True, margin=dict(t=10, b=10, l=10, r=10), height=300,
                      paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
    return fig


def _build_alert_html(user, text, analysis):
    """Return the red (toxic) or green (allowed) HTML alert card."""
    current_date = datetime.now().strftime("%m/%d/%Y")
    if analysis["is_toxic"]:
        return f"""
        <div style="background-color: #ffebee; border-radius: 10px; padding: 15px; border-left: 5px solid #f44336; margin-top: 20px;">
            <div style="display: flex; justify-content: space-between; align-items: center;">
                <strong style="color: #333;">👤 {user}</strong>
                <span style="color: #888; font-size: 0.8em;">{current_date}</span>
            </div>
            <p style="color: #d32f2f; font-weight: bold; margin: 10px 0;">⚠️ Comment flagged as Toxic (Confidence: {analysis['score']*100:.1f}%)</p>
            <p style="color: #f44336; font-size: 0.9em;">AI Reason: Classified as {analysis['reason'].upper()} with confidence {analysis['score']:.2f}</p>
        </div>
        """
    return f"""
        <div style="background-color: #e8f5e9; border-radius: 10px; padding: 15px; border-left: 5px solid #4caf50; margin-top: 20px;">
            <strong style="color: #333;">👤 {user}</strong>
            <p style="color: #2e7d32; margin: 10px 0;">✅ Comment allowed: {text}</p>
        </div>
        """


def update_dashboard(user, text):
    """Analyze one comment, persist it, and recompute every dashboard widget.

    Args:
        user: Display name of the comment author.
        text: Raw comment text to run through the moderation model.

    Returns:
        Tuple of (safety_score, total_count, toxic_count, status_label,
        donut_figure, shame_table_rows, alert_html) — the exact order the
        Gradio ``submit_btn.click`` outputs expect.
    """
    # 1. Run AI Analysis first, before a DB session exists, so a model
    #    failure cannot leak a session.
    analysis = moderator.moderator.analyze(text)

    # 2. Save to DB. try/finally guarantees the session is released even if
    #    a commit or query below raises (original leaked the session on error).
    db = database.SessionLocal()
    try:
        db_comment = database.Comment(
            video_id=1, user=user, text=text,
            is_toxic=analysis["is_toxic"],
            toxicity_score=analysis["score"],
            flagged_reason=analysis["reason"]
        )
        db.add(db_comment)
        db.commit()

        # 3. Global metrics — counted in SQL rather than materializing every
        #    row in Python; only the toxic rows are loaded (needed for step 4).
        total_count = db.query(database.Comment).count()
        toxic_comments = (
            db.query(database.Comment)
            .filter(database.Comment.is_toxic == True)  # noqa: E712 — SQLAlchemy expression
            .all()
        )
        toxic_count = len(toxic_comments)
        safe_count = total_count - toxic_count

        # 100% when the table is empty so the gauge starts green.
        safety_score = int((safe_count / total_count * 100)) if total_count > 0 else 100

        # 4. Bucket toxic rows by reason; unrecognized reasons fall back to
        #    "Online Harassment" so every row is represented in the chart.
        reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
        for c in toxic_comments:
            cat = c.flagged_reason if c.flagged_reason in reasons else "Online Harassment"
            reasons[cat] += 1
        fig = _build_donut(reasons)

        # 5. Visual alert card for the comment just analyzed.
        block_html = _build_alert_html(user, text, analysis)

        # Five most recent toxic comments for the history table.
        shame_query = (
            db.query(database.Comment)
            .filter(database.Comment.is_toxic == True)  # noqa: E712
            .order_by(database.Comment.id.desc())
            .limit(5)
            .all()
        )
        shame_data = [[c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason]
                      for c in shame_query]
    finally:
        db.close()

    status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 NORMAL"

    return safety_score, total_count, toxic_count, status_label, fig, shame_data, block_html

def clear_db():
    """Delete every stored comment and reset all dashboard widgets.

    Returns:
        The same 7-tuple shape as ``update_dashboard`` so both callbacks
        share one Gradio output list: pristine metrics, a placeholder
        donut, an empty history table, and no alert HTML.
    """
    # try/finally guarantees the session is released even if the bulk
    # delete or commit raises (original leaked the session on error).
    db = database.SessionLocal()
    try:
        db.query(database.Comment).delete()
        db.commit()
    finally:
        db.close()
    # Placeholder chart so the plot area never renders blank.
    empty_fig = go.Figure(data=[go.Pie(labels=['No Data'], values=[1], hole=.6)])
    return 100, 0, 0, "🟢 NORMAL", empty_fig, [], ""

# --- UI LAYOUT ---
# Declarative Gradio layout; widget creation order inside each Row/Column
# determines on-screen position, so statement order matters here.
with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
    # Header row: title on the left, destructive reset button on the right.
    with gr.Row():
        gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
        clear_btn = gr.Button("🗑️ Clear Database", variant="stop", size="sm")
    
    # Top-line metric counters, all updated by both callbacks below.
    with gr.Row():
        total_signals = gr.Number(label="Total Signals", value=0, precision=0)
        threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
        safety_score = gr.Number(label="✅ Safety Score (%)", value=100, precision=0)

    with gr.Row():
        # Left column (wider): embedded video, input form, and alert card.
        with gr.Column(scale=2):
            gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
            with gr.Group():
                user_input = gr.Textbox(label="User", value="Guest")
                msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
                submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
            
            # This is where the red block notification will appear
            moderation_alert = gr.HTML() 
        
        # Right column (narrower): donut chart and current threat label.
        with gr.Column(scale=1):
            gr.Markdown("### 📊 Toxicity Classification")
            donut_chart = gr.Plot(label="Toxicity Distribution")
            current_alert = gr.Label(label="Current Threat Level")

    gr.Markdown("### 💀 Strategic Insights & History")
    shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)

    # Both callbacks return the same 7-tuple, so they share one output list.
    # Output order here must match the functions' return order exactly.
    submit_btn.click(
        update_dashboard, 
        inputs=[user_input, msg_input], 
        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )
    
    clear_btn.click(
        clear_db,
        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )

if __name__ == "__main__":
    # Bind on all interfaces (container-friendly) at Gradio's default port.
    demo.launch(server_name="0.0.0.0", server_port=7860)