import os
import uvicorn
import pandas as pd
import gradio as gr
import plotly.graph_objects as go
from datetime import datetime

# Import your local files
import database, schemas, moderator

# Initialize Database
database.init_db()
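
# NOTE: database.py, schemas.py and moderator.py are separate local modules not
# shown in this file. From how they are used below, the app assumes that:
#   - database exposes init_db(), SessionLocal, and a Comment model with the
#     columns video_id, user, text, is_toxic, toxicity_score and flagged_reason
#     (a SQLAlchemy-style setup is assumed here, not confirmed by this file);
#   - moderator exposes a module-level `moderator` object whose analyze(text)
#     returns a dict with "is_toxic" (bool), "score" (float) and "reason" (str).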

# --- DASHBOARD LOGIC ---
def update_dashboard(user, text):
    # 1. Run AI Analysis
    analysis = moderator.moderator.analyze(text)

    # 2. Save to DB
    db = database.SessionLocal()
    db_comment = database.Comment(
        video_id=1, user=user, text=text,
        is_toxic=analysis["is_toxic"],
        toxicity_score=analysis["score"],
        flagged_reason=analysis["reason"]
    )
    db.add(db_comment)
    db.commit()

    # 3. Calculate Global Metrics
    all_comments = db.query(database.Comment).all()
    total_count = len(all_comments)
    toxic_comments = [c for c in all_comments if c.is_toxic]
    toxic_count = len(toxic_comments)
    safe_count = total_count - toxic_count
    safety_score = int(safe_count / total_count * 100) if total_count > 0 else 100

    # 4. Chart Logic
    reasons = {"Identity Hate": 0, "Insult": 0, "Online Harassment": 0, "Threat": 0}
    for c in toxic_comments:
        cat = c.flagged_reason if c.flagged_reason in reasons else "Online Harassment"
        reasons[cat] += 1

    # Create Donut Chart
    colors = ['#7B68EE', '#FF4500', '#1E90FF', '#FFA500']
    fig = go.Figure(data=[go.Pie(
        labels=list(reasons.keys()),
        values=list(reasons.values()),
        hole=.6,
        marker_colors=colors,
        textinfo='label+percent'
    )])
    fig.update_layout(showlegend=True, margin=dict(t=10, b=10, l=10, r=10), height=300,
                      paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')

    # 5. Create the "Visual Block" HTML (Your requested feature)
    current_date = datetime.now().strftime("%m/%d/%Y")
    if analysis["is_toxic"]:
        block_html = f"""
        <div style="background-color: #ffebee; border-radius: 10px; padding: 15px; border-left: 5px solid #f44336; margin-top: 20px;">
            <div style="display: flex; justify-content: space-between; align-items: center;">
                <strong style="color: #333;">👤 {user}</strong>
                <span style="color: #888; font-size: 0.8em;">{current_date}</span>
            </div>
            <p style="color: #d32f2f; font-weight: bold; margin: 10px 0;">⚠️ Comment flagged as Toxic (Confidence: {analysis['score']*100:.1f}%)</p>
            <p style="color: #f44336; font-size: 0.9em;">AI Reason: Classified as {analysis['reason'].upper()} with confidence {analysis['score']:.2f}</p>
        </div>
        """
    else:
        block_html = f"""
        <div style="background-color: #e8f5e9; border-radius: 10px; padding: 15px; border-left: 5px solid #4caf50; margin-top: 20px;">
            <strong style="color: #333;">👤 {user}</strong>
            <p style="color: #2e7d32; margin: 10px 0;">✅ Comment allowed: {text}</p>
        </div>
        """

    shame_query = db.query(database.Comment).filter(database.Comment.is_toxic == True).order_by(database.Comment.id.desc()).limit(5).all()
    shame_data = [[c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason] for c in shame_query]
    db.close()

    status_label = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 NORMAL"
    return safety_score, total_count, toxic_count, status_label, fig, shame_data, block_html

def clear_db():
    db = database.SessionLocal()
    db.query(database.Comment).delete()
    db.commit()
    db.close()
    empty_fig = go.Figure(data=[go.Pie(labels=['No Data'], values=[1], hole=.6)])
    return 100, 0, 0, "🟢 NORMAL", empty_fig, [], ""

# --- UI LAYOUT ---
with gr.Blocks(theme=gr.themes.Default(), title="Admin Intelligence Hub") as demo:
    with gr.Row():
        gr.Markdown("# 🛡️ Admin Intelligence Hub\n*Real-time threat monitoring and classification*")
        clear_btn = gr.Button("🗑️ Clear Database", variant="stop", size="sm")

    with gr.Row():
        total_signals = gr.Number(label="Total Signals", value=0, precision=0)
        threats_found = gr.Number(label="🚫 Threats Identified", value=0, precision=0)
        safety_score = gr.Number(label="✅ Safety Score (%)", value=100, precision=0)

    with gr.Row():
        with gr.Column(scale=2):
            gr.HTML('<iframe width="100%" height="350" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
            with gr.Group():
                user_input = gr.Textbox(label="User", value="Guest")
                msg_input = gr.Textbox(label="Message Inference", placeholder="Analyze text or emojis...")
                submit_btn = gr.Button("ANALYZE SIGNAL", variant="primary")
            # This is where the red block notification will appear
            moderation_alert = gr.HTML()

        with gr.Column(scale=1):
            gr.Markdown("### 📊 Toxicity Classification")
            donut_chart = gr.Plot(label="Toxicity Distribution")
            current_alert = gr.Label(label="Current Threat Level")

    gr.Markdown("### 💀 Strategic Insights & History")
    shame_table = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], interactive=False)

    submit_btn.click(
        update_dashboard,
        inputs=[user_input, msg_input],
        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )
    clear_btn.click(
        clear_db,
        outputs=[safety_score, total_signals, threats_found, current_alert, donut_chart, shame_table, moderation_alert]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
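
# To try the dashboard locally (assuming the local modules above are in place),
# run `python app.py` and open http://localhost:7860; binding to 0.0.0.0 on
# port 7860 matches what a hosted Gradio Space expects.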