Shubhi324 commited on
Commit
1e9fff8
·
verified ·
1 Parent(s): 767b02b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -44
app.py CHANGED
@@ -1,64 +1,90 @@
1
  import os
2
  import uvicorn
3
- from fastapi import FastAPI, Depends, HTTPException
 
4
  from fastapi.middleware.cors import CORSMiddleware
5
- from sqlalchemy.orm import Session
6
- from typing import List
7
  import gradio as gr
8
 
9
  # Import your local files
10
- import database
11
- import schemas
12
- import moderator
13
 
14
- # 1. Initialize Database
15
  database.init_db()
16
 
17
- # 2. Setup FastAPI (for your API endpoints)
18
- app = FastAPI(title="SafeStream API")
19
- app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- def get_db():
22
  db = database.SessionLocal()
23
- try: yield db
24
- finally: db.close()
25
-
26
- @app.post("/comments", response_model=schemas.Comment)
27
- def create_comment(comment: schemas.CommentCreate, db: Session = Depends(get_db)):
28
- analysis = moderator.moderator.analyze(comment.text)
29
  db_comment = database.Comment(
30
- video_id=comment.video_id, user=comment.user, text=comment.text,
31
- timestamp=comment.timestamp, is_toxic=analysis["is_toxic"],
32
- toxicity_score=analysis["score"], flagged_reason=analysis["reason"]
 
33
  )
34
  db.add(db_comment)
35
  db.commit()
36
- db.refresh(db_comment)
37
- return db_comment
38
 
39
- # 3. Setup Gradio UI
40
- def moderate_chat(user, text):
41
- analysis = moderator.moderator.analyze(text)
42
- if analysis["is_toxic"]:
43
- return f"🚫 BLOCKED: {analysis['reason']} (Score: {analysis['score']:.2f})", "🔴 High Toxicity"
44
- return f"✅ Posted: {text}", "🟢 Safe"
 
 
 
 
 
 
 
 
 
45
 
46
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
47
- gr.Markdown("# 🛡️ SafeStream AI Moderator")
48
- with gr.Row():
49
- with gr.Column(scale=2):
50
- gr.HTML('<iframe width="100%" height="400" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
51
- with gr.Column(scale=1):
52
- gr.Markdown("### Live Chat Moderation")
53
- user_input = gr.Textbox(label="Username", value="Guest")
54
- msg_input = gr.Textbox(label="Type a comment...", placeholder="Try typing something mean or nice!")
55
- status_box = gr.Label(label="Moderation Status")
56
- output_text = gr.Textbox(label="Chat Feed", interactive=False)
57
- submit_btn = gr.Button("Post Comment")
58
- submit_btn.click(moderate_chat, inputs=[user_input, msg_input], outputs=[output_text, status_box])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
- # 4. LAUNCH (The Fix)
61
- # Instead of mounting, we run Gradio directly on port 7860.
62
- # Hugging Face will automatically find it.
63
  if __name__ == "__main__":
64
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import os
2
  import uvicorn
3
+ import pandas as pd
4
+ from fastapi import FastAPI, Depends
5
  from fastapi.middleware.cors import CORSMiddleware
 
 
6
  import gradio as gr
7
 
8
  # Import your local files
9
+ import database, schemas, moderator
 
 
10
 
11
# Initialize the database on import (see database.init_db) so tables/
# session factory are ready before any request or UI event fires.
database.init_db()
13
 
14
# --- MODERATION LOGIC ---
def moderate_and_update(user, text):
    """Moderate one chat comment, persist it, and return refreshed UI state.

    Parameters:
        user: display name of the commenter.
        text: raw comment text to analyze.

    Returns:
        A 4-tuple ``(msg, status, shame_data, stats_df)``:
        - msg:        feedback string for the "Last Action" textbox,
        - status:     label text ("🔴 High Toxicity" / "🟢 Safe"),
        - shame_data: rows (user, text, score, reason) for the last 5 toxic comments,
        - stats_df:   pandas DataFrame of safe/toxic counts for the bar chart.
    """
    # EMOJI ANALYSIS: quick membership check for known aggressive emojis.
    bad_emojis = ("🔪", "😡", "👊", "🖕", "🔫", "🤮")
    emoji_flag = any(e in text for e in bad_emojis)

    # AI ANALYSIS — moderator module; assumed to return a dict with
    # "is_toxic" (bool), "score" (float), "reason" (str). TODO confirm contract.
    analysis = moderator.moderator.analyze(text)

    # If an aggressive emoji is present but the model missed it,
    # override the verdict and floor the score at 0.85.
    if emoji_flag and not analysis["is_toxic"]:
        analysis["is_toxic"] = True
        analysis["reason"] = "Aggressive Emoji Detected"
        analysis["score"] = max(analysis["score"], 0.85)

    # One session for the whole request, closed in `finally` so the
    # connection cannot leak if a query raises. (The original opened two
    # sessions back-to-back and closed neither on the error path.)
    db = database.SessionLocal()
    try:
        # Save the moderated comment.
        db_comment = database.Comment(
            video_id=1,  # demo stream id — single hard-coded video for now
            user=user,
            text=text,
            is_toxic=analysis["is_toxic"],
            toxicity_score=analysis["score"],
            flagged_reason=analysis["reason"],
        )
        db.add(db_comment)
        db.commit()

        # Last 5 flagged comments for the "Hall of Shame" table.
        # NOTE: `== True` / `== False` are intentional — SQLAlchemy column
        # comparisons build SQL expressions and must not use `is`.
        shame_query = (
            db.query(database.Comment)
            .filter(database.Comment.is_toxic == True)  # noqa: E712
            .order_by(database.Comment.id.desc())
            .limit(5)
            .all()
        )
        shame_data = [
            [c.user, c.text, f"{c.toxicity_score:.2f}", c.flagged_reason]
            for c in shame_query
        ]

        # Aggregate counts for the analytics chart.
        total_safe = db.query(database.Comment).filter(database.Comment.is_toxic == False).count()  # noqa: E712
        total_toxic = db.query(database.Comment).filter(database.Comment.is_toxic == True).count()  # noqa: E712
    finally:
        db.close()

    stats_df = pd.DataFrame({"Status": ["Safe ✅", "Toxic 🚫"], "Count": [total_safe, total_toxic]})

    status = "🔴 High Toxicity" if analysis["is_toxic"] else "🟢 Safe"
    msg = f"🚫 BLOCKED: {analysis['reason']}" if analysis["is_toxic"] else f"✅ Posted: {text}"

    return msg, status, shame_data, stats_df
56
 
57
# --- UI LAYOUT ---
# Two-tab dashboard: a live-chat moderation view and an analytics view.
with gr.Blocks(theme=gr.themes.Soft(), fill_width=True) as demo:
    gr.Markdown("# 🛡️ SafeStream AI Pro Dashboard")

    with gr.Tabs():
        # TAB 1: live moderation — embedded video next to the chat controls.
        with gr.Tab("Live Chat"):
            with gr.Row():
                with gr.Column(scale=2):
                    # Embedded demo stream.
                    gr.HTML('<iframe width="100%" height="400" src="https://www.youtube.com/embed/dQw4w9WgXcQ" frameborder="0" allowfullscreen></iframe>')
                with gr.Column(scale=1):
                    # Comment entry + per-post moderation feedback.
                    username_tb = gr.Textbox(label="Username", value="Guest")
                    comment_tb = gr.Textbox(label="Type a comment...", placeholder="Try a mean emoji like 🔪 or text!")
                    post_btn = gr.Button("Post Comment", variant="primary")
                    status_lbl = gr.Label(label="Current Moderation Status")
                    feed_tb = gr.Textbox(label="Last Action", interactive=False)

            gr.Markdown("### 💀 The Hall of Shame (Recent Flagged Comments)")
            flagged_df = gr.Dataframe(headers=["User", "Comment", "Score", "Reason"], datatype=["str", "str", "str", "str"])

        # TAB 2: aggregate safe-vs-toxic counts.
        with gr.Tab("Safety Analytics"):
            gr.Markdown("### Real-time Toxicity Distribution")
            health_plot = gr.BarPlot(x="Status", y="Count", title="Community Health Overview", color="Status", color_map={"Safe ✅": "green", "Toxic 🚫": "red"})

    # Posting a comment refreshes the feed, status label, shame table and chart.
    post_btn.click(
        moderate_and_update,
        inputs=[username_tb, comment_tb],
        outputs=[feed_tb, status_lbl, flagged_df, health_plot],
    )

if __name__ == "__main__":
    # Bind to all interfaces on 7860 so Hugging Face Spaces can reach the app.
    demo.launch(server_name="0.0.0.0", server_port=7860)