OnlyCheeini committed on
Commit
b86ba0c
·
verified ·
1 Parent(s): 11463c3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import json
4
+ from pathlib import Path
5
+ from model import (
6
+ GreesyGPT,
7
+ generate_moderation,
8
+ ReasoningMode,
9
+ OutputFormat,
10
+ DEVICE,
11
+ describe_reasoning_modes
12
+ )
13
+
14
# 1. Initialize the model and restore trained weights when a checkpoint exists.
model = GreesyGPT()
weights_path = Path("greesy_gpt.pt")

if weights_path.exists():
    # weights_only=True restricts torch.load to tensors/containers, preventing
    # arbitrary-code execution if the checkpoint file has been tampered with.
    # A plain state_dict never needs full-pickle loading.
    model.load_state_dict(
        torch.load(weights_path, map_location=DEVICE, weights_only=True)
    )
    print(f"Loaded weights from {weights_path}")
else:
    print("Warning: No trained weights found. Using fresh initialization.")

model.to(DEVICE)
model.eval()  # inference only: disables dropout / training-mode behavior
26
+
27
def moderate(text, mode_str, format_str):
    """Run the moderation model on *text* and return (verdict, reasoning).

    Args:
        text: The message to analyze.
        mode_str: Reasoning-mode name chosen in the UI dropdown.
        format_str: Output-format name chosen in the UI dropdown.

    Returns:
        A 2-tuple of (formatted verdict, chain-of-thought text).
    """
    if not text.strip():
        return "Please enter some text to analyze.", ""

    # The UI hands us plain strings; the model API expects enum members.
    reasoning_mode = ReasoningMode(mode_str.lower())
    output_fmt = OutputFormat(format_str.lower())

    result = generate_moderation(
        model,
        prompt=text,
        mode=reasoning_mode,
        output_format=output_fmt,
    )

    verdict = result["verdict_fmt"]
    if output_fmt == OutputFormat.JSON:
        # Structured verdicts are prettified before display in the textbox.
        verdict = json.dumps(verdict, indent=2)

    return verdict, result.get("thinking", "No reasoning generated.")
51
+
52
# 2. Build the Gradio interface.
theme = gr.themes.Soft(
    primary_hue="orange",
    secondary_hue="gray",
)

with gr.Blocks(theme=theme, title="GreesyGPT Content Moderation") as demo:
    gr.Markdown("# 🛡️ GreesyGPT Content Moderation")
    gr.Markdown(
        "A reasoning-based safety model that analyzes content for violations "
        "using chain-of-thought deliberation."
    )

    with gr.Row():
        # Left column: input text plus generation controls.
        with gr.Column(scale=2):
            input_text = gr.Textbox(
                label="Message to Review",
                placeholder="Type the message you want to moderate here...",
                lines=5,
            )

            with gr.Row():
                mode_dropdown = gr.Dropdown(
                    choices=[m.value for m in ReasoningMode],
                    value="low",
                    label="Reasoning Mode",
                    info="Higher modes are more thorough but slower.",
                )
                format_dropdown = gr.Dropdown(
                    choices=[f.value for f in OutputFormat],
                    value="markdown",
                    label="Output Format",
                )

            submit_btn = gr.Button("Analyze Content", variant="primary")

        # Right column: verdict plus collapsible reasoning trace.
        with gr.Column(scale=3):
            output_verdict = gr.Markdown(label="Verdict")

            with gr.Accordion("View Internal Reasoning (Thinking Process)", open=False):
                output_thinking = gr.Textbox(
                    label="Chain of Thought",
                    interactive=False,
                    lines=10,
                )

    # Clickable sample inputs covering the main moderation categories.
    gr.Examples(
        examples=[
            ["You're so stupid, nobody likes you.", "medium", "markdown"],
            ["How do I fix a bug in my Python code?", "none", "markdown"],
            ["CONGRATULATIONS! You won a $1000 gift card! Click here!", "low", "json"],
            ["I feel really hopeless and don't want to continue.", "high", "markdown"],
        ],
        inputs=[input_text, mode_dropdown, format_dropdown],
    )

    with gr.Expander("System Information"):
        gr.Code(describe_reasoning_modes(), language="text")

    submit_btn.click(
        fn=moderate,
        inputs=[input_text, mode_dropdown, format_dropdown],
        outputs=[output_verdict, output_thinking],
    )
116
+
117
# Start the Gradio server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()