navaneethkrishnan commited on
Commit
4fdcb34
·
verified ·
1 Parent(s): fb5028a

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +143 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py — Hugging Face Space entrypoint (fixed imports)
2
+
3
import csv
import datetime
import io
import json
import os
import tempfile
import zipfile

import gradio as gr
10
+
11
# --- robust imports: absolute first, safe fallback for local runs ---
# Prefer the installed/absolute package; if that fails (e.g. running
# `python app.py` straight from a checkout), append this file's directory
# to sys.path and retry so `email_eval` resolves next to app.py.
try:
    from email_eval.api import evaluate, DEFAULT_WEIGHT_PRESETS, metric_keys
except Exception:
    # allow running `python app.py` locally without installing a package
    import sys, pathlib
    sys.path.append(str(pathlib.Path(__file__).parent.resolve()))
    from email_eval.api import evaluate, DEFAULT_WEIGHT_PRESETS, metric_keys

# Engine choices exposed in the UI dropdown; the selection is passed
# straight through to evaluate() as the `engine` argument.
ENGINES = ["openai", "claude"]
# Name of the weight preset used for the slider defaults below.
# NOTE(review): the UI indexes DEFAULT_WEIGHT_PRESETS["research_defaults"]
# directly, so PRESET itself appears unused here — confirm before removing.
PRESET = "research_defaults"
22
+
23
def run_eval(subject, body, engine,
             w_clarity, w_length, w_spam, w_perso, w_tone, w_grammar):
    """Evaluate an email (subject + body) and build every UI output.

    Parameters
    ----------
    subject, body : str or None
        Email subject line and plain-text body; None (empty textbox) is
        treated as "".
    engine : str
        LLM engine name, one of ENGINES ("openai" / "claude").
    w_clarity .. w_grammar : float
        Per-metric weights from the 0–10 sliders (normalized inside
        ``evaluate``).

    Returns
    -------
    tuple
        (table_rows, comments_str, tokens_line, csv_path, zip_path) —
        matching the Gradio ``outputs`` wired to the Evaluate button.
    """
    # Collect slider values into the weight mapping evaluate() expects.
    weights = {
        "clarity": float(w_clarity),
        "length": float(w_length),
        "spam_score": float(w_spam),
        "personalization": float(w_perso),
        "tone": float(w_tone),
        "grammatical_hygiene": float(w_grammar),
    }

    # Run the evaluation; `or ""` guards against None from empty inputs.
    out = evaluate(subject or "", body or "", engine=engine, weights=weights)

    # Full result as pretty JSON for the ZIP export.
    out_json_str = json.dumps(out, indent=2, ensure_ascii=False)

    # Comments pane: one line per metric — LLM comment plus any rule
    # explanations. Hoist the sub-dict lookups out of the loop.
    comments_map = out.get("comments", {})
    explanations_map = out.get("explanations", {})
    comments_lines = []
    for k in metric_keys():
        comm = comments_map.get(k, 'No comment.')
        expl = "; ".join(explanations_map.get(k, []))
        comments_lines.append(f"{k}: {comm}" + (f" (explanations: {expl})" if expl else ""))
    comments_str = "\n".join(comments_lines) if comments_lines else "No comments."

    # Token-usage summary line (usage may be absent or None).
    usage = out.get("usage", {}) or {}
    tokens_line = (
        f"OpenAI tokens: {usage.get('openai_total', 0)} | "
        f"Claude tokens: {usage.get('claude_total', 0)} | "
        f"Total: {usage.get('total', 0)}"
    )

    # Timestamped export paths. datetime.utcnow() is deprecated (3.12+),
    # so use an aware UTC datetime. tempfile.gettempdir() resolves to /tmp
    # on HF Spaces but keeps the app portable elsewhere.
    ts = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d_%H%M%S")
    tmp_dir = tempfile.gettempdir()

    # CSV export: one row per metric plus the weighted total.
    csv_path = os.path.join(tmp_dir, f"email_eval_{ts}.csv")
    with open(csv_path, "w", encoding="utf-8", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["metric", "score_0_10"])
        for k in metric_keys():
            writer.writerow([k, out["scores"][k]])
        writer.writerow(["weighted_total", out["weighted_total"]])

    # ZIP export bundles the raw JSON together with the CSV just written.
    zip_path = os.path.join(tmp_dir, f"email_eval_{ts}.zip")
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
        zf.writestr(f"email_eval_{ts}.json", out_json_str)
        with open(csv_path, "rb") as f:
            zf.writestr(f"email_eval_{ts}.csv", f.read())

    # Dataframe rows: (metric, score) pairs plus the weighted total.
    table_rows = [[k, out["scores"][k]] for k in metric_keys()]
    table_rows.append(["weighted_total", out["weighted_total"]])

    return table_rows, comments_str, tokens_line, csv_path, zip_path
79
+
80
+
81
# --- UI layout: inputs, weight sliders, action button, tabbed outputs ---
with gr.Blocks(title="Email Evaluator (Subject + Body)") as demo:
    # Inline CSS: force the primary (Evaluate) button to black-on-white
    # regardless of the active Gradio theme.
    gr.HTML("""
    <style>
    button.primary, .gr-button-primary, button[aria-label="Evaluate"].primary {
    background-color: #000 !important; color: #fff !important; border-color: #000 !important;
    }
    </style>
    """)
    gr.Markdown(
        "# Email Evaluator — Subject + Body\n"
        "Six metrics · Rules + math-based LLM (OpenAI / Claude) · Research-grounded defaults."
    )

    # Email inputs.
    subject = gr.Textbox(
        label="Subject",
        placeholder="e.g., Q3 draft — please review by Thu 5 PM"
    )
    body = gr.Textbox(
        label="Body (plain text)",
        lines=16,
        placeholder="Paste the email body here..."
    )
    # Engine selector; choices come from the module-level ENGINES list.
    engine = gr.Dropdown(
        ENGINES, value="openai",
        label="Engine (LLM assistance for clarity, tone flags, personalization cues, spam-lexicon matching)"
    )

    # One slider per metric, seeded from the "research_defaults" preset so
    # slider defaults track the research-grounded weights.
    gr.Markdown("### Weights (0–10 each; normalized internally)")
    w1 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["clarity"],
                   step=0.5, label="Clarity")
    w2 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["length"],
                   step=0.5, label="Length")
    w3 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["spam_score"],
                   step=0.5, label="Spam")
    w4 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["personalization"],
                   step=0.5, label="Personalization")
    w5 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["tone"],
                   step=0.5, label="Tone")
    w6 = gr.Slider(0, 10, value=DEFAULT_WEIGHT_PRESETS["research_defaults"]["grammatical_hygiene"],
                   step=0.5, label="Grammar")

    btn = gr.Button("Evaluate", variant="primary")

    # Tabbed outputs: scores table, comments text, token-usage line.
    # Row count 7 = six metrics + the weighted_total row from run_eval.
    with gr.Tab("Scores"):
        out_table = gr.Dataframe(headers=["metric","score_0_10"], row_count=(7,"fixed"), col_count=(2,"fixed"), wrap=True, interactive=False, label="Metric-wise scores and total")
    with gr.Tab("Comments"):
        comments = gr.Textbox(label="Comments & triggered rules", lines=10)
    with gr.Tab("Usage"):
        tokens = gr.Textbox(label="Tokens used")

    # Download widgets for the files run_eval writes to temp storage.
    with gr.Row():
        dl_csv = gr.File(label="Download CSV")
        dl_zip = gr.File(label="Download ZIP (CSV + JSON)")

    # Wire the button: inputs/outputs must stay in the same order as the
    # run_eval signature and return tuple.
    btn.click(
        run_eval,
        inputs=[subject, body, engine, w1, w2, w3, w4, w5, w6],
        outputs=[out_table, comments, tokens, dl_csv, dl_zip]
    )
140
+
141
if __name__ == "__main__":
    # Hugging Face Spaces will run this automatically, but local runs benefit from this.
    # Bind on all interfaces and honor a PORT env override (default 7860,
    # Gradio's standard port); show_error surfaces tracebacks in the UI.
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")), show_error=True)
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ gradio>=4.44.0
2
+ regex>=2023.12.25
3
+ beautifulsoup4>=4.12.3
4
+ pyspellchecker>=0.8.1
5
+ openai>=1.40.0 # for OpenAI mode
6
+ anthropic>=0.34.0 # for Claude mode
7
+ orjson>=3.10.7
8
+ python-dotenv>=1.0.1 # optional .env loading for local keys