davidbeaver committed
Commit fea1f8d · verified · 1 Parent(s): f0253ab

Update app.py
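A note on the new length-normalization option in the diff below: a longer candidate accumulates more negative per-token log-prob terms, so raw sequence probability systematically favors candidates that tokenize to fewer tokens. A tiny illustration of the trade-off in Python (the log-prob values are made up for illustration, not model output):

import math

# Hypothetical per-token log-probs (illustrative values only)
lp_short = [-2.0]              # candidate that is 1 token, total = -2.0
lp_long  = [-1.2, -1.1, -1.0]  # candidate that is 3 tokens, total = -3.3

total_short, total_long = sum(lp_short), sum(lp_long)
print(total_short > total_long)          # True: short candidate wins on raw total
print(total_short / 1 > total_long / 3)  # False: long candidate wins per-token
print(math.exp(total_short), math.exp(total_long))  # raw sequence probabilities

This is why compare_candidates reports per-token odds separately when use_len_norm is enabled: the two scores can disagree, and neither is the right answer for every use case.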

Files changed (1)
  1. app.py +120 -90
app.py CHANGED
@@ -4,30 +4,39 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 # ---- Model config ----
-MODEL_NAME = "gpt2" # e.g., "distilgpt2", "gpt2", "gpt2-medium"
+MODEL_NAME = "gpt2" # e.g., "distilgpt2", "gpt2-medium"
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 tok = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
 model.eval()
 
+EPS = 1e-9 # tie tolerance
+
+def safe_exp(x: float) -> str:
+    # Pretty string even for big magnitudes
+    try:
+        return f"{math.exp(x):.6e}"
+    except OverflowError:
+        return "∞ (overflow)"
+    except Exception:
+        return "—"
+
+def is_finite(x: float) -> bool:
+    return x is not None and math.isfinite(x)
+
 def seq_logprob(context: str, candidate: str, assume_leading_space: bool, show_topk: int):
     """
-    Return (total_logprob, step_detail_text, token_list)
-    Computes P(candidate | context) via chain rule over the candidate tokens.
+    Compute log P(candidate | context) via chain rule over tokens.
+    Returns (total_logprob, detail_text, token_list, num_tokens).
     """
-    if not context.strip():
-        return None, "Please enter context.", []
-
-    # Helpful for GPT-2 BPE “word starts”
     cand_text = (" " + candidate) if assume_leading_space else candidate
-
     with torch.no_grad():
         ctx_ids = tok.encode(context, return_tensors="pt").to(device)
         cand_ids = tok.encode(cand_text, add_special_tokens=False)
 
         if len(cand_ids) == 0:
-            return None, "Candidate tokenized to empty sequence (check spacing).", []
+            return None, "Candidate tokenized to empty sequence (check spacing).", [], 0
 
         total_logprob = 0.0
         step_lines = []
@@ -38,8 +47,8 @@ def seq_logprob(context: str, candidate: str, assume_leading_space: bool, show_topk: int):
             outputs = model(input_ids=input_ids)
             logits = outputs.logits[:, -1, :]
             logprobs = torch.log_softmax(logits, dim=-1)
-            token_logprob = logprobs[0, t_id].item()
-            total_logprob += token_logprob
+            token_lp = logprobs[0, t_id].item()
+            total_logprob += token_lp
 
             tok_str = tok.decode([t_id])
             token_texts.append(tok_str)
@@ -49,126 +58,147 @@ def seq_logprob(context: str, candidate: str, assume_leading_space: bool, show_topk: int):
                 tops = ", ".join([f"{repr(tok.decode([int(idx)]))}:{val.item():.2f}"
                                   for idx, val in zip(topk_idx[0], topk_vals[0])])
                 step_lines.append(
-                    f"Step {i+1}: token={repr(tok_str)} logprob={token_logprob:.6f} "
-                    f"prob={math.exp(token_logprob):.6e}\n top-{show_topk}: {tops}"
+                    f"Step {i+1}: token={repr(tok_str)} logprob={token_lp:.6f} "
+                    f"prob={math.exp(token_lp):.6e}\n top-{show_topk}: {tops}"
                 )
             else:
                 step_lines.append(
-                    f"Step {i+1}: token={repr(tok_str)} logprob={token_logprob:.6f} "
-                    f"prob={math.exp(token_logprob):.6e}"
+                    f"Step {i+1}: token={repr(tok_str)} logprob={token_lp:.6f} "
+                    f"prob={math.exp(token_lp):.6e}"
                 )
 
             # teacher-forcing: append the true token to continue conditioning
             input_ids = torch.cat([input_ids, torch.tensor([[t_id]], device=device)], dim=1)
 
-    detail_text = "\n".join(step_lines)
-    return total_logprob, detail_text, token_texts
+    return total_logprob, "\n".join(step_lines), token_texts, len(cand_ids)
 
-def compare_candidates(context, cand1, cand2, assume_space, topk):
-    # Basic input checks
-    errs = []
+def compare_candidates(context, candA, candB, assume_space, topk, use_len_norm):
+    # Basic checks
+    errors = []
     if not context.strip():
-        errs.append("Please enter a context.")
-    if not cand1.strip():
-        errs.append("Please enter Candidate A.")
-    if not cand2.strip():
-        errs.append("Please enter Candidate B.")
-    if errs:
-        return (
-            f"<div style='color:#b00020;font-weight:600'>{' '.join(errs)}</div>",
-            "", "", "", "", ""
-        )
-
-    # Compute log-probs
-    logp1, details1, toks1 = seq_logprob(context, cand1, assume_space, topk)
-    logp2, details2, toks2 = seq_logprob(context, cand2, assume_space, topk)
-
-    if logp1 is None or logp2 is None:
-        return (
-            "<div style='color:#b00020;font-weight:600'>Tokenization error. Check inputs.</div>",
-            details1, details2, "", "", ""
-        )
+        errors.append("Please enter a context.")
+    if not candA.strip():
+        errors.append("Please enter Candidate A.")
+    if not candB.strip():
+        errors.append("Please enter Candidate B.")
+    if errors:
+        msg = " ".join(errors)
+        return (f"<div style='color:#b00020;font-weight:600'>{msg}</div>",
+                "", "", "", "", "")
+
+    # Compute
+    lpA, detA, toksA, nA = seq_logprob(context, candA, assume_space, topk)
+    lpB, detB, toksB, nB = seq_logprob(context, candB, assume_space, topk)
+
+    # Validate numbers
+    if not (is_finite(lpA) and is_finite(lpB)):
+        return ("<div style='color:#b00020;font-weight:600'>Numerical issue (NaN/Inf). "
+                "Try shorter context, smaller model (e.g., distilgpt2), or disable length normalization.</div>",
+                "", "", "", "", "")
+
+    # Optionally length-normalize (per-token average log-prob)
+    # Note: odds under length-normalization are "per-token odds", not whole-sequence odds.
+    if use_len_norm:
+        if nA == 0 or nB == 0:
+            return ("<div style='color:#b00020;font-weight:600'>Empty tokenization. "
+                    "Check spacing or turn off 'assume leading space'.</div>",
+                    "", "", "", "", "")
+        scoreA = lpA / nA
+        scoreB = lpB / nB
+        label_suffix = " (per-token)"
+    else:
+        scoreA = lpA
+        scoreB = lpB
+        label_suffix = ""
 
-    # Summaries for each candidate
-    def make_summary(label, cand, logp, toks):
-        seq_prob = math.exp(logp)
-        return (
-            f"**{label}**: {cand}\n\n"
-            f"Tokenization: {toks}\n"
-            f"Total logprob: {logp:.6f}\n"
-            f"Sequence probability: {seq_prob:.6e}"
-        )
+    diff = scoreA - scoreB # log-odds if unnormalized; log per-token odds otherwise
 
-    summary1 = make_summary("Candidate A", cand1, logp1, toks1)
-    summary2 = make_summary("Candidate B", cand2, logp2, toks2)
+    # Winner logic with proper tie handling
+    if abs(diff) <= EPS:
+        winner = "Tie"
+    elif diff > 0:
+        winner = "Candidate A"
+    else:
+        winner = "Candidate B"
 
-    # Ratio, odds, and winner
-    log_odds = logp1 - logp2 # log(P(A)/P(B))
-    # Cap extreme ratios for display; still show exact log-odds
-    try:
-        ratio = math.exp(log_odds)
-        ratio_str = f"{ratio:.6e}"
-    except OverflowError:
-        ratio_str = "∞ (overflow)"
-    winner = "Candidate A" if logp1 > logp2 else ("Tie" if abs(log_odds) < 1e-12 else "Candidate B")
+    ratio_str = safe_exp(diff)
 
+    # Colors
     if winner == "Candidate A":
        win_color = "#166534" # green
     elif winner == "Candidate B":
         win_color = "#1d4ed8" # blue
     else:
-        win_color = "#92400e" # amber (tie)
+        win_color = "#92400e" # amber
 
+    # Headline
    headline = (
         f"<div style='padding:14px;border-radius:12px;background:#f8fafc;"
         f"border:1px solid #e2e8f0;margin-bottom:10px'>"
-        f"<div style='font-size:20px;font-weight:800;color:{win_color};'>Winner: {winner}</div>"
+        f"<div style='font-size:20px;font-weight:800;color:{win_color};'>Winner: {winner}{label_suffix}</div>"
         f"<div style='margin-top:6px;font-size:16px;'>"
-        f"Odds (A/B) = <b>{ratio_str}</b> &nbsp;|&nbsp; "
-        f"log-odds = <b>{log_odds:.6f}</b>"
+        f"Odds A/B{label_suffix} = <b>{ratio_str}</b> &nbsp;|&nbsp; "
+        f"log-odds A−B{label_suffix} = <b>{diff:.6f}</b>"
         f"</div>"
         f"<div style='margin-top:6px;color:#475569'>"
-        f"(Odds &gt; 1 means A is more probable; &lt; 1 means B is more probable.)"
+        f"(Odds &gt; 1 A more probable; &lt; 1 B more probable. "
+        f"{'Per-token uses average log-prob.' if use_len_norm else 'Whole-sequence comparison.'})"
         f"</div></div>"
     )
 
-    return headline, summary1, details1, summary2, details2, ""
+    def summarize(label, cand, lp, toks, n):
+        return (
+            f"**{label}**: {cand}\n\n"
+            f"Tokenization: {toks}\n"
+            f"Total logprob: {lp:.6f}\n"
+            f"Sequence probability: {math.exp(lp):.6e}\n"
+            f"Tokens: {n}"
+        )
+
+    sumA = summarize("Candidate A", candA, lpA, toksA, nA)
+    sumB = summarize("Candidate B", candB, lpB, toksB, nB)
 
-with gr.Blocks(title="Two-Candidate Next-Token Probability Comparator") as demo:
+    return headline, sumA, detA, sumB, detB, ""
+
+def swap(a, b):
+    return b, a
+
+with gr.Blocks(title="Two-Candidate Next-Token Comparator (Robust)") as demo:
     gr.Markdown(
-        "# Two-Candidate Next-Word/Token Probability\n"
-        "Given a **context**, compare the conditional probabilities of **two candidate continuations**.\n"
-        "- Uses a pretrained causal LM (default: GPT-2). No fine-tuning.\n"
-        "- Works at the **token** level; multi-token “words” are handled via the chain rule.\n"
-        "- The **Winner** is the higher-probability candidate; we also show the **odds ratio (A/B)** and log-odds."
+        "# Two-Candidate Next-Word/Token Probability (Robust)\n"
+        "Compare **P(A|context)** vs **P(B|context)** from a pretrained causal LM (no fine-tuning).\n"
+        "- Proper tie handling and numerical guards.\n"
+        "- Optional **length normalization** (per-token).\n"
+        "- Use **Swap** to sanity-check symmetry."
     )
-
     with gr.Row():
-        context = gr.Textbox(label="Context (prompt)", lines=6, placeholder="Paste your prior text here...")
-
+        context = gr.Textbox(label="Context (prompt)", lines=6, placeholder="Paste prior text here...")
     with gr.Row():
-        cand1 = gr.Textbox(label="Candidate A (follow-up)")
-        cand2 = gr.Textbox(label="Candidate B (follow-up)")
-
+        candA = gr.Textbox(label="Candidate A (follow-up)")
+        candB = gr.Textbox(label="Candidate B (follow-up)")
     with gr.Row():
-        assume_space = gr.Checkbox(
-            value=True,
-            label="Assume leading space before candidates (helps align with word starts in GPT-2 tokenization)"
-        )
+        assume_space = gr.Checkbox(value=True, label="Assume leading space before candidates (useful for GPT-2 tokenization)")
         topk = gr.Slider(0, 20, value=5, step=1, label="Show top-k alternatives (per token step)")
+        use_len_norm = gr.Checkbox(value=False, label="Use length normalization (average log-prob per token)")
+    with gr.Row():
+        btn_compare = gr.Button("Compare", variant="primary")
+        btn_swap = gr.Button("Swap A ↔ B")
 
-    btn = gr.Button("Compare")
     winner_html = gr.HTML()
-    summary1 = gr.Markdown()
-    details1 = gr.Textbox(label="Candidate A — step-by-step", lines=10)
-    summary2 = gr.Markdown()
-    details2 = gr.Textbox(label="Candidate B — step-by-step", lines=10)
+    summaryA = gr.Markdown()
+    detailsA = gr.Textbox(label="Candidate A — step-by-step", lines=10)
+    summaryB = gr.Markdown()
+    detailsB = gr.Textbox(label="Candidate B — step-by-step", lines=10)
     _hidden = gr.Textbox(visible=False)
 
-    btn.click(
+    btn_compare.click(
         fn=compare_candidates,
-        inputs=[context, cand1, cand2, assume_space, topk],
-        outputs=[winner_html, summary1, details1, summary2, details2, _hidden]
+        inputs=[context, candA, candB, assume_space, topk, use_len_norm],
+        outputs=[winner_html, summaryA, detailsA, summaryB, detailsB, _hidden]
+    )
+
+    btn_swap.click(
+        fn=swap, inputs=[candA, candB], outputs=[candA, candB]
     )
 
 demo.launch()
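As committed, seq_logprob still runs one forward pass per candidate token (the teacher-forcing loop). The same chain-rule sum, log P(candidate | context) = sum_j log P(token_j | context, token_1..j-1), can be read off a single forward pass over context+candidate. A minimal sketch under the same tok/model/device globals as app.py; the name seq_logprob_single_pass is illustrative, not part of this commit:

import torch

def seq_logprob_single_pass(context: str, candidate: str, assume_leading_space: bool = True):
    # Same quantity as seq_logprob, but one forward pass: feed context+candidate
    # and read each candidate token's log-prob from the position to its left.
    cand_text = (" " + candidate) if assume_leading_space else candidate
    ctx_ids = tok.encode(context)
    cand_ids = tok.encode(cand_text, add_special_tokens=False)
    if not ctx_ids or not cand_ids:
        return None  # mirror the app's empty-input guards
    input_ids = torch.tensor([ctx_ids + cand_ids], device=device)
    with torch.no_grad():
        logits = model(input_ids=input_ids).logits[0]  # (len(ctx)+len(cand), vocab)
    logprobs = torch.log_softmax(logits, dim=-1)
    start = len(ctx_ids)
    targets = torch.tensor(cand_ids, device=device)
    # Logits at position p predict token p+1, so candidate token j (absolute
    # position start+j) is scored by row start+j-1 of logprobs.
    step_lps = logprobs[start - 1 : start - 1 + len(cand_ids)].gather(1, targets.unsqueeze(1)).squeeze(1)
    return step_lps.sum().item()

The per-step loop in the commit is clearer for teaching, since it can print top-k alternatives at each step; the single-pass variant gives the same total (up to float rounding) and scales much better with candidate length.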