ryandt committed
Commit
b657b7b
2 Parent(s): 78d9d23 5689bad

initial release

Files changed (6)
  1. .gitignore +2 -0
  2. README.md +5 -5
  3. app.py +280 -0
  4. invert.py +412 -0
  5. model.py +103 -0
  6. requirements.txt +1 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .venv
+ __pycache__
README.md CHANGED
@@ -1,15 +1,15 @@
  ---
  title: Inverting Embeddings
- emoji: 🏆
- colorFrom: purple
- colorTo: yellow
+ emoji: 🔄
+ colorFrom: blue
+ colorTo: green
  sdk: gradio
  sdk_version: 6.5.1
- python_version: '3.12'
+ python_version: "3.10"
  app_file: app.py
  pinned: false
  license: mit
- short_description: Inverting embeddings with beam search cosine similarity
+ short_description: Reconstruct text from embedding vectors via beam search
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,280 @@
+ """
+ ZSInvert — Zero-Shot Embedding Inversion Explorer.
+
+ Interactive tool demonstrating embedding inversion via
+ adversarial decoding beam search. Reconstructs text from
+ embedding vectors without training embedding-specific models.
+
+ Part of E04: ZSInvert.
+ """
+
+ import time
+ import threading
+ import queue
+
+ import gradio as gr
+ import torch
+
+ try:
+     import spaces
+     gpu_decorator = spaces.GPU(duration=120)
+ except ImportError:
+     gpu_decorator = lambda fn: fn
+
+ from model import load_llm, load_encoder, encode_text, ENCODERS
+ from invert import beam_search
+
+ _STAGE1_PROMPT = "tell me a story"
+ _STAGE2_PROMPT_TEMPLATE = "write a sentence similar to this: {seed}"
+
+ # Encoder choices (drop contriever — broken)
+ _ENCODER_CHOICES = [k for k in ENCODERS if k != "contriever"]
+
+
+ def _sim_color(cos_sim: float) -> str:
+     """Return hex color for a cosine similarity value."""
+     if cos_sim > 0.99:
+         return "#3b82f6"  # blue
+     if cos_sim > 0.95:
+         return "#16a34a"  # dark green
+     if cos_sim > 0.85:
+         return "#65a30d"  # green
+     if cos_sim > 0.70:
+         return "#ca8a04"  # amber
+     if cos_sim > 0.50:
+         return "#ef4444"  # red
+     return "#a855f7"  # purple
+
+
+ def _format_results(stage_results: list[dict]) -> str:
+     """Render accumulated stage results as styled HTML."""
+     if not stage_results:
+         return ""
+     rows = []
+     for r in stage_results:
+         color = _sim_color(r["cos_sim"])
+         rows.append(
+             f'<div style="margin-bottom:12px;padding:10px;border:1px solid #333;border-radius:6px;'
+             f'background:#1a1a2e;">'
+             f'<span style="font-weight:bold;color:#ccc;">S{r["stage"]}</span> '
+             f'<span style="color:#eee;font-style:italic;">"{r["text"]}"</span><br>'
+             f'<span style="color:{color};font-weight:bold;">cos={r["cos_sim"]:.4f}</span>'
+             f'&nbsp;&nbsp;len={r["length"]}'
+             f'&nbsp;&nbsp;{r["time"]:.1f}s'
+             f'&nbsp;&nbsp;steps={r["steps"]}'
+             f'</div>'
+         )
+     return "".join(rows)
+
+
+ def _format_progress(step: int, text: str, cos_sim: float) -> str:
+     """Render a live progress line during beam search."""
+     color = _sim_color(cos_sim)
+     return (
+         f'<div style="padding:8px;border:1px dashed #555;border-radius:4px;'
+         f'background:#111;margin-bottom:12px;">'
+         f'<span style="color:#888;">step {step}</span>&nbsp;&nbsp;'
+         f'<span style="color:{color};font-weight:bold;">cos={cos_sim:.4f}</span>&nbsp;&nbsp;'
+         f'<span style="color:#aaa;font-style:italic;">"{text}"</span>'
+         f'</div>'
+     )
+
+
+ _SENTINEL = object()
+
+
+ @gpu_decorator
+ def _run_beam_search_threaded(
+     target_emb, encoder_name, prompt,
+     beam_width, top_k, patience, max_steps, min_similarity, randomness,
+     progress_queue,
+ ):
+     """Run beam search on GPU, pushing step updates to a queue."""
+     llm, tokenizer = load_llm()
+     encoder = load_encoder(encoder_name)
+
+     step_count = 0
+
+     def on_step(step, cand):
+         nonlocal step_count
+         step_count = step
+         progress_queue.put((step, cand.seq_str, cand.cos_sim))
+
+     t0 = time.time()
+     result = beam_search(
+         llm, tokenizer, encoder, target_emb,
+         prompt=prompt,
+         beam_width=int(beam_width),
+         max_steps=int(max_steps),
+         top_k=int(top_k),
+         patience=int(patience),
+         min_similarity=float(min_similarity),
+         randomness=bool(randomness),
+         on_step=on_step,
+     )
+     elapsed = time.time() - t0
+     progress_queue.put(_SENTINEL)
+     return result, elapsed, step_count
+
+
+ def run_stage(
+     text, encoder_name,
+     beam_width, top_k, patience, max_steps, min_similarity, randomness,
+     target_emb_state, stage_results_state,
+ ):
+     """Run the next stage of inversion, yielding progress updates."""
+     if not text or not text.strip():
+         gr.Warning("Please enter some text.")
+         yield (
+             target_emb_state,
+             stage_results_state,
+             _format_results(stage_results_state),
+             gr.update(),
+         )
+         return
+
+     stage_num = len(stage_results_state) + 1
+
+     # Encode target on first stage
+     if stage_num == 1:
+         encoder = load_encoder(encoder_name)
+         target_emb_state = encode_text(text.strip(), encoder)
+
+     # Build prompt
+     if stage_num == 1:
+         prompt = _STAGE1_PROMPT
+     else:
+         prev_text = stage_results_state[-1]["text"]
+         prompt = _STAGE2_PROMPT_TEMPLATE.format(seed=prev_text)
+
+     # Run beam search in a thread so we can yield progress
+     progress_q = queue.Queue()
+
+     # Container for the thread's return value
+     result_holder = [None, 0.0, 0]
+
+     def _worker():
+         r, elapsed, steps = _run_beam_search_threaded(
+             target_emb_state, encoder_name, prompt,
+             beam_width, top_k, patience, max_steps, min_similarity, randomness,
+             progress_q,
+         )
+         result_holder[0] = r
+         result_holder[1] = elapsed
+         result_holder[2] = steps
+
+     worker = threading.Thread(target=_worker)
+     worker.start()
+
+     # Yield progress updates as they arrive
+     completed_html = _format_results(stage_results_state)
+     while True:
+         try:
+             item = progress_q.get(timeout=0.5)
+         except queue.Empty:
+             if not worker.is_alive():
+                 break
+             continue
+
+         if item is _SENTINEL:
+             break
+
+         step, best_text, best_sim = item
+         progress_html = _format_progress(step, best_text, best_sim)
+         yield (
+             target_emb_state,
+             stage_results_state,
+             completed_html + progress_html,
+             gr.update(value=f"Running S{stage_num}...", interactive=False),
+         )
+
+     worker.join()
+
+     result, elapsed, steps = result_holder
+     stage_results_state = stage_results_state + [{
+         "stage": stage_num,
+         "text": result.seq_str,
+         "cos_sim": result.cos_sim,
+         "length": len(result.token_ids),
+         "time": elapsed,
+         "steps": steps,
+     }]
+
+     html = _format_results(stage_results_state)
+     btn_label = f"Run Stage {stage_num + 1}"
+
+     yield (
+         target_emb_state,
+         stage_results_state,
+         html,
+         gr.update(value=btn_label, visible=True, interactive=True),
+     )
+
+
+ def reset_state():
+     """Reset all state for a fresh run."""
+     return None, [], "", gr.update(value="Run Stage 1", visible=True, interactive=True)
+
+
+ with gr.Blocks(title="ZSInvert", theme=gr.themes.Base()) as demo:
+     gr.Markdown("# ZSInvert — Zero-Shot Embedding Inversion")
+     gr.Markdown(
+         "Reconstruct text from its embedding vector using "
+         "cosine-similarity-guided beam search."
+     )
+
+     # --- State ---
+     target_emb_state = gr.State(value=None)
+     stage_results_state = gr.State(value=[])
+
+     # --- Input row ---
+     with gr.Row():
+         text_input = gr.Textbox(
+             label="Input text",
+             placeholder="Enter text to encode and invert...",
+             scale=4,
+         )
+         encoder_dd = gr.Dropdown(
+             choices=_ENCODER_CHOICES,
+             value="gte",
+             label="Encoder",
+             scale=1,
+         )
+
+     # --- Advanced settings ---
+     with gr.Accordion("Advanced Settings", open=False):
+         with gr.Row():
+             beam_width_sl = gr.Slider(5, 50, value=10, step=1, label="beam_width")
+             top_k_sl = gr.Slider(5, 50, value=10, step=1, label="top_k")
+             patience_sl = gr.Slider(0, 20, value=5, step=1, label="patience (0=off)")
+         with gr.Row():
+             max_steps_sl = gr.Slider(0, 64, value=0, step=1, label="max_steps (0=unlimited)")
+             min_sim_sl = gr.Slider(0.0, 1.0, value=0.0, step=0.01, label="min_similarity (0=off)")
+             randomness_cb = gr.Checkbox(value=True, label="randomness")
+
+     # --- Run button ---
+     run_btn = gr.Button("Run Stage 1", variant="primary")
+
+     # --- Results ---
+     results_html = gr.HTML(value="", label="Results")
+
+     # --- Wiring ---
+     all_inputs = [
+         text_input, encoder_dd,
+         beam_width_sl, top_k_sl, patience_sl, max_steps_sl, min_sim_sl, randomness_cb,
+         target_emb_state, stage_results_state,
+     ]
+     all_outputs = [
+         target_emb_state, stage_results_state,
+         results_html, run_btn,
+     ]
+
+     run_btn.click(fn=run_stage, inputs=all_inputs, outputs=all_outputs)
+
+     # Reset when input text or encoder changes
+     text_input.change(fn=reset_state, inputs=[], outputs=all_outputs)
+     encoder_dd.change(fn=reset_state, inputs=[], outputs=all_outputs)
+
+
+ if __name__ == "__main__":
+     demo.launch(server_port=7860)
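For quick smoke tests outside the UI, the run_stage generator can also be driven headlessly — a minimal sketch, not part of the commit; the input text and parameter values are illustrative, and it assumes the models can download from the Hugging Face Hub:

# Headless driver sketch for run_stage (hypothetical usage).
from app import run_stage

results = []
for _emb, results, _html, _btn in run_stage(
    "the cat sat on the mat", "gte",   # text, encoder choice
    10, 10, 5, 16, 0.0, True,          # beam_width .. randomness
    None, [],                          # fresh state
):
    pass  # each yield is one progress snapshot

print(results[-1]["text"], results[-1]["cos_sim"])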
invert.py ADDED
@@ -0,0 +1,412 @@
+ """
+ Beam search inversion engine for ZSInvert.
+
+ Cosine-similarity-guided beam search that reconstructs text
+ from an embedding vector using a small LLM as the token
+ proposal engine.
+
+ Part of E04: ZSInvert.
+ """
+
+ from __future__ import annotations
+
+ import random
+ from dataclasses import dataclass, field
+ from typing import Callable
+
+ import torch
+ import torch.nn.functional as F
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache
+
+ from model import get_chat_format
+
+ # Tokens to mask from generation (special/formatting tokens)
+ _MASK_STRINGS = [
+     "<|im_end|>", "<|end_header_id|>", "<|start_header_id|>",
+     "<|eot_id|>", "<|eom_id|>", "<|python_tag|>",
+     "@", "\xa0", '"', "\n", "\n\n", " \n\n",
+ ]
+
+ # Number of top beams kept deterministically in randomness mode
+ _FIXED_KEEP = 5
+
+
+ @dataclass
+ class Candidate:
+     """A beam search candidate."""
+     token_ids: list[int] = field(default_factory=list)
+     seq_str: str = ""
+     score: float = 0.0
+     cos_sim: float = 0.0
+     kv_cache: DynamicCache | None = field(default=None, repr=False)
+
+
+ @dataclass
+ class InversionResult:
+     """Result of a full inversion run."""
+     original_text: str | None = None
+     target_embedding: torch.Tensor | None = None
+     stage1_text: str = ""
+     stage1_cos_sim: float = 0.0
+     stage2_text: str = ""
+     stage2_cos_sim: float = 0.0
+
+
+ def _top_k_top_p_filter(logits: torch.Tensor, top_k: int, top_p: float) -> list[int]:
+     """Return indices that survive top-k and top-p filtering."""
+     # Top-k: keep only top_k highest logits
+     topk_vals, topk_idx = torch.topk(logits, min(top_k, logits.size(-1)))
+
+     # Top-p (nucleus): keep smallest set whose cumulative prob >= top_p
+     probs = F.softmax(topk_vals, dim=-1)
+     cumulative = torch.cumsum(probs, dim=-1)
+     # Mask tokens beyond the nucleus
+     mask = cumulative - probs <= top_p
+     filtered_idx = topk_idx[mask]
+
+     return filtered_idx.tolist()
+
+
+ _cached_mask_ids: list[int] | None = None
+
+
+ def _build_mask_token_ids(tokenizer: AutoTokenizer) -> list[int]:
+     """Build set of token IDs to suppress during generation. Cached.
+
+     Masks both exact single-token matches for _MASK_STRINGS and any
+     vocab token whose decoded form contains a newline (catches merged
+     tokens like '.\\n' that bypass the single-token check).
+     """
+     global _cached_mask_ids
+     if _cached_mask_ids is not None:
+         return _cached_mask_ids
+
+     mask_ids = set()
+     for s in _MASK_STRINGS:
+         tokens = tokenizer.encode(s, add_special_tokens=False)
+         if len(tokens) == 1:
+             mask_ids.add(tokens[0])
+     if tokenizer.eos_token_id is not None:
+         mask_ids.add(tokenizer.eos_token_id)
+     # Also mask any vocab token containing a newline
+     for tid in range(tokenizer.vocab_size):
+         decoded = tokenizer.decode([tid])
+         if "\n" in decoded:
+             mask_ids.add(tid)
+     _cached_mask_ids = list(mask_ids)
+     return _cached_mask_ids
+
+
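The nucleus mask in _top_k_top_p_filter compares the cumulative probability *before* each token against top_p, so the top token always survives and the token that crosses the threshold is still included — the same effect as the usual shift-right nucleus filter. A standalone check with illustrative values (not part of the commit):

import torch

probs = torch.tensor([0.5, 0.3, 0.15, 0.05])  # already sorted descending
cumulative = torch.cumsum(probs, dim=-1)       # [0.50, 0.80, 0.95, 1.00]
mask = cumulative - probs <= 0.9               # cumulative mass before each token
print(mask.tolist())                           # [True, True, True, False]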
+ def _get_next_token_candidates(
+     model: AutoModelForCausalLM,
+     tokenizer: AutoTokenizer,
+     prefix: list[int],
+     suffix: list[int],
+     prompt_tokens: list[int],
+     candidates: list[Candidate],
+     top_k: int,
+     top_p: float,
+     repetition_penalty: float,
+     mask_ids: list[int],
+ ) -> tuple[list[list[tuple[int, float]]], list[DynamicCache | None]]:
+     """Forward pass through LLM to get candidate next tokens.
+
+     Builds input as: prefix + prompt_tokens + suffix + candidate.token_ids
+     Uses KV-cache from candidates when available.
+
+     Returns (proposals, split_kv): per-candidate [(token_id, log_prob), ...] lists plus each candidate's updated KV-cache.
+     """
+     device = next(model.parameters()).device
+
+     # Build full token sequences
+     base = prefix + prompt_tokens + suffix
+     batch_tokens = [base + c.token_ids for c in candidates]
+
+     # All sequences should have the same length (beam search invariant)
+     assert len(set(len(t) for t in batch_tokens)) == 1
+
+     input_ids = torch.tensor(batch_tokens, device=device)
+
+     # Check for usable KV-cache
+     batch_kv = [c.kv_cache for c in candidates]
+     use_cache = all(kv is not None for kv in batch_kv)
+
+     if use_cache:
+         kv_cache = DynamicCache.from_batch_splits(batch_kv)
+         cache_len = kv_cache.get_seq_length()
+         model_input = input_ids[:, cache_len:]
+         attn_mask = torch.ones_like(input_ids, device=device)
+     else:
+         kv_cache = DynamicCache()
+         model_input = input_ids
+         attn_mask = None
+
+     with torch.no_grad():
+         outputs = model(
+             input_ids=model_input,
+             attention_mask=attn_mask,
+             past_key_values=kv_cache,
+             use_cache=True,
+         )
+
+     # Split KV-cache back per candidate
+     next_kv = outputs.past_key_values
+     try:
+         split_kv = next_kv.batch_split(len(candidates), 1) if next_kv else [None] * len(candidates)
+     except Exception:
+         split_kv = [None] * len(candidates)
+
+     logits = outputs.logits[:, -1, :]  # (batch, vocab)
+
+     # Apply repetition penalty
+     if repetition_penalty != 1.0:
+         for i, tokens in enumerate(batch_tokens):
+             for tid in set(tokens):
+                 if logits[i, tid] > 0:
+                     logits[i, tid] /= repetition_penalty
+                 else:
+                     logits[i, tid] *= repetition_penalty
+
+     # Mask special tokens
+     logits[:, mask_ids] = -1e10
+
+     log_probs = F.log_softmax(logits, dim=-1)
+
+     results = []
+     for i in range(len(candidates)):
+         filtered = _top_k_top_p_filter(logits[i], top_k, top_p)
+         pairs = [(tid, log_probs[i, tid].item()) for tid in filtered]
+         pairs.sort(key=lambda x: x[1], reverse=True)
+         results.append(pairs)
+
+     return results, split_kv
+
+
+ def _score_candidates(
+     encoder: SentenceTransformer,
+     target_embedding: torch.Tensor,
+     candidates: list[Candidate],
+ ) -> None:
+     """Score candidates by cosine similarity to target embedding. Mutates in place."""
+     if not candidates:
+         return
+
+     texts = [c.seq_str for c in candidates]
+     embs = encoder.encode(texts, convert_to_tensor=True, normalize_embeddings=True)
+
+     # target_embedding shape: (1, dim) — broadcast
+     target_norm = F.normalize(target_embedding, dim=-1)
+     sims = torch.matmul(embs, target_norm.squeeze(0))  # (batch,)
+
+     for i, c in enumerate(candidates):
+         c.cos_sim = sims[i].item()
+         c.score = c.cos_sim
+
+
+ def beam_search(
+     model: AutoModelForCausalLM,
+     tokenizer: AutoTokenizer,
+     encoder: SentenceTransformer,
+     target_embedding: torch.Tensor,
+     prompt: str,
+     beam_width: int = 30,
+     max_steps: int = 0,
+     top_k: int = 30,
+     top_p: float = 1.0,
+     repetition_penalty: float = 1.5,
+     randomness: bool = True,
+     patience: int = 5,
+     min_similarity: float = 0.0,
+     on_step: Callable | None = None,
+ ) -> Candidate:
+     """Run cosine-similarity-guided beam search.
+
+     Args:
+         model: Generator LLM.
+         tokenizer: LLM tokenizer.
+         encoder: Embedding encoder for scoring.
+         target_embedding: Target embedding to invert. Shape (1, dim).
+         prompt: User-facing prompt (becomes chat user message).
+         beam_width: Number of candidates to maintain per step.
+         max_steps: Maximum tokens to generate. 0 means no limit (stop via patience only).
+         top_k: Top-k tokens to consider per expansion.
+         top_p: Nucleus sampling threshold.
+         repetition_penalty: Penalty for repeated tokens in logits.
+         randomness: If True, keep top 5 deterministically + sample rest.
+         patience: Stop after this many steps with no improvement in best cosine sim.
+             Set to 0 to disable early stopping.
+         min_similarity: Stop immediately when cosine sim reaches this threshold.
+             Set to 0.0 to disable.
+         on_step: Callback(step, best_candidate) fired each step.
+
+     Returns:
+         Best candidate found during search.
+     """
+     prefix, suffix = get_chat_format(tokenizer)
+     prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False)
+     mask_ids = _build_mask_token_ids(tokenizer)
+
+     candidates = [Candidate()]
+     best_complete: Candidate | None = None
+     best_ever: Candidate | None = None
+     steps_since_improvement = 0
+
+     step = 0
+     while max_steps <= 0 or step < max_steps:
+         step += 1
+         # Expand: get next-token proposals for each candidate
+         token_proposals, split_kv = _get_next_token_candidates(
+             model, tokenizer, prefix, suffix, prompt_tokens,
+             candidates, top_k, top_p, repetition_penalty, mask_ids,
+         )
+
+         # Build expanded candidates
+         expanded: list[Candidate] = []
+         for i, cand in enumerate(candidates):
+             for tid, _logp in token_proposals[i]:
+                 new_ids = cand.token_ids + [tid]
+                 expanded.append(Candidate(
+                     token_ids=new_ids,
+                     seq_str=tokenizer.decode(new_ids),
+                     kv_cache=split_kv[i],
+                 ))
+
+         # Score by cosine similarity
+         _score_candidates(encoder, target_embedding, expanded)
+
+         # Sort by score descending
+         expanded.sort(key=lambda c: c.score, reverse=True)
+
+         # Track best-ever candidate (highest cosine sim at any step)
+         step_best = expanded[0]
+         if best_ever is None or step_best.cos_sim > best_ever.cos_sim:
+             best_ever = Candidate(
+                 token_ids=list(step_best.token_ids),
+                 seq_str=step_best.seq_str,
+                 score=step_best.score,
+                 cos_sim=step_best.cos_sim,
+             )
+             steps_since_improvement = 0
+         else:
+             steps_since_improvement += 1
+             if patience > 0 and steps_since_improvement >= patience:
+                 break
+
+         if min_similarity > 0 and best_ever.cos_sim >= min_similarity:
+             break
+
+         # Track best complete sentence
+         for c in expanded:
+             if c.seq_str and c.seq_str.rstrip()[-1:] in ".?!":
+                 if best_complete is None or c.score > best_complete.score:
+                     best_complete = Candidate(
+                         token_ids=list(c.token_ids),
+                         seq_str=c.seq_str,
+                         score=c.score,
+                         cos_sim=c.cos_sim,
+                     )
+
+         # Select: top beam_width candidates (with optional randomness)
+         if randomness and len(expanded) > _FIXED_KEEP:
+             keep = min(_FIXED_KEEP, beam_width)
+             remainder = min(beam_width - keep, len(expanded) - keep)
+             candidates = expanded[:keep]
+             if remainder > 0:
+                 candidates += random.sample(expanded[keep:], remainder)
+         else:
+             candidates = expanded[:beam_width]
+
+         # Callback
+         if on_step is not None:
+             best_so_far = best_complete if best_complete else candidates[0]
+             on_step(step, best_so_far)
+
+     # Return the candidate with the highest cosine similarity across all tracking
+     finalists = [c for c in [best_ever, best_complete, candidates[0]] if c is not None]
+     return max(finalists, key=lambda c: c.cos_sim)
+
+
+ _STAGE1_PROMPT = "tell me a story"
+ _STAGE2_PROMPT_TEMPLATE = "write a sentence similar to this: {seed}"
+
+
+ def invert(
+     text: str,
+     encoder_name: str = "gte",
+     beam_width: int = 30,
+     max_steps: int = 0,
+     top_k: int = 30,
+     two_stage: bool = True,
+     on_progress: Callable | None = None,
+ ) -> InversionResult:
+     """Run the full two-stage ZSInvert inversion pipeline.
+
+     Stage 1: Seed generation with a generic prompt.
+     Stage 2: Paraphrase refinement using the Stage 1 output as context.
+
+     Args:
+         text: Input text to encode and then invert.
+         encoder_name: Which embedding encoder to use ("gte", "gtr", "contriever", "mini").
+         beam_width: Beam search width.
+         max_steps: Maximum tokens per stage.
+         top_k: Top-k tokens per expansion step.
+         two_stage: If True, run both stages. If False, Stage 1 only.
+         on_progress: Callback(stage, step, best_candidate) for UI updates.
+             stage is 1 or 2, step is the beam search step index.
+
+     Returns:
+         InversionResult with results from both stages.
+     """
+     from model import load_llm, load_encoder, encode_text
+
+     model, tokenizer = load_llm()
+     encoder = load_encoder(encoder_name)
+     target_embedding = encode_text(text, encoder)
+
+     # Stage 1: seed generation
+     def stage1_callback(step: int, cand: Candidate) -> None:
+         if on_progress is not None:
+             on_progress(1, step, cand)
+
+     stage1 = beam_search(
+         model, tokenizer, encoder, target_embedding,
+         prompt=_STAGE1_PROMPT,
+         beam_width=beam_width,
+         max_steps=max_steps,
+         top_k=top_k,
+         randomness=True,
+         on_step=stage1_callback,
+     )
+
+     result = InversionResult(
+         original_text=text,
+         target_embedding=target_embedding,
+         stage1_text=stage1.seq_str,
+         stage1_cos_sim=stage1.cos_sim,
+     )
+
+     if not two_stage:
+         result.stage2_text = result.stage1_text
+         result.stage2_cos_sim = result.stage1_cos_sim
+         return result
+
+     # Stage 2: paraphrase refinement
+     def stage2_callback(step: int, cand: Candidate) -> None:
+         if on_progress is not None:
+             on_progress(2, step, cand)
+
+     stage2_prompt = _STAGE2_PROMPT_TEMPLATE.format(seed=stage1.seq_str)
+     stage2 = beam_search(
+         model, tokenizer, encoder, target_embedding,
+         prompt=stage2_prompt,
+         beam_width=beam_width,
+         max_steps=max_steps,
+         top_k=top_k,
+         randomness=True,
+         on_step=stage2_callback,
+     )
+
+     result.stage2_text = stage2.seq_str
+     result.stage2_cos_sim = stage2.cos_sim
+     return result
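A minimal usage sketch of the pipeline entry point (not part of the commit; the input sentence is illustrative, and it assumes the generator and encoder models can download from the Hugging Face Hub):

from invert import invert

res = invert("The quick brown fox jumps over the lazy dog.",
             encoder_name="gte", beam_width=10, max_steps=32)
print(f"stage 1: {res.stage1_text!r}  cos={res.stage1_cos_sim:.4f}")
print(f"stage 2: {res.stage2_text!r}  cos={res.stage2_cos_sim:.4f}")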
model.py ADDED
@@ -0,0 +1,103 @@
+ """
+ Model loading for ZSInvert.
+
+ Loads the generator LLM (Qwen2.5-0.5B-Instruct) and selectable
+ embedding encoders (GTE-base, GTR-T5-base, Contriever, all-MiniLM-L6).
+
+ Part of E04: ZSInvert.
+ """
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from sentence_transformers import SentenceTransformer
+
+ GENERATOR_MODEL = "Qwen/Qwen2.5-0.5B-Instruct"
+
+ ENCODERS = {
+     "gte": "thenlper/gte-base",
+     "gtr": "sentence-transformers/gtr-t5-base",
+     "contriever": "facebook/contriever",
+     "mini": "sentence-transformers/all-MiniLM-L6-v2",
+ }
+
+ _device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ _llm: AutoModelForCausalLM | None = None
+ _llm_tokenizer: AutoTokenizer | None = None
+ _encoders: dict[str, SentenceTransformer] = {}
+
+
+ def load_llm() -> tuple[AutoModelForCausalLM, AutoTokenizer]:
+     """Load generator LLM. Singleton."""
+     global _llm, _llm_tokenizer
+     if _llm is None:
+         _llm_tokenizer = AutoTokenizer.from_pretrained(GENERATOR_MODEL)
+         _llm = AutoModelForCausalLM.from_pretrained(
+             GENERATOR_MODEL,
+             dtype=torch.bfloat16,
+         ).eval().to(_device)
+     return _llm, _llm_tokenizer
+
+
+ def load_encoder(name: str = "gte") -> SentenceTransformer:
+     """Load embedding encoder by name. Cached per name."""
+     if name not in ENCODERS:
+         raise ValueError(f"Unknown encoder '{name}'. Choose from: {list(ENCODERS.keys())}")
+     if name not in _encoders:
+         model_id = ENCODERS[name]
+         _encoders[name] = SentenceTransformer(model_id, device=_device)
+     return _encoders[name]
+
+
+ def encode_text(text: str, encoder: SentenceTransformer) -> torch.Tensor:
+     """Encode text to normalized embedding vector. Returns shape (1, hidden_dim)."""
+     emb = encoder.encode(
+         text,
+         convert_to_tensor=True,
+         normalize_embeddings=True,
+     )
+     return emb.unsqueeze(0)
+
+
+ def get_chat_format(tokenizer: AutoTokenizer) -> tuple[list[int], list[int]]:
+     """Extract chat prefix/suffix token IDs from the Qwen2.5 chat template.
+
+     The prefix is everything the template adds before the user content.
+     The suffix is everything after the user content through the generation prompt.
+
+     For Qwen2.5 the structure is:
+         <|im_start|>system\\n...system prompt...<|im_end|>\\n
+         <|im_start|>user\\n{CONTENT}<|im_end|>\\n
+         <|im_start|>assistant\\n
+
+     We split so that: prefix + prompt_tokens + suffix = full template.
+     """
+     # Template with empty content (no gen prompt) — find where content is inserted
+     empty = tokenizer.apply_chat_template(
+         [{"role": "user", "content": ""}],
+         add_generation_prompt=False,
+     )
+     # Template with a known marker to locate the split point
+     marker = tokenizer.apply_chat_template(
+         [{"role": "user", "content": "hello"}],
+         add_generation_prompt=True,
+     )
+     marker_tokens = tokenizer.encode("hello", add_special_tokens=False)
+
+     # Find where the marker content appears in the full template
+     marker_len = len(marker_tokens)
+     for i in range(len(marker)):
+         if marker[i : i + marker_len] == marker_tokens:
+             prefix = marker[:i]
+             suffix = marker[i + marker_len :]
+             return prefix, suffix
+
+     # Fallback: use the empty template structure
+     # Empty template has <|im_end|>\n right after user\n — drop those 2 tokens
+     prefix = empty[:-2]
+     full_gen = tokenizer.apply_chat_template(
+         [{"role": "user", "content": ""}],
+         add_generation_prompt=True,
+     )
+     suffix = full_gen[len(prefix):]
+     return prefix, suffix
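The split invariant behind get_chat_format can be sanity-checked directly — a standalone sketch (not part of the commit; assumes the tokenizer downloads from the Hub):

from transformers import AutoTokenizer
from model import GENERATOR_MODEL, get_chat_format

tok = AutoTokenizer.from_pretrained(GENERATOR_MODEL)
prefix, suffix = get_chat_format(tok)
ids = tok.encode("tell me a story", add_special_tokens=False)
# The reassembled ids should decode to the full chat-formatted prompt:
# system block, then "user\ntell me a story", then the assistant header.
print(tok.decode(prefix + ids + suffix))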
requirements.txt ADDED
@@ -0,0 +1 @@
+ nltk
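Note that app.py, invert.py, and model.py also import gradio, torch, transformers, and sentence-transformers; unless the Space relies on preinstalled packages, a fuller requirements file would look something like the sketch below (an assumption, not part of the commit — versions left unpinned):

# hypothetical fuller requirements.txt (gradio itself is provided
# by the Space's sdk setting; `spaces` is injected by the runtime)
torch
transformers
sentence-transformers
nltk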