root committed · Commit 106d1a2 · 1 parent: f6606f8

upload demo

Files changed (2)
  1. app.py +648 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,648 @@
+ # dream_app.py
+ import torch
+ import numpy as np
+ import gradio as gr
+ # import spaces  # Ensure spaces is installed if needed for the GPU decorator
+ import torch.nn.functional as F
+ from transformers import AutoTokenizer, AutoModel, AutoConfig
+ import time
+ import re
+ from typing import List, Dict, Tuple, Optional, Any  # Added Any
+ import torch.distributions as dists  # Added import
+ import traceback  # For better error printing
+
+ # --- START: Helper functions copied from generation_utils.py ---
+ # [Keep the copied functions: top_p_logits, top_k_logits, sample_tokens]
+ def top_p_logits(logits, top_p=None):
+     """ Applies top-p filtering to logits. """
+     if top_p is None or top_p >= 1.0:
+         return logits
+     sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+     cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+     sorted_indices_to_remove = cumulative_probs > top_p
+     sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+     sorted_indices_to_remove[..., 0] = 0
+     mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
+     mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
+     logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
+     return logits
+
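+ # Note: top_p_logits keeps the smallest set of highest-probability tokens whose
+ # cumulative mass exceeds top_p (the index shift above guarantees the top-ranked
+ # token always survives); all other logits are set to the dtype minimum,
+ # effectively -inf, so they can never be sampled.
+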
+ def top_k_logits(logits, top_k=None):
+     """ Applies top-k filtering to logits. """
+     if top_k is None or top_k <= 0:
+         return logits
+     top_k = min(top_k, logits.size(-1))
+     indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+     logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
+     return logits
+
+ def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False, use_ori_logits=True):
+     """ Samples tokens based on logits and calculates confidence. """
+     original_dtype = logits.dtype
+     logits = logits.to(torch.float32)
+     if use_ori_logits:
+         ori_logits = logits.clone()
+     if temperature > 0:
+         safe_temp = max(temperature, 1e-6)
+         logits = logits / safe_temp
+     if top_p is not None and 0.0 < top_p < 1.0:
+         logits = top_p_logits(logits, top_p)
+     if top_k is not None and top_k > 0:
+         logits = top_k_logits(logits, top_k)
+
+     is_all_neg_inf = torch.all(logits <= torch.finfo(logits.dtype).min, dim=-1, keepdim=True)
+     if torch.any(is_all_neg_inf):
+         uniform_logits = torch.zeros_like(logits)
+         logits = torch.where(is_all_neg_inf, uniform_logits, logits)
+
+     probs = torch.softmax(logits, dim=-1)
+     probs = torch.clamp(probs, min=0.0)
+     prob_sum = probs.sum(dim=-1, keepdim=True)
+     safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=probs.device, dtype=probs.dtype))
+     probs = probs / safe_prob_sum
+     probs = torch.nan_to_num(probs, nan=0.0)
+
+     if temperature > 0:
+         x0 = dists.Categorical(probs=probs).sample()
+         if use_ori_logits:
+             confidence = torch.gather(torch.softmax(ori_logits, dim=-1), -1, x0.unsqueeze(-1)).squeeze(-1)
+         else:
+             confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
+     else:
+         confidence, x0 = probs.max(dim=-1)
+
+     if margin_confidence:
+         if use_ori_logits:
+             sorted_probs, _ = torch.sort(torch.softmax(ori_logits, dim=-1), dim=-1, descending=True)
+         else:
+             sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
+         top1_probs = sorted_probs[..., 0]
+         top2_probs = sorted_probs[..., 1] if sorted_probs.shape[-1] > 1 else top1_probs
+         confidence = top1_probs - top2_probs
+     elif neg_entropy:  # Use elif to avoid calculating entropy if margin_confidence was True
+         epsilon = 1e-10
+         log_probs = torch.log(probs + epsilon)
+         confidence = torch.sum(probs * log_probs, dim=-1)  # Negative entropy
+     # Else: confidence is the probability of the sampled token if temperature > 0, or the max prob otherwise
+
+     confidence = torch.nan_to_num(confidence, nan=0.0)
+     return confidence, x0
+ # --- END: Copied Helper functions ---
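+ # Usage sketch (illustrative only; nothing here runs at import time): for
+ # mask_logits of shape [num_masked, vocab_size],
+ #   confidence, x0 = sample_tokens(mask_logits, temperature=0.1, top_p=0.9)
+ # returns per-position confidence scores of shape [num_masked] and the sampled
+ # token ids x0 of shape [num_masked].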
+
+
+ # --- Model Loading and Constants ---
+ # Load the model configuration to get special token IDs
+ model_path = "Dream-org/Dream-Coder-v0-Instruct-7B"
+ config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ print(f"Using device: {device}")
+
+ print("Loading tokenizer...")
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+ print("Loading model...")
+ model = AutoModel.from_pretrained(
+     model_path,
+     torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32,
+     trust_remote_code=True,
+     attn_implementation="sdpa"  # Explicitly request SDPA
+ )
+ model = model.to(device).eval()
+ print("Model loaded.")
+
+ MASK_TOKEN = tokenizer.mask_token
+ MASK_ID = tokenizer.mask_token_id
+ PAD_ID = tokenizer.pad_token_id
+ EOS_ID = tokenizer.eos_token_id
+
+ if MASK_ID is None:
+     raise ValueError("Cannot determine MASK_ID. Check the model's tokenizer configuration.")
+
+ SPECIAL_TOKEN_IDS = {PAD_ID, EOS_ID, MASK_ID}
+ try:
+     IM_START_ID = tokenizer.convert_tokens_to_ids("<|im_start|>")
+     IM_END_ID = tokenizer.convert_tokens_to_ids("<|im_end|>")
+     SPECIAL_TOKEN_IDS.add(IM_START_ID)
+     SPECIAL_TOKEN_IDS.add(IM_END_ID)
+ except KeyError:
+     print("Warning: <|im_start|> or <|im_end|> not found in tokenizer vocab.")
+     IM_START_ID = None
+     IM_END_ID = None
+
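+ # Note: SPECIAL_TOKEN_IDS is assembled here but not referenced again below;
+ # the visualization logic checks PAD_ID / EOS_ID / MASK_ID directly.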
+
+ # --- Helper Functions ---
+ def parse_constraints(constraints_text: str) -> Dict[int, List[int]]:
+     """ Parses word constraints. """
+     constraints = {}
+     if not constraints_text: return constraints
+     parts = constraints_text.split(',')
+     for part in parts:
+         part = part.strip()
+         if ':' not in part: continue
+         pos_str, word = part.split(':', 1)
+         try:
+             pos = int(pos_str.strip())
+             word = word.strip()
+             token_ids = []
+             if word:
+                 text_to_encode = (" " + word) if (pos > 0 and not word.startswith(" ")) else word
+                 token_ids = tokenizer.encode(text_to_encode, add_special_tokens=False)
+             if token_ids and pos >= 0: constraints[pos] = token_ids
+             elif not token_ids and word: print(f"Warning: Could not tokenize constraint word '{word}'")
+         except ValueError: print(f"Warning: Invalid position '{pos_str}' in constraint part '{part}'")
+         except Exception as e: print(f"Warning: Error processing constraint '{part}': {e}")
+     return constraints
+
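+ # Example: parse_constraints("0:Once, 5:upon") -> {0: <ids of "Once">, 5: <ids of " upon">}
+ # (a space is prepended for positions > 0 so the word tokenizes as it would mid-sentence).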
+ # Removed format_chat_history as the state will now be in the correct format
+
+ def apply_constraints_to_state(
+     x: torch.Tensor,
+     prompt_length: int,
+     total_length: int,
+     parsed_constraints: Dict[int, List[int]],
+     current_step: Optional[int] = None
+ ) -> torch.Tensor:
+     """ Applies constraints directly to the state tensor `x`. """
+     modified_x = x.clone()
+     for rel_pos, word_token_ids in parsed_constraints.items():
+         abs_start_pos = prompt_length + rel_pos
+         abs_end_pos = abs_start_pos + len(word_token_ids)
+         if abs_start_pos < total_length and abs_end_pos <= total_length:
+             try:
+                 constraint_tensor = torch.tensor(word_token_ids, dtype=torch.long, device=modified_x.device)
+                 modified_x[0, abs_start_pos:abs_end_pos] = constraint_tensor
+             except IndexError: print(f"Warning (Step {current_step}): Constraint at {rel_pos} ('{tokenizer.decode(word_token_ids)}') goes out of bounds.")
+             except Exception as e: print(f"Warning (Step {current_step}): Failed to apply constraint at {rel_pos}: {e}")
+     return modified_x
+
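+ # Constraints are applied once before the loop and re-applied after every
+ # denoising step, so constrained positions cannot be overwritten by sampling.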
+
+ # --- Core Generation Logic with Live Visualization ---
+
+ # @spaces.GPU
+ @torch.no_grad()
+ def generate_dream_response(
+     history_dict_list: List[Dict[str, str]],  # Expects a list of message dicts
+     gen_length: int,
+     steps: int,
+     constraints_text: str,
+     temperature: float,
+     top_p: Optional[float],
+     top_k: Optional[int],
+     alg: str,
+     alg_temp: Optional[float],
+     pad_penalty: Optional[float],
+     visualization_delay: float
+ ):  # Generator: yields (history, visualization data, response text) tuples
+     """ Generates text step-by-step and yields visualization states live. """
+
+     if not history_dict_list or history_dict_list[-1]['role'] != 'user':
+         # Handle cases where the history is empty or doesn't end with a user message.
+         # This check may be redundant if add_user_message handles it, but it is a safe guard.
+         yield history_dict_list, [("No user message found.", "red")], ""
+         return
+
+     # --- 1. Preparation ---
+     parsed_constraints = parse_constraints(constraints_text)
+
+     # Prepare history for the model template (don't include the empty assistant msg yet)
+     history_for_template = history_dict_list  # Already in list-of-dicts format
+
+     try:
+         inputs = tokenizer.apply_chat_template(
+             history_for_template,  # Pass the list of dicts directly
+             return_tensors="pt",
+             return_dict=True,
+             add_generation_prompt=True  # Crucial: adds the '<|im_start|>assistant\n' turn
+         )
+         input_ids = inputs.input_ids.to(device)
+         prompt_attention_mask = inputs.attention_mask.to(device) if 'attention_mask' in inputs else torch.ones_like(input_ids)
+         prompt_length = input_ids.shape[1]
+     except Exception as e:
+         print(f"Error applying chat template: {e}")
+         traceback.print_exc()
+         yield history_dict_list, [("Error preparing input.", "red")], ""
+         return
+
+     eps = 1e-3
+     top_p_val = top_p if top_p is not None and 0.0 < top_p < 1.0 else None
+     top_k_val = top_k if top_k is not None and top_k > 0 else None
+     alg_temp_val = alg_temp if alg in ['maskgit_plus', 'topk_margin', 'entropy'] and alg_temp is not None and alg_temp > 0 else None
+
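+     # alg_temp_val > 0 switches the reveal-order choice in the loop below from
+     # deterministic top-confidence selection to sampling proportional to
+     # softmax(confidence / alg_temp).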
+     # --- 2. Initialize Generation State ---
+     total_length = prompt_length + gen_length
+     initial_generation_part = torch.full((1, gen_length), MASK_ID, dtype=torch.long, device=device)
+     x = torch.cat((input_ids, initial_generation_part), dim=1)
+
+     generation_attention_mask = torch.ones((1, gen_length), dtype=torch.long, device=device)
+     full_attention_mask_long = torch.cat((prompt_attention_mask, generation_attention_mask), dim=1)
+
+     # Convert the 0/1 mask to additive form: 1 -> 0, 0 -> large negative value
+     attention_mask_for_model = full_attention_mask_long.to(model.dtype)
+     large_neg_val = torch.finfo(model.dtype).min
+     attention_mask_for_model = (1.0 - attention_mask_for_model) * large_neg_val
+     attention_mask_for_model = attention_mask_for_model.unsqueeze(1).unsqueeze(2)  # [B, 1, 1, N]
+
+     timesteps = torch.linspace(1, eps, steps + 1, device=device)
+     x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=-1)
+
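+     # timesteps[i] is the mask level t at step i, decaying linearly from 1 to eps;
+     # each step reveals roughly a (1 - s/t) fraction of the still-masked tokens.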
+     # --- 3. Visualization & History Setup ---
+     previous_tokens_vis = None
+     final_response_text = ""
+     # history_dict_list is the state we update and yield for the chatbot UI.
+     # Add the empty assistant message placeholder *to the history state* now.
+     history_dict_list.append({"role": "assistant", "content": ""})
+
+     # --- 4. Initial Yield (Masked State) ---
+     initial_generated_tokens = x[0, prompt_length:].cpu()
+     vis_data_initial = []
+     for tok_id in initial_generated_tokens.tolist():
+         display_token = MASK_TOKEN
+         color = "#444444"
+         vis_data_initial.append((display_token, color))
+
+     previous_tokens_vis = initial_generated_tokens
+     # Yield the history (which now includes the empty assistant turn)
+     yield history_dict_list, vis_data_initial, ""
+     time.sleep(visualization_delay)
+
+     # --- 5. Step-by-Step Diffusion Loop ---
+     try:
+         start_time = time.time()
+         for i in range(steps):
+             mask_index = (x == MASK_ID)
+             if not mask_index.any():
+                 print(f"No mask tokens left at step {i}. Stopping early.")
+                 break
+
+             outputs = model(
+                 input_ids=x,
+                 attention_mask=attention_mask_for_model,
+                 position_ids=None, use_cache=False, return_dict=True
+             )
+             logits = outputs.logits
+             # Shift logits one position right so logits[:, i] scores token i
+             # (the model predicts the next token at each position)
+             logits = torch.cat([logits[:, :1], logits[:, :-1]], dim=1)
+
+             mask_logits = logits[mask_index]
+             if mask_logits.numel() == 0:
+                 print(f"No masked tokens found for logit selection at step {i}. Stopping.")
+                 break
+
+             t = timesteps[i]
+             s = timesteps[i + 1]
+             # Penalize PAD early in denoising; log(1 - t + eps) tends to 0 as t -> eps
+             mask_logits[:, PAD_ID] += pad_penalty * torch.log(1 - t + eps)
+             x_new_masked_part = torch.full_like(x[mask_index], MASK_ID, device=device, dtype=torch.long)
+
+             # [Keep sampling logic the same - 'origin' and confidence-based]
+             if alg == 'origin':
+                 p_transfer = (1.0 - s / t) if i < steps - 1 else 1.0
+                 num_masked = mask_logits.shape[0]
+                 transfer_indices_relative = torch.rand(num_masked, device=device) < p_transfer
+                 logits_to_sample = mask_logits[transfer_indices_relative]
+                 if logits_to_sample.numel() > 0:
+                     _, sampled_tokens = sample_tokens(logits_to_sample, temperature=temperature, top_p=top_p_val, top_k=top_k_val)
+                     if transfer_indices_relative.sum() == sampled_tokens.numel():  # Basic check
+                         x_new_masked_part[transfer_indices_relative] = sampled_tokens
+                     else: print(f"Warning step {i} (origin): Mismatch transfer indices and sampled tokens.")
+
+             else:  # Confidence-based
+                 use_margin = (alg == 'topk_margin')
+                 use_entropy = (alg == 'entropy')
+                 confidence, x0_candidates = sample_tokens(mask_logits, temperature=temperature, top_p=top_p_val, top_k=top_k_val, margin_confidence=use_margin, neg_entropy=use_entropy)
+
+                 num_mask_token = mask_logits.shape[0]
+                 target_num_revealed_float = num_mask_token * (1.0 - s / t)
+                 number_transfer_tokens = int(target_num_revealed_float) if i < steps - 1 else num_mask_token
+
+                 if number_transfer_tokens > 0:
+                     num_samples = min(number_transfer_tokens, num_mask_token)
+                     if num_samples > 0:
+                         transfer_indices_relative = torch.tensor([], dtype=torch.long, device=device)  # Init empty
+                         if alg_temp_val is None or alg_temp_val <= 0:  # Top-k selection
+                             sort_metric = confidence
+                             k_topk = min(num_samples, sort_metric.numel())
+                             if k_topk > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_topk)
+                         else:  # Sample based on temperature
+                             if confidence.numel() > 0:
+                                 conf_probs = confidence / alg_temp_val
+                                 conf_probs = torch.nan_to_num(conf_probs, nan=0.0, posinf=1e9, neginf=-1e9)
+                                 conf_probs = torch.clamp(conf_probs - conf_probs.max(), min=-30)
+                                 conf_probs = F.softmax(conf_probs, dim=-1)
+                                 conf_probs = torch.clamp(conf_probs, min=0.0)
+                                 conf_probs = torch.nan_to_num(conf_probs, nan=0.0)
+                                 prob_sum = conf_probs.sum()
+                                 target_sum_tensor = torch.tensor(1.0, device=device, dtype=prob_sum.dtype)
+                                 if not torch.isclose(prob_sum, target_sum_tensor, atol=1e-4) and prob_sum > 0:
+                                     safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=device, dtype=prob_sum.dtype))
+                                     conf_probs = conf_probs / safe_prob_sum
+                                 final_prob_sum_check = conf_probs.sum()
+                                 if conf_probs.numel() > 0 and num_samples > 0 and torch.all(conf_probs >= 0) and torch.isclose(final_prob_sum_check, target_sum_tensor, atol=1e-4):
+                                     try: transfer_indices_relative = torch.multinomial(conf_probs, num_samples=num_samples, replacement=False)
+                                     except RuntimeError as e:
+                                         print(f"Warning step {i}: Multinomial sampling failed ('{e}'). Falling back to top-k.")
+                                         sort_metric = confidence
+                                         k_multinomial_fallback = min(num_samples, sort_metric.numel())
+                                         if k_multinomial_fallback > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_multinomial_fallback)
+                                 else:  # Fallback if probs are invalid for multinomial
+                                     # print(f"Warning step {i}: Invalid probabilities for multinomial sampling (sum={final_prob_sum_check:.4f}). Falling back to top-k.")
+                                     sort_metric = confidence
+                                     k_multinomial_fallback = min(num_samples, sort_metric.numel())
+                                     if k_multinomial_fallback > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_multinomial_fallback)
+
+                         # Apply transfer
+                         if transfer_indices_relative.numel() > 0:
+                             if x0_candidates.numel() > 0 and transfer_indices_relative.max() < x0_candidates.shape[0]:
+                                 if transfer_indices_relative.max() < x_new_masked_part.shape[0]:
+                                     x_new_masked_part[transfer_indices_relative] = x0_candidates[transfer_indices_relative].clone()
+                                 else: print(f"Warning step {i}: transfer_indices out of bounds for x_new_masked_part.")
+                             else: print(f"Warning step {i}: transfer_indices out of bounds for x0_candidates or x0_candidates empty.")
+
+             x[mask_index] = x_new_masked_part
+             x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=i)
+
+             # --- Yield Visualization & Update History ---
+             current_generated_tokens = x[0, prompt_length:].cpu()
+             vis_data = []
+             # [Visualization formatting logic remains the same]
+             for j in range(gen_length):
+                 current_tok_id = current_generated_tokens[j].item()
+                 previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
+                 try:
+                     decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
+                     display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
+                 except Exception: display_token = f"[ID:{current_tok_id}]"
+                 color = None; token_to_display = display_token
+                 if current_tok_id == MASK_ID: color = "#444444"
+                 elif previous_tok_id == MASK_ID: color = "#66CC66"  # Newly revealed this step
+                 else: color = "#6699CC"  # Revealed in an earlier step
+                 should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
+                 if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None
+                 if token_to_display: vis_data.append((token_to_display, color))
+
+             previous_tokens_vis = current_generated_tokens
+
+             intermediate_response_tokens = x[0, prompt_length:]
+             intermediate_response_text = tokenizer.decode(
+                 intermediate_response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True
+             ).strip()
+
+             # --- Update the *last* message in history_dict_list ---
+             history_dict_list[-1]['content'] = intermediate_response_text
+
+             # Yield the updated history list (for chatbot UI), vis data, and response text
+             yield history_dict_list, vis_data, intermediate_response_text
+             time.sleep(visualization_delay)
+
+         end_time = time.time()
+         print(f"Dream generation finished in {end_time - start_time:.2f} seconds.")
+
+         # --- 6. Final Processing & Yield ---
+         final_sequence = x[0]
+         response_tokens = final_sequence[prompt_length:]
+         final_response_text = tokenizer.decode(
+             response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True
+         ).strip()
+
+         # Ensure the final text is in the history object before the last yield
+         history_dict_list[-1]['content'] = final_response_text
+
+         final_generated_tokens = x[0, prompt_length:].cpu()
+         vis_data_final = []
+         # [Final visualization formatting logic remains the same]
+         for j in range(gen_length):
+             current_tok_id = final_generated_tokens[j].item()
+             previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
+             try:
+                 decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
+                 display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
+             except Exception: display_token = f"[ID:{current_tok_id}]"
+             color = None; token_to_display = display_token
+             if current_tok_id == MASK_ID: color = "#444444"
+             elif previous_tok_id == MASK_ID: color = "#66CC66"
+             else: color = "#6699CC"
+             should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
+             if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None
+             if token_to_display: vis_data_final.append((token_to_display, color))
+
+         yield history_dict_list, vis_data_final, final_response_text
+         print("Visualization streaming complete.")
+
+     except Exception as e:
+         print(f"Error during generation or processing: {e}")
+         traceback.print_exc()
+         # Attempt to add the error message to the history if possible
+         if history_dict_list and history_dict_list[-1]['role'] == 'assistant':
+             history_dict_list[-1]['content'] = f"Error: {e}"
+         yield history_dict_list, [("Error during generation.", "red")], f"Error: {e}"  # Also show the error in the text box
+         return
+
+ input_examples = ["""You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.
+
+ Question:
+ There are N cells in a row, numbered 1 to N.
+ For each 1 \\leq i < N, cells i and i+1 are adjacent.
+ Initially, cell i is painted with color i.
+ You are given Q queries. Process them in order. Each query is of one of the following two types.
+
+ - 1 x c: Repaint the following to color c: all cells reachable from cell x by repeatedly moving to an adjacent cell painted in the same color as the current cell.
+ - 2 c: Print the number of cells painted with color c.
+
+ Input
+
+ The input is given from Standard Input in the following format:
+ N Q
+ \\mathrm{query}_1
+ \\vdots
+ \\mathrm{query}_Q
+
+ Each query is given in one of the following two formats:
+ 1 x c
+
+ 2 c
+
+ Output
+
+ Let q be the number of queries of the second type. Print q lines.
+ The i-th line should contain the answer to the i-th such query.
+
+ Constraints
+
+ - 1 \\leq N \\leq 5 \\times 10^5
+ - 1 \\leq Q \\leq 2 \\times 10^5
+ - In queries of the first type, 1 \\leq x \\leq N.
+ - In queries of the first and second types, 1 \\leq c \\leq N.
+ - There is at least one query of the second type.
+ - All input values are integers.
+
+ Sample Input 1
+
+ 5 6
+ 1 5 4
+ 1 4 2
+ 2 2
+ 1 3 2
+ 1 2 3
+ 2 3
+
+ Sample Output 1
+
+ 3
+ 4
+
+ The queries recolor the cells as shown in the figure.
+
+ Read the inputs from stdin, solve the problem, and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the Python program runs, it reads the inputs, runs the algorithm, and writes the output to STDOUT.
+
+ ```python
+ # YOUR CODE HERE
+ ```
+ ""","""Please provide a self-contained Python script that solves the following problem in a markdown code block:
+ Calculates the average of the sums of absolute differences between each pair of consecutive numbers for all permutations of a given list. Each permutation is shuffled before calculating the differences. Args: - numbers (list): A list of numbers. Default is numbers from 1 to 10.
+ The function should output with:
+ float: The average of the sums of absolute differences for each shuffled permutation of the list.
+ You should write self-contained code starting with:
+ ```
+ import itertools
+ from random import shuffle
+ def task_func(numbers=list(range(1, 3))):
+ ```""","You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Execute the program step by step before arriving at an answer, and provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.\n\n[PYTHON]\ndef f(s):\n    s = s + s\n    return \"b\" + s + \"a\"\nassert f(\"hi\") == ??\n[/PYTHON]\n[THOUGHT]\nLet's execute the code step by step:\n\n1. The function f is defined, which takes a single argument s.\n2. The function is called with the argument \"hi\", so within the function, s is initially \"hi\".\n3. Inside the function, s is concatenated with itself, so s becomes \"hihi\".\n4. The function then returns a new string that starts with \"b\", followed by the value of s (which is now \"hihi\"), and ends with \"a\".\n5. The return value of the function is therefore \"bhihia\".\n[/THOUGHT]\n[ANSWER]\nassert f(\"hi\") == \"bhihia\"\n[/ANSWER]\n\n[PYTHON]\ndef f(nums):\n    output = []\n    for n in nums:\n        output.append((nums.count(n), n))\n    output.sort(reverse=True)\n    return output\nassert f([1, 1, 3, 1, 3, 1]) == ??\n[/PYTHON]\n[THOUGHT]\n", "Write a quick sort algorithm."]
+
+ labels = ['Sketch-First Generation (from LiveCodeBench)', 'Left-to-Right Generation (from BigCodeBench)', 'Interleaved Reasoning Generation (from CRUXEval)', 'Quicksort algorithm']
+
+ # --- Gradio UI ---
+ css = '''
+ .category-legend{display:none}
+ '''
+ def create_chatbot_demo():
+     with gr.Blocks(css=css) as demo:
+         gr.Markdown("# Dream-Coder-7B-Instruct")
+         gr.Markdown(
+             "[[Model Card](https://huggingface.co/Dream-org/Dream-Coder-v0-Instruct-7B)] "
+             "[[Blog](https://hkunlp.github.io/blog/2025/dream-coder/)] "
+             "[[GitHub](https://github.com/DreamLM/Dream-Coder)]"
+         )
+
+         with gr.Row():
+             with gr.Column(scale=3):
+                 chatbot_ui = gr.Chatbot(
+                     label="Conversation",
+                     height=500,
+                     show_copy_button=True,
+                     bubble_full_width=False,
+                     value=[],  # Initialize empty
+                     type="messages"  # Crucial: use the messages format
+                 )
+                 with gr.Group():
+                     with gr.Row():
+                         user_input = gr.Textbox(
+                             label="Your Message", placeholder="Type your message here...",
+                             scale=7, autofocus=True, show_label=False, container=False
+                         )
+                         send_btn = gr.Button("Send", scale=1, variant="primary")
+                 constraints_input = gr.Textbox(
+                     label="Word Constraints (Optional)",
+                     info="Format: 'pos:word, pos:word,...'. Example: '0:Once, 5:upon'",
+                     placeholder="0:Hello, 10:world", value="",
+                     visible=False
+                 )
+                 clear_btn = gr.Button("Clear Conversation")
+                 examples = gr.Examples(
+                     examples=input_examples,
+                     example_labels=labels,
+                     inputs=user_input
+                 )
+
+             with gr.Column(scale=2):
+                 output_vis = gr.HighlightedText(
+                     label="Denoising Process Visualization", combine_adjacent=False,
+                     show_legend=True, interactive=False
+                 )
+                 response_text_display = gr.Textbox(
+                     label="Current/Final Response", interactive=False, lines=5, visible=False
+                 )
+
+         # [Keep Accordion with Generation Settings the same]
+         with gr.Accordion("Generation Settings", open=False):
+             with gr.Row():
+                 gen_length = gr.Slider(minimum=16, maximum=1024, value=512, step=8, label="Max New Tokens")
+                 steps = gr.Slider(minimum=8, maximum=512, value=512, step=8, label="Diffusion Steps")
+             with gr.Row():
+                 temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.05, label="Temperature (0 = greedy)")
+                 alg_temp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Algorithm Temperature")
+             with gr.Row():
+                 top_p = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.05, label="Top-P (0 disables)")
+                 top_k = gr.Slider(minimum=0, maximum=200, value=0, step=5, label="Top-K (0 disables)")
+             with gr.Row():
+                 pad_penalty = gr.Slider(minimum=0, maximum=5, value=3, step=0.5, label="Pad Penalty")
+                 remasking_strategy = gr.Radio(choices=['origin', 'maskgit_plus', 'topk_margin', 'entropy'], value='entropy', label="Generation Algorithm")
+             with gr.Row():
+                 visualization_delay = gr.Slider(minimum=0.0, maximum=0.5, value=0.0, step=0.01, label="Visualization Delay (s)")
+
+         # --- Event Handlers ---
+
+         # User function: appends the user message to the history (list of dicts)
+         def add_user_message(message: str, history: List[Dict[str, str]]):
+             if not message.strip():
+                 gr.Warning("Please enter a message.")
+                 return history, ""  # Return unchanged history, empty input
+             history.append({"role": "user", "content": message})
+             # Return updated history for the chatbot UI, and clear the input box
+             return history, ""
+
+         # Bot function (now the generator)
+         # Inputs: chatbot history (list of dicts), generation params
+         # Outputs: chatbot history (updated list of dicts), visualization, response text
+         generation_inputs = [
+             chatbot_ui,  # Pass chatbot state directly (list of dicts)
+             gen_length, steps, constraints_input,
+             temperature, top_p, top_k, remasking_strategy, alg_temp, pad_penalty,
+             visualization_delay
+         ]
+         generation_outputs = [chatbot_ui, output_vis, response_text_display]
+
+         # --- Connect UI elements ---
+
+         # Textbox submission (Enter key)
+         submit_listener = user_input.submit(
+             fn=add_user_message,
+             inputs=[user_input, chatbot_ui],
+             outputs=[chatbot_ui, user_input]  # Update chatbot UI and clear input
+         ).then(
+             fn=generate_dream_response,
+             inputs=generation_inputs,
+             outputs=generation_outputs,
+             show_progress="hidden"  # Hide the default progress bar
+         )
+
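+         # .then() chains the two handlers: add_user_message commits the user turn
+         # to the chat state first, then generate_dream_response (a generator)
+         # streams every yield to the chatbot, visualization, and response textbox.
+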
+         # Send button click
+         click_listener = send_btn.click(
+             fn=add_user_message,
+             inputs=[user_input, chatbot_ui],
+             outputs=[chatbot_ui, user_input]  # Update chatbot UI and clear input
+         ).then(
+             fn=generate_dream_response,
+             inputs=generation_inputs,
+             outputs=generation_outputs,
+             show_progress="hidden"
+         )
+
+         # Clear button action
+         clear_btn.click(
+             lambda: ([], [], ""),  # Return empty values for all three outputs
+             inputs=[],
+             outputs=[chatbot_ui, output_vis, response_text_display],  # Clear chatbot, vis, text
+             queue=False  # No need to queue clearing
+         )
+
+     return demo
+
+ # --- Launch ---
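+ # Note: queue() below is what allows generate_dream_response, a generator, to
+ # stream each intermediate yield to the browser rather than returning only once.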
+ if __name__ == "__main__":
+     demo = create_chatbot_demo()
+     demo.queue().launch(debug=True, share=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ accelerate
+ sentencepiece
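+ # Assumption: gradio, torch, and numpy are provided by the hosting runtime
+ # (e.g., a Hugging Face Space); pin them here when deploying elsewhere.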