Shiro2 committed
Commit 0dc3f81 · verified · 1 Parent(s): 80225cf

Upload app1.py

Files changed (1):
  1. app1.py +380 -96
app1.py CHANGED
@@ -2,7 +2,8 @@ import torch
 import numpy as np
 import gradio as gr
 import torch.nn.functional as F
-from transformers import AutoTokenizer, AutoModel
+from transformers import AutoTokenizer
+from model.modeling_llada import LLaDAModelLM
 import time
 import re
 
@@ -10,13 +11,47 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'
 print(f"Using device: {device}")
 
 # Load model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained('mradermacher/LLaDA-1.5-GGUF', gguf_file = 'LLaDA-1.5.Q8_0.gguf', trust_remote_code=True)
-model = AutoModel.from_pretrained('mradermacher/LLaDA-1.5-GGUF', gguf_file = 'LLaDA-1.5.Q8_0.gguf', trust_remote_code=True,
-                                  torch_dtype='auto').to(device)
+tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
+model = LLaDAModelLM.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True,
+                                     torch_dtype=torch.bfloat16, device_map = 'auto').to(device)
 
 # Constants
 MASK_TOKEN = "[MASK]"
 MASK_ID = 126336 # The token ID of [MASK] in LLaDA
+question_gsm8k = '''Question: Jen and Tyler are gymnasts practicing flips. Jen is practicing the triple-flip while Tyler is practicing the double-flip. Jen did sixteen triple-flips during practice. Tyler flipped in the air half the number of times Jen did. How many double-flips did Tyler do?
+Answer: Jen did 16 triple-flips, so she did 16 * 3 = <<16*3=48>>48 flips.
+Tyler did half the number of flips, so he did 48 / 2 = <<48/2=24>>24 flips.
+A double flip has two flips, so Tyler did 24 / 2 = <<24/2=12>>12 double-flips.
+#### 12
+
+Question: Four people in a law firm are planning a party. Mary will buy a platter of pasta for $20 and a loaf of bread for $2. Elle and Andrea will split the cost for buying 4 cans of soda which cost $1.50 each, and chicken wings for $10. Joe will buy a cake that costs $5. How much more will Mary spend than the rest of the firm put together?
+Answer: Mary will spend $20 + $2 = $<<20+2=22>>22.
+Elle and Andrea will spend $1.5 x 4 = $<<1.5*4=6>>6 for the soda.
+Elle and Andrea will spend $6 + $10 = $<<6+10=16>>16 for the soda and chicken wings.
+Elle, Andrea, and Joe together will spend $16 + $5 = $<<16+5=21>>21.
+So, Mary will spend $22 - $21 = $<<22-21=1>>1 more than all of them combined.
+#### 1
+
+Question: A charcoal grill burns fifteen coals to ash every twenty minutes of grilling. The grill ran for long enough to burn three bags of coals. Each bag of coal contains 60 coals. How long did the grill run?
+Answer: The grill burned 3 * 60 = <<3*60=180>>180 coals.
+It takes 20 minutes to burn 15 coals, so the grill ran for 180 / 15 * 20 = <<180/15*20=240>>240 minutes.
+#### 240
+
+Question: A bear is preparing to hibernate for the winter and needs to gain 1000 pounds. At the end of summer, the bear feasts on berries and small woodland animals. During autumn, it devours acorns and salmon. It gained a fifth of the weight it needed from berries during summer, and during autumn, it gained twice that amount from acorns. Salmon made up half of the remaining weight it had needed to gain. How many pounds did it gain eating small animals?
+Answer: The bear gained 1 / 5 * 1000 = <<1/5*1000=200>>200 pounds from berries.
+It gained 2 * 200 = <<2*200=400>>400 pounds from acorns.
+It still needed 1000 - 200 - 400 = <<1000-200-400=400>>400 pounds.
+Thus, it gained 400 / 2 = <<400/2=200>>200 pounds from salmon.
+Therefore, the bear gained 400 - 200 = <<400-200=200>>200 pounds from small animals.
+#### 200
+
+Question: Brendan can cut 8 yards of grass per day, he bought a lawnmower and it helped him to cut more yards by Fifty percent per day. How many yards will Brendan be able to cut after a week?
+Answer: The additional yard Brendan can cut after buying the lawnmower is 8 x 0.50 = <<8*0.50=4>>4 yards.
+So, the total yards he can cut with the lawnmower is 8 + 4 = <<8+4=12>>12.
+Therefore, the total number of yards he can cut in a week is 12 x 7 = <<12*7=84>>84 yards.
+#### 84
+
+Question: Skyler has 100 hats on his hand with the colors red, blue, and white. Half of the hats are red, 3/5 of the remaining hats are blue, and the rest are white. How many white hats does Skyler have?'''
 
 def parse_constraints(constraints_text):
     """Parse constraints in format: 'position:word, position:word, ...'"""
@@ -91,8 +126,182 @@ def get_num_transfer_tokens(mask_index, steps):
 
     return num_transfer_tokens
 
+def generate_response_with_visualization_cache_and_parallel(model, tokenizer, device, messages, gen_length=64, steps=32,
+                                                            constraints=None, temperature=0.0, block_length=32,
+                                                            remasking='low_confidence', threshold=0.9):
+    """
+    Generate text with LLaDA model with visualization using the same sampling as in generate.py
+
+    Args:
+        messages: List of message dictionaries with 'role' and 'content'
+        gen_length: Length of text to generate
+        steps: Number of denoising steps
+        constraints: Dictionary mapping positions to words
+        temperature: Sampling temperature
+        block_length: Block length for semi-autoregressive generation
+        remasking: Remasking strategy ('low_confidence' or 'random')
+
+    Returns:
+        List of visualization states showing the progression and final text
+    """
+
+    # Process constraints
+    if constraints is None:
+        constraints = {}
+
+    # Convert any string constraints to token IDs
+    processed_constraints = {}
+    for pos, word in constraints.items():
+        tokens = tokenizer.encode(" " + word, add_special_tokens=False)
+        for i, token_id in enumerate(tokens):
+            processed_constraints[pos + i] = token_id
+
+    # Prepare the prompt using chat template
+    chat_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
+    input_ids = tokenizer(chat_input)['input_ids']
+    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
+
+    # For generation
+    prompt_length = input_ids.shape[1]
+
+    # Initialize the sequence with masks for the response part
+    x = torch.full((1, prompt_length + gen_length), MASK_ID, dtype=torch.long).to(device)
+    x[:, :prompt_length] = input_ids.clone()
+
+    # Initialize visualization states for the response part
+    visualization_states = []
+
+    # Add initial state (all masked)
+    initial_state = [(MASK_TOKEN, "#444444") for _ in range(gen_length)]
+    visualization_states.append(initial_state)
+
+    # Apply constraints to the initial state
+    for pos, token_id in processed_constraints.items():
+        absolute_pos = prompt_length + pos
+        if absolute_pos < x.shape[1]:
+            x[:, absolute_pos] = token_id
+
+    # Ensure block_length is valid
+    if block_length > gen_length:
+        block_length = gen_length
+
+    # Calculate number of blocks
+    num_blocks = gen_length // block_length
+    if gen_length % block_length != 0:
+        num_blocks += 1
+
+    # Adjust steps per block
+    steps_per_block = steps // num_blocks
+    if steps_per_block < 1:
+        steps_per_block = 1
+
+    # Process each block
+    for num_block in range(num_blocks):
+        current_block_start = prompt_length + num_block * block_length
+        current_block_end = current_block_start + block_length
+
+        block_mask_index = (x[:, current_block_start:current_block_end] == MASK_ID)
+        num_transfer_tokens = get_num_transfer_tokens(block_mask_index, steps)
+
+        output = model(x, use_cache=True)
+        past_key_values = output.past_key_values
+
+        mask_index = (x == MASK_ID)
+        mask_index[:, current_block_end:] = 0
+        x0, transfer_index = get_transfer_index(output.logits, temperature, remasking, mask_index, x, num_transfer_tokens[:, 0] if threshold is None else None, threshold)
+        x[transfer_index] = x0[transfer_index]
+
+        new_past_key_values = []
+        for i in range(len(past_key_values)):
+            new_past_key_values.append(())
+            for j in range(len(past_key_values[i])):
+                new_past_key_values[i] += (past_key_values[i][j][:, :, :current_block_start],)
+        past_key_values = new_past_key_values
+
+        # Create visualization state only for the response part
+        current_state = []
+        for i in range(gen_length):
+            pos = prompt_length + i  # Absolute position in the sequence
+            if x[0, pos] == MASK_ID:
+                # Still masked
+                current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks
+            else:
+                # Previously revealed
+                token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
+                current_state.append((token, "#6699CC"))  # Light blue
+        visualization_states.append(current_state)
+
+        i = 1
+        while True:
+            mask_index = (x[:, current_block_start:] == MASK_ID)
+            mask_index[:, block_length:] = 0
+
+            logits = model(x[:, current_block_start:], past_key_values=past_key_values, use_cache=True).logits
+
+            logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
+            x0 = torch.argmax(logits_with_noise, dim=-1)  # b, l
+
+            x0, transfer_index = get_transfer_index(logits, temperature, remasking, mask_index,
+                                                    x[:, current_block_start:], num_transfer_tokens[:, i] if threshold is None else None, threshold)
+            x[:, current_block_start:][transfer_index] = x0[transfer_index]
+
+            # Create visualization state only for the response part
+            current_state = []
+            for i in range(gen_length):
+                pos = prompt_length + i  # Absolute position in the sequence
+                if x[0, pos] == MASK_ID:
+                    # Still masked
+                    current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks
+                else:
+                    # Previously revealed
+                    token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
+                    current_state.append((token, "#6699CC"))  # Light blue
+            visualization_states.append(current_state)
+
+            if (x[:, current_block_start:current_block_end] == MASK_ID).sum() == 0:
+                break
+            i += 1
+
+    # Extract final text (just the assistant's response)
+    response_tokens = x[0, prompt_length:]
+    final_text = tokenizer.decode(response_tokens,
+                                  skip_special_tokens=True,
+                                  clean_up_tokenization_spaces=True)
+
+    return visualization_states, final_text
+
+
+def get_transfer_index(logits, temperature, remasking, mask_index, x, num_transfer_tokens, threshold=None):
+    logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
+    x0 = torch.argmax(logits_with_noise, dim=-1)  # b, l
+
+    if remasking == 'low_confidence':
+        p = F.softmax(logits.to(torch.float64), dim=-1)
+        x0_p = torch.squeeze(
+            torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1)  # b, l
+    elif remasking == 'random':
+        x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
+    else:
+        raise NotImplementedError(remasking)
+
+    x0 = torch.where(mask_index, x0, x)
+    confidence = torch.where(mask_index, x0_p, -np.inf)
+
+    transfer_index = torch.zeros_like(x0, dtype=torch.bool, device=x0.device)
+    if threshold is not None:
+        num_transfer_tokens = mask_index.sum(dim=1, keepdim=True)
+    for j in range(confidence.shape[0]):
+        _, select_index = torch.topk(confidence[j], k=num_transfer_tokens[j])
+        transfer_index[j, select_index] = True
+        if threshold is not None:
+            for k in range(1, num_transfer_tokens[j]):
+                if confidence[j, select_index[k]] < threshold:
+                    transfer_index[j, select_index[k]] = False
+    return x0, transfer_index
+
 def generate_response_with_visualization(model, tokenizer, device, messages, gen_length=64, steps=32,
-                                         constraints=None, temperature=0.0, cfg_scale=0.0, block_length=32,
+                                         constraints=None, temperature=0.0, block_length=32,
                                          remasking='low_confidence'):
     """
     Generate text with LLaDA model with visualization using the same sampling as in generate.py
@@ -103,7 +312,6 @@ def generate_response_with_visualization(model, tokenizer, device, messages, gen
     steps: Number of denoising steps
     constraints: Dictionary mapping positions to words
     temperature: Sampling temperature
-    cfg_scale: Classifier-free guidance scale
     block_length: Block length for semi-autoregressive generation
     remasking: Remasking strategy ('low_confidence' or 'random')
 
@@ -164,9 +372,6 @@ def generate_response_with_visualization(model, tokenizer, device, messages, gen
     if steps_per_block < 1:
         steps_per_block = 1
 
-    # Track the current state of x for visualization
-    current_x = x.clone()
-
     # Process each block
     for num_block in range(num_blocks):
         # Calculate the start and end indices for the current block
@@ -192,16 +397,8 @@ def generate_response_with_visualization(model, tokenizer, device, messages, gen
         if not mask_index.any():
             break
 
-        # Apply classifier-free guidance if enabled
-        if cfg_scale > 0.0:
-            un_x = x.clone()
-            un_x[prompt_index] = MASK_ID
-            x_ = torch.cat([x, un_x], dim=0)
-            logits = model(x_).logits
-            logits, un_logits = torch.chunk(logits, 2, dim=0)
-            logits = un_logits + (cfg_scale + 1) * (logits - un_logits)
-        else:
-            logits = model(x).logits
+        # Get logits from model
+        logits = model(x).logits
 
         # Apply Gumbel noise for sampling
         logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
@@ -258,21 +455,6 @@ def generate_response_with_visualization(model, tokenizer, device, messages, gen
             if x[0, pos] == MASK_ID:
                 # Still masked
                 current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks
-
-            elif old_x[0, pos] == MASK_ID:
-                # Newly revealed in this step
-                token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
-                # Color based on confidence
-                confidence = float(x0_p[0, pos].cpu())
-                if confidence < 0.3:
-                    color = "#FF6666"  # Light red
-                elif confidence < 0.7:
-                    color = "#FFAA33"  # Orange
-                else:
-                    color = "#66CC66"  # Light green
-
-                current_state.append((token, color))
-
             else:
                 # Previously revealed
                 token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
@@ -290,53 +472,97 @@ def generate_response_with_visualization(model, tokenizer, device, messages, gen
 
 css = '''
 .category-legend{display:none}
-button{height: 60px}
+.message, .bubble, .chatbot .message, .chatbot .bubble {
+    max-width: 80% !important;
+    white-space: pre-wrap !important;
+    word-break: break-word !important;
+    box-sizing: border-box !important;
+}
 '''
 def create_chatbot_demo():
     with gr.Blocks(css=css) as demo:
-        gr.Markdown("# LLaDA - Large Language Diffusion Model Demo")
-        gr.Markdown("[model](https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct), [project page](https://ml-gsai.github.io/LLaDA-demo/)")
+        gr.Markdown("# Fast-dLLM: Training-free Acceleration of Diffusion LLM by Enabling KV Cache and Parallel Decoding")
+        gr.Markdown("[code](https://github.com/NVlabs/Fast-dLLM), [project page](https://nvlabs.github.io/Fast-dLLM/)")
 
         # STATE MANAGEMENT
-        chat_history = gr.State([])
+        chat_history_baseline = gr.State([])
+        chat_history_cache = gr.State([])
 
         # UI COMPONENTS
        with gr.Row():
            with gr.Column(scale=3):
                chatbot_ui = gr.Chatbot(label="Conversation", height=500)
-
-                # Message input
-                with gr.Group():
-                    with gr.Row():
-                        user_input = gr.Textbox(
-                            label="Your Message",
-                            placeholder="Type your message here...",
-                            show_label=False
-                        )
-                        send_btn = gr.Button("Send")
-
-                constraints_input = gr.Textbox(
-                    label="Word Constraints",
-                    info="This model allows for placing specific words at specific positions using 'position:word' format. Example: 1st word once, 6th word 'upon' and 11th word 'time', would be: '0:Once, 5:upon, 10:time'",
-                    placeholder="0:Once, 5:upon, 10:time",
-                    value=""
-                )
            with gr.Column(scale=2):
                output_vis = gr.HighlightedText(
                    label="Denoising Process Visualization",
                    combine_adjacent=False,
                    show_legend=True,
                )
+                generation_time = gr.Textbox(
+                    label="Generation Time",
+                    value="0.00s",
+                    interactive=False
+                )
+                throughput = gr.Textbox(
+                    label="Generation Speed",
+                    value="0.00 tokens/s",
+                    interactive=False
+                )
+
+        # Add separator line
+        gr.Markdown("---")
+
+        # Duplicate conversation interface
+        with gr.Row():
+            with gr.Column(scale=3):
+                chatbot_ui_copy = gr.Chatbot(label="Conversation (Accelerated)", height=500)
+            with gr.Column(scale=2):
+                output_vis_copy = gr.HighlightedText(
+                    label="Denoising Process Visualization",
+                    combine_adjacent=False,
+                    show_legend=True,
+                )
+                generation_time_copy = gr.Textbox(
+                    label="Generation Time",
+                    value="0.00s",
+                    interactive=False
+                )
+                throughput_copy = gr.Textbox(
+                    label="Generation Speed",
+                    value="0.00 tokens/s",
+                    interactive=False
+                )
+        # Move input area below the duplicate conversation interface
+        with gr.Group():
+            user_input = gr.Textbox(
+                label="Your Message",
+                placeholder="Type your message here...",
+                show_label=False
+            )
+            send_btn = gr.Button("Send")
+            constraints_input = gr.Textbox(
+                label="Word Constraints",
+                info="This model allows for placing specific words at specific positions using 'position:word' format. Example: 1st word once, 6th word 'upon' and 11th word 'time', would be: '0:Once, 5:upon, 10:time'",
+                placeholder="0:Once, 5:upon, 10:time",
+                value=""
+            )
+            gr.Examples(
+                examples=[
+                    [question_gsm8k]
+                ],
+                inputs=user_input,
+                label="Example Inputs"
+            )
 
         # Advanced generation settings
         with gr.Accordion("Generation Settings", open=False):
            with gr.Row():
                gen_length = gr.Slider(
-                    minimum=16, maximum=128, value=64, step=8,
+                    minimum=64, maximum=1024, value=256, step=64,
                    label="Generation Length"
                )
                steps = gr.Slider(
-                    minimum=8, maximum=64, value=32, step=4,
+                    minimum=8, maximum=1024, value=256, step=4,
                    label="Denoising Steps"
                )
            with gr.Row():
@@ -344,9 +570,9 @@ def create_chatbot_demo():
                    minimum=0.0, maximum=1.0, value=0.0, step=0.1,
                    label="Temperature"
                )
-                cfg_scale = gr.Slider(
-                    minimum=0.0, maximum=2.0, value=0.0, step=0.1,
-                    label="CFG Scale"
+                threshold = gr.Slider(
+                    minimum=0.5, maximum=1.0, value=0.9, step=0.1,
+                    label="Threshold"
                )
            with gr.Row():
                block_length = gr.Slider(
@@ -382,37 +608,40 @@ def create_chatbot_demo():
            history.append([message, response])
            return history
 
-        def user_message_submitted(message, history, gen_length, steps, constraints, delay):
+        def user_message_submitted(message, history_baseline, history_cache, gen_length, steps, constraints, delay):
            """Process a submitted user message"""
            # Skip empty messages
            if not message.strip():
                # Return current state unchanged
-                history_for_display = history.copy()
-                return history, history_for_display, "", [], ""
+                history_baseline_for_display = history_baseline.copy()
+                history_cache_for_display = history_cache.copy()
+                return history_baseline, history_cache, history_baseline_for_display, history_cache_for_display, "", [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
 
-            # Add user message to history
-            history = add_message(history, message, None)
+            # Add user message to both histories
+            history_baseline = add_message(history_baseline, message, None)
+            history_cache = add_message(history_cache, message, None)
 
            # Format for display - temporarily show user message with empty response
-            history_for_display = history.copy()
+            history_baseline_for_display = history_baseline.copy()
+            history_cache_for_display = history_cache.copy()
 
            # Clear the input
            message_out = ""
 
            # Return immediately to update UI with user message
-            return history, history_for_display, message_out, [], ""
+            return history_baseline, history_cache, history_baseline_for_display, history_cache_for_display, message_out, [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
 
-        def bot_response(history, gen_length, steps, constraints, delay, temperature, cfg_scale, block_length, remasking):
+        def bot_response(history_baseline, history_cache, gen_length, steps, constraints, delay, temperature, block_length, remasking, threshold):
            """Generate bot response for the latest message"""
-            if not history:
-                return history, [], ""
+            if not history_baseline or not history_cache:
+                return history_baseline, history_cache, [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
 
            # Get the last user message
-            last_user_message = history[-1][0]
+            last_user_message = history_baseline[-1][0]
 
            try:
                # Format all messages except the last one (which has no response yet)
-                messages = format_chat_history(history[:-1])
+                messages = format_chat_history(history_baseline[:-1])
 
                # Add the last user message
                messages.append({"role": "user", "content": last_user_message})
@@ -420,7 +649,10 @@ def create_chatbot_demo():
                # Parse constraints
                parsed_constraints = parse_constraints(constraints)
 
-                # Generate response with visualization
+                # Start timing for baseline
+                start_time = time.time()
+
+                # Generate response with visualization for baseline
                vis_states, response_text = generate_response_with_visualization(
                    model, tokenizer, device,
                    messages,
@@ -428,21 +660,55 @@ def create_chatbot_demo():
                    steps=steps,
                    constraints=parsed_constraints,
                    temperature=temperature,
-                    cfg_scale=cfg_scale,
                    block_length=block_length,
-                    remasking=remasking
+                    remasking=remasking,
+                )
+
+                # Calculate generation time and throughput for baseline
+                generation_time = time.time() - start_time
+                generation_time_str = f"{generation_time:.2f}s"
+
+                # Calculate throughput for baseline
+                response_tokens = tokenizer.encode(response_text, add_special_tokens=False)
+                num_tokens = len(response_tokens)
+                throughput = num_tokens / generation_time if generation_time > 0 else 0
+                throughput_str = f"{throughput:.2f} tokens/s"
+
+                # Start timing for cache version
+                cache_start_time = time.time()
+                cache_vis_states, cache_response_text = generate_response_with_visualization_cache_and_parallel(
+                    model, tokenizer, device,
+                    messages,
+                    gen_length=gen_length,
+                    steps=steps,
+                    constraints=parsed_constraints,
+                    temperature=temperature,
+                    block_length=block_length,
+                    remasking=remasking,
+                    threshold=threshold
                )
+                cache_generation_time = time.time() - cache_start_time
+                cache_generation_time_str = f"{cache_generation_time:.2f}s"
+                cache_response_tokens = tokenizer.encode(cache_response_text, add_special_tokens=False)
+                cache_num_tokens = len(cache_response_tokens)
+                cache_throughput = cache_num_tokens / cache_generation_time if cache_generation_time > 0 else 0
+                cache_throughput_str = f"{cache_throughput:.2f} tokens/s"
 
-                # Update history with the assistant's response
-                history[-1][1] = response_text
+                # Update both histories with their respective responses
+                history_baseline[-1][1] = response_text
+                history_cache[-1][1] = cache_response_text
 
                # Return the initial state immediately
-                yield history, vis_states[0], response_text
+                yield history_baseline, history_cache, vis_states[0], cache_vis_states[0], response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
 
                # Then animate through visualization states
                for state in vis_states[1:]:
                    time.sleep(delay)
-                    yield history, state, response_text
+                    yield history_baseline, history_cache, state, cache_vis_states[0], response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
+
+                for state in cache_vis_states[1:]:
+                    time.sleep(delay)
+                    yield history_baseline, history_cache, vis_states[-1], state, response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
 
            except Exception as e:
                error_msg = f"Error: {str(e)}"
@@ -451,12 +717,30 @@ def create_chatbot_demo():
                # Show error in visualization
                error_vis = [(error_msg, "red")]
 
-                # Don't update history with error
-                yield history, error_vis, error_msg
+                # Don't update histories with error
+                yield history_baseline, history_cache, error_vis, error_vis, error_msg, "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
 
        def clear_conversation():
            """Clear the conversation history"""
-            return [], [], "", []
+            empty_history = []
+            empty_response = ""
+            empty_vis = []
+            time_str = "0.00s"
+            throughput_str = "0.00 tokens/s"
+
+            return (
+                empty_history,    # chat_history_baseline
+                empty_history,    # chat_history_cache
+                empty_history,    # chatbot_ui
+                empty_history,    # chatbot_ui_copy
+                empty_response,   # current_response
+                empty_vis,        # output_vis
+                time_str,         # generation_time
+                throughput_str,   # throughput
+                empty_vis,        # output_vis_copy
+                time_str,         # generation_time_copy
+                throughput_str    # throughput_copy
+            )
 
        # EVENT HANDLERS
 
@@ -464,22 +748,22 @@ def create_chatbot_demo():
        clear_btn.click(
            fn=clear_conversation,
            inputs=[],
-            outputs=[chat_history, chatbot_ui, current_response, output_vis]
+            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, current_response, output_vis, generation_time, throughput, output_vis_copy, generation_time_copy, throughput_copy]
        )
 
        # User message submission flow (2-step process)
        # Step 1: Add user message to history and update UI
        msg_submit = user_input.submit(
            fn=user_message_submitted,
-            inputs=[user_input, chat_history, gen_length, steps, constraints_input, visualization_delay],
-            outputs=[chat_history, chatbot_ui, user_input, output_vis, current_response]
+            inputs=[user_input, chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input, visualization_delay],
+            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, user_input, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
        )
 
        # Also connect the send button
        send_click = send_btn.click(
            fn=user_message_submitted,
-            inputs=[user_input, chat_history, gen_length, steps, constraints_input, visualization_delay],
-            outputs=[chat_history, chatbot_ui, user_input, output_vis, current_response]
+            inputs=[user_input, chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input, visualization_delay],
+            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, user_input, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
        )
 
        # Step 2: Generate bot response
@@ -487,21 +771,21 @@ def create_chatbot_demo():
        msg_submit.then(
            fn=bot_response,
            inputs=[
-                chat_history, gen_length, steps, constraints_input,
-                visualization_delay, temperature, cfg_scale, block_length,
-                remasking_strategy
+                chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input,
+                visualization_delay, temperature, block_length,
+                remasking_strategy, threshold
            ],
-            outputs=[chatbot_ui, output_vis, current_response]
+            outputs=[chatbot_ui, chatbot_ui_copy, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
        )
 
        send_click.then(
            fn=bot_response,
            inputs=[
-                chat_history, gen_length, steps, constraints_input,
-                visualization_delay, temperature, cfg_scale, block_length,
-                remasking_strategy
+                chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input,
+                visualization_delay, temperature, block_length,
+                remasking_strategy, threshold
            ],
-            outputs=[chatbot_ui, output_vis, current_response]
+            outputs=[chatbot_ui, chatbot_ui_copy, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
        )
 
    return demo
@@ -509,4 +793,4 @@ def create_chatbot_demo():
 # Launch the demo
 if __name__ == "__main__":
    demo = create_chatbot_demo()
-    demo.queue().launch(share=True)
+    demo.queue().launch(share=True)
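Note: add_gumbel_noise is called throughout the diff but its body lies outside the changed hunks. For reference, a generic Gumbel-max sampler (an assumption about the intent, not the committed body): adding Gumbel(0,1) noise to the logits and taking the argmax is equivalent to sampling from softmax(logits), and scaling by `temperature` interpolates toward greedy decoding at temperature 0.

import torch

def gumbel_max_sample(logits: torch.Tensor, temperature: float) -> torch.Tensor:
    # Gumbel-max trick: argmax(logits/T + Gumbel(0,1)) samples from softmax(logits/T);
    # temperature == 0 falls back to plain greedy argmax, matching the demo's default.
    if temperature == 0.0:
        return logits.argmax(dim=-1)
    u = torch.rand_like(logits).clamp_min(1e-9)  # Uniform(0, 1) draws
    gumbel = -torch.log(-torch.log(u))           # Gumbel(0, 1) noise
    return (logits / temperature + gumbel).argmax(dim=-1)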
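Note: the new get_transfer_index decodes, in a single step, every masked position whose prediction confidence clears `threshold`, and always at least the most confident one so each step makes progress. A minimal standalone sketch of that selection rule with toy tensors (an illustration, not the committed code):

import torch

def select_tokens_to_unmask(confidence: torch.Tensor, threshold: float) -> torch.Tensor:
    # confidence: (batch, length) scores for the current argmax predictions,
    # with -inf at positions that are not masked (as in the diff above).
    transfer = confidence >= threshold                        # all high-confidence positions, in parallel
    top1 = confidence.argmax(dim=1)                           # most confident position per row
    transfer[torch.arange(confidence.shape[0]), top1] = True  # always decode at least one token
    return transfer

# Toy example: three masked positions with confidences 0.95, 0.40, 0.92.
conf = torch.tensor([[0.95, 0.40, 0.92]])
print(select_tokens_to_unmask(conf, threshold=0.9))  # tensor([[ True, False,  True]])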
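Note: the cache path runs the full sequence once per block, then crops every layer's cached keys and values to the block start and re-runs only the suffix. A small sketch of the cropping step, assuming the Hugging Face legacy tuple cache layout of per-layer (key, value) pairs shaped (batch, heads, seq_len, head_dim):

import torch

def crop_cache(past_key_values, prefix_len):
    # Keep only the first prefix_len cached positions per layer, mirroring
    # past_key_values[i][j][:, :, :current_block_start] in the diff above.
    return [tuple(t[:, :, :prefix_len] for t in layer) for layer in past_key_values]

# Toy cache: 2 layers, batch 1, 4 heads, 10 cached positions, head_dim 8.
cache = [(torch.zeros(1, 4, 10, 8), torch.zeros(1, 4, 10, 8)) for _ in range(2)]
cropped = crop_cache(cache, prefix_len=6)
print(cropped[0][0].shape)  # torch.Size([1, 4, 6, 8])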