Shiro2 commited on
Commit
9926c60
·
verified ·
1 Parent(s): 2cd24ae

Upload app1.py

Browse files
Files changed (1) hide show
  1. app1.py +512 -0
app1.py ADDED
@@ -0,0 +1,512 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import gradio as gr
4
+ import torch.nn.functional as F
5
+ from transformers import AutoTokenizer, AutoModel
6
+ import time
7
+ import re
8
+
9
# Device selection: prefer GPU when available.
# NOTE(review): the model below is loaded in bfloat16, which is slow or
# unsupported on some CPUs — confirm the CPU fallback actually works.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load model and tokenizer from a GGUF quantization of LLaDA-1.5.
# trust_remote_code=True executes the repository's custom modeling code.
tokenizer = AutoTokenizer.from_pretrained('mradermacher/LLaDA-1.5-GGUF', gguf_file = 'LLaDA-1.5.Q8_0.gguf', trust_remote_code=True)
model = AutoModel.from_pretrained('mradermacher/LLaDA-1.5-GGUF', gguf_file = 'LLaDA-1.5.Q8_0.gguf', trust_remote_code=True,
                                  torch_dtype=torch.bfloat16).to(device)

# Constants
MASK_TOKEN = "[MASK]"  # display string shown for still-masked slots in the visualization
MASK_ID = 126336 # The token ID of [MASK] in LLaDA
20
+
21
def parse_constraints(constraints_text):
    """Parse constraints in format: 'position:word, position:word, ...'

    Entries without a colon, with a non-integer position, with a negative
    position, or with an empty word are silently skipped.
    Returns a dict mapping int position -> word (whitespace-stripped).
    """
    result = {}
    if not constraints_text:
        return result

    for chunk in constraints_text.split(','):
        if ':' not in chunk:
            continue
        pos_part, word_part = chunk.split(':', 1)
        word_part = word_part.strip()
        try:
            position = int(pos_part.strip())
        except ValueError:
            # Non-numeric position — ignore this entry.
            continue
        if word_part and position >= 0:
            result[position] = word_part

    return result
41
+
42
def format_chat_history(history):
    """
    Format chat history for the LLaDA model.

    Args:
        history: List of [user_message, assistant_message] pairs

    Returns:
        List of {"role", "content"} message dicts suitable for the
        tokenizer's chat template.
    """
    formatted = []
    for user_text, assistant_text in history:
        formatted.append({"role": "user", "content": user_text})
        # The newest user turn has no reply yet (None/empty) — skip it.
        if assistant_text:
            formatted.append({"role": "assistant", "content": assistant_text})
    return formatted
59
+
60
def add_gumbel_noise(logits, temperature):
    '''
    Gumbel-max style perturbation for sampling from the token distribution.
    Per arXiv:2409.02908, low-precision Gumbel Max improves perplexity for
    MDM but reduces generation quality, so the computation is in float64.

    With temperature <= 0 the logits are returned untouched (greedy argmax).
    '''
    if temperature <= 0:
        return logits

    logits64 = logits.to(torch.float64)
    uniform = torch.rand_like(logits64, dtype=torch.float64)
    # Exponentiated form: argmax(exp(logits) / (-log(u))^T) matches the
    # temperature-scaled Gumbel-max trick.
    perturbation = (- torch.log(uniform)) ** temperature
    return logits64.exp() / perturbation
73
+
74
def get_num_transfer_tokens(mask_index, steps):
    '''
    In the reverse process the interval [0, 1] is uniformly discretized into
    `steps` intervals. Because LLaDA uses a linear noise schedule (Eq. (8)),
    the expected number of tokens transitioned per step is constant; this
    precomputes that per-step schedule for each row of the batch.

    Args:
        mask_index: bool tensor (batch, length) marking masked positions
        steps: number of denoising steps

    Returns:
        int64 tensor (batch, steps); each row sums to that row's mask count.
    '''
    masked_per_row = mask_index.sum(dim=1, keepdim=True)

    quotient = masked_per_row // steps
    leftover = masked_per_row % steps

    # Start every step at the base quota, then hand one extra token to each
    # of the earliest `leftover` steps so the totals match exactly.
    schedule = torch.zeros(masked_per_row.size(0), steps,
                           device=mask_index.device, dtype=torch.int64) + quotient
    for row in range(masked_per_row.size(0)):
        schedule[row, :leftover[row]] += 1

    return schedule
93
+
94
def generate_response_with_visualization(model, tokenizer, device, messages, gen_length=64, steps=32,
                                         constraints=None, temperature=0.0, cfg_scale=0.0, block_length=32,
                                         remasking='low_confidence'):
    """
    Generate text with the LLaDA diffusion model, recording one visualization
    state per denoising step (same sampling scheme as generate.py).

    Args:
        model: LLaDA model; model(x).logits is read below
        tokenizer: tokenizer providing apply_chat_template/encode/decode
        device: torch device string the input tensors are moved to
        messages: List of message dictionaries with 'role' and 'content'
        gen_length: Length (in tokens) of the response region to generate
        steps: Total number of denoising steps, split across blocks
        constraints: Dict mapping response-relative word positions to words;
            each word is tokenized and pinned at consecutive token positions
        temperature: Sampling temperature for add_gumbel_noise (<=0 = greedy)
        cfg_scale: Classifier-free guidance scale (0 disables the second pass)
        block_length: Block length for semi-autoregressive generation
        remasking: Remasking strategy ('low_confidence' or 'random')

    Returns:
        Tuple (visualization_states, final_text):
        visualization_states is a list of per-step lists of (token, color)
        pairs covering only the gen_length response slots; final_text is the
        decoded assistant response.
    """

    # Process constraints
    if constraints is None:
        constraints = {}

    # Convert any string constraints to token IDs. A leading space is added so
    # the word tokenizes as a word boundary; multi-token words occupy
    # consecutive positions starting at the requested one.
    processed_constraints = {}
    for pos, word in constraints.items():
        tokens = tokenizer.encode(" " + word, add_special_tokens=False)
        for i, token_id in enumerate(tokens):
            processed_constraints[pos + i] = token_id

    # Prepare the prompt using the chat template, then tokenize it.
    chat_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    input_ids = tokenizer(chat_input)['input_ids']
    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)

    # For generation
    prompt_length = input_ids.shape[1]

    # Initialize the sequence: prompt tokens followed by gen_length masks.
    x = torch.full((1, prompt_length + gen_length), MASK_ID, dtype=torch.long).to(device)
    x[:, :prompt_length] = input_ids.clone()

    # Initialize visualization states for the response part
    visualization_states = []

    # Add initial state (all masked)
    initial_state = [(MASK_TOKEN, "#444444") for _ in range(gen_length)]
    visualization_states.append(initial_state)

    # Pin constrained tokens before the first denoising step.
    for pos, token_id in processed_constraints.items():
        absolute_pos = prompt_length + pos
        if absolute_pos < x.shape[1]:
            x[:, absolute_pos] = token_id

    # Mark non-mask positions (prompt + pinned constraints) so classifier-free
    # guidance can re-mask exactly those for its unconditional pass.
    prompt_index = (x != MASK_ID)

    # Ensure block_length is valid
    if block_length > gen_length:
        block_length = gen_length

    # Calculate number of blocks (last block may be shorter).
    num_blocks = gen_length // block_length
    if gen_length % block_length != 0:
        num_blocks += 1

    # Adjust steps per block (at least one step each).
    steps_per_block = steps // num_blocks
    if steps_per_block < 1:
        steps_per_block = 1

    # Track the current state of x for visualization
    # NOTE(review): current_x is never read afterwards — dead variable.
    current_x = x.clone()

    # Process each block (semi-autoregressive: left-to-right over blocks).
    for num_block in range(num_blocks):
        # Calculate the start and end indices for the current block
        block_start = prompt_length + num_block * block_length
        block_end = min(prompt_length + (num_block + 1) * block_length, x.shape[1])

        # Get mask indices for the current block
        block_mask_index = (x[:, block_start:block_end] == MASK_ID)

        # Skip if no masks in this block (e.g. fully covered by constraints)
        if not block_mask_index.any():
            continue

        # Calculate number of tokens to unmask at each step
        num_transfer_tokens = get_num_transfer_tokens(block_mask_index, steps_per_block)

        # Process each step
        for i in range(steps_per_block):
            # Get all mask positions in the current sequence
            mask_index = (x == MASK_ID)

            # Skip if no masks
            if not mask_index.any():
                break

            # Apply classifier-free guidance if enabled: a second forward pass
            # with prompt/constraints masked out provides the unconditional
            # logits; guided logits = uncond + (scale+1) * (cond - uncond).
            if cfg_scale > 0.0:
                un_x = x.clone()
                un_x[prompt_index] = MASK_ID
                x_ = torch.cat([x, un_x], dim=0)
                logits = model(x_).logits
                logits, un_logits = torch.chunk(logits, 2, dim=0)
                logits = un_logits + (cfg_scale + 1) * (logits - un_logits)
            else:
                logits = model(x).logits

            # Apply Gumbel noise for sampling
            logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
            x0 = torch.argmax(logits_with_noise, dim=-1)

            # Calculate confidence scores for remasking
            if remasking == 'low_confidence':
                # Confidence = model probability of the sampled token.
                p = F.softmax(logits.to(torch.float64), dim=-1)
                x0_p = torch.squeeze(
                    torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1) # b, l
            elif remasking == 'random':
                x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
            else:
                raise NotImplementedError(f"Remasking strategy '{remasking}' not implemented")

            # Don't consider positions beyond the current block
            x0_p[:, block_end:] = -float('inf')

            # Apply predictions where we have masks; keep a pre-step snapshot
            # so the visualization can tell "newly revealed" apart.
            old_x = x.clone()
            x0 = torch.where(mask_index, x0, x)
            confidence = torch.where(mask_index, x0_p, -float('inf'))

            # Select tokens to unmask based on confidence
            transfer_index = torch.zeros_like(x0, dtype=torch.bool, device=x0.device)
            for j in range(confidence.shape[0]):
                # Only consider positions within the current block for unmasking
                block_confidence = confidence[j, block_start:block_end]
                if i < steps_per_block - 1:  # Not the last step
                    # Take top-k confidences
                    _, select_indices = torch.topk(block_confidence,
                                                   k=min(num_transfer_tokens[j, i].item(),
                                                         block_confidence.numel()))
                    # Adjust indices to global positions
                    select_indices = select_indices + block_start
                    transfer_index[j, select_indices] = True
                else:  # Last step - unmask everything remaining
                    transfer_index[j, block_start:block_end] = mask_index[j, block_start:block_end]

            # Apply the selected tokens
            x = torch.where(transfer_index, x0, x)

            # Ensure constraints are maintained (re-pin after every step).
            for pos, token_id in processed_constraints.items():
                absolute_pos = prompt_length + pos
                if absolute_pos < x.shape[1]:
                    x[:, absolute_pos] = token_id

            # Create visualization state only for the response part.
            # NOTE(review): this inner loop reuses `i` (the step index) and
            # below reassigns `confidence` (the tensor above). Harmless today
            # because both are re-bound at the top of the next step, but the
            # shadowing is fragile — consider renaming.
            current_state = []
            for i in range(gen_length):
                pos = prompt_length + i  # Absolute position in the sequence

                if x[0, pos] == MASK_ID:
                    # Still masked
                    current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks

                elif old_x[0, pos] == MASK_ID:
                    # Newly revealed in this step
                    token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
                    # Color based on confidence
                    confidence = float(x0_p[0, pos].cpu())
                    if confidence < 0.3:
                        color = "#FF6666"  # Light red
                    elif confidence < 0.7:
                        color = "#FFAA33"  # Orange
                    else:
                        color = "#66CC66"  # Light green

                    current_state.append((token, color))

                else:
                    # Previously revealed
                    token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
                    current_state.append((token, "#6699CC"))  # Light blue

            visualization_states.append(current_state)

    # Extract final text (just the assistant's response)
    response_tokens = x[0, prompt_length:]
    final_text = tokenizer.decode(response_tokens,
                                  skip_special_tokens=True,
                                  clean_up_tokenization_spaces=True)

    return visualization_states, final_text
290
+
291
# CSS tweaks for the Gradio UI: hide HighlightedText's legend, taller buttons.
css = '''
.category-legend{display:none}
button{height: 60px}
'''
def create_chatbot_demo():
    """Build and return the Gradio Blocks demo (chat UI + denoising visualization)."""
    with gr.Blocks(css=css) as demo:
        gr.Markdown("# LLaDA - Large Language Diffusion Model Demo")
        gr.Markdown("[model](https://huggingface.co/GSAI-ML/LLaDA-8B-Instruct), [project page](https://ml-gsai.github.io/LLaDA-demo/)")

        # STATE MANAGEMENT
        # Server-side list of [user_message, assistant_message] pairs.
        chat_history = gr.State([])

        # UI COMPONENTS
        with gr.Row():
            with gr.Column(scale=3):
                chatbot_ui = gr.Chatbot(label="Conversation", height=500)

                # Message input
                with gr.Group():
                    with gr.Row():
                        user_input = gr.Textbox(
                            label="Your Message",
                            placeholder="Type your message here...",
                            show_label=False
                        )
                        send_btn = gr.Button("Send")

                # Free-text constraints; parsed by parse_constraints() later.
                constraints_input = gr.Textbox(
                    label="Word Constraints",
                    info="This model allows for placing specific words at specific positions using 'position:word' format. Example: 1st word once, 6th word 'upon' and 11th word 'time', would be: '0:Once, 5:upon, 10:time",
                    placeholder="0:Once, 5:upon, 10:time",
                    value=""
                )
            with gr.Column(scale=2):
                # (token, color) pairs from the generator render here.
                output_vis = gr.HighlightedText(
                    label="Denoising Process Visualization",
                    combine_adjacent=False,
                    show_legend=True,
                )

        # Advanced generation settings
        with gr.Accordion("Generation Settings", open=False):
            with gr.Row():
                gen_length = gr.Slider(
                    minimum=16, maximum=128, value=64, step=8,
                    label="Generation Length"
                )
                steps = gr.Slider(
                    minimum=8, maximum=64, value=32, step=4,
                    label="Denoising Steps"
                )
            with gr.Row():
                temperature = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.0, step=0.1,
                    label="Temperature"
                )
                cfg_scale = gr.Slider(
                    minimum=0.0, maximum=2.0, value=0.0, step=0.1,
                    label="CFG Scale"
                )
            with gr.Row():
                block_length = gr.Slider(
                    minimum=8, maximum=128, value=32, step=8,
                    label="Block Length"
                )
                remasking_strategy = gr.Radio(
                    choices=["low_confidence", "random"],
                    value="low_confidence",
                    label="Remasking Strategy"
                )
            with gr.Row():
                visualization_delay = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.1, step=0.1,
                    label="Visualization Delay (seconds)"
                )

        # Current response text box (hidden)
        current_response = gr.Textbox(
            label="Current Response",
            placeholder="The assistant's response will appear here...",
            lines=3,
            visible=False
        )

        # Clear button
        clear_btn = gr.Button("Clear Conversation")

        # HELPER FUNCTIONS
        def add_message(history, message, response):
            """Add a message pair to the history and return the updated history"""
            # Shallow copy so the caller's list object is not appended to.
            history = history.copy()
            history.append([message, response])
            return history

        def user_message_submitted(message, history, gen_length, steps, constraints, delay):
            """Process a submitted user message.

            NOTE(review): gen_length/steps/constraints/delay are accepted only
            because the event wiring passes them; they are unused here.
            """
            # Skip empty messages
            if not message.strip():
                # Return current state unchanged
                history_for_display = history.copy()
                return history, history_for_display, "", [], ""

            # Add user message to history (assistant slot left as None)
            history = add_message(history, message, None)

            # Format for display - temporarily show user message with empty response
            history_for_display = history.copy()

            # Clear the input
            message_out = ""

            # Return immediately to update UI with user message
            return history, history_for_display, message_out, [], ""

        def bot_response(history, gen_length, steps, constraints, delay, temperature, cfg_scale, block_length, remasking):
            """Generator: fill in the reply for the latest message, streaming
            one visualization state per denoising step.

            NOTE(review): this function contains `yield`, so the early
            `return history, [], ""` on empty history does NOT emit outputs —
            it just ends the generator. Verify the empty-history UI behavior.
            """
            if not history:
                return history, [], ""

            # Get the last user message
            last_user_message = history[-1][0]

            try:
                # Format all messages except the last one (which has no response yet)
                messages = format_chat_history(history[:-1])

                # Add the last user message
                messages.append({"role": "user", "content": last_user_message})

                # Parse constraints
                parsed_constraints = parse_constraints(constraints)

                # Generate response with visualization
                vis_states, response_text = generate_response_with_visualization(
                    model, tokenizer, device,
                    messages,
                    gen_length=gen_length,
                    steps=steps,
                    constraints=parsed_constraints,
                    temperature=temperature,
                    cfg_scale=cfg_scale,
                    block_length=block_length,
                    remasking=remasking
                )

                # Update history with the assistant's response.
                # NOTE(review): this mutates the gr.State-held list in place
                # since `history` is not an output of this handler — confirm
                # the State stays in sync across turns.
                history[-1][1] = response_text

                # Return the initial state immediately
                yield history, vis_states[0], response_text

                # Then animate through visualization states
                for state in vis_states[1:]:
                    time.sleep(delay)
                    yield history, state, response_text

            except Exception as e:
                error_msg = f"Error: {str(e)}"
                print(error_msg)

                # Show error in visualization
                error_vis = [(error_msg, "red")]

                # Don't update history with error
                yield history, error_vis, error_msg

        def clear_conversation():
            """Clear the conversation history"""
            # Order matches outputs: chat_history, chatbot_ui, current_response, output_vis
            return [], [], "", []

        # EVENT HANDLERS

        # Clear button handler
        clear_btn.click(
            fn=clear_conversation,
            inputs=[],
            outputs=[chat_history, chatbot_ui, current_response, output_vis]
        )

        # User message submission flow (2-step process)
        # Step 1: Add user message to history and update UI
        msg_submit = user_input.submit(
            fn=user_message_submitted,
            inputs=[user_input, chat_history, gen_length, steps, constraints_input, visualization_delay],
            outputs=[chat_history, chatbot_ui, user_input, output_vis, current_response]
        )

        # Also connect the send button
        send_click = send_btn.click(
            fn=user_message_submitted,
            inputs=[user_input, chat_history, gen_length, steps, constraints_input, visualization_delay],
            outputs=[chat_history, chatbot_ui, user_input, output_vis, current_response]
        )

        # Step 2: Generate bot response
        # This happens after the user message is displayed
        msg_submit.then(
            fn=bot_response,
            inputs=[
                chat_history, gen_length, steps, constraints_input,
                visualization_delay, temperature, cfg_scale, block_length,
                remasking_strategy
            ],
            outputs=[chatbot_ui, output_vis, current_response]
        )

        send_click.then(
            fn=bot_response,
            inputs=[
                chat_history, gen_length, steps, constraints_input,
                visualization_delay, temperature, cfg_scale, block_length,
                remasking_strategy
            ],
            outputs=[chatbot_ui, output_vis, current_response]
        )

    return demo
508
+
509
# Launch the demo
if __name__ == "__main__":
    demo = create_chatbot_demo()
    # queue() enables the generator-based streaming handlers;
    # share=True opens a public Gradio tunnel link.
    demo.queue().launch(share=True)