Upload app1.py
app1.py
CHANGED
@@ -1,796 +1,80 @@
-# Constants
-MASK_TOKEN = "[MASK]"
-MASK_ID = 126336 # The token ID of [MASK] in LLaDA
-question_gsm8k = '''Question: Jen and Tyler are gymnasts practicing flips. Jen is practicing the triple-flip while Tyler is practicing the double-flip. Jen did sixteen triple-flips during practice. Tyler flipped in the air half the number of times Jen did. How many double-flips did Tyler do?
-Answer: Jen did 16 triple-flips, so she did 16 * 3 = <<16*3=48>>48 flips.
-Tyler did half the number of flips, so he did 48 / 2 = <<48/2=24>>24 flips.
-A double flip has two flips, so Tyler did 24 / 2 = <<24/2=12>>12 double-flips.
-#### 12
-
-Question: Four people in a law firm are planning a party. Mary will buy a platter of pasta for $20 and a loaf of bread for $2. Elle and Andrea will split the cost for buying 4 cans of soda which cost $1.50 each, and chicken wings for $10. Joe will buy a cake that costs $5. How much more will Mary spend than the rest of the firm put together?
-Answer: Mary will spend $20 + $2 = $<<20+2=22>>22.
-Elle and Andrea will spend $1.5 x 4 = $<<1.5*4=6>>6 for the soda.
-Elle and Andrea will spend $6 + $10 = $<<6+10=16>>16 for the soda and chicken wings.
-Elle, Andrea, and Joe together will spend $16 + $5 = $<<16+5=21>>21.
-So, Mary will spend $22 - $21 = $<<22-21=1>>1 more than all of them combined.
-#### 1
-
-Question: A charcoal grill burns fifteen coals to ash every twenty minutes of grilling. The grill ran for long enough to burn three bags of coals. Each bag of coal contains 60 coals. How long did the grill run?
-Answer: The grill burned 3 * 60 = <<3*60=180>>180 coals.
-It takes 20 minutes to burn 15 coals, so the grill ran for 180 / 15 * 20 = <<180/15*20=240>>240 minutes.
-#### 240
-
-Question: A bear is preparing to hibernate for the winter and needs to gain 1000 pounds. At the end of summer, the bear feasts on berries and small woodland animals. During autumn, it devours acorns and salmon. It gained a fifth of the weight it needed from berries during summer, and during autumn, it gained twice that amount from acorns. Salmon made up half of the remaining weight it had needed to gain. How many pounds did it gain eating small animals?
-Answer: The bear gained 1 / 5 * 1000 = <<1/5*1000=200>>200 pounds from berries.
-It gained 2 * 200 = <<2*200=400>>400 pounds from acorns.
-It still needed 1000 - 200 - 400 = <<1000-200-400=400>>400 pounds.
-Thus, it gained 400 / 2 = <<400/2=200>>200 pounds from salmon.
-Therefore, the bear gained 400 - 200 = <<400-200=200>>200 pounds from small animals.
-#### 200
-
-Question: Brendan can cut 8 yards of grass per day, he bought a lawnmower and it helped him to cut more yards by Fifty percent per day. How many yards will Brendan be able to cut after a week?
-Answer: The additional yard Brendan can cut after buying the lawnmower is 8 x 0.50 = <<8*0.50=4>>4 yards.
-So, the total yards he can cut with the lawnmower is 8 + 4 = <<8+4=12>>12.
-Therefore, the total number of yards he can cut in a week is 12 x 7 = <<12*7=84>>84 yards.
-#### 84
-
-Question: Skyler has 100 hats on his hand with the colors red, blue, and white. Half of the hats are red, 3/5 of the remaining hats are blue, and the rest are white. How many white hats does Skyler have?'''
-
-def parse_constraints(constraints_text):
-    """Parse constraints in format: 'position:word, position:word, ...'"""
-    constraints = {}
-    if not constraints_text:
-        return constraints
-
-    parts = constraints_text.split(',')
-    for part in parts:
-        if ':' not in part:
-            continue
-        pos_str, word = part.split(':', 1)
-        try:
-            pos = int(pos_str.strip())
-            word = word.strip()
-            if word and pos >= 0:
-                constraints[pos] = word
-        except ValueError:
-            continue
-
-    return constraints
-
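For reference, a minimal sketch of how the removed parse_constraints helper was meant to be used; the input string here is just the placeholder example that appears in the UI further down, not output from the app itself:

constraints = parse_constraints("0:Once, 5:upon, 10:time")
print(constraints)  # {0: 'Once', 5: 'upon', 10: 'time'}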
-def format_chat_history(history):
-    """
-    Format chat history for the LLaDA model
-
-    Args:
-        history: List of [user_message, assistant_message] pairs
-
-    Returns:
-        Formatted conversation for the model
-    """
-    messages = []
-    for user_msg, assistant_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        if assistant_msg:  # Skip if None (for the latest user message)
-            messages.append({"role": "assistant", "content": assistant_msg})
-
-    return messages
-
-def add_gumbel_noise(logits, temperature):
-    '''
-    The Gumbel max is a method for sampling categorical distributions.
-    According to arXiv:2409.02908, for MDM, low-precision Gumbel Max improves perplexity score but reduces generation quality.
-    Thus, we use float64.
-    '''
-    if temperature <= 0:
-        return logits
-
-    logits = logits.to(torch.float64)
-    noise = torch.rand_like(logits, dtype=torch.float64)
-    gumbel_noise = (- torch.log(noise)) ** temperature
-    return logits.exp() / gumbel_noise
-
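As a sanity check on the Gumbel-max trick used above (a small standalone sketch, not part of the app): with temperature 1, taking the argmax of exp(logits) divided by the exponential noise samples tokens with the same frequencies as the softmax distribution.

import torch

logits = torch.tensor([2.0, 1.0, 0.5])
counts = torch.zeros(3)
for _ in range(10000):
    noise = torch.rand_like(logits, dtype=torch.float64)
    gumbel = (-torch.log(noise)) ** 1.0                  # temperature = 1
    counts[torch.argmax(logits.exp() / gumbel)] += 1
print(counts / counts.sum())  # approximately softmax(logits) = [0.63, 0.23, 0.14]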
-def get_num_transfer_tokens(mask_index, steps):
-    '''
-    In the reverse process, the interval [0, 1] is uniformly discretized into steps intervals.
-    Furthermore, because LLaDA employs a linear noise schedule (as defined in Eq. (8)),
-    the expected number of tokens transitioned at each step should be consistent.
-
-    This function is designed to precompute the number of tokens that need to be transitioned at each step.
-    '''
-    mask_num = mask_index.sum(dim=1, keepdim=True)
-
-    base = mask_num // steps
-    remainder = mask_num % steps
-
-    num_transfer_tokens = torch.zeros(mask_num.size(0), steps, device=mask_index.device, dtype=torch.int64) + base
-
-    for i in range(mask_num.size(0)):
-        num_transfer_tokens[i, :remainder[i]] += 1
-
-    return num_transfer_tokens
-
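A quick worked example of the schedule this computes, assuming the helper above is in scope: with 10 masked positions and 4 steps, base is 2 and remainder is 2, so the per-step counts come out as [3, 3, 2, 2].

import torch

mask_index = torch.ones(1, 10, dtype=torch.bool)     # 10 masked tokens in one sequence
print(get_num_transfer_tokens(mask_index, steps=4))  # tensor([[3, 3, 2, 2]])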
-def generate_response_with_visualization_cache_and_parallel(model, tokenizer, device, messages, gen_length=64, steps=32,
-                                                            constraints=None, temperature=0.0, block_length=32,
-                                                            remasking='low_confidence', threshold=0.9):
-    """
-    Generate text with LLaDA model with visualization using the same sampling as in generate.py
-
-    Args:
-        messages: List of message dictionaries with 'role' and 'content'
-        gen_length: Length of text to generate
-        steps: Number of denoising steps
-        constraints: Dictionary mapping positions to words
-        temperature: Sampling temperature
-        block_length: Block length for semi-autoregressive generation
-        remasking: Remasking strategy ('low_confidence' or 'random')
-
-    Returns:
-        List of visualization states showing the progression and final text
-    """
-
-    # Process constraints
-    if constraints is None:
-        constraints = {}
-
-    # Convert any string constraints to token IDs
-    processed_constraints = {}
-    for pos, word in constraints.items():
-        tokens = tokenizer.encode(" " + word, add_special_tokens=False)
-        for i, token_id in enumerate(tokens):
-            processed_constraints[pos + i] = token_id
-
-    # Prepare the prompt using chat template
-    chat_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
-    input_ids = tokenizer(chat_input)['input_ids']
-    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
-
-    # For generation
-    prompt_length = input_ids.shape[1]
-
-    # Initialize the sequence with masks for the response part
-    x = torch.full((1, prompt_length + gen_length), MASK_ID, dtype=torch.long).to(device)
-    x[:, :prompt_length] = input_ids.clone()
-
-    # Initialize visualization states for the response part
-    visualization_states = []
-
-    # Add initial state (all masked)
-    initial_state = [(MASK_TOKEN, "#444444") for _ in range(gen_length)]
-    visualization_states.append(initial_state)
-
-    # Apply constraints to the initial state
-    for pos, token_id in processed_constraints.items():
-        absolute_pos = prompt_length + pos
-        if absolute_pos < x.shape[1]:
-            x[:, absolute_pos] = token_id
-
-    # Ensure block_length is valid
-    if block_length > gen_length:
-        block_length = gen_length
-
-    # Calculate number of blocks
-    num_blocks = gen_length // block_length
-    if gen_length % block_length != 0:
-        num_blocks += 1
-
-    # Adjust steps per block
-    steps_per_block = steps // num_blocks
-    if steps_per_block < 1:
-        steps_per_block = 1
-
-    # Process each block
-    for num_block in range(num_blocks):
-        current_block_start = prompt_length + num_block * block_length
-        current_block_end = current_block_start + block_length
-
-        block_mask_index = (x[:, current_block_start:current_block_end] == MASK_ID)
-        num_transfer_tokens = get_num_transfer_tokens(block_mask_index, steps)
-
-        output = model(x, use_cache=True)
-        past_key_values = output.past_key_values
-
-        x[transfer_index] = x0[transfer_index]
-
-            else:
-
-        visualization_states.append(current_state)
-        i = 1
-        while True:
-            mask_index = (x[:, current_block_start:] == MASK_ID)
-            mask_index[:, block_length:] = 0
-
-            logits = model(x[:, current_block_start:], past_key_values=past_key_values, use_cache=True).logits
-
-            logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
-            x0 = torch.argmax(logits_with_noise, dim=-1) # b, l
-
-            x0, transfer_index = get_transfer_index(logits, temperature, remasking, mask_index,
-                                                    x[:, current_block_start:], num_transfer_tokens[:, i] if threshold is None else None, threshold)
-            x[:, current_block_start:][transfer_index] = x0[transfer_index]
-            # Create visualization state only for the response part
-            current_state = []
-            for i in range(gen_length):
-                pos = prompt_length + i  # Absolute position in the sequence
-
-                if x[0, pos] == MASK_ID:
-                    # Still masked
-                    current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks
-                else:
-                    # Previously revealed
-                    token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
-                    current_state.append((token, "#6699CC"))  # Light blue
-
-            visualization_states.append(current_state)
-            if (x[:, current_block_start:current_block_end] == MASK_ID).sum() == 0:
-                break
-            i += 1
-
-    # Extract final text (just the assistant's response)
-    response_tokens = x[0, prompt_length:]
-    final_text = tokenizer.decode(response_tokens,
-                                  skip_special_tokens=True,
-                                  clean_up_tokenization_spaces=True)
-
-    return visualization_states, final_text
-
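The block bookkeeping in the function above splits the response into fixed-size chunks and divides the step budget evenly across them. A minimal sketch of that arithmetic, using the default slider values from the demo settings further down (gen_length=256, block_length=32, steps=256):

gen_length, block_length, steps = 256, 32, 256
num_blocks = gen_length // block_length          # 8
if gen_length % block_length != 0:
    num_blocks += 1
steps_per_block = max(steps // num_blocks, 1)    # 32
print(num_blocks, steps_per_block)               # 8 32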
-def get_transfer_index(logits, temperature, remasking, mask_index, x, num_transfer_tokens, threshold=None):
-    logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
-    x0 = torch.argmax(logits_with_noise, dim=-1) # b, l
-
-    if remasking == 'low_confidence':
-        p = F.softmax(logits.to(torch.float64), dim=-1)
-        x0_p = torch.squeeze(
-            torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1) # b, l
-    elif remasking == 'random':
-        x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
-    else:
-        raise NotImplementedError(remasking)
-
-    x0 = torch.where(mask_index, x0, x)
-    confidence = torch.where(mask_index, x0_p, -np.inf)
-
-                if confidence[j, select_index[k]] < threshold:
-                    transfer_index[j, select_index[k]] = False
-    return x0, transfer_index
-
-
-def generate_response_with_visualization(model, tokenizer, device, messages, gen_length=64, steps=32,
-                                         constraints=None, temperature=0.0, block_length=32,
-                                         remasking='low_confidence'):
-    """
-    Generate text with LLaDA model with visualization using the same sampling as in generate.py
-
-    Args:
-        messages: List of message dictionaries with 'role' and 'content'
-        gen_length: Length of text to generate
-        steps: Number of denoising steps
-        constraints: Dictionary mapping positions to words
-        temperature: Sampling temperature
-        block_length: Block length for semi-autoregressive generation
-        remasking: Remasking strategy ('low_confidence' or 'random')
-
-    Returns:
-        List of visualization states showing the progression and final text
-    """
-
-    # Process constraints
-    if constraints is None:
-        constraints = {}
-
-    # Convert any string constraints to token IDs
-    processed_constraints = {}
-    for pos, word in constraints.items():
-        tokens = tokenizer.encode(" " + word, add_special_tokens=False)
-        for i, token_id in enumerate(tokens):
-            processed_constraints[pos + i] = token_id
-
-    # Prepare the prompt using chat template
-    chat_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
-    input_ids = tokenizer(chat_input)['input_ids']
-    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
-
-    # For generation
-    prompt_length = input_ids.shape[1]
-
-    # Initialize the sequence with masks for the response part
-    x = torch.full((1, prompt_length + gen_length), MASK_ID, dtype=torch.long).to(device)
-    x[:, :prompt_length] = input_ids.clone()
-
-    # Initialize visualization states for the response part
-    visualization_states = []
-
-    # Add initial state (all masked)
-    initial_state = [(MASK_TOKEN, "#444444") for _ in range(gen_length)]
-    visualization_states.append(initial_state)
-
-    # Apply constraints to the initial state
-    for pos, token_id in processed_constraints.items():
-        absolute_pos = prompt_length + pos
-        if absolute_pos < x.shape[1]:
-            x[:, absolute_pos] = token_id
-
-    # Mark prompt positions to exclude them from masking during classifier-free guidance
-    prompt_index = (x != MASK_ID)
-
-    # Ensure block_length is valid
-    if block_length > gen_length:
-        block_length = gen_length
-
-    # Calculate number of blocks
-    num_blocks = gen_length // block_length
-    if gen_length % block_length != 0:
-        num_blocks += 1
-
-    # Adjust steps per block
-    steps_per_block = steps // num_blocks
-    if steps_per_block < 1:
-        steps_per_block = 1
-
-    # Process each block
-    for num_block in range(num_blocks):
-        # Calculate the start and end indices for the current block
-        block_start = prompt_length + num_block * block_length
-        block_end = min(prompt_length + (num_block + 1) * block_length, x.shape[1])
-
-        # Get mask indices for the current block
-        block_mask_index = (x[:, block_start:block_end] == MASK_ID)
-
-        # Skip if no masks in this block
-        if not block_mask_index.any():
-            continue
-
-        # Calculate number of tokens to unmask at each step
-        num_transfer_tokens = get_num_transfer_tokens(block_mask_index, steps_per_block)
-
-        # Process each step
-        for i in range(steps_per_block):
-            # Get all mask positions in the current sequence
-            mask_index = (x == MASK_ID)
-
-            # Skip if no masks
-            if not mask_index.any():
-                break
-
-            # Get logits from model
-            logits = model(x).logits
-
-            # Apply Gumbel noise for sampling
-            logits_with_noise = add_gumbel_noise(logits, temperature=temperature)
-            x0 = torch.argmax(logits_with_noise, dim=-1)
-
-            # Calculate confidence scores for remasking
-            if remasking == 'low_confidence':
-                p = F.softmax(logits.to(torch.float64), dim=-1)
-                x0_p = torch.squeeze(
-                    torch.gather(p, dim=-1, index=torch.unsqueeze(x0, -1)), -1) # b, l
-            elif remasking == 'random':
-                x0_p = torch.rand((x0.shape[0], x0.shape[1]), device=x0.device)
-            else:
-                raise NotImplementedError(f"Remasking strategy '{remasking}' not implemented")
-
-            # Don't consider positions beyond the current block
-            x0_p[:, block_end:] = -float('inf')
-
-            # Apply predictions where we have masks
-            old_x = x.clone()
-            x0 = torch.where(mask_index, x0, x)
-            confidence = torch.where(mask_index, x0_p, -float('inf'))
-
-            # Select tokens to unmask based on confidence
-            transfer_index = torch.zeros_like(x0, dtype=torch.bool, device=x0.device)
-            for j in range(confidence.shape[0]):
-                # Only consider positions within the current block for unmasking
-                block_confidence = confidence[j, block_start:block_end]
-                if i < steps_per_block - 1:  # Not the last step
-                    # Take top-k confidences
-                    _, select_indices = torch.topk(block_confidence,
-                                                   k=min(num_transfer_tokens[j, i].item(),
-                                                         block_confidence.numel()))
-                    # Adjust indices to global positions
-                    select_indices = select_indices + block_start
-                    transfer_index[j, select_indices] = True
-                else:  # Last step - unmask everything remaining
-                    transfer_index[j, block_start:block_end] = mask_index[j, block_start:block_end]
-
-            # Apply the selected tokens
-            x = torch.where(transfer_index, x0, x)
-
-            # Ensure constraints are maintained
-            for pos, token_id in processed_constraints.items():
-                absolute_pos = prompt_length + pos
-                if absolute_pos < x.shape[1]:
-                    x[:, absolute_pos] = token_id
-
-            # Create visualization state only for the response part
-            current_state = []
-            for i in range(gen_length):
-                pos = prompt_length + i  # Absolute position in the sequence
-
-                if x[0, pos] == MASK_ID:
-                    # Still masked
-                    current_state.append((MASK_TOKEN, "#444444"))  # Dark gray for masks
-                else:
-                    # Previously revealed
-                    token = tokenizer.decode([x[0, pos].item()], skip_special_tokens=True)
-                    current_state.append((token, "#6699CC"))  # Light blue
-
-            visualization_states.append(current_state)
-
-    # Extract final text (just the assistant's response)
-    response_tokens = x[0, prompt_length:]
-    final_text = tokenizer.decode(response_tokens,
-                                  skip_special_tokens=True,
-                                  clean_up_tokenization_spaces=True)
-
-    return visualization_states, final_text
-
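To make the 'low_confidence' remasking concrete: at each step the masked positions whose predicted tokens have the highest softmax probability are committed first. A self-contained toy sketch of that ordering (illustrative only, not the demo's exact code):

import torch
import torch.nn.functional as F

logits = torch.tensor([[[4.0, 0.1, 0.1],      # position 0: very confident
                        [1.0, 0.9, 0.8],      # position 1: uncertain
                        [0.2, 3.0, 0.1]]])    # position 2: confident
x0 = logits.argmax(dim=-1)                               # predicted token per position
p = F.softmax(logits, dim=-1)
confidence = p.gather(-1, x0.unsqueeze(-1)).squeeze(-1)  # probability of each prediction
_, order = confidence[0].sort(descending=True)
print(order.tolist())  # [0, 2, 1] -> unmask position 0 first, position 1 last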
-css = '''
-.category-legend{display:none}
-.message, .bubble, .chatbot .message, .chatbot .bubble {
-    max-width: 80% !important;
-    white-space: pre-wrap !important;
-    word-break: break-word !important;
-    box-sizing: border-box !important;
-}
-'''
-def create_chatbot_demo():
-    with gr.Blocks(css=css) as demo:
-        gr.Markdown("# Fast-dLLM: Training-free Acceleration of Diffusion LLM by Enabling KV Cache and Parallel Decoding")
-        gr.Markdown("[code](https://github.com/NVlabs/Fast-dLLM), [project page](https://nvlabs.github.io/Fast-dLLM/)")
-
-        # STATE MANAGEMENT
-        chat_history_baseline = gr.State([])
-        chat_history_cache = gr.State([])
-
-        # UI COMPONENTS
-        with gr.Row():
-            with gr.Column(scale=3):
-                chatbot_ui = gr.Chatbot(label="Conversation", height=500)
-            with gr.Column(scale=2):
-                output_vis = gr.HighlightedText(
-                    label="Denoising Process Visualization",
-                    combine_adjacent=False,
-                    show_legend=True,
-                )
-                generation_time = gr.Textbox(
-                    label="Generation Time",
-                    value="0.00s",
-                    interactive=False
-                )
-                throughput = gr.Textbox(
-                    label="Generation Speed",
-                    value="0.00 tokens/s",
-                    interactive=False
-                )
-
-        # Add separator line
-        gr.Markdown("---")
-
-        # Duplicate conversation interface
-        with gr.Row():
-            with gr.Column(scale=3):
-                chatbot_ui_copy = gr.Chatbot(label="Conversation (Accelerated)", height=500)
-            with gr.Column(scale=2):
-                output_vis_copy = gr.HighlightedText(
-                    label="Denoising Process Visualization",
-                    combine_adjacent=False,
-                    show_legend=True,
-                )
-                generation_time_copy = gr.Textbox(
-                    label="Generation Time",
-                    value="0.00s",
-                    interactive=False
-                )
-                throughput_copy = gr.Textbox(
-                    label="Generation Speed",
-                    value="0.00 tokens/s",
-                    interactive=False
-                )
-        # Move input area below the duplicate conversation interface
-        with gr.Group():
-            user_input = gr.Textbox(
-                label="Your Message",
-                placeholder="Type your message here...",
-                show_label=False
-            )
-            send_btn = gr.Button("Send")
-            constraints_input = gr.Textbox(
-                label="Word Constraints",
-                info="This model allows for placing specific words at specific positions using 'position:word' format. Example: 1st word once, 6th word 'upon' and 11th word 'time', would be: '0:Once, 5:upon, 10:time",
-                placeholder="0:Once, 5:upon, 10:time",
-                value=""
-            )
-            gr.Examples(
-                examples=[
-                    [question_gsm8k]
-                ],
-                inputs=user_input,
-                label="Example Inputs"
-            )
-
-        # Advanced generation settings
-        with gr.Accordion("Generation Settings", open=False):
-            with gr.Row():
-                gen_length = gr.Slider(
-                    minimum=64, maximum=1024, value=256, step=64,
-                    label="Generation Length"
-                )
-                steps = gr.Slider(
-                    minimum=8, maximum=1024, value=256, step=4,
-                    label="Denoising Steps"
-                )
-            with gr.Row():
-                temperature = gr.Slider(
-                    minimum=0.0, maximum=1.0, value=0.0, step=0.1,
-                    label="Temperature"
-                )
-                threshold = gr.Slider(
-                    minimum=0.5, maximum=1.0, value=0.9, step=0.1,
-                    label="Threshold"
-                )
-            with gr.Row():
-                block_length = gr.Slider(
-                    minimum=8, maximum=128, value=32, step=8,
-                    label="Block Length"
-                )
-                remasking_strategy = gr.Radio(
-                    choices=["low_confidence", "random"],
-                    value="low_confidence",
-                    label="Remasking Strategy"
-                )
-            with gr.Row():
-                visualization_delay = gr.Slider(
-                    minimum=0.0, maximum=1.0, value=0.1, step=0.1,
-                    label="Visualization Delay (seconds)"
-                )
-
-        # Current response text box (hidden)
-        current_response = gr.Textbox(
-            label="Current Response",
-            placeholder="The assistant's response will appear here...",
-            lines=3,
-            visible=False
-        )
-
-        # Clear button
-        clear_btn = gr.Button("Clear Conversation")
-
-        # HELPER FUNCTIONS
-        def add_message(history, message, response):
-            """Add a message pair to the history and return the updated history"""
-            history = history.copy()
-            history.append([message, response])
-            return history
-
-        def user_message_submitted(message, history_baseline, history_cache, gen_length, steps, constraints, delay):
-            """Process a submitted user message"""
-            # Skip empty messages
-            if not message.strip():
-                # Return current state unchanged
-                history_baseline_for_display = history_baseline.copy()
-                history_cache_for_display = history_cache.copy()
-                return history_baseline, history_cache, history_baseline_for_display, history_cache_for_display, "", [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
-
-            # Add user message to both histories
-            history_baseline = add_message(history_baseline, message, None)
-            history_cache = add_message(history_cache, message, None)
-
-            # Format for display - temporarily show user message with empty response
-            history_baseline_for_display = history_baseline.copy()
-            history_cache_for_display = history_cache.copy()
-
-            # Clear the input
-            message_out = ""
-
-            # Return immediately to update UI with user message
-            return history_baseline, history_cache, history_baseline_for_display, history_cache_for_display, message_out, [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
-
-        def bot_response(history_baseline, history_cache, gen_length, steps, constraints, delay, temperature, block_length, remasking, threshold):
-            """Generate bot response for the latest message"""
-            if not history_baseline or not history_cache:
-                return history_baseline, history_cache, [], [], "", "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
-
-            # Get the last user message
-            last_user_message = history_baseline[-1][0]
-
-            try:
-                # Format all messages except the last one (which has no response yet)
-                messages = format_chat_history(history_baseline[:-1])
-
-                # Add the last user message
-                messages.append({"role": "user", "content": last_user_message})
-
-                # Parse constraints
-                parsed_constraints = parse_constraints(constraints)
-
-                # Start timing for baseline
-                start_time = time.time()
-
-                # Generate response with visualization for baseline
-                vis_states, response_text = generate_response_with_visualization(
-                    model, tokenizer, device,
-                    messages,
-                    gen_length=gen_length,
-                    steps=steps,
-                    constraints=parsed_constraints,
-                    temperature=temperature,
-                    block_length=block_length,
-                    remasking=remasking,
-                )
-
-                # Calculate generation time and throughput for baseline
-                generation_time = time.time() - start_time
-                generation_time_str = f"{generation_time:.2f}s"
-
-                # Calculate throughput for baseline
-                response_tokens = tokenizer.encode(response_text, add_special_tokens=False)
-                num_tokens = len(response_tokens)
-                throughput = num_tokens / generation_time if generation_time > 0 else 0
-                throughput_str = f"{throughput:.2f} tokens/s"
-
-                # Start timing for cache version
-                cache_start_time = time.time()
-                cache_vis_states, cache_response_text = generate_response_with_visualization_cache_and_parallel(
-                    model, tokenizer, device,
-                    messages,
-                    gen_length=gen_length,
-                    steps=steps,
-                    constraints=parsed_constraints,
-                    temperature=temperature,
-                    block_length=block_length,
-                    remasking=remasking,
-                    threshold=threshold
-                )
-                cache_generation_time = time.time() - cache_start_time
-                cache_generation_time_str = f"{cache_generation_time:.2f}s"
-                cache_response_tokens = tokenizer.encode(cache_response_text, add_special_tokens=False)
-                cache_num_tokens = len(cache_response_tokens)
-                cache_throughput = cache_num_tokens / cache_generation_time if cache_generation_time > 0 else 0
-                cache_throughput_str = f"{cache_throughput:.2f} tokens/s"
-
-                # Update both histories with their respective responses
-                history_baseline[-1][1] = response_text
-                history_cache[-1][1] = cache_response_text
-
-                # Return the initial state immediately
-                yield history_baseline, history_cache, vis_states[0], cache_vis_states[0], response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
-
-                # Then animate through visualization states
-                for state in vis_states[1:]:
-                    time.sleep(delay)
-                    yield history_baseline, history_cache, state, cache_vis_states[0], response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
-
-                for state in cache_vis_states[1:]:
-                    time.sleep(delay)
-                    yield history_baseline, history_cache, vis_states[-1], state, response_text, generation_time_str, throughput_str, cache_generation_time_str, cache_throughput_str
-
-            except Exception as e:
-                error_msg = f"Error: {str(e)}"
-                print(error_msg)
-
-                # Show error in visualization
-                error_vis = [(error_msg, "red")]
-
-                # Don't update histories with error
-                yield history_baseline, history_cache, error_vis, error_vis, error_msg, "0.00s", "0.00 tokens/s", "0.00s", "0.00 tokens/s"
-
-        def clear_conversation():
-            """Clear the conversation history"""
-            empty_history = []
-            empty_response = ""
-            empty_vis = []
-            time_str = "0.00s"
-            throughput_str = "0.00 tokens/s"
-
-            return (
-                empty_history,    # chat_history_baseline
-                empty_history,    # chat_history_cache
-                empty_history,    # chatbot_ui
-                empty_history,    # chatbot_ui_copy
-                empty_response,   # current_response
-                empty_vis,        # output_vis
-                time_str,         # generation_time
-                throughput_str,   # throughput
-                empty_vis,        # output_vis_copy
-                time_str,         # generation_time_copy
-                throughput_str    # throughput_copy
-            )
-
-        # EVENT HANDLERS
-
-        # Clear button handler
-        clear_btn.click(
-            fn=clear_conversation,
-            inputs=[],
-            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, current_response, output_vis, generation_time, throughput, output_vis_copy, generation_time_copy, throughput_copy]
-        )
-
-        # User message submission flow (2-step process)
-        # Step 1: Add user message to history and update UI
-        msg_submit = user_input.submit(
-            fn=user_message_submitted,
-            inputs=[user_input, chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input, visualization_delay],
-            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, user_input, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
-        )
-
-        # Also connect the send button
-        send_click = send_btn.click(
-            fn=user_message_submitted,
-            inputs=[user_input, chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input, visualization_delay],
-            outputs=[chat_history_baseline, chat_history_cache, chatbot_ui, chatbot_ui_copy, user_input, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
-        )
-
-        # Step 2: Generate bot response
-        # This happens after the user message is displayed
-        msg_submit.then(
-            fn=bot_response,
-            inputs=[
-                chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input,
-                visualization_delay, temperature, block_length,
-                remasking_strategy, threshold
-            ],
-            outputs=[chatbot_ui, chatbot_ui_copy, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
-        )
-
-        send_click.then(
-            fn=bot_response,
-            inputs=[
-                chat_history_baseline, chat_history_cache, gen_length, steps, constraints_input,
-                visualization_delay, temperature, block_length,
-                remasking_strategy, threshold
-            ],
-            outputs=[chatbot_ui, chatbot_ui_copy, output_vis, output_vis_copy, current_response, generation_time, throughput, generation_time_copy, throughput_copy]
-        )
-
-    return demo
-
-# Launch the demo
-if __name__ == "__main__":
-    demo = create_chatbot_demo()
-    demo.queue().launch(share=True)
+# Copyright 2025 NVIDIA CORPORATION & AFFILIATES
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# Modified from LLaDA repos: https://github.com/ML-GSAI/LLaDA
+
+import torch
+import argparse
+
+from generate import generate, generate_with_prefix_cache, generate_with_dual_cache
+from transformers import AutoTokenizer, AutoModel
+from model.modeling_llada import LLaDAModelLM
+
+def chat(args):
+    model = LLaDAModelLM.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True, torch_dtype=torch.float16, device_map='auto').eval()
+    tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
+    device = next(iter(model.parameters())).device.type
+
+    gen_length = args.gen_length
+    steps = args.steps
+    print('*' * 66)
+    print(f'** Answer Length: {gen_length} | Sampling Steps: {steps} **')
+    print('*' * 66)
+
+    conversation_num = 0
+    while True:
+        user_input = input("Enter your question: ")
+
+        m = [{"role": "user", "content": user_input}]
+        user_input = tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False)
+        input_ids = tokenizer(user_input)['input_ids']
+        input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
+
+        if conversation_num == 0:
+            prompt = input_ids
+        else:
+            prompt = torch.cat([prompt, input_ids[:, 1:]], dim=1)
+        print(f'use cache: {args.use_cache} use cache position: {args.if_cache_position} threshold: {args.threshold} block size: {args.block_size}')
+        if args.use_cache:
+            if args.if_cache_position:
+                out, nfe = generate_with_dual_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+            else:
+                out, nfe = generate_with_prefix_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+        else:
+            out, nfe = generate(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+
+        answer = tokenizer.batch_decode(out[:, prompt.shape[1]:], skip_special_tokens=True)[0]
+        print(f"Bot's reply: {answer}")
+        print(f"Number of forward passes: {nfe}")
+
+        # remove the <EOS>
+        prompt = out[out != 126081].unsqueeze(0)
+        conversation_num += 1
+        print('-----------------------------------------------------------------------')
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--gen_length", type=int, default=128)
+    parser.add_argument("--steps", type=int, default=128)
+    parser.add_argument("--block_size", type=int, default=32)
+    parser.add_argument("--use_cache", action="store_true")
+    parser.add_argument("--if_cache_position", action="store_true")
+    parser.add_argument("--threshold", type=float, default=None)
+
+    args = parser.parse_args()
+    chat(args)
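The new script is driven from the command line through the argparse flags above; equivalently, chat can be called directly. A minimal sketch, assuming the Fast-dLLM repo layout so that generate.py and model/modeling_llada.py are importable, with 0.9 used only as an example threshold:

# equivalent to: python app1.py --use_cache --if_cache_position --threshold 0.9
from argparse import Namespace
from app1 import chat

chat(Namespace(gen_length=128, steps=128, block_size=32,
               use_cache=True, if_cache_position=True, threshold=0.9))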