Add support for greedy decoding (#5) — opened by adityastomar
Changed file: modeling_llada2_moe.py (+8 −0)
modeling_llada2_moe.py
CHANGED
|
@@ -1431,6 +1431,14 @@ class LLaDA2MoeModelLM(LLaDA2MoePreTrainedModel, GenerationMixin):
|
|
| 1431 |
orig_shape = logits.shape[:-1]
|
| 1432 |
vocab_size = logits.shape[-1]
|
| 1433 |
logits = logits.reshape(-1, vocab_size)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1434 |
if temperature > 0 and temperature != 1.0:
|
| 1435 |
logits = logits / temperature
|
| 1436 |
logits = self._top_k_logits(logits, top_k)
|
|
|
|
| 1431 |
orig_shape = logits.shape[:-1]
|
| 1432 |
vocab_size = logits.shape[-1]
|
| 1433 |
logits = logits.reshape(-1, vocab_size)
|
| 1434 |
+
|
| 1435 |
+
# Greedy mode: temperature = 0, no top-k/p
|
| 1436 |
+
if temperature == 0.0 and (top_k in (None, 0)) and (top_p is None or top_p >= 1.0):
|
| 1437 |
+
probs = F.softmax(logits, dim=-1)
|
| 1438 |
+
token = logits.argmax(dim=-1, keepdim=True)
|
| 1439 |
+
token_prob = probs.gather(-1, token)
|
| 1440 |
+
return token.view(*orig_shape), token_prob.view(*orig_shape)
|
| 1441 |
+
|
| 1442 |
if temperature > 0 and temperature != 1.0:
|
| 1443 |
logits = logits / temperature
|
| 1444 |
logits = self._top_k_logits(logits, top_k)
|