""" Kimi-K2 Model Implementation for nanoKimi This is the main model implementation that combines all the Kimi-K2 innovations: - Latent Attention for memory efficiency - Mixture of Experts for sparse scaling - Compatible with Muon optimizer """ import torch import torch.nn as nn import torch.nn.functional as F import math import inspect from attention import LatentAttention, MultiHeadAttention from moe import MoELayer, StandardFFN class LayerNorm(nn.Module): """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False""" def __init__(self, ndim, bias): super().__init__() self.weight = nn.Parameter(torch.ones(ndim)) self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None def forward(self, input): return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) class KimiBlock(nn.Module): """ Kimi-K2 Transformer Block Combines the innovations of Kimi-K2: - Latent Attention (optional) - Mixture of Experts (optional) - Standard layer normalization and residual connections """ def __init__(self, config): super().__init__() # Layer normalization self.ln_1 = LayerNorm(config['n_embd'], bias=config['bias']) self.ln_2 = LayerNorm(config['n_embd'], bias=config['bias']) # Attention layer if config.get('use_latent_attention', False): self.attn = LatentAttention( n_embd=config['n_embd'], n_head=config['n_head'], latent_dim=config.get('latent_dim', 64), dropout=config['dropout'], bias=config['bias'] ) else: self.attn = MultiHeadAttention( n_embd=config['n_embd'], n_head=config['n_head'], dropout=config['dropout'], bias=config['bias'] ) # Feed-forward layer if config.get('use_moe', False): self.mlp = MoELayer( n_embd=config['n_embd'], num_experts=config.get('num_experts', 8), expert_capacity=config.get('expert_capacity', 32), top_k=config.get('top_k_experts', 2), dropout=config['dropout'], bias=config['bias'] ) else: self.mlp = StandardFFN( n_embd=config['n_embd'], dropout=config['dropout'], bias=config['bias'] ) def forward(self, x): # Attention with residual connection x = x + self.attn(self.ln_1(x)) # MLP with residual connection mlp_out, load_balance_loss = self.mlp(self.ln_2(x)) x = x + mlp_out return x, load_balance_loss class KimiK2(nn.Module): """ Kimi-K2 Model A transformer model incorporating the key innovations from Kimi-K2: - Latent Attention for memory efficiency - Mixture of Experts for sparse scaling - Optimized for use with Muon optimizer """ def __init__(self, config): super().__init__() assert config['vocab_size'] is not None assert config['block_size'] is not None self.config = config # Embedding layers self.transformer = nn.ModuleDict(dict( wte = nn.Embedding(config['vocab_size'], config['n_embd']), # token embeddings wpe = nn.Embedding(config['block_size'], config['n_embd']), # position embeddings drop = nn.Dropout(config['dropout']), h = nn.ModuleList([KimiBlock(config) for _ in range(config['n_layer'])]), ln_f = LayerNorm(config['n_embd'], bias=config['bias']), )) self.lm_head = nn.Linear(config['n_embd'], config['vocab_size'], bias=False) # Weight tying self.transformer.wte.weight = self.lm_head.weight # Initialize weights self.apply(self._init_weights) # Apply special scaled init to the residual projections, per GPT-2 paper for pn, p in self.named_parameters(): if pn.endswith('o_proj.weight'): torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config['n_layer'])) # Report number of parameters print("Number of parameters: %.2fM" % (self.get_num_params()/1e6,)) def get_num_params(self, non_embedding=True): """ Return the number of parameters in the model. For non-embedding count (default), the position embeddings get subtracted. The token embeddings would too, except due to the parameter sharing these params are actually used as weights in the final layer, so we include them. """ n_params = sum(p.numel() for p in self.parameters()) if non_embedding: n_params -= self.transformer.wpe.weight.numel() return n_params def _init_weights(self, module): if isinstance(module, nn.Linear): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) if module.bias is not None: torch.nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) def forward(self, idx, targets=None): device = idx.device b, t = idx.size() assert t <= self.config['block_size'], f"Cannot forward sequence of length {t}, block size is only {self.config['block_size']}" pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t) # Forward the model tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd) x = self.transformer.drop(tok_emb + pos_emb) # Accumulate load balance losses from MoE layers total_load_balance_loss = 0.0 for block in self.transformer.h: x, load_balance_loss = block(x) total_load_balance_loss += load_balance_loss x = self.transformer.ln_f(x) if targets is not None: # If we are given some desired targets also calculate the loss logits = self.lm_head(x) loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1) # Add load balance loss loss = loss + total_load_balance_loss else: # Inference-time mini-optimization: only forward the lm_head on the very last position logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim loss = None return logits, loss def crop_block_size(self, block_size): # model surgery to decrease the block size if necessary # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024) # but want to use a smaller block size for some smaller, simpler model assert block_size <= self.config['block_size'] self.config['block_size'] = block_size self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size]) for block in self.transformer.h: if hasattr(block.attn, 'bias'): block.attn.bias = block.attn.bias[:,:,:block_size,:block_size] @classmethod def from_pretrained(cls, model_type, override_args=None): """ Initialize a pretrained GPT model by copying over the weights from a huggingface/transformers checkpoint. """ assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'} override_args = override_args or {} # default to empty dict # only dropout can be overridden see more notes below assert all(k == 'dropout' for k in override_args) from transformers import GPT2LMHeadModel print("loading weights from pretrained gpt: %s" % model_type) # n_layer, n_head and n_embd are determined from model_type config_args = { 'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params 'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params 'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params 'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params }[model_type] print("forcing vocab_size=50257, block_size=1024, bias=True") config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints config_args['bias'] = True # always True for GPT model checkpoints # we can override the dropout rate, if desired if 'dropout' in override_args: print(f"overriding dropout rate to {override_args['dropout']}") config_args['dropout'] = override_args['dropout'] # create a from-scratch initialized KimiK2 model model = cls(config_args) sd = model.state_dict() sd_keys = sd.keys() sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param # init a huggingface/transformers model model_hf = GPT2LMHeadModel.from_pretrained(model_type) sd_hf = model_hf.state_dict() # copy while ensuring all of the parameters are aligned and match in names and shapes sd_keys_hf = sd_hf.keys() sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer) transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight'] # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear # this means that we have to transpose these weights when we import them assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}" for k in sd_keys_hf: if any(k.endswith(w) for w in transposed): # special treatment for the Conv1D weights we need to transpose assert sd_hf[k].shape[::-1] == sd[k].shape with torch.no_grad(): sd[k].copy_(sd_hf[k].t()) else: # vanilla copy over the other parameters assert sd_hf[k].shape == sd[k].shape with torch.no_grad(): sd[k].copy_(sd_hf[k]) return model def configure_optimizers(self, weight_decay, learning_rate, betas, device_type): """ This long function is unfortunately doing something very simple and is being very defensive: We are separating out all parameters of the model into two buckets: those that will experience weight decay for regularization and those that won't (biases, and layernorm/embedding weights). We are then returning the PyTorch optimizer object. """ # separate out all parameters to those that will and won't experience regularizing weight decay decay = set() no_decay = set() whitelist_weight_modules = (torch.nn.Linear, ) blacklist_weight_modules = (torch.nn.LayerNorm, LayerNorm, torch.nn.Embedding) for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name # random note: because named_modules and named_parameters are recursive # we will see the same tensors p many times. but doing it this way # allows us to know which parent module any tensor p belongs to... if pn.endswith('bias'): # all biases will not be decayed no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) # subtle: 'transformer.wte.weight' and 'lm_head.weight' are tied, so they # will appear in the no_decay and decay sets respectively after the above. # In addition, because named_parameters() doesn't return duplicates, it # will only return the first occurence, key'd by 'transformer.wte.weight', below. # so let's manually remove 'lm_head.weight' from decay set. This will leave us with # transformer.wte.weight decaying via no_decay set. decay.discard('lm_head.weight') # validate that we considered every parameter param_dict = {pn: p for pn, p in self.named_parameters()} inter_params = decay & no_decay union_params = decay | no_decay assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] # new PyTorch nightly has a new 'fused' option for AdamW that is much faster use_fused = (device_type == 'cuda') and ('fused' in inspect.signature(torch.optim.AdamW).parameters) print(f"using fused AdamW: {use_fused}") optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, fused=use_fused) return optimizer @torch.no_grad() def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None): """ Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete the sequence max_new_tokens times, feeding the predictions back into the model each time. Most likely you'll want to make sure to be in model.eval() mode of operation for this. """ for _ in range(max_new_tokens): # if the sequence context is growing too long we must crop it at block_size idx_cond = idx if idx.size(1) <= self.config['block_size'] else idx[:, -self.config['block_size']:] # forward the model to get the logits for the index in the sequence logits, _ = self(idx_cond) # pluck the logits at the final step and scale by desired temperature logits = logits[:, -1, :] / temperature # optionally crop the logits to only the top k options if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) logits[logits < v[:, [-1]]] = -float('Inf') # apply softmax to convert logits to (normalized) probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution idx_next = torch.multinomial(probs, num_samples=1) # append sampled index to the running sequence and continue idx = torch.cat((idx, idx_next), dim=1) return idx