import torch
import torch.nn as nn
import torch.nn.functional as F


class Head(nn.Module):
    """One head of causal self-attention."""

    def __init__(self, n_embd, head_size, block_size, dropout):
        super().__init__()
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        # Lower-triangular mask used to block attention to future positions.
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, T, C = x.shape
        k = self.key(x)    # (B, T, head_size)
        q = self.query(x)  # (B, T, head_size)
        # Scaled dot-product attention scores: (B, T, T)
        wei = q @ k.transpose(-2, -1) * k.shape[-1] ** -0.5
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # causal mask
        wei = F.softmax(wei, dim=-1)
        wei = self.dropout(wei)
        v = self.value(x)  # (B, T, head_size)
        out = wei @ v      # (B, T, head_size)
        return out


class MultiHeadAttention(nn.Module):
    """Multiple heads of self-attention in parallel."""

    def __init__(self, n_embd, num_heads, block_size, dropout):
        super().__init__()
        head_size = n_embd // num_heads
        self.heads = nn.ModuleList(
            [Head(n_embd, head_size, block_size, dropout) for _ in range(num_heads)]
        )
        self.proj = nn.Linear(head_size * num_heads, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Run the heads independently and concatenate along the channel dimension.
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.dropout(self.proj(out))
        return out


class FeedForward(nn.Module):
    """A simple position-wise feed-forward layer."""

    def __init__(self, n_embd, dropout):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embd, 4 * n_embd),
            nn.ReLU(),
            nn.Linear(4 * n_embd, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)


class Block(nn.Module):
    """Transformer block: self-attention followed by feed-forward, each with a residual connection."""

    def __init__(self, n_embd, n_head, block_size, dropout):
        super().__init__()
        self.sa = MultiHeadAttention(n_embd, n_head, block_size, dropout)
        self.ffwd = FeedForward(n_embd, dropout)
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)

    def forward(self, x):
        # Post-norm residual layout: add the sublayer output, then normalize.
        x = self.ln1(x + self.sa(x))
        x = self.ln2(x + self.ffwd(x))
        return x


class BharatAI(nn.Module):
    def __init__(self, vocab_size, n_embd=768, n_head=12, n_layer=12, block_size=256, dropout=0.2):
        super().__init__()
        self.n_embd = n_embd
        self.n_head = n_head
        self.n_layer = n_layer
        self.block_size = block_size
        self.dropout = dropout

        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.blocks = nn.Sequential(
            *[Block(n_embd, n_head, block_size, dropout) for _ in range(n_layer)]
        )
        self.ln_f = nn.LayerNorm(n_embd)
        self.lm_head = nn.Linear(n_embd, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, index, targets=None):
        B, T = index.shape
        tok_emb = self.token_embedding_table(index)  # (B, T, n_embd)
        pos_emb = self.position_embedding_table(torch.arange(T, device=index.device))  # (T, n_embd)
        x = tok_emb + pos_emb
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.lm_head(x)  # (B, T, vocab_size)

        if targets is None:
            loss = None
        else:
            B, T, C = logits.shape
            logits = logits.view(B * T, C)
            targets = targets.view(B * T)
            loss = F.cross_entropy(logits, targets)

        return logits, loss

    def generate(self, index, max_new_tokens):
        for _ in range(max_new_tokens):
            # Crop the running context to the last block_size tokens.
            index_cond = index[:, -self.block_size:]
            logits, loss = self.forward(index_cond)
            # Keep only the logits for the final time step.
            logits = logits[:, -1, :]
            probs = F.softmax(logits, dim=-1)
            # Sample the next token and append it to the sequence.
            index_next = torch.multinomial(probs, num_samples=1)
            index = torch.cat((index, index_next), dim=1)
        return index
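

# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how this model might be exercised. The vocabulary
# size, hyperparameters, and dummy batch below are illustrative assumptions, not
# values taken from the original training setup.
if __name__ == "__main__":
    vocab_size = 65  # assumed toy, character-level vocabulary size
    model = BharatAI(vocab_size, n_embd=128, n_head=4, n_layer=2, block_size=64, dropout=0.0)

    # Forward pass with targets returns flattened logits and a cross-entropy loss.
    xb = torch.randint(0, vocab_size, (2, 8))  # dummy input batch: (B=2, T=8)
    yb = torch.randint(0, vocab_size, (2, 8))  # dummy next-token targets
    logits, loss = model(xb, yb)
    print(logits.shape, loss.item())  # torch.Size([16, 65]) and a scalar loss

    # Autoregressive sampling from a single start token (id 0).
    model.eval()  # disable dropout before generation
    context = torch.zeros((1, 1), dtype=torch.long)
    with torch.no_grad():
        out = model.generate(context, max_new_tokens=20)
    print(out.shape)  # torch.Size([1, 21])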