# Leap0 Model
## Model Description

This is the Leap0 model, designed for text generation tasks. It leverages the GPT-2 tokenizer and architecture but is specifically trained on the Tiny Stories dataset.
## Model Architecture

- **Model Type**: GPT-2-style decoder-only Transformer (`custom_gpt`)
- **Number of Layers**: 8
- **Number of Heads**: 8
- **Embedding Size**: 768
- **Block Size (context length)**: 768
- **Vocabulary Size**: 50257
- **Dropout Rate**: 0.1
- **Attention Mechanism**: Causal Self-Attention
- **Encoding**: GPT-2 Tokenizer
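
For orientation, here is a back-of-the-envelope parameter count derived from the hyperparameters above. This is a sketch, counting weight matrices only; biases and LayerNorm parameters add a small fraction more, and the code below ties the token embedding to the output head, so that matrix is counted once.

```python
# Rough parameter count from the hyperparameters listed above
n_layer, n_embd, vocab_size, block_size = 8, 768, 50257, 768

embeddings = vocab_size * n_embd + block_size * n_embd     # tied wte + wpe
attn_per_block = 3 * n_embd * n_embd + n_embd * n_embd     # c_attn + c_proj
mlp_per_block = 4 * n_embd * n_embd + 4 * n_embd * n_embd  # c_fc + c_proj
total = embeddings + n_layer * (attn_per_block + mlp_per_block)
print(f"~{total / 1e6:.0f}M parameters")  # ~96M
```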
## Training Details

- **Dataset**: Tiny Stories
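
A minimal sketch of loading the training corpus with the `datasets` library, assuming the commonly used `roneneldan/TinyStories` dataset ID on the Hugging Face Hub (the exact copy used for training is not recorded here):

```python
from datasets import load_dataset

# Assumed dataset ID; substitute whichever Tiny Stories copy was actually used
dataset = load_dataset("roneneldan/TinyStories")
print(dataset["train"][0]["text"])
```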
## How to Use

The script below loads the checkpoint and generates a continuation; change `input_text` to your desired prompt.

```python
import json
from dataclasses import dataclass

import torch
import torch.nn as nn
from torch.nn import functional as F
from transformers import GPT2Tokenizer
from safetensors.torch import load_file
# Define the CausalSelfAttention class
class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANOGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dim
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.c_proj(y)
        return y


# Define the MLP class
class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.c_proj.NANOGPT_SCALE_INIT = 1

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x


# Define the Block class (pre-norm residual Transformer block)
class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


# Define the GPTConfig class
@dataclass
class GPTConfig:
    block_size: int = 768
    vocab_size: int = 50257
    n_layer: int = 8
    n_head: int = 8
    n_embd: int = 768
    dropout: float = 0.1
    model_type: str = "custom_gpt"

    def to_dict(self):
        return self.__dict__

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)


# Define the GPT class
class GPT(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte=nn.Embedding(config.vocab_size, config.n_embd),
            wpe=nn.Embedding(config.block_size, config.n_embd),
            h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f=nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight sharing scheme: the token embedding and output head are tied
        self.transformer.wte.weight = self.lm_head.weight
        # Initialize parameters
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANOGPT_SCALE_INIT'):
                # Scale down residual projections, as in nanoGPT
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        B, T = idx.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device)
        pos_emb = self.transformer.wpe(pos)
        tok_emb = self.transformer.wte(idx)
        x = tok_emb + pos_emb
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
# Manually specify the paths to the config and model files (adjust for your setup)
config_path = "/home/nll-workstation/Desktop/config.json"
model_path = "/home/nll-workstation/Desktop/model.safetensors"

# Load the configuration from the specified JSON file
with open(config_path, "r") as f:
    config_dict = json.load(f)
config = GPTConfig.from_dict(config_dict)

# Load the model weights from the specified .safetensors file
tensors = load_file(model_path)

# Instantiate the model with the loaded config
model = GPT(config)

# Load the state dict into the model; strict=False tolerates the tied
# wte/lm_head weight being stored only once in the checkpoint
model.load_state_dict(tensors, strict=False)

# Set the model to evaluation mode
model.eval()

# Load the tokenizer (same tokenizer used during training)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Prepare input text and tokenize it
input_text = "once upon a time in the village of "
input_ids = tokenizer.encode(input_text, return_tensors="pt")
# Generate a continuation autoregressively with greedy decoding
max_new_tokens = 50
with torch.no_grad():
    for _ in range(max_new_tokens):
        # Crop the context to the model's block size if it grows too long
        idx_cond = input_ids[:, -config.block_size:]
        logits, _ = model(idx_cond)
        # Take the logits at the last position and pick the most likely next token
        next_id = torch.argmax(logits[:, -1, :], dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_id], dim=1)

# Convert generated token IDs back to text
output_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)

# Print input and output
print("Input Text:", input_text)
print("Output Text:", output_text)
| """ |