# JiRack_GPT5_236b / JiRackPyTorch_GPT5_class_236b.py
# ==============================================================================
# COPYRIGHT (C) 2025 KONSTANTIN VLADIMIROVICH GRABKO. ALL RIGHTS RESERVED.
# PATENT PENDING | CMS MANHATTAN JIRACK TECHNOLOGY
#
# This software is licensed under the Commercial License Agreement V.1.2.
# Any use, modification, or distribution of this code requires compliance with
# the terms found in the LICENSE.md file in the root directory.
#
# NO PATENTING RIGHTS: Users are strictly prohibited from filing patent claims
# based on the BRE or SWA architectures disclosed herein.
# Contact: grabko@cmsmanhattan.com | +1 (516) 777-0945
# ==============================================================================
# Version: 236B Balanced Frontier (GQA + SwiGLU + RoPE)
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# --- 236B CONFIGURATION ---
VOCAB_SIZE = 50257
MODEL_DIM = 14336        # intermediate scale between 140B and 405B
NUM_HEADS = 112          # 112 query heads
NUM_KV_HEADS = 8         # GQA (14:1 query-to-KV ratio)
NUM_LAYERS = 108         # 108 layers
MAX_SEQ_LEN = 4096
FFN_HIDDEN_DIM = 40960   # SwiGLU hidden size
HEAD_DIM = MODEL_DIM // NUM_HEADS
EPSILON = 1e-5
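# Rough parameter budget (a back-of-the-envelope sketch; norms omitted,
# embedding tied with the output head):
#   attention per layer: 2*d^2 + 2*d*(NUM_KV_HEADS*HEAD_DIM)   ~ 0.44B
#   gated SwiGLU FFN per layer: 3*d*FFN_HIDDEN_DIM             ~ 1.76B
#   108 layers + tied embedding                                ~ 238B (the "236B" class)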
class RMSNorm(nn.Module):
    def __init__(self, dim, eps=EPSILON):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))
    def forward(self, x):
        return (x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)) * self.weight
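# RoPE tables are precomputed as complex phases e^(i*t*theta_j) with
# theta_j = theta^(-2j/dim): multiplying a query/key pair viewed as a complex
# number by such a phase rotates each 2-D slice by a position-dependent angle.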
def precompute_freqs_cis(dim, seq_len, theta=100000.0):
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(seq_len)
    freqs = torch.outer(t, freqs).float()
    return torch.polar(torch.ones_like(freqs), freqs)
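# Shapes: xq (b, t, NUM_HEADS, HEAD_DIM), xk (b, t, NUM_KV_HEADS, HEAD_DIM);
# the last dimension is viewed as HEAD_DIM/2 complex pairs before rotation.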
def apply_rotary_emb(xq, xk, freqs_cis):
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
    freqs_cis = freqs_cis.view(1, xq_.size(1), 1, xq_.size(3))
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
    return xq_out.type_as(xq), xk_out.type_as(xk)
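# GQA: each KV head serves n_rep = NUM_HEADS // NUM_KV_HEADS query heads, so
# keys/values are expanded head-wise to match the query head count for SDPA.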
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    if n_rep == 1:
        return x
    bs, slen, n_kv_heads, head_dim = x.shape
    return (x[:, :, :, None, :]
            .expand(bs, slen, n_kv_heads, n_rep, head_dim)
            .reshape(bs, slen, n_kv_heads * n_rep, head_dim))
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.n_kv_heads = NUM_KV_HEADS
        self.n_rep = NUM_HEADS // NUM_KV_HEADS
        self.wq = nn.Linear(MODEL_DIM, NUM_HEADS * HEAD_DIM, bias=False)
        self.wk = nn.Linear(MODEL_DIM, NUM_KV_HEADS * HEAD_DIM, bias=False)
        self.wv = nn.Linear(MODEL_DIM, NUM_KV_HEADS * HEAD_DIM, bias=False)
        self.wo = nn.Linear(NUM_HEADS * HEAD_DIM, MODEL_DIM, bias=False)
    def forward(self, x, freqs_cis, past_kv=None):
        b, t, _ = x.shape
        # When decoding with a KV cache, RoPE must be applied at the absolute
        # positions of the new tokens, offset by the cached prefix length.
        start = 0 if past_kv is None else past_kv[0].size(1)
        q, k, v = self.wq(x), self.wk(x), self.wv(x)
        v = v.view(b, t, self.n_kv_heads, HEAD_DIM)  # values must be 4-D before repeat_kv
        q, k = apply_rotary_emb(q.view(b, t, NUM_HEADS, HEAD_DIM),
                                k.view(b, t, self.n_kv_heads, HEAD_DIM),
                                freqs_cis[start:start + t])
        if past_kv is not None:
            k = torch.cat([past_kv[0], k], dim=1)
            v = torch.cat([past_kv[1], v], dim=1)
        current_kv = (k.detach(), v.detach())
        k, v = repeat_kv(k, self.n_rep), repeat_kv(v, self.n_rep)
        # is_causal only applies to the full-sequence (prefill) case; with a cache,
        # each single-token query may attend to the entire cached prefix.
        out = F.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
                                             is_causal=past_kv is None)
        return self.wo(out.transpose(1, 2).contiguous().view(b, t, MODEL_DIM)), current_kv
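# Gated feed-forward in the Llama style, matching the "SwiGLU" named in the
# version header and the FFN_HIDDEN_DIM comment: w2(silu(w1(x)) * w3(x)).
# The third (gate) matrix is also what the ~236B parameter budget assumes.
class SwiGLU(nn.Module):
    def __init__(self):
        super().__init__()
        self.w1 = nn.Linear(MODEL_DIM, FFN_HIDDEN_DIM, bias=False)  # gate projection
        self.w3 = nn.Linear(MODEL_DIM, FFN_HIDDEN_DIM, bias=False)  # up projection
        self.w2 = nn.Linear(FFN_HIDDEN_DIM, MODEL_DIM, bias=False)  # down projection
    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))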
class JiRackPyTorch(nn.Module):
    def __init__(self):
        super().__init__()
        self.token_emb = nn.Embedding(VOCAB_SIZE, MODEL_DIM)
        self.blocks = nn.ModuleList([nn.ModuleDict({
            'norm1': RMSNorm(MODEL_DIM),
            'attn': MultiHeadAttention(),
            'norm2': RMSNorm(MODEL_DIM),
            'ffn': SwiGLU()
        }) for _ in range(NUM_LAYERS)])
        self.norm_f = RMSNorm(MODEL_DIM)
        self.head = nn.Linear(MODEL_DIM, VOCAB_SIZE, bias=False)
        self.head.weight = self.token_emb.weight  # weight tying with the embedding
        self.register_buffer("freqs_cis", precompute_freqs_cis(HEAD_DIM, MAX_SEQ_LEN))
    def forward(self, idx, targets=None):
        x = self.token_emb(idx)
        for block in self.blocks:
            h, _ = block['attn'](block['norm1'](x), self.freqs_cis)
            x = x + h
            x = x + block['ffn'](block['norm2'](x))
        logits = self.head(self.norm_f(x))
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, VOCAB_SIZE), targets.view(-1))
        return logits, loss
    def get_author_info(self):
        return "Author: Konstantin Vladimirovich Grabko (CMS Manhattan) 2025 | JiRack 236B Dense"