kgrabko committed
Commit 2d93eea · verified · 1 Parent(s): 5947ead

Delete JiRackTernaryPyTorch_405b.py

Files changed (1)
  1. JiRackTernaryPyTorch_405b.py +0 -171
JiRackTernaryPyTorch_405b.py DELETED
@@ -1,171 +0,0 @@
- # ==============================================================================
- # COPYRIGHT (C) 2025 KONSTANTIN VLADIMIROVICH GRABKO. ALL RIGHTS RESERVED.
- # PATENT PENDING | CMS MANHATTAN JIRACK TECHNOLOGY
- #
- # This software is licensed under the Commercial License Agreement V.1.2.
- # Any use, modification, or distribution of this code requires compliance with
- # the terms found in the LICENSE.md file in the root directory.
- #
- # NO PATENTING RIGHTS: Users are strictly prohibited from filing patent claims
- # based on the BRE or SWA architectures disclosed herein.
- # Contact: grabko@cmsmanhattan.com | +1 (516) 777-0945
- # ==============================================================================
- # COPYRIGHT (C) 2025 KONSTANTIN VLADIMIROVICH GRABKO. ALL RIGHTS RESERVED.
- # PATENT PENDING | CMS MANHATTAN JIRACK TECHNOLOGY | MOE-VERSION 405B
- # ==============================================================================
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import math
- from typing import Optional, Tuple
- from transformers import PreTrainedModel, PretrainedConfig
- from transformers.modeling_outputs import CausalLMOutputWithPast
-
- class JiRackMoEConfig(PretrainedConfig):
-     model_type = "jirack_moe_transformer"
-     def __init__(
-         self,
-         vocab_size=128256,
-         hidden_size=12288,
-         num_hidden_layers=112, # MoE allows reduced depth while increasing experts
-         num_attention_heads=96,
-         num_experts=16, # Total of 16 experts per layer
-         num_experts_per_tok=2, # Activate only the top 2 experts per token
-         intermediate_size=12288, # Size of each expert
-         max_position_embeddings=32768,
-         rms_norm_eps=1e-5,
-         author="Author: Konstantin Vladimirovich Grabko (CMS Manhattan) 2025",
-         **kwargs
-     ):
-         super().__init__(**kwargs)
-         self.vocab_size = vocab_size
-         self.hidden_size = hidden_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.num_experts = num_experts
-         self.num_experts_per_tok = num_experts_per_tok
-         self.intermediate_size = intermediate_size
-         self.max_position_embeddings = max_position_embeddings
-         self.rms_norm_eps = rms_norm_eps
-         self.author = author
-
- # --- Ternary Linear Logic (Bit-Response Engine) ---
-
- class JiRackBitLinear(nn.Linear):
-     def __init__(self, in_features, out_features, bias=False):
-         super().__init__(in_features, out_features, bias)
-         nn.init.normal_(self.weight, std=0.02)
-     def forward(self, x):
-         w = self.weight
-         gamma = w.abs().mean() + 1e-9
-         w_quant = torch.clamp(torch.round(w / gamma), -1, 1)
-         w_final = w + (w_quant * gamma - w).detach()
-         x_norm = x - x.mean(dim=-1, keepdim=True)
-         x_quant = x_norm + (torch.clamp(x_norm, -1.2, 1.2) - x_norm).detach()
-         return F.linear(x_quant, w_final, self.bias)
-
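The JiRackBitLinear layer above quantizes its weights to the ternary set {-1, 0, +1}, scaled by the mean absolute weight, and routes gradients around the rounding step with a straight-through estimator. A minimal sketch of that quantization in isolation, using toy tensor sizes that are not part of the deleted file:

    import torch
    import torch.nn.functional as F

    # Toy stand-in for self.weight in JiRackBitLinear.
    w = torch.randn(4, 8, requires_grad=True)

    # Per-tensor scale (mean absolute weight), as in the forward pass above.
    gamma = w.abs().mean() + 1e-9

    # Ternary codes in {-1, 0, +1}.
    w_quant = torch.clamp(torch.round(w / gamma), -1, 1)

    # Straight-through estimator: the forward value equals w_quant * gamma,
    # but the subtraction trick keeps the backward path attached to w.
    w_final = w + (w_quant * gamma - w).detach()

    x = torch.randn(2, 8)
    out = F.linear(x - x.mean(dim=-1, keepdim=True), w_final)
    out.sum().backward()

    print(torch.unique(w_quant))   # at most three distinct values
    print(w.grad is not None)      # True: gradients reach the full-precision weights
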
- # --- Expert Block ---
-
- class JiRackExpert(nn.Module):
-     def __init__(self, config: JiRackMoEConfig):
-         super().__init__()
-         self.w1 = JiRackBitLinear(config.hidden_size, config.intermediate_size)
-         self.w2 = JiRackBitLinear(config.intermediate_size, config.hidden_size)
-         self.w3 = JiRackBitLinear(config.hidden_size, config.intermediate_size)
-     def forward(self, x):
-         return self.w2(F.silu(self.w1(x)) * self.w3(x))
-
- # --- Router (Gate) for Expert Selection ---
-
- class JiRackMoEGate(nn.Module):
-     def __init__(self, config: JiRackMoEConfig):
-         super().__init__()
-         self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
-         self.top_k = config.num_experts_per_tok
-     def forward(self, x):
-         logits = self.gate(x)
-         weights, indices = torch.topk(logits, self.top_k, dim=-1)
-         weights = F.softmax(weights.float(), dim=-1).type_as(x)
-         return weights, indices
-
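The gate above scores every token against all experts, keeps the top-k logits, and renormalizes only those with a softmax, so each token's routing weights sum to 1 over its selected experts. A standalone sketch of the same routing math with illustrative sizes (the plain nn.Linear gate and the dimensions here are stand-ins, not the 405B configuration):

    import torch
    import torch.nn.functional as F

    hidden_size, num_experts, top_k = 16, 8, 2        # toy sizes
    gate = torch.nn.Linear(hidden_size, num_experts, bias=False)

    x = torch.randn(3, 5, hidden_size)                # (batch, seq, hidden)
    logits = gate(x)                                  # (batch, seq, num_experts)
    weights, indices = torch.topk(logits, top_k, dim=-1)
    weights = F.softmax(weights.float(), dim=-1).type_as(x)

    print(indices.shape)                              # torch.Size([3, 5, 2]): chosen expert ids per token
    print(weights.sum(dim=-1))                        # ~1.0 everywhere: mass is shared by the top-2 experts only
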
- # --- Signature and Phase CMS Layer ---
-
- class SignatureLayer(nn.Module):
-     def __init__(self, dim, author_name):
-         super().__init__()
-         self.gate = nn.Parameter(torch.ones(dim))
-         seed = sum(ord(c) for c in author_name)
-         torch.manual_seed(seed)
-         self.signage_cms = nn.Parameter(torch.randn(dim, dim) * 0.001)
-     def forward(self, x):
-         sig = torch.tanh(F.linear(x, self.signage_cms))
-         return x * torch.sigmoid(self.gate) + sig
-
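SignatureLayer derives its random projection deterministically: the author string fixes the RNG seed, so any two instances built from the same string carry an identical signage_cms matrix. A small sketch of that property; signature_matrix is an illustrative helper, not a function from the deleted file:

    import torch

    def signature_matrix(dim, author_name):
        # Same deterministic construction as in SignatureLayer above:
        # the author string fixes the RNG seed, so the matrix is reproducible.
        seed = sum(ord(c) for c in author_name)
        torch.manual_seed(seed)
        return torch.randn(dim, dim) * 0.001

    a = signature_matrix(8, "Author: Konstantin Vladimirovich Grabko (CMS Manhattan) 2025")
    b = signature_matrix(8, "Author: Konstantin Vladimirovich Grabko (CMS Manhattan) 2025")
    print(torch.equal(a, b))   # True: identical author string -> identical signature matrix
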
- # --- Main MoE Transformer Block ---
-
- class JiRackMoEBlock(nn.Module):
-     def __init__(self, config: JiRackMoEConfig):
-         super().__init__()
-         self.ln1 = nn.LayerNorm(config.hidden_size)
-         self.attn = nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
-         self.ln2 = nn.LayerNorm(config.hidden_size)
-
-         # MoE Integration
-         self.gate = JiRackMoEGate(config)
-         self.experts = nn.ModuleList([JiRackExpert(config) for _ in range(config.num_experts)])
-
-         self.signature = SignatureLayer(config.hidden_size, config.author)
-
-     def forward(self, x):
-         # Attention
-         residual = x
-         x = self.ln1(x)
-         x, _ = self.attn(x, x, x, need_weights=False)
-         x = residual + x
-
-         # MoE Routing
-         residual = x
-         x = self.ln2(x)
-         weights, indices = self.gate(x)
-
-         final_output = torch.zeros_like(x)
-         # A loop is used here for simplicity; scatter/gather is used in production
-         for i, expert in enumerate(self.experts):
-             mask = (indices == i).any(dim=-1)
-             if mask.any():
-                 expert_output = expert(x[mask])
-                 # Applying expert contribution weights
-                 w = weights[indices == i].unsqueeze(-1)
-                 final_output[mask] += expert_output * w
-
-         x = residual + self.signature(final_output)
-         return x
-
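The in-file comment notes that the per-expert Python loop stands in for a scatter/gather dispatch. A rough sketch of that gather/weight/scatter pattern on flattened (token, slot) pairs, using toy shapes and plain nn.Linear modules as hypothetical stand-ins for the JiRackExpert experts; this is not the production dispatch the comment refers to:

    import torch

    # Toy routing: 6 tokens, 4 experts, top-2 (shapes mirror the block above, flattened over batch*seq).
    tokens, hidden, num_experts, top_k = 6, 8, 4, 2
    x = torch.randn(tokens, hidden)
    weights = torch.rand(tokens, top_k)
    weights = weights / weights.sum(dim=-1, keepdim=True)          # per-token routing weights
    indices = torch.stack([torch.randperm(num_experts)[:top_k] for _ in range(tokens)])

    # Hypothetical stand-ins for the JiRackExpert modules.
    experts = [torch.nn.Linear(hidden, hidden) for _ in range(num_experts)]

    # Flatten the (token, slot) pairs, run each group through its expert,
    # and scatter the weighted results back with index_add_.
    flat_expert = indices.reshape(-1)                              # (tokens * top_k,)
    flat_weight = weights.reshape(-1, 1)                           # (tokens * top_k, 1)
    token_ids = torch.arange(tokens).repeat_interleave(top_k)      # owning token of each slot

    out = torch.zeros_like(x)
    for e in range(num_experts):
        slots = (flat_expert == e).nonzero(as_tuple=True)[0]
        if slots.numel():
            rows = token_ids[slots]
            out.index_add_(0, rows, experts[e](x[rows]) * flat_weight[slots])
    print(out.shape)                                               # (6, 8): one combined output per token
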
- # --- Main 405B MoE Model ---
-
- class JiRackTernaryMoE405B(PreTrainedModel):
-     config_class = JiRackMoEConfig
-     def __init__(self, config: JiRackMoEConfig):
-         super().__init__(config)
-         self.token_emb = nn.Embedding(config.vocab_size, config.hidden_size)
-         self.blocks = nn.ModuleList([JiRackMoEBlock(config) for _ in range(config.num_hidden_layers)])
-         self.ln_f = nn.LayerNorm(config.hidden_size)
-         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         self.post_init()
-
-     def forward(self, input_ids, labels=None):
-         x = self.token_emb(input_ids)
-         for block in self.blocks:
-             if self.training:
-                 x = torch.utils.checkpoint.checkpoint(block, x, use_reentrant=False)
-             else:
-                 x = block(x)
-
-         logits = self.lm_head(self.ln_f(x))
-         loss = None
-         if labels is not None:
-             loss = F.cross_entropy(logits.view(-1, self.config.vocab_size), labels.view(-1))
-
-         return CausalLMOutputWithPast(loss=loss, logits=logits)
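For anyone keeping a local copy of the deleted file, a minimal smoke test along these lines should exercise the whole stack; the module name jirack_ternary_moe and the tiny dimensions are assumptions for illustration, not the shipped 405B configuration:

    # Assumes the classes from the deleted file were saved locally as jirack_ternary_moe.py.
    import torch
    from jirack_ternary_moe import JiRackMoEConfig, JiRackTernaryMoE405B

    config = JiRackMoEConfig(
        vocab_size=1000,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_experts=4,
        num_experts_per_tok=2,
        intermediate_size=128,
        max_position_embeddings=256,
    )
    model = JiRackTernaryMoE405B(config)
    model.eval()

    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    with torch.no_grad():
        out = model(input_ids, labels=input_ids)

    print(out.logits.shape)   # (1, 16, vocab_size)
    print(out.loss)           # scalar cross-entropy over the toy batch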