grapheneaffiliates committed
Commit f742bbc · verified · 1 Parent(s): ffbffc0

Upload python/h4_language_model.py with huggingface_hub

Files changed (1)
  1. python/h4_language_model.py +187 -0
python/h4_language_model.py ADDED
@@ -0,0 +1,187 @@
+"""
+H4 Language Model — Transformer LM with H4 geometric attention.
+
+Architecture:
+- Token embedding + golden-angle positional encoding (PhiPositionalEncoding)
+- N × H4TransformerBlock (H4 attention + FFN)
+- LM head (Linear to vocab_size)
+
+The frozen H4 geometry handles spatial partitioning of attention space.
+Trainable adapters (nudge matrices, chamber bonuses, projections) learn
+which directions to query and how to weight chambers.
+"""
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import sys
+import os
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from h4_hybrid_attention import H4TransformerBlock
+from utils.phi_positional import PhiPositionalEncoding
+from bitlinear import BitLinear
+
+
+class H4LanguageModel(nn.Module):
+    """
+    Full language model with H4 polytopic attention.
+
+    Args:
+        vocab_size: vocabulary size
+        d_model: model dimension
+        n_heads: number of H4 attention heads per layer
+        n_layers: number of transformer blocks
+        d_value: value dimension per head
+        d_ffn: FFN hidden dimension (default: 4 * d_model)
+        top_k: max candidates per query in ChamberTree lookup
+        max_seq_len: max sequence length for positional encoding cache
+        dropout: dropout rate
+        use_bitlinear: if True, use BitLinear layers inside the transformer blocks
+    """
+
+    def __init__(
+        self,
+        vocab_size: int,
+        d_model: int = 64,
+        n_heads: int = 8,
+        n_layers: int = 4,
+        d_value: int = 16,
+        d_ffn: int = None,
+        top_k: int = 32,
+        max_seq_len: int = 8192,
+        dropout: float = 0.1,
+        use_bitlinear: bool = False,
+    ):
+        super().__init__()
+        self.vocab_size = vocab_size
+        self.d_model = d_model
+        self.n_layers = n_layers
+        self.use_bitlinear = use_bitlinear
+        self.max_seq_len = max_seq_len  # used by generate() to crop the context window
+
+        if d_ffn is None:
+            d_ffn = d_model * 4
+
+        # Token embedding (always float — lookup table, not a matmul)
+        self.token_emb = nn.Embedding(vocab_size, d_model)
+        # Scale embedding by sqrt(d_model) as in original transformer
+        self.emb_scale = math.sqrt(d_model)
+
+        # Golden-angle positional encoding
+        self.pos_enc = PhiPositionalEncoding(d_model, max_cached=max_seq_len)
+
+        # Embedding dropout
+        self.emb_dropout = nn.Dropout(dropout)
+
+        # Transformer blocks with H4 attention
+        self.blocks = nn.ModuleList([
+            H4TransformerBlock(
+                d_model=d_model,
+                n_heads=n_heads,
+                d_value=d_value,
+                d_ffn=d_ffn,
+                top_k=top_k,
+                dropout=dropout,
+                use_bitlinear=use_bitlinear,
+            )
+            for _ in range(n_layers)
+        ])
+
+        # Final layer norm
+        self.ln_f = nn.LayerNorm(d_model)
+
+        # LM head (tied with token embedding weights — stays float)
+        self.lm_head = nn.Linear(d_model, vocab_size, bias=False)
+        # Weight tying
+        self.lm_head.weight = self.token_emb.weight
+
+        self._init_weights()
+
+    def _init_weights(self):
+        """Initialize weights following GPT-2 conventions."""
+        for module in self.modules():
+            if isinstance(module, BitLinear):
+                # BitLinear defaults to kaiming init; re-initialize at GPT-2 scale (std=0.02)
+                torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+            elif isinstance(module, nn.Linear):
+                torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+                if module.bias is not None:
+                    torch.nn.init.zeros_(module.bias)
+            elif isinstance(module, nn.Embedding):
+                torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        use_tree: bool = True,
+        return_diagnostics: bool = False,
+    ) -> torch.Tensor:
+        """
+        Args:
+            input_ids: (batch, seq_len) token indices
+            use_tree: if True, use ChamberTree for O(log t) attention
+            return_diagnostics: if True, return (logits, list_of_diag_dicts)
+
+        Returns:
+            logits: (batch, seq_len, vocab_size)
+        """
+        B, T = input_ids.shape
+
+        # Token + positional embedding
+        tok_emb = self.token_emb(input_ids) * self.emb_scale       # (B, T, D)
+        pos_emb = self.pos_enc(T).unsqueeze(0).to(tok_emb.device)  # (1, T, D)
+        x = self.emb_dropout(tok_emb + pos_emb)
+
+        # Transformer blocks
+        diagnostics = []
+        for block in self.blocks:
+            if return_diagnostics:
+                x, diag = block(x, use_tree=use_tree, return_diagnostics=True)
+                diagnostics.append(diag)
+            else:
+                x = block(x, use_tree=use_tree)
+
+        # Final norm + LM head
+        x = self.ln_f(x)
+        logits = self.lm_head(x)  # (B, T, vocab_size)
+
+        if return_diagnostics:
+            return logits, diagnostics
+        return logits
+
+    def count_params(self):
+        """Count trainable and frozen parameters."""
+        trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
+        frozen = sum(p.numel() for p in self.parameters() if not p.requires_grad)
+        buffers = sum(b.numel() for b in self.buffers())
+        return {
+            'trainable': trainable,
+            'frozen': frozen,
+            'buffers': buffers,
+            'total': trainable + frozen,
+        }
+
+    @torch.no_grad()
+    def generate(
+        self,
+        input_ids: torch.Tensor,
+        max_new_tokens: int = 100,
+        temperature: float = 1.0,
+        top_k_sample: int = 0,
+    ) -> torch.Tensor:
+        """Autoregressive generation."""
+        for _ in range(max_new_tokens):
+            # Crop the context to max sequence length if needed
+            idx_cond = input_ids if input_ids.size(1) <= self.max_seq_len else input_ids[:, -self.max_seq_len:]
+            logits = self.forward(idx_cond, use_tree=False)
+            logits = logits[:, -1, :] / temperature
+
+            if top_k_sample > 0:
+                v, _ = torch.topk(logits, min(top_k_sample, logits.size(-1)))
+                logits[logits < v[:, [-1]]] = float('-inf')
+
+            probs = F.softmax(logits, dim=-1)
+            next_id = torch.multinomial(probs, num_samples=1)
+            input_ids = torch.cat([input_ids, next_id], dim=1)
+
+        return input_ids
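
A minimal usage sketch (illustrative, not part of the committed file): it assumes the sibling modules h4_hybrid_attention, utils.phi_positional, and bitlinear from this repo's python/ directory are importable, and that H4TransformerBlock accepts the keyword arguments used above. The vocabulary size, tensor shapes, and sampling settings below are placeholder values.

import torch
import torch.nn.functional as F

from h4_language_model import H4LanguageModel  # assumes python/ is on sys.path

# Small illustrative configuration; any vocab_size matching your tokenizer works.
model = H4LanguageModel(
    vocab_size=256,        # e.g. byte-level tokens
    d_model=64,
    n_heads=8,
    n_layers=4,
    d_value=16,
    top_k=32,
    max_seq_len=512,
)
print(model.count_params())    # {'trainable': ..., 'frozen': ..., 'buffers': ..., 'total': ...}

# Forward pass: logits over the vocabulary at every position.
input_ids = torch.randint(0, 256, (2, 128))    # (batch=2, seq_len=128)
logits = model(input_ids)                      # (2, 128, 256)

# Next-token cross-entropy for a training step.
targets = input_ids[:, 1:]
loss = F.cross_entropy(
    logits[:, :-1].reshape(-1, logits.size(-1)),
    targets.reshape(-1),
)
loss.backward()

# Autoregressive sampling from a short prompt.
prompt = torch.randint(0, 256, (1, 8))
generated = model.generate(prompt, max_new_tokens=32, temperature=0.8, top_k_sample=40)
print(generated.shape)    # torch.Size([1, 40])

Note that the forward calls above default to use_tree=True (ChamberTree lookup), while generate() internally calls forward with use_tree=False, so sampling runs the non-tree attention path.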