Upload infer.py with huggingface_hub
infer.py
ADDED
@@ -0,0 +1,125 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# Use the GPU when available; fall back to CPU so inference still runs.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Attention(nn.Module):
    """Standard multi-head self-attention."""

    def __init__(self, d, heads=8):
        super().__init__()
        self.heads = heads
        self.dk = d // heads  # per-head dimension
        self.q_proj = nn.Linear(d, d, bias=False)
        self.k_proj = nn.Linear(d, d, bias=False)
        self.v_proj = nn.Linear(d, d, bias=False)
        self.out_proj = nn.Linear(d, d, bias=False)

    def forward(self, x, mask=None):
        B, N, D = x.shape
        # Project, then split into heads.
        q = self.q_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)
        k = self.k_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)
        v = self.v_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)

        # Scaled dot-product attention with an additive mask.
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
        if mask is not None:
            att = att + mask
        att = F.softmax(att, dim=-1)
        out = (att @ v).transpose(1, 2).reshape(B, N, D)
        return self.out_proj(out)

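# Shape walk-through for Attention.forward, for reference:
#   x:            (B, N, d)
#   q, k, v:      (B, heads, N, dk) after the view + transpose, dk = d // heads
#   att:          (B, heads, N, N) attention weights after softmax
#   return value: (B, N, d) after merging heads and the output projection
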
class MLP(nn.Module):
    def __init__(self, d, mult=4):
        super().__init__()
        self.fc1 = nn.Linear(d, d * mult, bias=False)
        self.fc2 = nn.Linear(d * mult, d, bias=False)

    def forward(self, x):
        return self.fc2(F.gelu(self.fc1(x)))

class Block(nn.Module):
    """Pre-norm transformer block: attention and MLP, each with a residual."""

    def __init__(self, d, heads=8):
        super().__init__()
        self.ln1 = nn.LayerNorm(d)
        self.attn = Attention(d, heads)
        self.ln2 = nn.LayerNorm(d)
        self.mlp = MLP(d)

    def forward(self, x, mask):
        x = x + self.attn(self.ln1(x), mask)
        x = x + self.mlp(self.ln2(x))
        return x

class PureBitModel(nn.Module):
    """Causal transformer over a two-token vocabulary: the bits 0 and 1."""

    def __init__(self, d=256, layers=6, heads=8):
        super().__init__()
        self.emb = nn.Embedding(2, d)  # binary vocab!
        self.blocks = nn.ModuleList([Block(d, heads) for _ in range(layers)])
        self.ln = nn.LayerNorm(d)
        self.head = nn.Linear(d, 2, bias=False)
        self.head.weight = self.emb.weight  # tie input and output embeddings

    def forward(self, x):
        B, N = x.shape
        # Additive causal mask: -1e9 above the diagonal blocks future positions.
        mask = torch.triu(torch.ones(N, N, device=x.device), 1) * -1e9
        h = self.emb(x)
        for b in self.blocks:
            h = b(h, mask)
        return self.head(self.ln(h))

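# For context, with N = 3 the additive causal mask above evaluates to
#   [[0, -1e9, -1e9],
#    [0,    0, -1e9],
#    [0,    0,    0]]
# so each bit attends only to itself and to earlier bits.
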
# Load the checkpoint and report its training stats.
print("Loading purebit checkpoint...")
ckpt = torch.load('/workspace/purebit_ckpt_113000kb.pt', map_location=DEVICE)
print(f"Loss: {ckpt['loss']:.4f}")
print(f"Bits trained: {ckpt['bits']:,}")
print(f"Bytes trained: {ckpt['bytes']:,} ({ckpt['bytes']/1024/1024:.1f} MB)")

model = PureBitModel(d=256, layers=6, heads=8).to(DEVICE)
model.load_state_dict(ckpt['model'])
model.eval()
print("Model loaded!\n")

def text_to_bits(text):
    """UTF-8 encode, then emit each byte as 8 bits, most significant first."""
    bits = []
    for byte in text.encode('utf-8'):
        for i in range(7, -1, -1):
            bits.append((byte >> i) & 1)
    return bits

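# Worked example: 'A' is 0x41 = 0b01000001, so
#   text_to_bits("A") == [0, 1, 0, 0, 0, 0, 0, 1]
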
def bits_to_text(bits):
    """Pack bits (MSB first) back into bytes and decode as UTF-8."""
    # Pad to a multiple of 8 so the last partial byte can be packed.
    while len(bits) % 8 != 0:
        bits.append(0)
    bytes_out = []
    for i in range(0, len(bits), 8):
        byte = 0
        for j in range(8):
            byte = (byte << 1) | bits[i + j]
        bytes_out.append(byte)
    # errors='replace' turns malformed UTF-8 into U+FFFD instead of raising.
    return bytes(bytes_out).decode('utf-8', errors='replace')

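# Illustrative round-trip check ('H' = 0b01001000, 'i' = 0b01101001):
assert bits_to_text(text_to_bits("Hi")) == "Hi"
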
def generate(prompt, max_bits=200):
    bits = text_to_bits(prompt)
    x = torch.tensor(bits, device=DEVICE).unsqueeze(0)

    generated = []
    with torch.no_grad():
        for _ in range(max_bits):
            # Feed at most the last 512 bits; take the logits at the final position.
            logits = model(x[:, -512:])[:, -1, :]
            # Sample the next bit at temperature 0.8.
            probs = F.softmax(logits / 0.8, dim=-1)
            next_bit = torch.multinomial(probs, 1).item()
            generated.append(next_bit)
            x = torch.cat([x, torch.tensor([[next_bit]], device=DEVICE)], 1)

    all_bits = bits + generated
    return bits_to_text(all_bits)

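# Example: generate("The ", 160) feeds the 32 prompt bits (4 bytes) and then
# samples 160 new bits, i.e. 20 new bytes appended to the prompt.
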
print("=== PURE BIT INFERENCE ===\n")
prompts = ["The ", "Hello", "A", "In ", "01"]
for p in prompts:
    try:
        out = generate(p, 160)  # 160 bits = 20 bytes (20 chars if ASCII)
        print(f"PROMPT: '{p}'")
        print(f"OUTPUT: {repr(out)}\n")
    except Exception as e:
        print(f"PROMPT: '{p}' -> ERROR: {e}\n")