"""Inference script for PureBitModel: a small decoder-only transformer that
models text one bit at a time (vocabulary size 2)."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# Fall back to CPU so the script stays runnable without a GPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Attention(nn.Module):
    """Standard multi-head self-attention."""
    def __init__(self, d, heads=8):
        super().__init__()
        self.heads = heads
        self.dk = d // heads  # per-head dimension
        self.q_proj = nn.Linear(d, d, bias=False)
        self.k_proj = nn.Linear(d, d, bias=False)
        self.v_proj = nn.Linear(d, d, bias=False)
        self.out_proj = nn.Linear(d, d, bias=False)
    def forward(self, x, mask=None):
        B, N, D = x.shape
        # Project and split into heads: (B, heads, N, dk)
        q = self.q_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)
        k = self.k_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)
        v = self.v_proj(x).view(B, N, self.heads, self.dk).transpose(1, 2)
        # Scaled dot-product attention scores: (B, heads, N, N)
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
        if mask is not None:
            att = att + mask  # additive causal mask (-1e9 on future positions)
        att = F.softmax(att, dim=-1)
        # Merge heads back to (B, N, D)
        out = (att @ v).transpose(1, 2).reshape(B, N, D)
        return self.out_proj(out)
class MLP(nn.Module):
    def __init__(self, d, mult=4):
        super().__init__()
        self.fc1 = nn.Linear(d, d * mult, bias=False)
        self.fc2 = nn.Linear(d * mult, d, bias=False)
    def forward(self, x):
        return self.fc2(F.gelu(self.fc1(x)))
class Block(nn.Module):
    """Pre-norm transformer block: attention and MLP with residual connections."""
    def __init__(self, d, heads=8):
        super().__init__()
        self.ln1 = nn.LayerNorm(d)
        self.attn = Attention(d, heads)
        self.ln2 = nn.LayerNorm(d)
        self.mlp = MLP(d)
    def forward(self, x, mask):
        x = x + self.attn(self.ln1(x), mask)
        x = x + self.mlp(self.ln2(x))
        return x
class PureBitModel(nn.Module):
    def __init__(self, d=256, layers=6, heads=8):
        super().__init__()
        self.emb = nn.Embedding(2, d)  # Binary vocab: the only tokens are 0 and 1
        self.blocks = nn.ModuleList([Block(d, heads) for _ in range(layers)])
        self.ln = nn.LayerNorm(d)
        self.head = nn.Linear(d, 2, bias=False)
        self.head.weight = self.emb.weight  # tie output head to the embedding table
    def forward(self, x):
        B, N = x.shape
        # Causal mask: large negative values above the diagonal block attention to future bits
        mask = torch.triu(torch.ones(N, N, device=x.device), 1) * -1e9
        h = self.emb(x)
        for b in self.blocks:
            h = b(h, mask)
        return self.head(self.ln(h))
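# Note: each token is a single bit, so the 512-bit context window used in
# generate() below covers at most 64 bytes (roughly 64 ASCII characters) of history.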
# Load the trained checkpoint and rebuild the model
print("Loading purebit checkpoint...")
ckpt = torch.load('/workspace/purebit_ckpt_113000kb.pt', map_location=DEVICE)
print(f"Loss: {ckpt['loss']:.4f}")
print(f"Bits trained: {ckpt['bits']:,}")
print(f"Bytes trained: {ckpt['bytes']:,} ({ckpt['bytes']/1024/1024:.1f} MB)")
model = PureBitModel(d=256, layers=6, heads=8).to(DEVICE)
model.load_state_dict(ckpt['model'])
model.eval()
print("Model loaded!\n")
def text_to_bits(text):
    """Encode UTF-8 text as a flat list of bits, most significant bit first."""
    bits = []
    for byte in text.encode('utf-8'):
        for i in range(7, -1, -1):
            bits.append((byte >> i) & 1)
    return bits
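# Example of the MSB-first encoding: text_to_bits("A") -> [0, 1, 0, 0, 0, 0, 0, 1],
# since "A" is 0x41 = 0b01000001.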
def bits_to_text(bits):
    """Decode a list of bits (MSB first) back into text."""
    # Pad to a multiple of 8 so the trailing partial byte can still be decoded
    while len(bits) % 8 != 0:
        bits.append(0)
    bytes_out = []
    for i in range(0, len(bits), 8):
        byte = 0
        for j in range(8):
            byte = (byte << 1) | bits[i + j]
        bytes_out.append(byte)
    return bytes(bytes_out).decode('utf-8', errors='replace')
def generate(prompt, max_bits=200):
    bits = text_to_bits(prompt)
    x = torch.tensor(bits, device=DEVICE).unsqueeze(0)
    generated = []
    with torch.no_grad():
        for _ in range(max_bits):
            # Keep only the last 512 bits as context and sample the next bit
            logits = model(x[:, -512:])[:, -1, :]
            probs = F.softmax(logits / 0.8, dim=-1)  # temperature 0.8
            next_bit = torch.multinomial(probs, 1).item()
            generated.append(next_bit)
            x = torch.cat([x, torch.tensor([[next_bit]], device=DEVICE)], 1)
    all_bits = bits + generated
    return bits_to_text(all_bits)
print("=== PURE BIT INFERENCE ===\n")
prompts = ["The ", "Hello", "A", "In ", "01"]
for p in prompts:
    try:
        out = generate(p, 160)  # 160 bits = 20 chars
        print(f"PROMPT: '{p}'")
        print(f"OUTPUT: {repr(out)}\n")
    except Exception as e:
        print(f"PROMPT: '{p}' -> ERROR: {e}\n")