"""
SID-GPT v2 generation with KV-cache.
Generates SID register sequences token-by-token,
outputs uint16 LE binary playable by sidgpt-play.
"""
import argparse
import os
import struct
import sys
os.environ["TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL"] = "1"
import numpy as np
import torch
from model import ModelConfig, Transformer
TOKEN_SEP = 256        # separator; the unconditional prompt is one frame of these
TOKEN_FRAME = 257      # marks the start of a data frame
TOKENS_PER_FRAME = 26  # frame marker + register values
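
# Assumed frame layout (not stated in this file): a data frame is one
# TOKEN_FRAME marker followed by 25 byte values (0-255), which would
# match the 25 writable SID registers ($D400-$D418):
#
#   [TOKEN_FRAME, reg_0, reg_1, ..., reg_24]
#
# Only the marker and size constants above are guaranteed by this script.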


def load_checkpoint(path, device):
    # weights_only=False: the checkpoint pickles a config object next to
    # the state dict, so torch's weights-only loader cannot be used.
    ckpt = torch.load(
        path, map_location=device, weights_only=False
    )
config = ckpt["config"]
model = Transformer(config).to(device)
model.load_state_dict(ckpt["model"])
model.eval()
return model, config


def build_prompt(seed_file, num_seed_frames, device):
    """
    Build the prompt tensor. Unconditional generation starts from a
    single SEP frame; style-seeded generation appends up to
    num_seed_frames data frames read from a .bin token file.
    """
prompt = [TOKEN_SEP] * TOKENS_PER_FRAME
if seed_file is not None:
        # Explicit little-endian dtype, matching the "<H" format that
        # write_output emits (np.uint16 would be platform-endian).
        raw = np.fromfile(seed_file, dtype="<u2")
tokens = raw.tolist()
# Find first data frame after initial SEP frames
pos = 0
while pos < len(tokens):
if tokens[pos] != TOKEN_SEP:
break
pos += 1
frames_added = 0
while pos < len(tokens) and frames_added < num_seed_frames:
            if tokens[pos] == TOKEN_SEP:
                # Skip a whole SEP frame (assumed frame-aligned here).
                pos += TOKENS_PER_FRAME
                continue
if tokens[pos] == TOKEN_FRAME:
end = pos + TOKENS_PER_FRAME
if end <= len(tokens):
prompt.extend(tokens[pos:end])
frames_added += 1
pos = end
else:
pos += 1
print(f"[SEED] {frames_added} frames from {seed_file}")
return torch.tensor([prompt], dtype=torch.long, device=device)


def _reset_kv_cache(model):
    """Drop each block's KV cache so the next forward pass re-prefills."""
    for block in model.blocks:
        block.attn.cache_k = None
        block.attn.cache_v = None


def _sample_token(next_logits, temperature, top_k):
"""Sample one token from logits with temp + top-k."""
if temperature <= 0:
return torch.argmax(next_logits, dim=-1, keepdim=True)
scaled = next_logits / temperature
if top_k > 0 and top_k < scaled.shape[-1]:
v, _ = torch.topk(scaled, top_k)
threshold = v[:, -1].unsqueeze(-1)
scaled = scaled.masked_fill(
scaled < threshold, float("-inf")
)
probs = torch.softmax(scaled, dim=-1)
return torch.multinomial(probs, num_samples=1)
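
# Worked example (illustrative values, not from this repo): for logits
# [2.0, 1.0, 0.5] and top_k=2, the 0.5 entry is masked to -inf, so
# sampling can only ever return index 0 or 1; temperature <= 0
# short-circuits to greedy argmax above.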


@torch.no_grad()
def generate(
model,
prompt,
num_tokens,
temperature=1.0,
top_k=50,
device="cpu",
):
"""
Autoregressive generation with KV-cache and sliding
window. When the cache fills up (cur_pos == block_size),
keeps the last 75% of tokens (frame-aligned), resets
the cache, re-prefills, and continues. RoPE encodes
relative positions so resetting absolute pos is safe.
"""
block_size = model.config.block_size
keep_ratio = 0.75
keep_len = int(block_size * keep_ratio)
keep_len = (keep_len // TOKENS_PER_FRAME) * TOKENS_PER_FRAME
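    # e.g. with block_size=2048 (illustrative; the real value comes from
    # the checkpoint config): int(2048 * 0.75) = 1536, floored to the
    # frame boundary 59 * 26 = 1534.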
prompt_list = prompt[0].tolist()
all_tokens = list(prompt_list)
if len(prompt_list) > block_size:
print(
f"[WARN] Prompt ({len(prompt_list)}) exceeds "
f"block_size ({block_size}), truncating"
)
prompt_list = prompt_list[-block_size:]
all_tokens = list(prompt_list)
    # Prefill: run the full prompt through the model once to populate
    # the KV cache; sampling starts from the final position's logits.
inp = torch.tensor(
[prompt_list], dtype=torch.long, device=device
)
logits, _ = model(inp, start_pos=0)
next_logits = logits[:, -1, :]
cur_pos = len(prompt_list)
slide_count = 0
generated = []
for i in range(num_tokens):
# Sliding window: reset cache when full
if cur_pos >= block_size:
slide_count += 1
window = all_tokens[-keep_len:]
_reset_kv_cache(model)
inp = torch.tensor(
[window], dtype=torch.long, device=device
)
logits, _ = model(inp, start_pos=0)
next_logits = logits[:, -1, :]
cur_pos = keep_len
print(
f"[SLIDE] #{slide_count} at token {i}, "
f"kept {keep_len} tokens, "
f"generated {len(generated)} so far"
)
idx = _sample_token(next_logits, temperature, top_k)
tok = idx.item()
generated.append(tok)
all_tokens.append(tok)
# Decode step with KV-cache
logits, _ = model(idx, start_pos=cur_pos)
next_logits = logits[:, -1, :]
cur_pos += 1
return generated


def write_output(tokens, output_path):
    """Write uint16 LE binary ("-" = stdout), playable by sidgpt-play."""
data = struct.pack(f"<{len(tokens)}H", *tokens)
if output_path == "-":
sys.stdout.buffer.write(data)
else:
with open(output_path, "wb") as f:
f.write(data)
print(f"[OUT] Wrote {len(tokens)} tokens to {output_path}")


def main():
parser = argparse.ArgumentParser(
description="SID-GPT v2 generation"
)
parser.add_argument(
"--checkpoint", type=str, required=True,
)
parser.add_argument("--num-frames", type=int, default=500)
parser.add_argument("--temperature", type=float, default=0.9)
parser.add_argument("--top-k", type=int, default=50)
parser.add_argument("--seed", type=int, default=None)
parser.add_argument(
"--output", type=str, default="generated.bin"
)
parser.add_argument("--device", type=str, default="auto")
parser.add_argument("--seed-file", type=str, default=None)
parser.add_argument(
"--seed-frames", type=int, default=10,
help="Number of frames to use from seed file",
)
args = parser.parse_args()
if args.device == "auto":
if torch.cuda.is_available():
device = "cuda"
elif (
hasattr(torch.backends, "mps")
and torch.backends.mps.is_available()
):
device = "mps"
else:
device = "cpu"
else:
device = args.device
if args.seed is not None:
torch.manual_seed(args.seed)
print(f"[INIT] Device: {device}")
model, config = load_checkpoint(args.checkpoint, device)
print(
f"[MODEL] {config.n_layer}L/{config.n_head}H/"
f"{config.n_embd}D, "
f"{model.count_params():,} params"
)
prompt = build_prompt(args.seed_file, args.seed_frames, device)
prompt_tokens = prompt.shape[1]
num_tokens = args.num_frames * TOKENS_PER_FRAME
print(
f"[GEN] Prompt: {prompt_tokens} tokens, "
f"generating {num_tokens} tokens "
f"({args.num_frames} frames)"
)
generated = generate(
model,
prompt,
num_tokens,
temperature=args.temperature,
top_k=args.top_k,
device=device,
)
all_tokens = prompt[0].tolist() + generated
write_output(all_tokens, args.output)
# Stats
n_sep = sum(1 for t in all_tokens if t == TOKEN_SEP)
n_frame = sum(1 for t in all_tokens if t == TOKEN_FRAME)
n_data = sum(1 for t in all_tokens if t < 256)
print(
f"[STATS] Total: {len(all_tokens)} tokens "
f"(SEP={n_sep}, FRAME={n_frame}, data={n_data})"
)


if __name__ == "__main__":
main()