| import torch | |
| from safetensors.torch import load_file | |
def load_model(path='model.safetensors'):
    """Load pretrained decoder weights from a safetensors checkpoint.

    Returns the flat tensor dict produced by ``safetensors.torch.load_file``.
    """
    weights = load_file(path)
    return weights
| def decode_4to16(a3, a2, a1, a0, weights): | |
| """4-to-16 decoder: converts 4-bit binary to one-hot 16-bit output.""" | |
| inp = torch.tensor([float(a3), float(a2), float(a1), float(a0)]) | |
| outputs = [] | |
| for i in range(16): | |
| y = int((inp * weights[f'y{i}.weight']).sum() + weights[f'y{i}.bias'] >= 0) | |
| outputs.append(y) | |
| return outputs | |
if __name__ == '__main__':
    w = load_model()
    print('4-to-16 Decoder')
    for val in range(16):
        # Extract the four input bits, MSB first.
        a3, a2, a1, a0 = [(val >> shift) & 1 for shift in (3, 2, 1, 0)]
        outputs = decode_4to16(a3, a2, a1, a0, w)
        print(f" {val:2d} ({a3}{a2}{a1}{a0}) -> {''.join(map(str, outputs))}")