#!/usr/bin/env python3
"""Feed pure Gaussian noise through GPT-2 and print the decoded "debris".

There is no prompt: the model receives random input embeddings, the
output logits are then swamped with additional Gaussian noise (~10x
their own std), and the greedy-argmax tokens are decoded and printed.
The time-derived seed is printed so a given run can be reproduced.
"""
import time

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def main() -> None:
    # Nanosecond wall-clock seed: different debris every run, but
    # reproducible because the seed is echoed in the output below.
    seed = time.time_ns()
    torch.manual_seed(seed)

    model = GPT2LMHeadModel.from_pretrained("gpt2")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model.eval()

    # Random "embeddings": batch of 1, 16 positions. Hidden size comes
    # from the model config instead of hard-coding GPT-2's 768, so a
    # different checkpoint would still produce valid input.
    seq_len = 16
    noise = torch.randn(1, seq_len, model.config.n_embd)

    with torch.no_grad():
        logits = model(inputs_embeds=noise).logits

    # Corrupt the logits with noise scaled to 10x their own std, then
    # greedy-decode. squeeze(0) (not squeeze()) so that a seq_len of 1
    # would still give a list from tolist(), not a bare int.
    corrupted_logits = logits + torch.randn_like(logits) * logits.std() * 10
    indices = corrupted_logits.argmax(dim=-1).squeeze(0).tolist()
    debris = [tokenizer.decode([i]) for i in indices]

    print(f"[System State]: Injecting Entropy (Seed: {seed})")
    print("[Raw Debris]:")
    print(" ".join(debris))


if __name__ == "__main__":
    main()