# How to run Hyperion-DeepSpace-218M
# pip install torch transformers
import torch
from transformers import GPT2TokenizerFast
# The SuperLearningLM class definition from the original training script must
# be available (imported or pasted above this point) before the checkpoint
# (.pth file) can be loaded.
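# Minimal loading sketch: the checkpoint filename and the SuperLearningLM
# constructor arguments below are placeholders, so match them to the
# training script before running.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SuperLearningLM()  # instantiate with the same config used for training
state_dict = torch.load("hyperion_deepspace_218m.pth", map_location=device)  # placeholder filename
model.load_state_dict(state_dict)
model.to(device)
model.eval()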
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
# Generation helper. Assumes SuperLearningLM exposes a generate() method that
# accepts max_length and temperature, as in the original training code.
def chat(prompt, max_length=200, temperature=0.8):
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(input_ids, max_length=max_length, temperature=temperature)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Example
print(chat("Once upon a time"))