Update README.md

Browse files

Commit message: Removed incorrect usability code.

README.md — CHANGED
|
@@ -94,32 +94,6 @@ The model was trained in three phases using the [nanochat](https://github.com/ka
 
 This model requires the `nanochat` library to run, as it uses a custom architecture implementation optimized for educational clarity and hackability.
 
-```python
-import torch
-from nanochat.gpt import GPT, GPTConfig
-from nanochat.tokenizer import RustBPETokenizer
-
-# 1. Load Configuration
-# (Ensure you have meta.json and tokenizer.pkl downloaded)
-import json
-with open("meta.json", "r") as f:
-    config = json.load(f)["model_config"]
-
-# 2. Initialize Model
-model = GPT(GPTConfig(**config))
-
-# 3. Load Weights
-sd = torch.load("model.pt", map_location="cpu", weights_only=True)
-# Clean up compilation prefixes if present
-sd = {k.replace("_orig_mod.", ""): v for k, v in sd.items()}
-model.load_state_dict(sd)
-model.eval()
-
-# 4. Generate
-# ... (Requires tokenizer loading and Engine setup, see app.py in the Space)
-```
-
-
 ## License
 
 MIT
Resulting file (lines 94-99):

This model requires the `nanochat` library to run, as it uses a custom architecture implementation optimized for educational clarity and hackability.

## License

MIT