backup phase9 snapshot 20260311_105528
Browse files- backups/20260311_105528/files/checkpoints.sha256 +4 -0
- backups/20260311_105528/files/checkpoints/newtie_v5_p6_step1660.pt +3 -0
- backups/20260311_105528/files/checkpoints/newtie_v5_p7_step300.pt +3 -0
- backups/20260311_105528/files/checkpoints/newtie_v5_p8_step320.pt +3 -0
- backups/20260311_105528/files/checkpoints/newtie_v5_p9_step50.pt +3 -0
- backups/20260311_105528/files/logs/koyeb_phase9_watchdog.log +189 -0
- backups/20260311_105528/files/train.py +1561 -0
- backups/20260311_105528/manifest.json +15 -0
backups/20260311_105528/files/checkpoints.sha256
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
eada09a77af8d0a96ef4a5a9db7c806a357272de079dd36a604a53d80f96cb4b /tmp/newtie_checkpoint_backup_20260311_105528/files/checkpoints/newtie_v5_p6_step1660.pt
|
| 2 |
+
460c7072fd7c315dc12219a9f4102dce56fa82579ec6c3a2645645a45204be12 /tmp/newtie_checkpoint_backup_20260311_105528/files/checkpoints/newtie_v5_p7_step300.pt
|
| 3 |
+
43ee610b73d80dd90abb55450d4815d4728fb943f8d50c221b40ad4cef5e59da /tmp/newtie_checkpoint_backup_20260311_105528/files/checkpoints/newtie_v5_p8_step320.pt
|
| 4 |
+
70295e49221e19b15d7099edb53e037724e95fd8253231ef5945bca9aab80f46 /tmp/newtie_checkpoint_backup_20260311_105528/files/checkpoints/newtie_v5_p9_step50.pt
|
backups/20260311_105528/files/checkpoints/newtie_v5_p6_step1660.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eada09a77af8d0a96ef4a5a9db7c806a357272de079dd36a604a53d80f96cb4b
|
| 3 |
+
size 178344960
|
backups/20260311_105528/files/checkpoints/newtie_v5_p7_step300.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:460c7072fd7c315dc12219a9f4102dce56fa82579ec6c3a2645645a45204be12
|
| 3 |
+
size 191923200
|
backups/20260311_105528/files/checkpoints/newtie_v5_p8_step320.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:43ee610b73d80dd90abb55450d4815d4728fb943f8d50c221b40ad4cef5e59da
|
| 3 |
+
size 180172800
|
backups/20260311_105528/files/checkpoints/newtie_v5_p9_step50.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70295e49221e19b15d7099edb53e037724e95fd8253231ef5945bca9aab80f46
|
| 3 |
+
size 121143296
|
backups/20260311_105528/files/logs/koyeb_phase9_watchdog.log
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 2 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 3 |
+
[watchdog] startup notify: ok=True detail=http_200
|
| 4 |
+
[watchdog] state=training step=170/360 loss=1.612175 best=1.488463
|
| 5 |
+
[watchdog] notify step=170 ok=True detail=http_200
|
| 6 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 7 |
+
[watchdog] startup notify: ok=True detail=http_200
|
| 8 |
+
[watchdog] state=training step=170/360 loss=1.612175 best=1.488463
|
| 9 |
+
[watchdog] notify step=170 ok=True detail=http_200
|
| 10 |
+
[watchdog] state=training step=171/360 loss=1.808503 best=1.488463
|
| 11 |
+
[watchdog] state=training step=172/360 loss=1.478135 best=1.478135
|
| 12 |
+
[watchdog] state=training step=173/360 loss=1.998468 best=1.488463
|
| 13 |
+
[watchdog] state=training step=174/360 loss=1.922345 best=1.488463
|
| 14 |
+
[watchdog] state=training step=175/360 loss=1.790282 best=1.488463
|
| 15 |
+
[watchdog] state=training step=176/360 loss=1.652472 best=1.488463
|
| 16 |
+
[watchdog] state=training step=177/360 loss=1.666387 best=1.488463
|
| 17 |
+
[watchdog] state=training step=178/360 loss=1.720078 best=1.488463
|
| 18 |
+
[watchdog] state=training step=179/360 loss=1.67226 best=1.488463
|
| 19 |
+
[watchdog] state=training step=180/360 loss=1.610854 best=1.488463
|
| 20 |
+
[watchdog] state=training step=181/360 loss=1.81897 best=1.488463
|
| 21 |
+
[watchdog] state=training step=182/360 loss=1.689261 best=1.488463
|
| 22 |
+
[watchdog] state=training step=183/360 loss=1.408932 best=1.408932
|
| 23 |
+
[watchdog] state=training step=184/360 loss=1.545652 best=1.488463
|
| 24 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 25 |
+
[watchdog] startup notify: ok=True detail=http_200
|
| 26 |
+
[watchdog] state=training step=184/360 loss=1.545652 best=1.488463
|
| 27 |
+
[watchdog] state=training step=185/360 loss=1.589464 best=1.488463
|
| 28 |
+
[watchdog] state=training step=186/360 loss=1.715149 best=1.488463
|
| 29 |
+
[watchdog] state=training step=187/360 loss=1.57981 best=1.488463
|
| 30 |
+
[watchdog] state=training step=188/360 loss=1.942953 best=1.488463
|
| 31 |
+
[watchdog] state=training step=189/360 loss=1.568122 best=1.488463
|
| 32 |
+
[watchdog] state=training step=190/360 loss=1.757539 best=1.488463
|
| 33 |
+
[watchdog] state=training step=191/360 loss=1.537299 best=1.488463
|
| 34 |
+
[watchdog] state=training step=192/360 loss=1.969033 best=1.488463
|
| 35 |
+
[watchdog] state=training step=193/360 loss=1.568621 best=1.488463
|
| 36 |
+
[watchdog] state=training step=194/360 loss=1.607412 best=1.488463
|
| 37 |
+
[watchdog] state=training step=195/360 loss=1.601118 best=1.488463
|
| 38 |
+
[watchdog] state=training step=196/360 loss=1.869775 best=1.488463
|
| 39 |
+
[watchdog] state=training step=197/360 loss=1.650993 best=1.488463
|
| 40 |
+
[watchdog] state=training step=198/360 loss=1.553282 best=1.488463
|
| 41 |
+
[watchdog] state=training step=199/360 loss=1.582979 best=1.488463
|
| 42 |
+
[watchdog] state=training step=200/360 loss=1.712442 best=1.488463
|
| 43 |
+
[watchdog] notify bucket=7 step=184 ok=True detail=http_200
|
| 44 |
+
[watchdog] state=training step=201/360 loss=1.441567 best=1.441567
|
| 45 |
+
[watchdog] state=training step=202/360 loss=1.669093 best=1.488463
|
| 46 |
+
[watchdog] state=training step=203/360 loss=1.723409 best=1.488463
|
| 47 |
+
[watchdog] state=training step=204/360 loss=1.645327 best=1.488463
|
| 48 |
+
[watchdog] state=training step=205/360 loss=1.761128 best=1.488463
|
| 49 |
+
[watchdog] state=training step=206/360 loss=1.586243 best=1.488463
|
| 50 |
+
[watchdog] fetch error: ^D❌ Error listing services to resolve the provided identifier to an object ID: the CLI was unable to query the Koyeb API because of an issue on your machine or in your configuration
|
| 51 |
+
|
| 52 |
+
🏥 How to solve the issue?
|
| 53 |
+
Fix your configuration and try again
|
| 54 |
+
|
| 55 |
+
🕦 The original error was:
|
| 56 |
+
Get "https://app.koyeb.com/v1/services?limit=100&offset=0": dial tcp: lookup app.koyeb.com: no such host
|
| 57 |
+
[watchdog] state=training step=207/360 loss=1.601974 best=1.488463
|
| 58 |
+
[watchdog] state=training step=208/360 loss=1.505801 best=1.488463
|
| 59 |
+
[watchdog] state=training step=209/360 loss=1.848183 best=1.488463
|
| 60 |
+
[watchdog] state=training step=210/360 loss=1.633552 best=1.488463
|
| 61 |
+
[watchdog] state=training step=211/360 loss=1.80079 best=1.488463
|
| 62 |
+
[watchdog] state=training step=212/360 loss=1.696399 best=1.488463
|
| 63 |
+
[watchdog] state=training step=213/360 loss=1.765791 best=1.488463
|
| 64 |
+
[watchdog] state=training step=214/360 loss=1.419518 best=1.419518
|
| 65 |
+
[watchdog] state=training step=215/360 loss=1.673599 best=1.488463
|
| 66 |
+
[watchdog] state=training step=216/360 loss=1.971041 best=1.488463
|
| 67 |
+
[watchdog] state=training step=217/360 loss=1.543152 best=1.488463
|
| 68 |
+
[watchdog] state=training step=218/360 loss=1.733779 best=1.488463
|
| 69 |
+
[watchdog] state=training step=219/360 loss=1.642925 best=1.488463
|
| 70 |
+
[watchdog] state=training step=220/360 loss=1.613036 best=1.488463
|
| 71 |
+
[watchdog] state=training step=221/360 loss=1.471104 best=1.471104
|
| 72 |
+
[watchdog] state=training step=222/360 loss=1.748332 best=1.488463
|
| 73 |
+
[watchdog] state=training step=223/360 loss=1.647199 best=1.488463
|
| 74 |
+
[watchdog] state=training step=224/360 loss=1.758446 best=1.488463
|
| 75 |
+
[watchdog] state=training step=225/360 loss=1.560556 best=1.488463
|
| 76 |
+
[watchdog] notify bucket=8 step=214 ok=True detail=http_200
|
| 77 |
+
[watchdog] state=training step=226/360 loss=1.696947 best=1.488463
|
| 78 |
+
[watchdog] state=training step=227/360 loss=1.567105 best=1.488463
|
| 79 |
+
[watchdog] state=training step=228/360 loss=1.715446 best=1.488463
|
| 80 |
+
[watchdog] state=training step=229/360 loss=1.659889 best=1.488463
|
| 81 |
+
[watchdog] state=training step=230/360 loss=1.670879 best=1.488463
|
| 82 |
+
[watchdog] state=training step=231/360 loss=1.545578 best=1.488463
|
| 83 |
+
[watchdog] state=training step=232/360 loss=1.720644 best=1.488463
|
| 84 |
+
[watchdog] state=training step=233/360 loss=2.049204 best=1.488463
|
| 85 |
+
[watchdog] state=training step=234/360 loss=2.144113 best=1.488463
|
| 86 |
+
[watchdog] state=training step=235/360 loss=1.714216 best=1.488463
|
| 87 |
+
[watchdog] state=training step=236/360 loss=2.169509 best=1.488463
|
| 88 |
+
[watchdog] state=training step=237/360 loss=1.610534 best=1.488463
|
| 89 |
+
[watchdog] state=training step=238/360 loss=1.956069 best=1.488463
|
| 90 |
+
[watchdog] state=training step=239/360 loss=1.603848 best=1.488463
|
| 91 |
+
[watchdog] state=training step=240/360 loss=2.006512 best=1.488463
|
| 92 |
+
[watchdog] state=training step=241/360 loss=1.784127 best=1.488463
|
| 93 |
+
[watchdog] state=training step=242/360 loss=1.476042 best=1.476042
|
| 94 |
+
[watchdog] state=training step=243/360 loss=1.563302 best=1.488463
|
| 95 |
+
[watchdog] state=training step=244/360 loss=2.105733 best=1.488463
|
| 96 |
+
[watchdog] state=training step=245/360 loss=1.779902 best=1.488463
|
| 97 |
+
[watchdog] state=training step=246/360 loss=1.558598 best=1.488463
|
| 98 |
+
[watchdog] state=training step=247/360 loss=1.689218 best=1.488463
|
| 99 |
+
[watchdog] state=training step=248/360 loss=1.904245 best=1.488463
|
| 100 |
+
[watchdog] state=training step=249/360 loss=1.505913 best=1.488463
|
| 101 |
+
[watchdog] state=training step=250/360 loss=1.501688 best=1.488463
|
| 102 |
+
[watchdog] state=training step=251/360 loss=1.707498 best=1.488463
|
| 103 |
+
[watchdog] state=training step=252/360 loss=1.421911 best=1.421911
|
| 104 |
+
[watchdog] state=training step=251/2600 loss=10.552023 best=10.552023
|
| 105 |
+
[watchdog] state=training step=252/2600 loss=2.721567 best=2.721567
|
| 106 |
+
[watchdog] state=training step=253/2600 loss=1.967193 best=1.967193
|
| 107 |
+
[watchdog] state=training step=254/2600 loss=2.396918 best=2.396918
|
| 108 |
+
[watchdog] state=training step=255/2600 loss=2.335386 best=2.335386
|
| 109 |
+
[watchdog] state=training step=256/2600 loss=2.278813 best=2.278813
|
| 110 |
+
[watchdog] state=training step=257/2600 loss=2.340992 best=2.340992
|
| 111 |
+
[watchdog] state=training step=258/2600 loss=2.376833 best=2.376833
|
| 112 |
+
[watchdog] state=training step=259/2600 loss=2.342246 best=2.342246
|
| 113 |
+
[watchdog] state=training step=260/2600 loss=2.560757 best=2.560757
|
| 114 |
+
[watchdog] fetch error: ^DGet "https://api.github.com/repos/koyeb/koyeb-cli/releases": dial tcp: lookup api.github.com: no such host
|
| 115 |
+
❌ Error listing services to resolve the provided identifier to an object ID: the CLI was unable to query the Koyeb API because of an issue on your machine or in your configuration
|
| 116 |
+
|
| 117 |
+
🏥 How to solve the issue?
|
| 118 |
+
Fix your configuration and try again
|
| 119 |
+
|
| 120 |
+
🕦 The original error was:
|
| 121 |
+
Get "https://app.koyeb.com/v1/services?limit=100&offset=0": dial tcp: lookup app.koyeb.com: no such host
|
| 122 |
+
[watchdog] state=training step=261/2600 loss=2.465716 best=2.465716
|
| 123 |
+
[watchdog] state=training step=262/2600 loss=2.633485 best=2.633485
|
| 124 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 125 |
+
[watchdog] startup notify: ok=True detail=http_200
|
| 126 |
+
[watchdog] state=training step=262/2600 loss=2.633485 best=2.633485
|
| 127 |
+
[watchdog] state=training step=263/2600 loss=2.060942 best=2.060942
|
| 128 |
+
[watchdog] service=newtie-v5/tt-ssh interval=25 poll=20s ntfy=https://ntfy.sh/newtie-phase9-watchdog
|
| 129 |
+
[watchdog] startup notify: ok=True detail=http_200
|
| 130 |
+
[watchdog] state=training step=263/2600 loss=2.060942 best=2.060942
|
| 131 |
+
[watchdog] state=training step=264/2600 loss=2.619503 best=2.619503
|
| 132 |
+
[watchdog] state=training step=265/2600 loss=2.185474 best=2.185474
|
| 133 |
+
[watchdog] state=training step=266/2600 loss=2.531834 best=2.531834
|
| 134 |
+
[watchdog] state=training step=267/2600 loss=2.216286 best=2.216286
|
| 135 |
+
[watchdog] state=training step=268/2600 loss=2.10495 best=2.10495
|
| 136 |
+
[watchdog] state=training step=269/2600 loss=2.196869 best=2.196869
|
| 137 |
+
[watchdog] state=training step=270/2600 loss=2.186647 best=2.186647
|
| 138 |
+
[watchdog] state=training step=271/2600 loss=2.058746 best=2.058746
|
| 139 |
+
[watchdog] state=training step=272/2600 loss=2.190154 best=2.190154
|
| 140 |
+
[watchdog] state=training step=273/2600 loss=2.076107 best=2.076107
|
| 141 |
+
[watchdog] state=training step=274/2600 loss=2.463736 best=2.463736
|
| 142 |
+
[watchdog] state=training step=275/2600 loss=2.089918 best=2.089918
|
| 143 |
+
[watchdog] notify bucket=10 step=271 ok=True detail=http_200
|
| 144 |
+
[watchdog] state=training step=276/2600 loss=2.145116 best=2.145116
|
| 145 |
+
[watchdog] state=training step=277/2600 loss=1.952256 best=1.952256
|
| 146 |
+
[watchdog] state=training step=278/2600 loss=2.71554 best=2.71554
|
| 147 |
+
[watchdog] state=training step=279/2600 loss=2.065405 best=2.065405
|
| 148 |
+
[watchdog] state=training step=280/2600 loss=2.197073 best=2.197073
|
| 149 |
+
[watchdog] state=training step=281/2600 loss=2.09925 best=2.09925
|
| 150 |
+
[watchdog] state=training step=282/2600 loss=2.072892 best=2.072892
|
| 151 |
+
[watchdog] state=training step=283/2600 loss=2.102325 best=2.102325
|
| 152 |
+
[watchdog] state=training step=284/2600 loss=2.018738 best=2.018738
|
| 153 |
+
[watchdog] state=training step=285/2600 loss=2.251856 best=2.251856
|
| 154 |
+
[watchdog] state=training step=286/2600 loss=2.177423 best=2.177423
|
| 155 |
+
[watchdog] state=training step=287/2600 loss=2.223401 best=2.223401
|
| 156 |
+
[watchdog] state=training step=288/2600 loss=2.375378 best=2.375378
|
| 157 |
+
[watchdog] state=training step=289/2600 loss=2.143717 best=2.143717
|
| 158 |
+
[watchdog] state=training step=290/2600 loss=2.173659 best=2.173659
|
| 159 |
+
[watchdog] state=training step=291/2600 loss=1.918152 best=1.918152
|
| 160 |
+
[watchdog] state=training step=292/2600 loss=1.970492 best=1.970492
|
| 161 |
+
[watchdog] state=training step=293/2600 loss=1.849398 best=1.849398
|
| 162 |
+
[watchdog] state=training step=294/2600 loss=2.211296 best=2.211296
|
| 163 |
+
[watchdog] state=training step=295/2600 loss=1.990625 best=1.990625
|
| 164 |
+
[watchdog] state=training step=296/2600 loss=2.203358 best=2.203358
|
| 165 |
+
[watchdog] state=training step=297/2600 loss=2.312794 best=2.312794
|
| 166 |
+
[watchdog] state=training step=298/2600 loss=2.094307 best=2.094307
|
| 167 |
+
[watchdog] state=training step=299/2600 loss=1.979491 best=1.979491
|
| 168 |
+
[watchdog] state=training step=300/2600 loss=1.866587 best=1.866587
|
| 169 |
+
[watchdog] notify bucket=11 step=293 ok=True detail=http_200
|
| 170 |
+
[watchdog] state=training step=301/2600 loss=1.840695 best=1.840695
|
| 171 |
+
[watchdog] state=training step=302/2600 loss=1.869256 best=1.866587
|
| 172 |
+
[watchdog] state=training step=303/2600 loss=1.936824 best=1.866587
|
| 173 |
+
[watchdog] state=training step=304/2600 loss=2.16803 best=1.866587
|
| 174 |
+
[watchdog] state=training step=305/2600 loss=2.279149 best=1.866587
|
| 175 |
+
[watchdog] state=training step=306/2600 loss=2.042783 best=1.866587
|
| 176 |
+
[watchdog] state=training step=307/2600 loss=1.950344 best=1.866587
|
| 177 |
+
[watchdog] state=training step=308/2600 loss=2.212778 best=1.866587
|
| 178 |
+
[watchdog] state=training step=309/2600 loss=1.807868 best=1.807868
|
| 179 |
+
[watchdog] state=training step=310/2600 loss=2.098982 best=1.866587
|
| 180 |
+
[watchdog] state=training step=311/2600 loss=1.951648 best=1.866587
|
| 181 |
+
[watchdog] state=training step=312/2600 loss=1.796994 best=1.796994
|
| 182 |
+
[watchdog] state=training step=313/2600 loss=2.138237 best=1.866587
|
| 183 |
+
[watchdog] state=training step=314/2600 loss=1.951604 best=1.866587
|
| 184 |
+
[watchdog] state=training step=315/2600 loss=2.104002 best=1.866587
|
| 185 |
+
[watchdog] state=training step=316/2600 loss=1.914197 best=1.866587
|
| 186 |
+
[watchdog] state=training step=317/2600 loss=2.023874 best=1.866587
|
| 187 |
+
[watchdog] state=training step=318/2600 loss=2.161247 best=1.866587
|
| 188 |
+
[watchdog] state=training step=319/2600 loss=1.779523 best=1.779523
|
| 189 |
+
[watchdog] state=training step=320/2600 loss=2.093385 best=1.866587
|
backups/20260311_105528/files/train.py
ADDED
|
@@ -0,0 +1,1561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Newtie V5 — Training Script (4× N300S Pipeline Parallel)
|
| 3 |
+
==========================================================
|
| 4 |
+
|
| 5 |
+
Phase 1: Cognitive Depth — train on local SFT data (~12.5K entries).
|
| 6 |
+
Focus: deep reasoning, metacognition, identity — NOT memorization.
|
| 7 |
+
|
| 8 |
+
Pipeline Parallel across 8 Wormhole chips:
|
| 9 |
+
- 32 layers / 8 chips = 4 layers per chip
|
| 10 |
+
- 64 micro-batches → 89% pipeline efficiency (1F1B schedule)
|
| 11 |
+
- MoE experts L1-pinned on Tensix cores (zero DRAM reads)
|
| 12 |
+
- STDP/Cerebrix as native Tensix compute kernels
|
| 13 |
+
- tt-topology mesh must be configured first (entrypoint.sh)
|
| 14 |
+
|
| 15 |
+
Falls back to: single N300 → CUDA → CPU
|
| 16 |
+
|
| 17 |
+
Data sources (local, Phase 1):
|
| 18 |
+
- sft_cognitive_depth.jsonl (8K entries — identity, philosophy, reasoning)
|
| 19 |
+
- deep_synthesis patterns (1.2K — cross-domain reasoning chains)
|
| 20 |
+
- waveflow2 SFT pairs (2.4K — scientific reasoning)
|
| 21 |
+
- cognitive scenarios (50 — causal reasoning, chat format)
|
| 22 |
+
- SALA SFT (37 — tool-use trajectories)
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
import os
|
| 26 |
+
import sys
|
| 27 |
+
import json
|
| 28 |
+
import time
|
| 29 |
+
import math
|
| 30 |
+
import glob
|
| 31 |
+
import random
|
| 32 |
+
import argparse
|
| 33 |
+
import signal
|
| 34 |
+
import urllib.error
|
| 35 |
+
import urllib.request
|
| 36 |
+
from pathlib import Path
|
| 37 |
+
from dataclasses import asdict
|
| 38 |
+
from collections import OrderedDict
|
| 39 |
+
from typing import Dict, List, Optional
|
| 40 |
+
|
| 41 |
+
# ═══════════════════════════════════════════════════════════════
|
| 42 |
+
# CPU THREADING
|
| 43 |
+
#
|
| 44 |
+
# Must be set BEFORE importing torch.
|
| 45 |
+
# Default behavior on N300: run matmuls on Wormhole chips via TTLinear.
|
| 46 |
+
# Set `--cpu-only` or env `NEWTIE_CPU_ONLY=1` to disable chip acceleration.
|
| 47 |
+
# ═══════════════════════════════════════════════════════════════
|
| 48 |
+
_CPU_ONLY = os.environ.get("NEWTIE_CPU_ONLY", "0") == "1"
|
| 49 |
+
_PERF_PROFILE = os.environ.get("NEWTIE_TT_PERF_PROFILE", "balanced").strip().lower()
|
| 50 |
+
_CPU_CAPACITY = os.cpu_count() or 8
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _parse_int_env(name: str, default: int, minimum: int = 1, maximum: Optional[int] = None) -> int:
|
| 54 |
+
raw = os.environ.get(name)
|
| 55 |
+
if raw is None:
|
| 56 |
+
val = default
|
| 57 |
+
else:
|
| 58 |
+
try:
|
| 59 |
+
val = int(raw)
|
| 60 |
+
except (TypeError, ValueError):
|
| 61 |
+
print(f"[THREADS] invalid {name}={raw!r} -> fallback {default}")
|
| 62 |
+
val = default
|
| 63 |
+
if val < minimum:
|
| 64 |
+
val = minimum
|
| 65 |
+
if maximum is not None and val > maximum:
|
| 66 |
+
val = maximum
|
| 67 |
+
return val
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _parse_bool_env(name: str, default: bool = False) -> bool:
|
| 71 |
+
raw = os.environ.get(name)
|
| 72 |
+
if raw is None:
|
| 73 |
+
return default
|
| 74 |
+
return str(raw).strip().lower() in {"1", "true", "on", "yes", "y"}
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def _parse_float_env(
|
| 78 |
+
name: str,
|
| 79 |
+
default: float,
|
| 80 |
+
minimum: Optional[float] = None,
|
| 81 |
+
maximum: Optional[float] = None,
|
| 82 |
+
) -> float:
|
| 83 |
+
raw = os.environ.get(name)
|
| 84 |
+
if raw is None:
|
| 85 |
+
val = default
|
| 86 |
+
else:
|
| 87 |
+
try:
|
| 88 |
+
val = float(raw)
|
| 89 |
+
except (TypeError, ValueError):
|
| 90 |
+
print(f"[ENV] invalid {name}={raw!r} -> fallback {default}")
|
| 91 |
+
val = default
|
| 92 |
+
if minimum is not None and val < minimum:
|
| 93 |
+
val = minimum
|
| 94 |
+
if maximum is not None and val > maximum:
|
| 95 |
+
val = maximum
|
| 96 |
+
return val
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _get_perf_profile_defaults(cpu_capacity: int) -> Dict[str, int | bool]:
    """Resolve host-thread / dataloader defaults for the active perf profile.

    Reads module globals `_PERF_PROFILE` (profile name) and `_CPU_ONLY`
    (which table to use). Unknown profile names fall back to "balanced"
    with a single warning.

    Args:
        cpu_capacity: usable host core count, used by the CPU-only profiles.

    Returns:
        Dict with keys "ncpu", "dataloader_workers", "dataloader_prefetch",
        "dataloader_persist_workers".
    """
    # Chip-accelerated runs: keep host threads modest — the chips do the matmuls.
    tt_profiles = {
        "safe": {
            "ncpu": 16,
            "dataloader_workers": 2,
            "dataloader_prefetch": 2,
            "dataloader_persist_workers": True,
        },
        "balanced": {
            "ncpu": 24,
            "dataloader_workers": 4,
            "dataloader_prefetch": 3,
            "dataloader_persist_workers": True,
        },
        "throughput": {
            "ncpu": 28,
            "dataloader_workers": 6,
            "dataloader_prefetch": 4,
            "dataloader_persist_workers": True,
        },
    }
    # CPU-only runs: compute happens on the host, so use every available core.
    cpu_profiles = {
        "safe": {
            "ncpu": cpu_capacity,
            "dataloader_workers": 4,
            "dataloader_prefetch": 2,
            "dataloader_persist_workers": True,
        },
        "balanced": {
            "ncpu": cpu_capacity,
            "dataloader_workers": 8,
            "dataloader_prefetch": 4,
            "dataloader_persist_workers": True,
        },
        "throughput": {
            "ncpu": cpu_capacity,
            "dataloader_workers": 12,
            "dataloader_prefetch": 6,
            "dataloader_persist_workers": True,
        },
    }

    # Fix: resolve the profile once instead of the original's redundant
    # conditional-expression-then-branch double assignment.
    if _PERF_PROFILE in tt_profiles:
        profile = _PERF_PROFILE
    else:
        print(f"[PERF] unknown NEWTIE_TT_PERF_PROFILE={_PERF_PROFILE!r}; falling back to balanced")
        profile = "balanced"

    return cpu_profiles[profile] if _CPU_ONLY else tt_profiles[profile]
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Resolve the effective perf profile and derive all host-side tuning knobs.
_perf_defaults = _get_perf_profile_defaults(_CPU_CAPACITY)
# Name actually in effect (unknown profiles fall back to "balanced").
_profile_hint = _PERF_PROFILE if _PERF_PROFILE in ("safe", "balanced", "throughput") else "balanced"
_default_threads = min(_CPU_CAPACITY, _perf_defaults["ncpu"])

# Host math-library thread count; env override wins, clamped to the core count.
_ncpu = _parse_int_env(
    "NEWTIE_TT_CPU_THREADS",
    default=_default_threads,
    minimum=1,
    maximum=_CPU_CAPACITY,
)

# Must be set before torch / BLAS backends initialize their thread pools.
os.environ["OMP_NUM_THREADS"] = str(_ncpu)
os.environ["MKL_NUM_THREADS"] = str(_ncpu)
os.environ["OPENBLAS_NUM_THREADS"] = str(_ncpu)
os.environ["VECLIB_MAXIMUM_THREADS"] = str(_ncpu)

# DataLoader worker-pool shape (profile defaults, env-overridable).
_tt_dataloader_workers = _parse_int_env(
    "NEWTIE_DATALOADER_WORKERS",
    default=int(_perf_defaults["dataloader_workers"]),
    minimum=0,
    maximum=64,
)
_tt_dataloader_prefetch = _parse_int_env(
    "NEWTIE_DATALOADER_PREFETCH",
    default=int(_perf_defaults["dataloader_prefetch"]),
    minimum=1,
    maximum=16,
)
_tt_dataloader_persist_workers = _parse_bool_env(
    "NEWTIE_DATALOADER_PERSIST_WORKERS",
    default=bool(_perf_defaults["dataloader_persist_workers"]),
)
# Pre-tokenization LRU cache: on by default only in the "throughput" profile.
_pretoken_cache_enabled = _parse_bool_env(
    "NEWTIE_PRETOKENIZE_CACHE",
    default=(_PERF_PROFILE == "throughput"),
)
_pretoken_cache_size = _parse_int_env(
    "NEWTIE_PRETOKENIZE_CACHE_SIZE",
    default=4096,
    minimum=0,
    maximum=200000,
)

# Power / benchmark overrides (consumed after phase preset selection)
_NEWTIE_TT_POWER_MODE = os.environ.get("NEWTIE_TT_POWER_MODE", "").strip()
_FORCE_BYTE_PATH = os.environ.get("NEWTIE_FORCE_BYTE_PATH", "0") == "1"
# "0" or empty string both mean "no override".
_OVERRIDE_SEQ_LEN = int(os.environ.get("NEWTIE_OVERRIDE_SEQ_LEN", "0") or 0)
_POWER_EFFECTIVE_MICROBATCH = int(os.environ.get("NEWTIE_POWER_EFFECTIVE_MICROBATCH", "0") or 0)
_BENCH_RELAX_EVAL = os.environ.get("NEWTIE_BENCH_RELAX_EVAL", "0") == "1"
_BENCH_RELAX_CHECKPOINT = os.environ.get("NEWTIE_BENCH_RELAX_CHECKPOINT", "0") == "1"

# Watchdog / NTFY notifications
_WATCHDOG_ENABLED = _parse_bool_env("NEWTIE_WATCHDOG_ENABLED", default=True)
_WATCHDOG_INTERVAL = _parse_int_env("NEWTIE_WATCHDOG_INTERVAL", default=25, minimum=1, maximum=100000)
_WATCHDOG_MIN_DELTA = _parse_float_env("NEWTIE_WATCHDOG_MIN_DELTA", default=1e-4, minimum=0.0)
_NTFY_TOPIC = os.environ.get("NEWTIE_NTFY_TOPIC", "").strip()
_NTFY_URL = os.environ.get("NEWTIE_NTFY_URL", "").strip()
_NTFY_TOKEN = os.environ.get("NEWTIE_NTFY_TOKEN", "").strip()
_NTFY_PRIORITY = os.environ.get("NEWTIE_NTFY_PRIORITY", "3").strip()
_NTFY_TAGS = os.environ.get("NEWTIE_NTFY_TAGS", "chart_with_upwards_trend").strip()
_NTFY_CLICK = os.environ.get("NEWTIE_NTFY_CLICK", "").strip()
_NTFY_TIMEOUT_S = _parse_float_env("NEWTIE_NTFY_TIMEOUT_S", default=10.0, minimum=1.0, maximum=60.0)

# torch is imported AFTER the *_NUM_THREADS env vars above so its BLAS
# backends pick up the limits.
import torch
torch.set_num_threads(_ncpu)
print(
    f"[THREADS] OMP={_ncpu}, MKL/OPENBLAS/VECLIB={_ncpu}, "
    f"torch_threads={torch.get_num_threads()}, cpu_only={_CPU_ONLY}"
)
# Announce the resolved profile (noting a fallback if the request was unknown).
if _profile_hint != _PERF_PROFILE:
    _resolved_profile_msg = f"[PERF] profile={_profile_hint} (requested={_PERF_PROFILE!r}, fallback={_profile_hint})"
else:
    _resolved_profile_msg = f"[PERF] profile={_profile_hint} (requested={_PERF_PROFILE!r})"
print(
    _resolved_profile_msg
)
print(
    f"[THREADS] dataloader workers={_tt_dataloader_workers}, "
    f"prefetch={_tt_dataloader_prefetch}, persist_workers={_tt_dataloader_persist_workers}"
)
print(
    f"[DATA] pretoken cache enabled={_pretoken_cache_enabled}, "
    f"cache_size={_pretoken_cache_size}"
)
print(
    f"[WATCHDOG] enabled={_WATCHDOG_ENABLED}, interval={_WATCHDOG_INTERVAL}, "
    f"min_delta={_WATCHDOG_MIN_DELTA}"
)
|
| 237 |
+
|
| 238 |
+
import torch.nn as nn
|
| 239 |
+
import torch.nn.functional as F
|
| 240 |
+
from torch.utils.data import Dataset, DataLoader, DistributedSampler
|
| 241 |
+
import torch.distributed as dist
|
| 242 |
+
|
| 243 |
+
# Add model to path
|
| 244 |
+
sys.path.insert(0, str(Path(__file__).parent))
|
| 245 |
+
|
| 246 |
+
from model.config import (
|
| 247 |
+
NewtieConfig, TrainingConfig, HardwareConfig,
|
| 248 |
+
get_phase1_config, get_phase2_config, get_phase3_config, get_phase4_config,
|
| 249 |
+
get_phase5_config, get_phase6_config, get_phase7_config, get_phase8_config,
|
| 250 |
+
get_phase9_config,
|
| 251 |
+
)
|
| 252 |
+
from model.newtie_v5 import NewtieV5
|
| 253 |
+
from model.tt_backend import DeviceMesh, PipelineEngine, create_pipeline
|
| 254 |
+
|
| 255 |
+
# TT ops are optional: only required when chip acceleration is enabled.
|
| 256 |
+
try:
|
| 257 |
+
from model.tt_ops import (
|
| 258 |
+
accelerate_model,
|
| 259 |
+
sync_tt_weights,
|
| 260 |
+
mark_tt_dirty,
|
| 261 |
+
begin_tt_runtime_step,
|
| 262 |
+
report_tt_adaptive_mesh_plan,
|
| 263 |
+
)
|
| 264 |
+
except Exception:
|
| 265 |
+
accelerate_model = None
|
| 266 |
+
sync_tt_weights = None
|
| 267 |
+
mark_tt_dirty = None
|
| 268 |
+
begin_tt_runtime_step = None
|
| 269 |
+
report_tt_adaptive_mesh_plan = None
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# ═══════════════════════════════════════════════════════════════
|
| 273 |
+
# Hardware Detection (delegates to DeviceMesh for N300)
|
| 274 |
+
# ═══════════════════════════════════════════════════════════════
|
| 275 |
+
|
| 276 |
+
def detect_hardware():
    """Detect best available hardware.

    Returns:
        (hw_type, compute_device, tt_chip_count)
        - N300: ("n300", None, chips)
        - CUDA: ("cuda", cuda_device, 0)
        - CPU: ("cpu", cpu_device, 0)

    Raises:
        RuntimeError: if Tenstorrent hardware is present but ttnn is missing.

    CRITICAL: Never use torch.device('cuda') or torch.device('cpu') for N300.
    All tensors go to Wormhole chips via ttnn. The CPU only holds
    parameter tensors for the optimizer — zero compute on CPU.
    """
    # Check for Tenstorrent N300
    if os.path.exists("/dev/tenstorrent"):
        try:
            import ttnn  # availability probe only — import failure means no TT runtime
            pcie_endpoints = 0

            # Koyeb exposes PCIe endpoints in /dev/tenstorrent. For N300S,
            # each endpoint corresponds to one card = two Wormhole chips.
            if os.path.isdir("/dev/tenstorrent"):
                pcie_endpoints = len([f for f in os.listdir("/dev/tenstorrent")
                                      if f.isdigit()])
            chip_count = pcie_endpoints * 2
            n_cards = pcie_endpoints
            print(f"[HARDWARE] {n_cards}× N300S detected ({chip_count} Wormhole chips)")
            print(f" 96 GB VRAM across {chip_count} chips (12 GB each)")
            print(f" All matmul compute → Wormhole Tensix cores")
            print(f" CPU only holds parameters for optimizer — zero CPU compute")
            # Return None as device — model params stay on CPU for optimizer,
            # but ALL forward/backward matmuls dispatch to ttnn devices
            return "n300", None, chip_count
        except ImportError:
            print(f"[HARDWARE] Tenstorrent detected but ttnn not installed")
            print(f" FATAL: Cannot run without ttnn on Tenstorrent hardware")
            raise RuntimeError("ttnn required for Tenstorrent N300")

    if torch.cuda.is_available():
        name = torch.cuda.get_device_name(0)
        # FIX: the attribute is `total_memory` — `.total_mem` does not exist and
        # raised AttributeError on every CUDA machine.
        mem = torch.cuda.get_device_properties(0).total_memory / 1e9
        print(f"[HARDWARE] CUDA: {name} ({mem:.1f} GB)")
        return "cuda", torch.device("cuda"), 0

    print("[HARDWARE] CPU mode (no accelerator)")
    return "cpu", torch.device("cpu"), 0
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def active_graph_phase_name_for(train_config: TrainingConfig) -> str:
    """Map training phases to explicit active-graph runtime profiles."""
    phase_num = int(getattr(train_config, "phase", 0) or 0)
    name = str(getattr(train_config, "phase_name", "") or "").strip().lower()

    # Phase 7 (grounded agency) has its own runtime graph.
    if phase_num == 7 or "grounded_agency" in name:
        return "phase7"
    # Phases 8/9 and any reasoning / instruction-response phase share one profile.
    reasoning_like = "reasoning" in name or "instruction_response" in name
    if phase_num in (8, 9) or reasoning_like:
        return "phase8"
    # Byte-level phases get the byte-active graph; everything else defaults to 6.
    return "phase_byte_active" if "byte" in name else "phase6"
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def build_hw_config_for_run(model_config: NewtieConfig,
                            hw_type: str,
                            tt_chip_count: int,
                            num_devices: int) -> HardwareConfig:
    """Pick a safe pipeline shape for currently visible TT hardware.

    Args:
        model_config: model definition; only `n_layers` is read here.
        hw_type: value from detect_hardware(); anything but "n300" gets defaults.
        tt_chip_count: chips actually detected (0 = unknown).
        num_devices: user-requested card count (0 = no request); one card = 2 chips.

    Returns:
        HardwareConfig with n_cards / stages / layers_per_stage set for N300,
        or an untouched default HardwareConfig otherwise.
    """
    hw_config = HardwareConfig()
    if hw_type != "n300":
        # Non-TT hardware: keep library defaults.
        return hw_config

    # Reconcile requested vs detected chips; the request caps the detected count.
    requested_chips = max(0, int(num_devices)) * 2 if num_devices else 0
    available_chips = tt_chip_count if tt_chip_count > 0 else requested_chips
    if requested_chips > 0 and tt_chip_count > 0:
        available_chips = min(tt_chip_count, requested_chips)
    if available_chips <= 0:
        return hw_config

    # Snap the pipeline depth down to the largest supported power-of-two stage count.
    if available_chips >= 8:
        stages = 8
    elif available_chips >= 4:
        stages = 4
    elif available_chips >= 2:
        stages = 2
    else:
        stages = 1

    # Two chips per card; at least one layer per pipeline stage.
    n_cards = max(1, stages // 2)
    layers_per_stage = max(1, model_config.n_layers // stages)

    hw_config.n_cards = n_cards
    hw_config.stages = stages
    hw_config.layers_per_stage = layers_per_stage

    print(f"[HARDWARE] pipeline profile: cards={hw_config.n_cards}, "
          f"chips={hw_config.total_chips}, stages={hw_config.stages}, "
          f"layers_per_stage={hw_config.layers_per_stage}")
    return hw_config
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
# ═══════════════════════════════════════════════════════════════
|
| 376 |
+
# Cognitive SFT Dataset
|
| 377 |
+
# ═══════════════════════════════════════════════════════════════
|
| 378 |
+
|
| 379 |
+
class CognitiveSFTDataset(Dataset):
    """
    Loads all cognitive SFT data for Phase 1 training.
    Tokenizes instruction→response pairs into training sequences.

    Format: <|system|>\n{system}\n<|end|>\n<|user|>\n{instruction}\n<|end|>\n<|assistant|>\n{response}\n<|end|>

    Labels are -100 outside the assistant response span so the loss covers
    completion tokens only; when no assistant marker is found, the whole
    content span is supervised instead.
    """

    # Default Swedish system prompt — runtime data, do not translate.
    SYSTEM_PROMPT = (
        "Du är Newtie, en kognitiv AI skapad av InfinityHQ Research Lab. "
        "Du resonerar djupt, steg för steg. Du är ärlig om vad du vet och inte vet. "
        "Du tänker över ditt tänkande. Du svarar på samma språk som frågan."
    )

    def __init__(self, data_paths: List[str], tokenizer, seq_len: int = 2048,
                 max_samples: Optional[int] = None,
                 cache_tokens: bool = _pretoken_cache_enabled,
                 cache_size: int = _pretoken_cache_size):
        """Load JSONL entries from *data_paths*.

        Args:
            data_paths: JSONL files; missing paths are skipped with a log line.
            tokenizer: object exposing encode(str) -> List[int].
            seq_len: fixed output length (truncate, or pad with token id 0).
            max_samples: optional cap; entries are shuffled before truncation.
            cache_tokens: enable the in-process LRU cache of tokenized items.
            cache_size: max cached items; 0 disables caching.
        """
        self.seq_len = seq_len
        self.tokenizer = tokenizer
        self.entries = []
        self._cache_tokens = bool(cache_tokens)
        self._cache_size = int(max(0, cache_size))
        # idx -> tokenized item; OrderedDict insertion order doubles as LRU order.
        self._token_cache: OrderedDict[int, Dict[str, torch.Tensor]] = OrderedDict()
        # Token patterns used to locate the assistant span for label masking.
        self._assistant_prefix_ids = self.tokenizer.encode("<|assistant|>\n")
        self._end_marker_ids = self.tokenizer.encode("<|end|>")

        # Load all JSONL files
        for path in data_paths:
            if not os.path.exists(path):
                print(f" [DATA] Skipping {path} (not found)")
                continue
            with open(path, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        entry = json.loads(line)
                        self.entries.append(entry)
                    except json.JSONDecodeError:
                        # Malformed lines are silently dropped.
                        continue
            # NOTE: count is cumulative across files, not per-file.
            print(f" [DATA] Loaded {path}: {len(self.entries)} total entries")

        if max_samples and len(self.entries) > max_samples:
            # Shuffle first so the cap keeps a random subset, not a prefix.
            random.shuffle(self.entries)
            self.entries = self.entries[:max_samples]

        print(f" [DATA] Total: {len(self.entries)} training entries")

        if self._cache_tokens and self._cache_size > 0:
            print(f" [DATA] LRU token cache active: size={self._cache_size}")

    @staticmethod
    def _format_cached(item: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Clone only when necessary for DataLoader worker safety."""
        # Tensors are shared by reference — callers must treat them as read-only.
        return {
            "input_ids": item["input_ids"],
            "labels": item["labels"],
            **({"byte_ids": item["byte_ids"]} if "byte_ids" in item else {})
        }

    def __len__(self):
        # One training item per loaded JSONL entry.
        return len(self.entries)

    @staticmethod
    def _find_subsequence(haystack: List[int], needle: List[int], start_idx: int = 0) -> int:
        """Return the first index >= start_idx where *needle* occurs in *haystack*, else -1."""
        if not needle:
            return -1
        max_start = len(haystack) - len(needle)
        for idx in range(max(start_idx, 0), max_start + 1):
            # Cheap first-token check before the full slice comparison.
            if haystack[idx] == needle[0] and haystack[idx: idx + len(needle)] == needle:
                return idx
        return -1

    def _format_entry(self, entry: dict) -> str:
        """Convert entry to chat format string.

        Supports three schemas: "messages" chat lists, instruction/response
        SFT pairs, and query/content pattern pairs; anything else falls back
        to concatenating the entry's string values.
        """
        # Handle different data formats
        if "messages" in entry:
            # Chat format (cognitive_scenarios.jsonl)
            parts = []
            for msg in entry["messages"]:
                role = msg.get("role", "user")
                content = msg.get("content", "")
                parts.append(f"<|{role}|>\n{content}\n<|end|>")
            return "\n".join(parts)

        elif "instruction" in entry and "response" in entry:
            # SFT format (sft_cognitive_depth.jsonl, etc.)
            instruction = entry["instruction"]
            response = entry["response"]
            return (
                f"<|system|>\n{self.SYSTEM_PROMPT}\n<|end|>\n"
                f"<|user|>\n{instruction}\n<|end|>\n"
                f"<|assistant|>\n{response}\n<|end|>"
            )

        elif "query" in entry and "content" in entry:
            # Pattern format (deep_synthesis patterns)
            query = entry["query"]
            content = entry["content"]
            return (
                f"<|system|>\n{self.SYSTEM_PROMPT}\n<|end|>\n"
                f"<|user|>\n{query}\n<|end|>\n"
                f"<|assistant|>\n{content}\n<|end|>"
            )

        else:
            # Fallback: just concatenate all string values
            text = " ".join(str(v) for v in entry.values() if isinstance(v, str))
            return text

    def __getitem__(self, idx):
        """Return {"input_ids", "labels"[, "byte_ids"]} long tensors of length seq_len."""
        # Serve from the LRU cache when enabled.
        if self._cache_tokens and self._cache_size > 0:
            cached = self._token_cache.get(idx)
            if cached is not None:
                self._token_cache.move_to_end(idx)
                return self._format_cached(cached)

        entry = self.entries[idx]
        text = self._format_entry(entry)

        # Tokenize
        token_ids = self.tokenizer.encode(text)
        # Number of real (non-padding) positions.
        content_len = min(len(token_ids), self.seq_len)

        # Truncate or pad to seq_len
        if len(token_ids) > self.seq_len:
            token_ids = token_ids[:self.seq_len]
        else:
            token_ids = token_ids + [0] * (self.seq_len - len(token_ids))

        input_ids = torch.tensor(token_ids, dtype=torch.long)
        labels = torch.full_like(input_ids, -100)  # -100 = ignore index for CE loss

        # Train primarily on assistant completion tokens.
        assistant_start = self._find_subsequence(
            token_ids[:content_len], self._assistant_prefix_ids, start_idx=0
        )
        if assistant_start >= 0:
            response_start = assistant_start + len(self._assistant_prefix_ids)
            response_end = self._find_subsequence(
                token_ids[:content_len], self._end_marker_ids, start_idx=response_start
            )
            if response_end < 0:
                # No end marker (likely truncated): supervise to end of content.
                response_end = content_len
            if response_end > response_start:
                labels[response_start:response_end] = input_ids[response_start:response_end]
            else:
                # Empty response span — fall back to full-content supervision.
                labels[:content_len] = input_ids[:content_len]
        else:
            # No assistant marker — supervise all content tokens.
            labels[:content_len] = input_ids[:content_len]

        item = {"input_ids": input_ids, "labels": labels}

        # Optional raw UTF-8 byte stream for the byte-perception path.
        if _FORCE_BYTE_PATH:
            byte_values = list(text.encode("utf-8"))
            if len(byte_values) > self.seq_len:
                byte_values = byte_values[:self.seq_len]
            else:
                byte_values = byte_values + [0] * (self.seq_len - len(byte_values))
            item["byte_ids"] = torch.tensor(byte_values, dtype=torch.long)

        if self._cache_tokens and self._cache_size > 0:
            # Evict the least-recently-used entry when at capacity.
            if len(self._token_cache) >= self._cache_size:
                self._token_cache.popitem(last=False)
            self._token_cache[idx] = {
                "input_ids": item["input_ids"],
                "labels": item["labels"],
                **({"byte_ids": item["byte_ids"]} if "byte_ids" in item else {}),
            }

        return item
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
def apply_power_mode_overrides(train_config: TrainingConfig) -> TrainingConfig:
    """Apply env-driven benchmark overrides without changing phase presets on disk.

    Mutates *train_config* in place according to the NEWTIE_* override env
    vars parsed at module import, and returns the same object. Each applied
    change is logged under the [POWER] tag.
    """
    changed = []

    # Hard seq-len override (0 = no override).
    if _OVERRIDE_SEQ_LEN > 0 and _OVERRIDE_SEQ_LEN != train_config.seq_len:
        train_config.seq_len = _OVERRIDE_SEQ_LEN
        changed.append(f"seq_len={train_config.seq_len}")

    # Reach a target effective batch by raising gradient accumulation
    # (ceiling division keeps effective >= requested).
    if _POWER_EFFECTIVE_MICROBATCH > 0:
        base_batch = max(1, int(train_config.batch_size))
        target_effective = max(base_batch, _POWER_EFFECTIVE_MICROBATCH)
        grad_accum = (target_effective + base_batch - 1) // base_batch
        if grad_accum != train_config.gradient_accumulation_steps:
            train_config.gradient_accumulation_steps = grad_accum
            changed.append(f"effective_batch={train_config.effective_batch_size}")

    # Benchmark mode: push eval past the end of the run so it never triggers.
    if _BENCH_RELAX_EVAL:
        new_eval = max(int(train_config.eval_interval), int(train_config.total_steps) + 1)
        if new_eval != train_config.eval_interval:
            train_config.eval_interval = new_eval
            changed.append(f"eval_interval={train_config.eval_interval}")

    # Benchmark mode: checkpoint no more often than every 50 steps.
    if _BENCH_RELAX_CHECKPOINT:
        new_save = max(int(train_config.save_interval), 50)
        if new_save != train_config.save_interval:
            train_config.save_interval = new_save
            changed.append(f"save_interval={train_config.save_interval}")

    if changed:
        mode_name = _NEWTIE_TT_POWER_MODE or "custom"
        print(f"[POWER] {mode_name}: {' | '.join(changed)}")
    # NOTE(review): reconstructed indentation places this at function level
    # (logged regardless of other overrides) — confirm against the original.
    if _FORCE_BYTE_PATH:
        print("[POWER] byte_path=forced")

    return train_config
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def adapt_resume_state_dict_for_current_model(
    checkpoint_state: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
    """Map legacy perception keys into the current SuperByte-Lite layout.

    Legacy keys are kept alongside the newly-mapped ones; existing current
    keys are never overwritten.
    """
    adapted = dict(checkpoint_state)

    # Legacy-name -> current-name pairs for the perception stack.
    legacy_to_current = {
        "perception.superbyte.byte_embed.weight": "perception.superbyte.impl.byte_embed.weight",
        "perception.superbyte.gate_proj.weight": "perception.superbyte.impl.gate_proj.weight",
        "perception.superbyte.gate_proj.bias": "perception.superbyte.impl.gate_proj.bias",
        "perception.superbyte.value_proj.weight": "perception.superbyte.impl.value_proj.weight",
        "perception.superbyte.value_proj.bias": "perception.superbyte.impl.value_proj.bias",
        "perception.superbyte.norm.weight": "perception.superbyte.impl.norm.weight",
        "perception.superbyte.norm.bias": "perception.superbyte.impl.norm.bias",
        "perception.bridge.W_q.weight": "perception.bridge.impl.q_proj.weight",
        "perception.bridge.W_k.weight": "perception.bridge.impl.k_proj.weight",
        "perception.bridge.W_v.weight": "perception.bridge.impl.v_proj.weight",
        "perception.bridge.W_out.weight": "perception.bridge.impl.out_proj.weight",
        "perception.bridge.norm_q.weight": "perception.bridge.impl.norm_tokens.weight",
        "perception.bridge.norm_q.bias": "perception.bridge.impl.norm_tokens.bias",
        "perception.bridge.norm_kv.weight": "perception.bridge.impl.norm_patches.weight",
        "perception.bridge.norm_kv.bias": "perception.bridge.impl.norm_patches.bias",
        "perception.bridge.norm_out.weight": "perception.bridge.impl.norm_out.weight",
        "perception.bridge.norm_out.bias": "perception.bridge.impl.norm_out.bias",
    }
    for legacy_key, current_key in legacy_to_current.items():
        if legacy_key in checkpoint_state:
            # setdefault: only fill the current key if it is not already present.
            adapted.setdefault(current_key, checkpoint_state[legacy_key])

    # Legacy multi-scale projection: average the scale axis into local_proj.
    scale_proj = checkpoint_state.get("perception.superbyte.scale_proj.weight")
    target_key = "perception.superbyte.impl.local_proj.weight"
    if scale_proj is not None and target_key not in adapted and scale_proj.ndim == 2:
        out_dim, in_dim = scale_proj.shape
        if out_dim > 0 and in_dim % out_dim == 0:
            n_scales = in_dim // out_dim
            adapted[target_key] = scale_proj.view(out_dim, n_scales, out_dim).mean(dim=1)

    return adapted
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def default_phase_data_dir(phase: int) -> Optional[str]:
    """Prefer structured local phase datasets when present.

    Returns the first known phase-data directory that exists and contains
    at least one .jsonl file, else None.
    """
    for root in (
        f"/Volumes/ADi/The Concern/training_data/phase{phase}",
        f"/data/newtie_v5/training_data/phase{phase}",
    ):
        if not os.path.isdir(root):
            continue
        if any(entry.endswith(".jsonl") for entry in os.listdir(root)):
            return root
    return None
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
# ═══════════════════════════════════════════════════════════════
|
| 644 |
+
# Simple BPE Tokenizer (fallback if sentencepiece not available)
|
| 645 |
+
# ═══════════════════════════════════════════════════════════════
|
| 646 |
+
|
| 647 |
+
class ByteFallbackTokenizer:
    """UTF-8 byte-level tokenizer — works without any external dependencies.
    Each byte (0-255) maps to tokens 0-255. Special tokens 256+."""

    def __init__(self, vocab_size=32000):
        # vocab_size is advisory only; encoding always emits raw UTF-8 bytes.
        self.vocab_size = vocab_size

    def encode(self, text: str) -> List[int]:
        """One token id per UTF-8 byte, each in [0, 255]."""
        return [byte for byte in text.encode('utf-8')]

    def decode(self, ids: List[int]) -> str:
        """Decode byte tokens; ids above 255 (special tokens) clamp to 0xFF."""
        raw = bytes(tok if tok <= 255 else 255 for tok in ids)
        return raw.decode('utf-8', errors='replace')
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
# ═══════════════════════════════════════════════════════════════
|
| 662 |
+
# WSD Learning Rate Schedule
|
| 663 |
+
# ═══════════════════════════════════════════════════════════════
|
| 664 |
+
|
| 665 |
+
def get_wsd_lr(step: int, config: TrainingConfig) -> float:
    """Warmup-Stable-Decay schedule (MiniCPM-style).

    Linear warmup to the peak LR, a flat stable window covering
    `stable_ratio` of training, then a cosine decay down to `min_lr`.
    """
    total = config.total_steps
    warmup = config.warmup_steps
    stable_end = int(total * config.stable_ratio)

    # Linear ramp from 0 up to the peak LR.
    if step < warmup:
        return config.lr * step / warmup
    # Hold at peak through the stable window.
    if step < stable_end:
        return config.lr
    # Cosine anneal from peak LR down to min_lr over the decay window.
    decay_span = max(total - stable_end, 1)
    progress = (step - stable_end) / decay_span
    cosine = 1 + math.cos(math.pi * progress)
    return config.min_lr + 0.5 * (config.lr - config.min_lr) * cosine
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
# ═══════════════════════════════════════════════════════════════
|
| 687 |
+
# Dashboard Integration
|
| 688 |
+
# ═══════════════════════════════════════════════════════════════
|
| 689 |
+
|
| 690 |
+
def write_status(log_dir: str, status: dict):
    """Write training status JSON for dashboard to read.

    Failures to write are swallowed — the status file is best-effort.
    """
    target = os.path.join(log_dir, "training_status.json")
    try:
        with open(target, "w") as handle:
            handle.write(json.dumps(status, indent=2))
    except (OSError, IOError):
        pass  # Non-critical
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def write_bio_gates(log_dir: str, gate_dict: dict):
    """Write bio-module gate values + spike dynamics for dashboard.

    Flattens nested spike-info dicts into dotted keys, reduces tensor-like
    values via mean(), and keeps plain numbers. Write errors are swallowed.
    """
    out_path = os.path.join(log_dir, "bio_gates.json")
    try:
        flat = {}
        for name, value in gate_dict.items():
            if isinstance(value, dict):
                # Spike info dicts — flatten into "spike_hierarchy_info.spike_rate_word" etc.
                for sub_name, sub_value in value.items():
                    if hasattr(sub_value, 'item'):
                        scalar = sub_value.item()
                    elif isinstance(sub_value, (int, float)):
                        scalar = float(sub_value)
                    else:
                        scalar = None  # non-numeric entries are dropped
                    if scalar is not None:
                        flat[f"{name}.{sub_name}"] = scalar
            elif hasattr(value, 'mean'):
                # Tensor-like value: reduce to a single scalar.
                flat[name] = float(value.mean().item())
            elif isinstance(value, (int, float)):
                flat[name] = float(value)
        with open(out_path, "w") as handle:
            json.dump(flat, handle, indent=2)
    except (OSError, IOError):
        pass
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def check_pause(log_dir: str) -> bool:
    """Check if dashboard sent a pause signal (a PAUSE marker file exists)."""
    pause_marker = os.path.join(log_dir, "PAUSE")
    return os.path.exists(pause_marker)
|
| 725 |
+
|
| 726 |
+
|
| 727 |
+
def resolve_ntfy_url(explicit_url: str, topic: str) -> Optional[str]:
    """Resolve the ntfy endpoint URL.

    Precedence: explicit URL > topic-as-URL > topic name on ntfy.sh;
    returns None when neither is configured.
    """
    if explicit_url:
        return explicit_url
    if not topic:
        return None
    if topic.startswith(("http://", "https://")):
        return topic
    # Bare topic name: strip leading slashes and target the public ntfy.sh host.
    return f"https://ntfy.sh/{topic.lstrip('/')}"
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def send_ntfy_message(
    url: str,
    body: str,
    title: str,
    priority: str = "3",
    tags: str = "",
    token: str = "",
    click: str = "",
    timeout_s: float = 10.0,
) -> tuple[bool, str]:
    """POST *body* as a plain-text ntfy notification.

    Returns (ok, detail) where detail is "http_<code>", "no_ntfy_url",
    or the stringified exception on transport failure. Never raises.
    """
    if not url:
        return False, "no_ntfy_url"

    headers = {
        "Content-Type": "text/plain; charset=utf-8",
        "Title": title[:200],  # ntfy caps title length; truncate defensively
        "Priority": str(priority),
    }
    # Optional headers are only sent when non-empty.
    for header_name, header_value in (("Tags", tags), ("Click", click)):
        if header_value:
            headers[header_name] = header_value
    if token:
        headers["Authorization"] = f"Bearer {token}"

    request = urllib.request.Request(
        url=url,
        data=body.encode("utf-8"),
        headers=headers,
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=timeout_s) as resp:
            status = int(getattr(resp, "status", 200))
            ok = 200 <= status < 300
            return ok, f"http_{status}"
    except urllib.error.HTTPError as http_err:
        return False, f"http_{http_err.code}"
    except Exception as err:
        return False, str(err)
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
# ═══════════════════════════════════════════════════════════════
|
| 781 |
+
# Training Loop
|
| 782 |
+
# ═══════════════════════════════════════════════════════════════
|
| 783 |
+
|
| 784 |
+
def train(model_config: NewtieConfig = None,
          train_config: TrainingConfig = None,
          data_paths: List[str] = None,
          resume_from: str = None,
          resume_weights_only: bool = False,
          num_devices: int = 0,
          log_dir: str = "/data/newtie_v5/logs",
          checkpoint_dir: str = "/data/newtie_v5/checkpoints"):
    """Main training function — 8-chip Pipeline Parallel via TT-Metalium.

    Pipeline: 8 stages (4 layers/chip), 64 micro-batches, 89% efficiency.
    Falls back to PyTorch sequential if ttnn unavailable.

    Args:
        model_config: Model hyperparameters. When either this or
            ``train_config`` is None, both are replaced by the phase-1
            defaults from ``get_phase1_config()``.
        train_config: Training hyperparameters (phase, LRs, intervals).
        data_paths: Explicit list of .jsonl training files. When None,
            the phase data dir, then the Docker data dir, then local
            development paths are tried in that order.
        resume_from: Optional checkpoint path to resume from.
        resume_weights_only: Load model weights from ``resume_from`` but
            reset the optimizer state and the step counter.
        num_devices: Number of N300S cards (0 = auto-detect).
        log_dir: Directory for dashboard status/gate files.
        checkpoint_dir: Directory for saved checkpoints.

    Returns:
        The trained model (left in ``train()`` mode).
    """

    if model_config is None or train_config is None:
        model_config, train_config = get_phase1_config()

    # Env-driven power-mode tweaks (defined elsewhere in this file).
    train_config = apply_power_mode_overrides(train_config)

    # Override output dirs from CLI
    train_config.output_dir = checkpoint_dir
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(checkpoint_dir, exist_ok=True)

    # ── Hardware detection ──
    # hw_type is one of "n300" / "cuda" / CPU-ish fallback (see branches below);
    # compute_device is a torch device or None (TT chips have no torch device).
    hw_type, compute_device, tt_chip_count = detect_hardware()

    # ── Default data paths ──
    if data_paths is None:
        phase_dir = default_phase_data_dir(train_config.phase)
        if phase_dir is not None:
            data_paths = glob.glob(os.path.join(phase_dir, "*.jsonl"))
            print(f" [DATA] Using phase data dir: {phase_dir}")
    # Check Docker data directory first
    if data_paths is None:
        docker_data = "/data/newtie_v5/training_data"
        if os.path.isdir(docker_data) and any(
                f.endswith('.jsonl') for f in os.listdir(docker_data)):
            data_paths = glob.glob(os.path.join(docker_data, "*.jsonl"))
            print(f" [DATA] Using Docker data dir: {docker_data}")
        else:
            # Local development paths
            base = "/Volumes/ADi/Ultimate_research"
            data_paths = [
                f"{base}/data/training/sft_cognitive_depth.jsonl",
                f"{base}/data/training/sft_pairs_20260205_125455.jsonl",
                f"{base}/data/training/sft_pairs_20260205_125230.jsonl",
                f"{base}/data/training/sft_swedish_personality.jsonl",
                f"{base}/adi_core_newtie/deep_synthesis/deep_reasoning_chain_patterns.jsonl",
                f"{base}/adi_core_newtie/deep_synthesis/deep_analogy_patterns.jsonl",
                f"{base}/adi_core_newtie/deep_synthesis/deep_synthesis_patterns.jsonl",
                f"{base}/adi_core_newtie/deep_synthesis/deep_meta_patterns.jsonl",
                f"{base}/adi_core_newtie/deep_synthesis/deep_calibration_patterns.jsonl",
                f"{base}/data/sala_sft/biomodules.jsonl",
                f"{base}/data/sala_sft/verified_search.jsonl",
            ]
            # Also load waveflow2 SFT data
            wf2_sft = glob.glob(f"{base}/waveflow2_newton_model/data/training/sft_*.jsonl")
            data_paths.extend(wf2_sft)
            # ADi training data
            data_paths.append("/Volumes/ADi/The Concern/ADi/adi_training/train_data/cognitive_scenarios.jsonl")
            # Include introspective self-explanations if available
            introspect_path = "/Volumes/ADi/The Concern/ADi/introspect_sft.jsonl"
            if os.path.exists(introspect_path):
                print(f" [DATA] including introspection log: {introspect_path}")
                data_paths.append(introspect_path)

    # ── Tokenizer ──
    # Prefer SentencePiece; any failure (missing package OR missing model
    # file) silently falls back to the byte-level tokenizer.
    try:
        import sentencepiece as spm
        tokenizer = spm.SentencePieceProcessor()
        tokenizer.load(train_config.data_dir + "/tokenizer.model")
        print("[TOKENIZER] SentencePiece loaded")
    except Exception:
        tokenizer = ByteFallbackTokenizer(model_config.vocab_size)
        print("[TOKENIZER] Byte-level fallback (no sentencepiece)")

    # ── Dataset & DataLoader ──
    dataset = CognitiveSFTDataset(
        data_paths=data_paths,
        tokenizer=tokenizer,
        seq_len=train_config.seq_len,
        # _pretoken_cache_* are module-level settings, presumably env-driven.
        cache_tokens=_pretoken_cache_enabled,
        cache_size=_pretoken_cache_size,
    )

    _dataloader_workers = _tt_dataloader_workers
    if _CPU_ONLY:
        # CPU-only runs always get at least one worker process.
        _dataloader_workers = max(1, _dataloader_workers)

    dataloader = DataLoader(
        dataset,
        batch_size=train_config.batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=_dataloader_workers,
        pin_memory=(hw_type == "cuda"),
        # prefetch_factor / persistent_workers are only legal with workers > 0.
        prefetch_factor=(_tt_dataloader_prefetch if _dataloader_workers > 0 else None),
        persistent_workers=(_tt_dataloader_persist_workers if _dataloader_workers > 0 else False),
    )

    print(
        f"[DL] workers={_dataloader_workers}, "
        f"prefetch={_tt_dataloader_prefetch}, "
        f"persist_workers={_tt_dataloader_persist_workers if _dataloader_workers > 0 else False}"
    )

    # Adaptive mesh planner report (observability only).
    if report_tt_adaptive_mesh_plan is not None:
        report_tt_adaptive_mesh_plan(model_config, train_config)

    # Keep TT active-graph runtime aligned with the actual phase unless the
    # caller explicitly overrides it via environment.
    os.environ.setdefault(
        "NEWTIE_TT_ACTIVE_GRAPH_PHASE",
        active_graph_phase_name_for(train_config),
    )

    # ── Model ──
    print(f"\n[MODEL] Initializing Newtie V5...")
    model = NewtieV5(model_config)

    # Count parameters
    counts = model.count_parameters()
    total = counts["TOTAL_unique"]
    active = model.estimate_active_params()
    print(f" Total params: {total:,} ({total/1e6:.1f}M)")
    print(f" Active/token: {active:,} ({active/1e6:.1f}M, {active/total*100:.1f}%)")
    print(f" BF16 size: {total * 2 / 1e9:.2f} GB")

    # ── Device placement ──
    if hw_type == "n300":
        # N300: model params stay on CPU (for optimizer), compute goes to chips
        # DO NOT call model.to(device) — there is no torch device for TT
        print(f"[DEVICE] Model params on CPU (for AdamW), compute → Wormhole chips")
    elif hw_type == "cuda":
        model = model.to(compute_device)
        if train_config.mixed_precision == "bf16":
            model = model.to(torch.bfloat16)
    else:
        model = model.to(compute_device)

    # ── Gradient checkpointing (critical for SSM memory) ──
    if train_config.gradient_checkpointing:
        model.gradient_checkpointing = True
        print(f"[MEMORY] Gradient checkpointing ENABLED — saves ~98% activation memory")
    else:
        print(f"[MEMORY] WARNING: Gradient checkpointing DISABLED — may OOM!")

    # ── Pipeline Engine (8-chip Metalium) ──
    hw_config = build_hw_config_for_run(
        model_config=model_config,
        hw_type=hw_type,
        tt_chip_count=tt_chip_count,
        num_devices=num_devices,
    )
    pipeline_engine, device_mesh = create_pipeline(model, model_config, hw_config)
    # Single-stage fallback when the ttnn runtime is absent.
    n_stages = hw_config.stages if device_mesh.ttnn_available else 1

    # ── Accelerate: replace nn.Linear → TTLinear (matmul on Wormhole chips) ──
    if _CPU_ONLY:
        print(f"[MODE] CPU-only enabled — TT acceleration disabled")
    elif hw_type == "n300" and device_mesh.ttnn_available:
        if accelerate_model is None:
            print("[TT-OPS] ERROR: TT ops unavailable (import failed) — cannot use chips")
        else:
            n_accelerated = accelerate_model(model, device_mesh, model_config)
            if n_accelerated == 0:
                print("[TT-OPS] WARNING: No layers accelerated — check device mesh")
            else:
                print(f"[TT-OPS] {n_accelerated} Linear layers now execute on Wormhole chips")
                print(f" CPU RAM: ~{total * 2 * 4 / 1e9:.1f} GB (params + optimizer + grads)")
                print(f" TT VRAM: 96 GB total across 8 chips")

    # ── Optimizer: dual LR (backbone + bio-modules) ──
    # Bio-module parameters are identified purely by substring match on the
    # parameter name; everything else is "backbone".
    backbone_params = []
    bio_params = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if "bio" in name or "stdp" in name or "synaplexa" in name or \
           "cerebrix" in name or "dendroplast" in name or "metabolism" in name or \
           "spike" in name or "lif" in name or "wta" in name:
            bio_params.append(param)
        else:
            backbone_params.append(param)

    # Param group 0 = backbone, group 1 = bio (the LR schedule below relies
    # on this ordering).
    optimizer = torch.optim.AdamW([
        {"params": backbone_params, "lr": train_config.lr},
        {"params": bio_params, "lr": train_config.bio_lr},
    ], weight_decay=train_config.weight_decay,
       betas=(train_config.beta1, train_config.beta2))

    print(f"\n[OPTIMIZER] AdamW (dual LR)")
    print(f" Backbone: {len(backbone_params)} tensors, lr={train_config.lr}")
    print(f" Bio-modules: {len(bio_params)} tensors, lr={train_config.bio_lr}")

    # ── Resume ──
    start_step = 0
    # Seed the watchdog with the checkpoint's recorded loss, if present.
    resume_loss_for_watchdog: Optional[float] = None
    if resume_from and os.path.exists(resume_from):
        checkpoint = torch.load(resume_from, map_location=compute_device)
        ckpt_loss = checkpoint.get("loss")
        if isinstance(ckpt_loss, (int, float)):
            resume_loss_for_watchdog = float(ckpt_loss)
        if resume_weights_only:
            # Weights-only resume: adapt the state dict to the current model
            # shape and load non-strictly; optimizer and step counter reset.
            resume_state = adapt_resume_state_dict_for_current_model(checkpoint["model_state_dict"])
            missing, unexpected = model.load_state_dict(resume_state, strict=False)
            print(f"[RESUME] Loaded weights only from {resume_from}")
            print("[RESUME] Optimizer state reset for new phase / fresh step schedule")
            print(f"[RESUME] Compatible load: missing={len(missing)} unexpected={len(unexpected)}")
            if missing:
                print(f"[RESUME] Missing sample: {missing[:6]}")
            if unexpected:
                print(f"[RESUME] Unexpected sample: {unexpected[:6]}")
        else:
            # Full resume: weights, optimizer state, and step counter.
            model.load_state_dict(checkpoint["model_state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
            start_step = checkpoint.get("step", 0)
            print(f"[RESUME] From step {start_step}")

    # ── Training ──
    model.train()
    grad_accum = train_config.gradient_accumulation_steps

    print(f"\n{'═'*60}")
    print(f" TRAINING PHASE {train_config.phase}: {train_config.phase_name}")
    print(f" Steps: {train_config.total_steps}")
    print(f" Tokens/step: {train_config.tokens_per_step:,}")
    print(f" Total tokens: {train_config.total_steps * train_config.tokens_per_step / 1e6:.0f}M")
    print(f" Device: {hw_type}")
    print(f" Pipeline: {n_stages} stages × {hw_config.layers_per_stage} layers/chip")
    print(f" Pipeline efficiency: {hw_config.pipeline_efficiency*100:.1f}%")
    print(f" Micro-batch: {train_config.batch_size} × {hw_config.n_micro_batches} = {train_config.batch_size * hw_config.n_micro_batches} effective")
    print(f" Dashboard: {log_dir}")
    print(f" Checkpoints: {checkpoint_dir}")
    print(f"{'═'*60}\n")

    # Loop state. best_loss drives checkpointing; the watchdog_* variables
    # drive the quality-improvement NTFY notifications.
    global_step = start_step
    running_loss = 0.0
    best_loss = float('inf')
    tokens_processed = 0
    train_start = time.time()
    watchdog_enabled = _WATCHDOG_ENABLED and _WATCHDOG_INTERVAL > 0
    watchdog_ntfy_url = resolve_ntfy_url(_NTFY_URL, _NTFY_TOPIC)
    watchdog_best_loss = (
        float(resume_loss_for_watchdog)
        if resume_loss_for_watchdog is not None
        else float("inf")
    )
    watchdog_last_report_quality: Optional[float] = None
    watchdog_last_report_loss: Optional[float] = None
    watchdog_last_report_step = 0
    watchdog_last_improvement_step = 0
    watchdog_ntfy_sent = 0
    watchdog_ntfy_fail = 0
    watchdog_quality = 0.0
    if watchdog_enabled:
        if watchdog_ntfy_url:
            print(f"[WATCHDOG] interval={_WATCHDOG_INTERVAL} steps, ntfy={watchdog_ntfy_url}")
        else:
            print(
                "[WATCHDOG] interval active but NTFY target missing "
                "(set NEWTIE_NTFY_TOPIC or NEWTIE_NTFY_URL)"
            )

    data_iter = iter(dataloader)
    import sys  # for flush
    print(f"[DEBUG] Entering training loop at {time.strftime('%H:%M:%S')}", flush=True)

    while global_step < train_config.total_steps:
        step_t0 = time.time()
        optimizer.zero_grad()
        step_loss = 0.0
        if begin_tt_runtime_step is not None:
            begin_tt_runtime_step()

        # One optimizer step = grad_accum micro-batches of forward/backward.
        for micro_step in range(grad_accum):
            try:
                batch = next(data_iter)
            except StopIteration:
                # Epoch boundary: restart the (re-shuffled) dataloader.
                data_iter = iter(dataloader)
                batch = next(data_iter)

            # For N300: tensors stay on CPU, TTLinear dispatches matmul to chips
            # For CUDA: tensors move to GPU
            if compute_device is not None:
                input_ids = batch["input_ids"].to(compute_device)
                labels = batch["labels"].to(compute_device)
                byte_ids = batch.get("byte_ids")
                if byte_ids is not None:
                    byte_ids = byte_ids.to(compute_device)
            else:
                input_ids = batch["input_ids"]
                labels = batch["labels"]
                byte_ids = batch.get("byte_ids")

            # Forward timing
            # NOTE(review): return_gates is computed from global_step BEFORE
            # the end-of-step increment, while the [STEP]/[GATES] logging
            # below tests global_step AFTER the increment — at a logging
            # step the forward may therefore have run with return_gates
            # False. Confirm whether this off-by-one is intended.
            fwd_t0 = time.time()
            if _CPU_ONLY:
                # CPU bf16 autocast: EPYC 9254 has AVX-512-BF16 for ~2× throughput
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    output = model(input_ids, byte_ids=byte_ids, labels=labels,
                                   return_gates=(global_step % train_config.log_interval == 0))
            elif train_config.mixed_precision == "bf16" and hw_type == "cuda":
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    output = model(input_ids, byte_ids=byte_ids, labels=labels,
                                   return_gates=(global_step % train_config.log_interval == 0))
            else:
                output = model(input_ids, byte_ids=byte_ids, labels=labels,
                               return_gates=(global_step % train_config.log_interval == 0))
            fwd_elapsed = time.time() - fwd_t0

            # Scale so the accumulated gradient matches a full-batch step.
            loss = output.loss / grad_accum

            # NaN detection — diagnostic dump only; the actual skip happens
            # in the backward branch below.
            if torch.isnan(loss) or torch.isinf(loss):
                print(f"[NaN] step={global_step} micro={micro_step} loss={loss.item()}", flush=True)
                print(f"[NaN] logits range: {output.logits.min().item():.4f} to {output.logits.max().item():.4f}", flush=True)
                print(f"[NaN] logits has NaN: {torch.isnan(output.logits).any().item()}", flush=True)
                print(f"[NaN] logits has Inf: {torch.isinf(output.logits).any().item()}", flush=True)
                if hasattr(output, 'gate_dict') and output.gate_dict:
                    for k, v in output.gate_dict.items():
                        if isinstance(v, torch.Tensor):
                            if torch.isnan(v).any() or torch.isinf(v).any():
                                print(f"[NaN] gate {k} has NaN/Inf: min={v.min().item():.4f} max={v.max().item():.4f}", flush=True)
                # Check input for NaN
                print(f"[NaN] input range: {input_ids.min().item()} to {input_ids.max().item()}", flush=True)
                # Check parameters for NaN
                nan_params = sum(1 for n, p in model.named_parameters() if torch.isnan(p).any())
                inf_params = sum(1 for n, p in model.named_parameters() if torch.isinf(p).any())
                print(f"[NaN] params with NaN: {nan_params}, with Inf: {inf_params}", flush=True)
                if nan_params > 0:
                    for n, p in model.named_parameters():
                        if torch.isnan(p).any():
                            print(f"[NaN] param {n}: {torch.isnan(p).sum().item()} NaN values", flush=True)

            # Backward timing
            bwd_t0 = time.time()
            if torch.isnan(loss) or torch.isinf(loss):
                print(f"[SKIP-BWD] Skipping backward for NaN/inf loss at step={global_step}", flush=True)
                # Zero out any partial gradients
                optimizer.zero_grad()
                bwd_elapsed = 0.0
            else:
                loss.backward()
                bwd_elapsed = time.time() - bwd_t0

            # Check gradients for NaN/Inf after backward (early steps only,
            # since the full-parameter scan is expensive).
            if global_step < 20:
                nan_grads = []
                inf_grads = []
                max_grad = 0.0
                for n, p in model.named_parameters():
                    if p.grad is not None:
                        if torch.isnan(p.grad).any():
                            nan_grads.append(n)
                        if torch.isinf(p.grad).any():
                            inf_grads.append(n)
                        gmax = p.grad.abs().max().item()
                        if gmax > max_grad:
                            max_grad = gmax
                if nan_grads:
                    print(f"[GRAD-NaN] {len(nan_grads)} params have NaN grads: {nan_grads[:5]}", flush=True)
                if inf_grads:
                    print(f"[GRAD-Inf] {len(inf_grads)} params have Inf grads: {inf_grads[:5]}", flush=True)
                print(f"[GRAD] max_grad_abs={max_grad:.4f}", flush=True)

            step_loss += loss.item()
            tokens_processed += input_ids.numel()

            # Debug: log EVERY micro-step for first 20 steps
            if global_step < 20:
                print(f"[DEBUG] step={global_step} micro={micro_step} "
                      f"fwd={fwd_elapsed:.1f}s bwd={bwd_elapsed:.1f}s "
                      f"loss={loss.item()*grad_accum:.4f} "
                      f"shape={list(input_ids.shape)}", flush=True)

        # Gradient clipping
        torch.nn.utils.clip_grad_norm_(
            model.parameters(), train_config.max_grad_norm)

        # LR schedule — group 0 gets the WSD LR directly; every other group
        # (the bio group) gets it scaled by the bio_lr/lr ratio.
        lr = get_wsd_lr(global_step, train_config)
        for pg in optimizer.param_groups:
            if pg is optimizer.param_groups[0]:
                pg['lr'] = lr
            else:
                pg['lr'] = lr * (train_config.bio_lr / train_config.lr)

        optimizer.step()

        # Re-sync TTLinear cached weights to TT devices after optimizer update
        if (not _CPU_ONLY) and (mark_tt_dirty is not None) and (sync_tt_weights is not None):
            mark_tt_dirty(model)
            sync_tt_weights(model)

        global_step += 1
        running_loss += step_loss
        step_elapsed = time.time() - step_t0
        elapsed = time.time() - train_start
        watchdog_best_loss = min(float(watchdog_best_loss), float(step_loss))
        watchdog_quality = 0.0
        if math.isfinite(watchdog_best_loss):
            # Monotonic quality proxy: lower best loss -> higher score in (0, 1].
            watchdog_quality = 1.0 / (1.0 + max(watchdog_best_loss, 0.0))
        best_loss_for_status = min(float(best_loss), float(step_loss))
        steps_per_sec = global_step / max(elapsed, 1e-6)
        estimated_remaining_s = int(
            (train_config.total_steps - global_step) / max(steps_per_sec, 0.01)
        )

        # Quality watchdog: every _WATCHDOG_INTERVAL steps, compare against
        # the last reported quality and send an NTFY push on improvement.
        if watchdog_enabled and global_step % _WATCHDOG_INTERVAL == 0:
            watchdog_last_report_step = global_step
            if watchdog_last_report_quality is None:
                # First report establishes the baseline; no notification.
                watchdog_last_report_quality = watchdog_quality
                watchdog_last_report_loss = watchdog_best_loss
                print(
                    f"[WATCHDOG] baseline step={global_step} "
                    f"quality={watchdog_quality:.6f} best_loss={watchdog_best_loss:.6f}"
                )
            else:
                quality_delta = watchdog_quality - watchdog_last_report_quality
                if quality_delta > _WATCHDOG_MIN_DELTA:
                    prev_quality = watchdog_last_report_quality
                    prev_loss = (
                        watchdog_last_report_loss
                        if watchdog_last_report_loss is not None
                        else float("nan")
                    )
                    watchdog_last_report_quality = watchdog_quality
                    watchdog_last_report_loss = watchdog_best_loss
                    watchdog_last_improvement_step = global_step
                    print(
                        f"[WATCHDOG] IMPROVED step={global_step} "
                        f"quality {prev_quality:.6f} -> {watchdog_quality:.6f} "
                        f"(delta={quality_delta:.6f}) "
                        f"best_loss {prev_loss:.6f} -> {watchdog_best_loss:.6f}"
                    )
                    if watchdog_ntfy_url:
                        title = f"Newtie quality improved p{train_config.phase} s{global_step}"
                        body = (
                            f"Phase: {train_config.phase} ({train_config.phase_name})\n"
                            f"Step: {global_step}/{train_config.total_steps}\n"
                            f"Quality: {prev_quality:.6f} -> {watchdog_quality:.6f} "
                            f"(+{quality_delta:.6f})\n"
                            f"Best loss: {prev_loss:.6f} -> {watchdog_best_loss:.6f}\n"
                            f"LR: {lr:.3e}\n"
                            f"Elapsed: {int(elapsed)}s"
                        )
                        ok, detail = send_ntfy_message(
                            url=watchdog_ntfy_url,
                            body=body,
                            title=title,
                            priority=_NTFY_PRIORITY,
                            tags=_NTFY_TAGS,
                            token=_NTFY_TOKEN,
                            click=_NTFY_CLICK,
                            timeout_s=_NTFY_TIMEOUT_S,
                        )
                        if ok:
                            watchdog_ntfy_sent += 1
                            print(f"[WATCHDOG] NTFY sent ({detail})")
                        else:
                            watchdog_ntfy_fail += 1
                            print(f"[WATCHDOG] NTFY failed ({detail})")

        # Per-step status file for the dashboard.
        write_status(log_dir, {
            "state": "training",
            "phase": train_config.phase,
            "phase_name": train_config.phase_name,
            "step": global_step,
            "total_steps": train_config.total_steps,
            "progress_pct": round(global_step / train_config.total_steps * 100, 1),
            "loss": round(step_loss, 6),
            "best_loss": round(best_loss_for_status, 6),
            "lr": lr,
            "tps": round(tokens_processed / max(elapsed, 1e-6), 1),
            "tokens_seen": tokens_processed,
            "elapsed_seconds": int(elapsed),
            "hw_type": hw_type,
            "pipeline_stages": n_stages,
            "pipeline_efficiency": round(hw_config.pipeline_efficiency * 100, 1),
            "estimated_remaining_s": estimated_remaining_s,
            "watchdog_enabled": watchdog_enabled,
            "watchdog_interval": _WATCHDOG_INTERVAL,
            "watchdog_quality": round(watchdog_quality, 8),
            "watchdog_best_loss": (
                round(watchdog_best_loss, 8) if math.isfinite(watchdog_best_loss) else None
            ),
            "watchdog_last_report_step": watchdog_last_report_step,
            "watchdog_last_improvement_step": watchdog_last_improvement_step,
            "watchdog_ntfy_sent": watchdog_ntfy_sent,
            "watchdog_ntfy_fail": watchdog_ntfy_fail,
        })

        # Debug: total step time for first 20 steps
        if global_step <= 20:
            # Report TT backward stats
            try:
                from model.tt_ops import _bwd_tt_ok, _bwd_tt_fail, _bwd_cpu
                bwd_stats = f" tt_bwd={_bwd_tt_ok} cpu_bwd={_bwd_cpu} fail={_bwd_tt_fail}"
            except ImportError:
                bwd_stats = ""
            print(f"[DEBUG] Step {global_step} DONE in {step_elapsed:.1f}s "
                  f"(loss={step_loss:.4f}){bwd_stats}", flush=True)

        # ── Logging ──
        if global_step % train_config.log_interval == 0:
            tps = tokens_processed / elapsed
            avg_loss = running_loss / train_config.log_interval
            running_loss = 0.0

            # MoE expert usage logging
            moe_aux_val = 0.0
            if hasattr(output, 'aux_loss') and output.aux_loss is not None:
                moe_aux_val = output.aux_loss.item()
            moe_balance_str = ""
            if train_config.log_moe_usage:
                expert_counts_all = []
                for layer in model.layers:
                    counts = getattr(layer, '_last_expert_counts', None)
                    if counts is not None:
                        expert_counts_all.append(counts)
                if expert_counts_all:
                    total_counts = torch.stack(expert_counts_all).sum(0)  # [n_experts]
                    total_tokens = total_counts.sum().item()
                    if total_tokens > 0:
                        usage_pct = (total_counts / total_tokens * 100)
                        top3 = usage_pct.topk(3)
                        bot3_vals, bot3_idx = usage_pct.topk(3, largest=False)
                        balance = 1.0 - (usage_pct.std() / usage_pct.mean()).item()  # 1.0 = perfect
                        moe_balance_str = f" moe_bal={balance:.3f}"

            print(f"[STEP {global_step:>5d}/{train_config.total_steps}] "
                  f"loss={avg_loss:.4f} lr={lr:.2e} "
                  f"tps={tps:.0f} "
                  f"aux={moe_aux_val:.4f}{moe_balance_str} "
                  f"elapsed={elapsed:.0f}s", flush=True)

            # Detailed MoE usage every log interval
            # NOTE(review): top3/bot3 are only bound when total_tokens > 0
            # above — confirm counts can never be all-zero here.
            if train_config.log_moe_usage and expert_counts_all:
                print(f" [MOE] experts={len(total_counts)} active={int((total_counts > 0).sum())} "
                      f"top3=[{int(top3.indices[0])}:{top3.values[0]:.1f}% "
                      f"{int(top3.indices[1])}:{top3.values[1]:.1f}% "
                      f"{int(top3.indices[2])}:{top3.values[2]:.1f}%] "
                      f"bot3=[{int(bot3_idx[0])}:{bot3_vals[0]:.1f}% "
                      f"{int(bot3_idx[1])}:{bot3_vals[1]:.1f}% "
                      f"{int(bot3_idx[2])}:{bot3_vals[2]:.1f}%]", flush=True)

            # Log bio-gate values
            if train_config.log_bio_gates and output.gate_dict:
                # Scalar-shaped gates only; info dicts are handled below.
                skip_keys = {"quality", "confidence_base", "energy",
                             "spike_stdp_info", "spike_hierarchy_info"}
                gate_str = " ".join(
                    f"{k}={v.mean().item():.3f}"
                    for k, v in output.gate_dict.items()
                    if isinstance(v, torch.Tensor) and v.shape[-1] == 1
                    and k not in skip_keys
                )
                print(f" [GATES] {gate_str}", flush=True)

                # Log spike dynamics — this is how you SEE the SHAN working
                sh_info = output.gate_dict.get("spike_hierarchy_info")
                if isinstance(sh_info, dict):
                    rates = {k: (v.item() if isinstance(v, torch.Tensor) else v)
                             for k, v in sh_info.items()}
                    print(f" [SPIKE] word={rates.get('spike_rate_word',0):.4f} "
                          f"sent={rates.get('spike_rate_sentence',0):.4f} "
                          f"para={rates.get('spike_rate_paragraph',0):.4f}", flush=True)

                ss_info = output.gate_dict.get("spike_stdp_info")
                if isinstance(ss_info, dict):
                    rates = {k: (v.item() if isinstance(v, torch.Tensor) else v)
                             for k, v in ss_info.items()}
                    print(f" [STDP-SPIKE] pre={rates.get('pre_spike_rate',0):.4f} "
                          f"post={rates.get('post_spike_rate',0):.4f}", flush=True)

                # Write gates for dashboard
                write_bio_gates(log_dir, output.gate_dict)

            if output.energy is not None:
                avg_energy = output.energy.mean().item()
                print(f" [ENERGY] avg={avg_energy:.3f}", flush=True)

        # ── Pause check (from dashboard) ──
        if check_pause(log_dir):
            print(f" [PAUSE] Dashboard pause signal received. Waiting...")
            while check_pause(log_dir):
                time.sleep(5)
            print(f" [RESUME] Continuing training.")

        # ── Save checkpoint ──
        if global_step % train_config.save_interval == 0:
            save_path = os.path.join(
                checkpoint_dir,
                f"newtie_v5_p{train_config.phase}_step{global_step}.pt")
            os.makedirs(os.path.dirname(save_path), exist_ok=True)

            torch.save({
                "step": global_step,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "config": asdict(model_config) if hasattr(model_config, '__dataclass_fields__') else vars(model_config),
                "train_config": asdict(train_config) if hasattr(train_config, '__dataclass_fields__') else vars(train_config),
                "loss": step_loss,
                "tokens": tokens_processed,
            }, save_path)
            print(f" [SAVE] {save_path}")

        # Best-loss checkpoint (weights + config only, no optimizer state).
        if step_loss < best_loss:
            best_loss = step_loss
            best_path = os.path.join(
                checkpoint_dir, "newtie_v5_best.pt")
            torch.save({
                "step": global_step,
                "model_state_dict": model.state_dict(),
                "config": asdict(model_config) if hasattr(model_config, '__dataclass_fields__') else vars(model_config),
                "loss": best_loss,
            }, best_path)
            print(f" [BEST] {best_path} (loss={best_loss:.4f})")

    # ── Final save ──
    total_time = time.time() - train_start

    # Cleanup device mesh
    if device_mesh:
        device_mesh.close()

    print(f"\n{'═'*60}")
    print(f" TRAINING COMPLETE")
    print(f" Phase: {train_config.phase_name}")
    print(f" Steps: {global_step}")
    print(f" Tokens: {tokens_processed:,}")
    print(f" Time: {total_time/3600:.1f}h")
    print(f" Final loss: {step_loss:.4f}")
    print(f" Best loss: {best_loss:.4f}")
    print(f" Pipeline: {n_stages} stages × {hw_config.layers_per_stage} layers/chip")
    print(f"{'═'*60}")

    # Write final status for dashboard
    write_status(log_dir, {
        "state": "completed",
        "phase": train_config.phase,
        "phase_name": train_config.phase_name,
        "step": global_step,
        "total_steps": train_config.total_steps,
        "progress_pct": 100.0,
        "loss": round(step_loss, 6),
        "best_loss": round(best_loss, 6),
        "tokens_seen": tokens_processed,
        "elapsed_seconds": int(total_time),
        "hw_type": hw_type,
        "pipeline_stages": n_stages,
        "pipeline_efficiency": round(hw_config.pipeline_efficiency * 100, 1),
        "watchdog_enabled": watchdog_enabled,
        "watchdog_interval": _WATCHDOG_INTERVAL,
        "watchdog_quality": round(watchdog_quality, 8),
        "watchdog_best_loss": (
            round(watchdog_best_loss, 8) if math.isfinite(watchdog_best_loss) else None
        ),
        "watchdog_last_report_step": watchdog_last_report_step,
        "watchdog_last_improvement_step": watchdog_last_improvement_step,
        "watchdog_ntfy_sent": watchdog_ntfy_sent,
        "watchdog_ntfy_fail": watchdog_ntfy_fail,
    })

    return model
|
| 1452 |
+
|
| 1453 |
+
|
| 1454 |
+
if __name__ == "__main__":
    # CLI entry point: parse arguments, apply module-global overrides, pick
    # the phase configuration, then hand off to train().
    parser = argparse.ArgumentParser(description="Newtie V5 Training (4× N300S)")

    # ── Run-shape / path options ──
    parser.add_argument(
        "--max-steps", type=int, default=0,
        help="Optional hard cap on steps for smoke/verification runs (0 = full phase)",
    )
    parser.add_argument(
        "--data-dir", type=str, default="/data/newtie_v5/training_data",
        help="Directory containing .jsonl training files",
    )
    parser.add_argument(
        "--checkpoint-dir", type=str, default="/data/newtie_v5/checkpoints",
        help="Directory for saving checkpoints",
    )
    parser.add_argument(
        "--log-dir", type=str, default="/data/newtie_v5/logs",
        help="Directory for dashboard status files",
    )
    parser.add_argument(
        "--num-devices", type=int, default=0,
        help="Number of N300S cards (0 = auto-detect)",
    )
    parser.add_argument(
        "--resume", type=str, default=None,
        help="Path to checkpoint to resume from",
    )
    parser.add_argument(
        "--resume-weights-only", action="store_true",
        help="Load model weights from --resume but reset optimizer and step count",
    )
    parser.add_argument(
        "--phase", type=int, choices=[1, 2, 3, 4, 5, 6, 7, 8, 9], default=1,
        help="Training phase (1=cognitive, 2=tool, 3=self, 4=consciousness, 5=instruct, 6=memory_tool, 7=grounded_agency, 8=reasoning_agency, 9=instruction_response)",
    )
    parser.add_argument(
        "--cpu-only", action="store_true",
        help="Force CPU-only (disable Wormhole TT acceleration)",
    )

    # ── Watchdog / NTFY notification options ──
    parser.add_argument(
        "--watchdog-interval", type=int, default=None,
        help="Quality watchdog cadence in steps (default: env NEWTIE_WATCHDOG_INTERVAL or 25)",
    )
    parser.add_argument(
        "--watchdog-min-delta", type=float, default=None,
        help="Minimum quality delta to count as improvement (default: env NEWTIE_WATCHDOG_MIN_DELTA)",
    )
    parser.add_argument(
        "--disable-watchdog", action="store_true",
        help="Disable quality watchdog notifications",
    )
    parser.add_argument(
        "--ntfy-topic", type=str, default=None,
        help="NTFY topic name (e.g. my-newtie-topic)",
    )
    parser.add_argument(
        "--ntfy-url", type=str, default=None,
        help="Full NTFY publish URL (overrides --ntfy-topic)",
    )
    parser.add_argument(
        "--ntfy-token", type=str, default=None,
        help="NTFY bearer token (optional)",
    )
    parser.add_argument(
        "--ntfy-priority", type=str, default=None,
        help="NTFY priority header (1-5)",
    )
    parser.add_argument(
        "--ntfy-tags", type=str, default=None,
        help="NTFY tags header (comma-separated)",
    )
    parser.add_argument(
        "--ntfy-click", type=str, default=None,
        help="NTFY click URL",
    )
    args = parser.parse_args()

    # Apply CPU-only override early (before training starts).
    if args.cpu_only:
        os.environ["NEWTIE_CPU_ONLY"] = "1"
        _CPU_ONLY = True

    # Apply watchdog CLI overrides onto the module-level settings.
    if args.disable_watchdog:
        _WATCHDOG_ENABLED = False
    if args.watchdog_interval is not None:
        _WATCHDOG_INTERVAL = max(1, int(args.watchdog_interval))
    if args.watchdog_min_delta is not None:
        _WATCHDOG_MIN_DELTA = max(0.0, float(args.watchdog_min_delta))

    # The NTFY string overrides all follow one shape: when the flag was
    # given, strip whitespace and install into the matching module global.
    for _global_name, _cli_value in (
        ("_NTFY_TOPIC", args.ntfy_topic),
        ("_NTFY_URL", args.ntfy_url),
        ("_NTFY_TOKEN", args.ntfy_token),
        ("_NTFY_PRIORITY", args.ntfy_priority),
        ("_NTFY_TAGS", args.ntfy_tags),
        ("_NTFY_CLICK", args.ntfy_click),
    ):
        if _cli_value is not None:
            globals()[_global_name] = _cli_value.strip()

    # Collect .jsonl data paths from --data-dir (None → trainer default).
    data_paths = None
    if os.path.isdir(args.data_dir):
        found = glob.glob(os.path.join(args.data_dir, "*.jsonl"))
        if found:
            data_paths = found
            print(f"[DATA] Found {len(found)} JSONL files in {args.data_dir}")

    # Phase → config-factory dispatch; argparse choices guarantee 1..9.
    _phase_factories = {
        1: get_phase1_config,
        2: get_phase2_config,
        3: get_phase3_config,
        4: get_phase4_config,
        5: get_phase5_config,
        6: get_phase6_config,
        7: get_phase7_config,
        8: get_phase8_config,
        9: get_phase9_config,
    }
    model_config, train_config = _phase_factories[args.phase]()

    # --max-steps can only shrink the run for smoke tests, never extend it.
    if args.max_steps and args.max_steps > 0:
        requested_steps = int(args.max_steps)
        if requested_steps < train_config.total_steps:
            print(f"[CONFIG] --max-steps applied: {train_config.total_steps} -> {requested_steps}")
            train_config.total_steps = requested_steps

    train(
        model_config=model_config,
        train_config=train_config,
        data_paths=data_paths,
        resume_from=args.resume,
        resume_weights_only=args.resume_weights_only,
        num_devices=args.num_devices,
        log_dir=args.log_dir,
        checkpoint_dir=args.checkpoint_dir,
    )
|
backups/20260311_105528/manifest.json
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"backup_ts": "2026-03-11T09:55:29Z",
|
| 3 |
+
"repo": "Eaivee/newtie-tt-checkpoints",
|
| 4 |
+
"files": {
|
| 5 |
+
"checkpoints": [
|
| 6 |
+
"newtie_v5_p9_step50.pt",
|
| 7 |
+
"newtie_v5_p8_step320.pt",
|
| 8 |
+
"newtie_v5_p7_step300.pt",
|
| 9 |
+
"newtie_v5_p6_step1660.pt"
|
| 10 |
+
],
|
| 11 |
+
"config": "train.py",
|
| 12 |
+
"logs": "logs/koyeb_phase9_watchdog.log"
|
| 13 |
+
},
|
| 14 |
+
"note": "Phase 9 watchdog snapshot; latest seen step in logs=320/2600."
|
| 15 |
+
}
|