mindi-backup / AMD Cloud Logs.txt
Mindigenous
Sync latest workspace state: data/scripts updates and archive cleanup
5ae3e12
root@jupyter-launcher-260329213424-8481:/workspace# apt update && apt install -y git-lfs
Get:1 https://repo.radeon.com/amdgpu/7.0/ubuntu jammy InRelease [3183 B]
Get:2 https://repo.radeon.com/rocm/apt/7.0 jammy InRelease [2603 B]
Get:3 http://archive.ubuntu.com/ubuntu jammy InRelease [270 kB]
Get:4 https://repo.radeon.com/amdgpu/7.0/ubuntu jammy/main amd64 Packages [1329 B]
Get:5 http://security.ubuntu.com/ubuntu jammy-security InRelease [129 kB]
Get:6 http://archive.ubuntu.com/ubuntu jammy-updates InRelease [128 kB]
Get:7 http://archive.ubuntu.com/ubuntu jammy-backports InRelease [127 kB]
Get:8 https://repo.radeon.com/rocm/apt/7.0 jammy/main amd64 Packages [82.7 kB]
Get:9 http://archive.ubuntu.com/ubuntu jammy/restricted amd64 Packages [164 kB]
Get:10 http://archive.ubuntu.com/ubuntu jammy/main amd64 Packages [1792 kB]
Get:11 http://archive.ubuntu.com/ubuntu jammy/multiverse amd64 Packages [266 kB]
Get:12 http://archive.ubuntu.com/ubuntu jammy/universe amd64 Packages [17.5 MB]
Get:13 https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy InRelease [18.1 kB]
Get:14 http://security.ubuntu.com/ubuntu jammy-security/universe amd64 Packages [1309 kB]
Get:15 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 Packages [1620 kB]
Get:16 http://archive.ubuntu.com/ubuntu jammy-updates/restricted amd64 Packages [7011 kB]
Get:17 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 Packages [4173 kB]
Get:18 https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy/main amd64 Packages [38.9 kB]
Get:19 http://archive.ubuntu.com/ubuntu jammy-updates/multiverse amd64 Packages [70.9 kB]
Get:20 http://archive.ubuntu.com/ubuntu jammy-backports/universe amd64 Packages [35.6 kB]
Get:21 http://archive.ubuntu.com/ubuntu jammy-backports/main amd64 Packages [84.0 kB]
Get:22 http://security.ubuntu.com/ubuntu jammy-security/multiverse amd64 Packages [62.6 kB]
Get:23 http://security.ubuntu.com/ubuntu jammy-security/restricted amd64 Packages [6803 kB]
Get:24 http://security.ubuntu.com/ubuntu jammy-security/main amd64 Packages [3842 kB]
Fetched 45.5 MB in 2s (22.4 MB/s)
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
103 packages can be upgraded. Run 'apt list --upgradable' to see them.
W: https://repo.radeon.com/amdgpu/7.0/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
W: https://repo.radeon.com/rocm/apt/7.0/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
The following NEW packages will be installed:
git-lfs
0 upgraded, 1 newly installed, 0 to remove and 103 not upgraded.
Need to get 3544 kB of archives.
After this operation, 10.5 MB of additional disk space will be used.
Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 git-lfs amd64 3.0.2-1ubuntu0.3 [3544 kB]
Fetched 3544 kB in 1s (3462 kB/s)
debconf: delaying package configuration, since apt-utils is not installed
Selecting previously unselected package git-lfs.
(Reading database ... 58520 files and directories currently installed.)
Preparing to unpack .../git-lfs_3.0.2-1ubuntu0.3_amd64.deb ...
Unpacking git-lfs (3.0.2-1ubuntu0.3) ...
Setting up git-lfs (3.0.2-1ubuntu0.3) ...
root@jupyter-launcher-260329213424-8481:/workspace# apt update && apt install -y git-lfs
Hit:1 https://repo.radeon.com/amdgpu/7.0/ubuntu jammy InRelease
Hit:2 https://repo.radeon.com/rocm/apt/7.0 jammy InRelease
Hit:3 http://security.ubuntu.com/ubuntu jammy-security InRelease
Hit:4 http://archive.ubuntu.com/ubuntu jammy InRelease
Hit:5 http://archive.ubuntu.com/ubuntu jammy-updates InRelease
Hit:6 https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy InRelease
Hit:7 http://archive.ubuntu.com/ubuntu jammy-backports InRelease
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
103 packages can be upgraded. Run 'apt list --upgradable' to see them.
W: https://repo.radeon.com/amdgpu/7.0/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
W: https://repo.radeon.com/rocm/apt/7.0/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details.
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
git-lfs is already the newest version (3.0.2-1ubuntu0.3).
0 upgraded, 0 newly installed, 0 to remove and 103 not upgraded.
root@jupyter-launcher-260329213424-8481:/workspace# git lfs install
Git LFS initialized.
root@jupyter-launcher-260329213424-8481:/workspace# git clone https://huggingface.co/Mindigenous/mindi-backup
Cloning into 'mindi-backup'...
remote: Enumerating objects: 227, done.
remote: Counting objects: 100% (224/224), done.
remote: Compressing objects: 100% (208/208), done.
remote: Total 227 (delta 6), reused 223 (delta 6), pack-reused 3 (from 1)
Receiving objects: 100% (227/227), 111.59 KiB | 22.32 MiB/s, done.
Resolving deltas: 100% (6/6), done.
Filtering content: 100% (85/85), 8.77 GiB | 85.18 MiB/s, done.
Encountered 3 file(s) that may not have been copied correctly on Windows:
checkpoints/component5_420m/step_3000.pt
checkpoints/component5_420m/step_3200.pt
checkpoints/component5_420m/latest.pt
See: `git lfs help smudge` for more details.
root@jupyter-launcher-260329213424-8481:/workspace# cd mindi-backup
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# ls -lh
total 967M
-rw-r--r-- 1 root root 1.4K Mar 29 21:35 CONTEXT_SUMMARY.md
-rw-r--r-- 1 root root 3.2K Mar 29 21:35 README_COMPONENT_1_SETUP.md
-rw-r--r-- 1 root root 1.6K Mar 29 21:35 README_COMPONENT_3_DATASET_PIPELINE.md
-rw-r--r-- 1 root root 990 Mar 29 21:35 README_COMPONENT_4_MODEL_ARCHITECTURE.md
-rw-r--r-- 1 root root 1.5K Mar 29 21:35 README_COMPONENT_5_TRAINING_PIPELINE.md
-rw-r--r-- 1 root root 632 Mar 29 21:35 README_COMPONENT_8_CHAT_INTERFACE.md
-rw-r--r-- 1 root root 3.8K Mar 29 21:35 README_FINAL_PROJECT.md
drwxr-xr-x 6 root root 4.0K Mar 29 21:35 artifacts
-rw-r--r-- 1 root root 322M Mar 29 21:36 backup_step1000.tar.gz
-rw-r--r-- 1 root root 322M Mar 29 21:36 backup_step2000.tar.gz
-rw-r--r-- 1 root root 323M Mar 29 21:36 backup_step3000.tar.gz
drwxr-xr-x 3 root root 4.0K Mar 29 21:35 checkpoints
-rw-r--r-- 1 root root 1.3K Mar 29 21:35 config.py
drwxr-xr-x 2 root root 4.0K Mar 29 21:35 configs
drwxr-xr-x 7 root root 4.0K Mar 29 21:36 data
-rw-r--r-- 1 root root 7.4K Mar 29 21:35 data_fetch.py
-rw-r--r-- 1 root root 1.6K Mar 29 21:35 dataset.py
drwxr-xr-x 3 root root 4.0K Mar 29 21:35 hf_release
drwxr-xr-x 3 root root 4.0K Mar 29 21:35 hf_space
drwxr-xr-x 2 root root 4.0K Mar 29 21:36 logs
drwxr-xr-x 4 root root 4.0K Mar 29 21:35 models
drwxr-xr-x 3 root root 4.0K Mar 29 21:35 release
-rw-r--r-- 1 root root 44 Mar 29 21:35 requirements.txt
-rw-r--r-- 1 root root 202 Mar 29 21:35 requirements_optional_windows_bitsandbytes.txt
drwxr-xr-x 2 root root 4.0K Mar 29 21:35 scripts
drwxr-xr-x 10 root root 4.0K Mar 29 21:35 src
-rw-r--r-- 1 root root 7.5K Mar 29 21:35 train.py
-rw-r--r-- 1 root root 1.5K Mar 29 21:35 utils.py
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# tar -xzvf backup_step3000.tar.gz
output/
output/tokenizer/
output/lora_adapters/
output/checkpoints/
output/checkpoints/checkpoint-2500/
output/checkpoints/checkpoint-2500/rng_state.pth
output/checkpoints/checkpoint-2500/adapter_config.json
output/checkpoints/checkpoint-2500/README.md
output/checkpoints/checkpoint-2500/adapter_model.safetensors
output/checkpoints/checkpoint-2500/training_args.bin
output/checkpoints/checkpoint-2500/scheduler.pt
output/checkpoints/checkpoint-2500/trainer_state.json
output/checkpoints/checkpoint-2500/scaler.pt
output/checkpoints/checkpoint-2500/optimizer.pt
output/checkpoints/checkpoint-2250/
output/checkpoints/checkpoint-2250/rng_state.pth
output/checkpoints/checkpoint-2250/adapter_config.json
output/checkpoints/checkpoint-2250/README.md
output/checkpoints/checkpoint-2250/adapter_model.safetensors
output/checkpoints/checkpoint-2250/training_args.bin
output/checkpoints/checkpoint-2250/scheduler.pt
output/checkpoints/checkpoint-2250/trainer_state.json
output/checkpoints/checkpoint-2250/scaler.pt
output/checkpoints/checkpoint-2250/optimizer.pt
output/checkpoints/checkpoint-2750/
output/checkpoints/checkpoint-2750/rng_state.pth
output/checkpoints/checkpoint-2750/adapter_config.json
output/checkpoints/checkpoint-2750/README.md
output/checkpoints/checkpoint-2750/adapter_model.safetensors
output/checkpoints/checkpoint-2750/training_args.bin
output/checkpoints/checkpoint-2750/scheduler.pt
output/checkpoints/checkpoint-2750/trainer_state.json
output/checkpoints/checkpoint-2750/scaler.pt
output/checkpoints/checkpoint-2750/optimizer.pt
output/checkpoints/checkpoint-3000/
output/checkpoints/checkpoint-3000/rng_state.pth
output/checkpoints/checkpoint-3000/adapter_config.json
output/checkpoints/checkpoint-3000/README.md
output/checkpoints/checkpoint-3000/adapter_model.safetensors
output/checkpoints/checkpoint-3000/training_args.bin
output/checkpoints/checkpoint-3000/scheduler.pt
output/checkpoints/checkpoint-3000/trainer_state.json
output/checkpoints/checkpoint-3000/scaler.pt
output/checkpoints/checkpoint-3000/optimizer.pt
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# ls
CONTEXT_SUMMARY.md README_COMPONENT_5_TRAINING_PIPELINE.md backup_step1000.tar.gz config.py dataset.py models requirements_optional_windows_bitsandbytes.txt utils.py
README_COMPONENT_1_SETUP.md README_COMPONENT_8_CHAT_INTERFACE.md backup_step2000.tar.gz configs hf_release output scripts
README_COMPONENT_3_DATASET_PIPELINE.md README_FINAL_PROJECT.md backup_step3000.tar.gz data hf_space release src
README_COMPONENT_4_MODEL_ARCHITECTURE.md artifacts checkpoints data_fetch.py logs requirements.txt train.py
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# python train.py
2026-03-29 21:42:06 | WARNING | Primary model path /workspace/mindi-backup/model is missing HF files. Falling back to /workspace/mindi-backup/hf_release/MINDI-1.0-420M
2026-03-29 21:42:06 | INFO | Loading model and tokenizer from /workspace/mindi-backup/hf_release/MINDI-1.0-420M
trainable params: 7,630,848 || all params: 431,565,696 || trainable%: 1.7682
2026-03-29 21:42:09 | INFO | Loaded 30187 samples from /workspace/mindi-backup/data/train.jsonl
2026-03-29 21:42:09 | INFO | Starting training. Resume mode: True
2026-03-29 21:42:43 | WARNING | Resume requested but no valid checkpoint found (MindiForCausalLM does not support gradient checkpointing.). Starting fresh training.
Traceback (most recent call last):
File "/workspace/mindi-backup/train.py", line 100, in _maybe_resume_train
trainer.train(resume_from_checkpoint=True)
File "/usr/local/lib/python3.12/dist-packages/transformers/trainer.py", line 2325, in train
return inner_training_loop(
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/transformers/trainer.py", line 2447, in _inner_training_loop
self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs)
File "/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py", line 3689, in gradient_checkpointing_enable
raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
ValueError: MindiForCausalLM does not support gradient checkpointing.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/workspace/mindi-backup/train.py", line 228, in <module>
train(resume=not args.no_resume and TRAINING_CONFIG.resume_training)
File "/workspace/mindi-backup/train.py", line 198, in train
_maybe_resume_train(trainer, logger, resume_requested=resume)
File "/workspace/mindi-backup/train.py", line 106, in _maybe_resume_train
trainer.train()
File "/usr/local/lib/python3.12/dist-packages/transformers/trainer.py", line 2325, in train
return inner_training_loop(
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/dist-packages/transformers/trainer.py", line 2447, in _inner_training_loop
self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=args.gradient_checkpointing_kwargs)
File "/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py", line 3689, in gradient_checkpointing_enable
raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
ValueError: MindiForCausalLM does not support gradient checkpointing.
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# nano train.py
bash: nano: command not found
root@jupyter-launcher-260329213424-8481:/workspace/mindi-backup# python train.py
2026-03-29 21:45:01 | WARNING | Primary model path /workspace/mindi-backup/model is missing HF files. Falling back to /workspace/mindi-backup/hf_release/MINDI-1.0-420M
2026-03-29 21:45:01 | INFO | Loading model and tokenizer from /workspace/mindi-backup/hf_release/MINDI-1.0-420M
trainable params: 7,630,848 || all params: 431,565,696 || trainable%: 1.7682
2026-03-29 21:45:04 | INFO | Loaded 30187 samples from /workspace/mindi-backup/data/train.jsonl
2026-03-29 21:45:05 | INFO | Starting training. Resume mode: True
{'loss': 7.1909, 'grad_norm': 4.033649444580078, 'learning_rate': 9.563669040059571e-06, 'epoch': 0.8}
{'loss': 6.8721, 'grad_norm': 3.1705539226531982, 'learning_rate': 9.556629582622677e-06, 'epoch': 0.81}
{'loss': 7.0743, 'grad_norm': 4.005091667175293, 'learning_rate': 9.549536427140578e-06, 'epoch': 0.81}
{'loss': 6.9808, 'grad_norm': 3.571833848953247, 'learning_rate': 9.542389657203257e-06, 'epoch': 0.82}
{'loss': 6.8726, 'grad_norm': 3.3678417205810547, 'learning_rate': 9.53518935703252e-06, 'epoch': 0.82}
{'loss': 6.8989, 'grad_norm': 3.275954484939575, 'learning_rate': 9.527935611481004e-06, 'epoch': 0.83}
{'loss': 7.1032, 'grad_norm': 2.9973928928375244, 'learning_rate': 9.520628506031182e-06, 'epoch': 0.83}
{'loss': 6.9012, 'grad_norm': 3.545182704925537, 'learning_rate': 9.513268126794352e-06, 'epoch': 0.84}
{'loss': 6.8152, 'grad_norm': 3.3187150955200195, 'learning_rate': 9.505854560509615e-06, 'epoch': 0.84}
{'loss': 6.9742, 'grad_norm': 3.11761736869812, 'learning_rate': 9.498387894542871e-06, 'epoch': 0.85}
{'loss': 6.9556, 'grad_norm': 3.488884210586548, 'learning_rate': 9.490868216885768e-06, 'epoch': 0.85}
{'loss': 6.7676, 'grad_norm': 3.011949300765991, 'learning_rate': 9.48329561615468e-06, 'epoch': 0.86}
{'loss': 6.9605, 'grad_norm': 3.6621603965759277, 'learning_rate': 9.475670181589662e-06, 'epoch': 0.86}
{'loss': 6.9785, 'grad_norm': 3.3519508838653564, 'learning_rate': 9.467992003053386e-06, 'epoch': 0.87}
{'loss': 6.863, 'grad_norm': 3.3262953758239746, 'learning_rate': 9.460261171030096e-06, 'epoch': 0.87}
{'loss': 6.9829, 'grad_norm': 3.331179141998291, 'learning_rate': 9.452477776624538e-06, 'epoch': 0.88}
{'loss': 6.6871, 'grad_norm': 3.370134115219116, 'learning_rate': 9.444641911560878e-06, 'epoch': 0.89}
{'loss': 6.8841, 'grad_norm': 3.558593511581421, 'learning_rate': 9.436753668181633e-06, 'epoch': 0.89}
{'loss': 6.8509, 'grad_norm': 3.670783042907715, 'learning_rate': 9.428813139446574e-06, 'epoch': 0.9}
{'loss': 6.7301, 'grad_norm': 3.489644765853882, 'learning_rate': 9.420820418931638e-06, 'epoch': 0.9}
{'loss': 6.638, 'grad_norm': 3.607030153274536, 'learning_rate': 9.412775600827817e-06, 'epoch': 0.91}
{'loss': 6.8112, 'grad_norm': 3.41335129737854, 'learning_rate': 9.404678779940053e-06, 'epoch': 0.91}
{'loss': 6.7781, 'grad_norm': 3.429769992828369, 'learning_rate': 9.39653005168612e-06, 'epoch': 0.92}
{'loss': 6.8019, 'grad_norm': 3.4179837703704834, 'learning_rate': 9.388329512095504e-06, 'epoch': 0.92}
{'loss': 6.6101, 'grad_norm': 3.2728028297424316, 'learning_rate': 9.380077257808258e-06, 'epoch': 0.93}
{'loss': 6.7738, 'grad_norm': 3.5320591926574707, 'learning_rate': 9.371773386073879e-06, 'epoch': 0.93}
{'loss': 6.5871, 'grad_norm': 4.164572238922119, 'learning_rate': 9.363417994750151e-06, 'epoch': 0.94}
{'loss': 6.9216, 'grad_norm': 4.653748989105225, 'learning_rate': 9.355011182301998e-06, 'epoch': 0.94}
{'loss': 6.8102, 'grad_norm': 3.353635311126709, 'learning_rate': 9.34655304780032e-06, 'epoch': 0.95}
{'loss': 6.5929, 'grad_norm': 3.211069107055664, 'learning_rate': 9.338043690920827e-06, 'epoch': 0.95}
{'loss': 6.5415, 'grad_norm': 3.9036269187927246, 'learning_rate': 9.329483211942864e-06, 'epoch': 0.96}
{'loss': 7.0395, 'grad_norm': 3.5718047618865967, 'learning_rate': 9.320871711748231e-06, 'epoch': 0.96}
{'loss': 6.6165, 'grad_norm': 3.5392861366271973, 'learning_rate': 9.312209291819988e-06, 'epoch': 0.97}
{'loss': 6.6293, 'grad_norm': 3.6753382682800293, 'learning_rate': 9.303496054241268e-06, 'epoch': 0.98}
{'loss': 6.6496, 'grad_norm': 3.901423931121826, 'learning_rate': 9.294732101694068e-06, 'epoch': 0.98}
{'loss': 6.7906, 'grad_norm': 4.056935787200928, 'learning_rate': 9.28591753745804e-06, 'epoch': 0.99}
{'loss': 6.8524, 'grad_norm': 3.764296531677246, 'learning_rate': 9.277052465409278e-06, 'epoch': 0.99}
{'loss': 6.7833, 'grad_norm': 4.084295749664307, 'learning_rate': 9.26813699001908e-06, 'epoch': 1.0}
{'loss': 6.4813, 'grad_norm': 3.9245924949645996, 'learning_rate': 9.259171216352742e-06, 'epoch': 1.0}
{'loss': 6.8891, 'grad_norm': 3.395155668258667, 'learning_rate': 9.250155250068294e-06, 'epoch': 1.01}
{'loss': 6.6709, 'grad_norm': 4.0898613929748535, 'learning_rate': 9.241089197415269e-06, 'epoch': 1.01}
{'loss': 6.6346, 'grad_norm': 3.8944408893585205, 'learning_rate': 9.231973165233448e-06, 'epoch': 1.02}
{'loss': 6.6908, 'grad_norm': 3.3602097034454346, 'learning_rate': 9.222807260951598e-06, 'epoch': 1.02}
{'loss': 6.85, 'grad_norm': 3.530630350112915, 'learning_rate': 9.213591592586215e-06, 'epoch': 1.03}
{'loss': 6.7814, 'grad_norm': 3.407895803451538, 'learning_rate': 9.204326268740237e-06, 'epoch': 1.03}
{'loss': 6.7538, 'grad_norm': 3.441638946533203, 'learning_rate': 9.195011398601778e-06, 'epoch': 1.04}
{'loss': 6.715, 'grad_norm': 3.903031587600708, 'learning_rate': 9.185647091942834e-06, 'epoch': 1.04}
{'loss': 6.5245, 'grad_norm': 3.5777175426483154, 'learning_rate': 9.176233459117982e-06, 'epoch': 1.05}
{'loss': 6.696, 'grad_norm': 3.97487735748291, 'learning_rate': 9.166770611063102e-06, 'epoch': 1.05}
{'loss': 6.7483, 'grad_norm': 4.139445781707764, 'learning_rate': 9.157258659294045e-06, 'epoch': 1.06}
{'loss': 6.7169, 'grad_norm': 3.6211724281311035, 'learning_rate': 9.147697715905336e-06, 'epoch': 1.07}
{'loss': 6.8314, 'grad_norm': 3.8818159103393555, 'learning_rate': 9.138087893568842e-06, 'epoch': 1.07}
{'loss': 6.7227, 'grad_norm': 3.5572359561920166, 'learning_rate': 9.12842930553245e-06, 'epoch': 1.08}
{'loss': 6.7112, 'grad_norm': 4.153123378753662, 'learning_rate': 9.118722065618734e-06, 'epoch': 1.08}
{'loss': 6.562, 'grad_norm': 3.8019614219665527, 'learning_rate': 9.108966288223605e-06, 'epoch': 1.09}
{'loss': 6.7799, 'grad_norm': 3.6672396659851074, 'learning_rate': 9.099162088314974e-06, 'epoch': 1.09}
{'loss': 6.4768, 'grad_norm': 3.598310708999634, 'learning_rate': 9.089309581431392e-06, 'epoch': 1.1}
{'loss': 6.5686, 'grad_norm': 3.7088115215301514, 'learning_rate': 9.079408883680683e-06, 'epoch': 1.1}
{'loss': 6.6908, 'grad_norm': 3.9502580165863037, 'learning_rate': 9.069460111738583e-06, 'epoch': 1.11}
{'loss': 6.4669, 'grad_norm': 4.229706287384033, 'learning_rate': 9.059463382847368e-06, 'epoch': 1.11}
{'loss': 6.7618, 'grad_norm': 4.1307525634765625, 'learning_rate': 9.049418814814463e-06, 'epoch': 1.12}
{'loss': 6.7429, 'grad_norm': 3.6580512523651123, 'learning_rate': 9.039326526011056e-06, 'epoch': 1.12}
{'loss': 6.6446, 'grad_norm': 4.068822383880615, 'learning_rate': 9.029186635370708e-06, 'epoch': 1.13}
{'loss': 6.55, 'grad_norm': 3.6997809410095215, 'learning_rate': 9.018999262387951e-06, 'epoch': 1.13}
{'loss': 6.7614, 'grad_norm': 3.647613763809204, 'learning_rate': 9.00876452711687e-06, 'epoch': 1.14}
{'loss': 6.3108, 'grad_norm': 4.199386119842529, 'learning_rate': 8.998482550169703e-06, 'epoch': 1.14}
{'loss': 6.5506, 'grad_norm': 4.422983169555664, 'learning_rate': 8.988153452715406e-06, 'epoch': 1.15}
{'loss': 6.6576, 'grad_norm': 3.8606784343719482, 'learning_rate': 8.977777356478238e-06, 'epoch': 1.16}
{'loss': 6.6042, 'grad_norm': 4.240106582641602, 'learning_rate': 8.967354383736312e-06, 'epoch': 1.16}
{'loss': 6.7848, 'grad_norm': 3.935682773590088, 'learning_rate': 8.956884657320171e-06, 'epoch': 1.17}
{'loss': 6.809, 'grad_norm': 3.8408560752868652, 'learning_rate': 8.94636830061132e-06, 'epoch': 1.17}
{'loss': 6.4051, 'grad_norm': 3.807954788208008, 'learning_rate': 8.935805437540792e-06, 'epoch': 1.18}
{'loss': 6.785, 'grad_norm': 3.6497578620910645, 'learning_rate': 8.925196192587675e-06, 'epoch': 1.18}
{'loss': 6.466, 'grad_norm': 4.403098106384277, 'learning_rate': 8.91454069077765e-06, 'epoch': 1.19}
{'loss': 6.6386, 'grad_norm': 3.7430531978607178, 'learning_rate': 8.903839057681518e-06, 'epoch': 1.19}
{'loss': 6.6355, 'grad_norm': 3.8051366806030273, 'learning_rate': 8.893091419413714e-06, 'epoch': 1.2}
{'loss': 6.327, 'grad_norm': 4.103903770446777, 'learning_rate': 8.882297902630828e-06, 'epoch': 1.2}
{'loss': 6.5227, 'grad_norm': 3.8555779457092285, 'learning_rate': 8.871458634530112e-06, 'epoch': 1.21}
{'loss': 6.6998, 'grad_norm': 3.7445805072784424, 'learning_rate': 8.860573742847975e-06, 'epoch': 1.21}
{'loss': 6.7064, 'grad_norm': 4.0517473220825195, 'learning_rate': 8.849643355858485e-06, 'epoch': 1.22}
{'loss': 6.5921, 'grad_norm': 4.48715353012085, 'learning_rate': 8.838667602371851e-06, 'epoch': 1.22}
{'loss': 6.5222, 'grad_norm': 3.5811736583709717, 'learning_rate': 8.82764661173291e-06, 'epoch': 1.23}
{'loss': 6.358, 'grad_norm': 3.740990400314331, 'learning_rate': 8.8165805138196e-06, 'epoch': 1.23}
{'loss': 6.4787, 'grad_norm': 3.9410696029663086, 'learning_rate': 8.80546943904143e-06, 'epoch': 1.24}
{'loss': 6.6214, 'grad_norm': 3.5017576217651367, 'learning_rate': 8.794313518337942e-06, 'epoch': 1.25}
{'loss': 6.521, 'grad_norm': 4.726663589477539, 'learning_rate': 8.78311288317717e-06, 'epoch': 1.25}
{'loss': 6.5897, 'grad_norm': 4.564174175262451, 'learning_rate': 8.77186766555409e-06, 'epoch': 1.26}
{'loss': 6.5837, 'grad_norm': 4.211238384246826, 'learning_rate': 8.760577997989068e-06, 'epoch': 1.26}
{'loss': 6.6079, 'grad_norm': 3.6740944385528564, 'learning_rate': 8.749244013526287e-06, 'epoch': 1.27}
{'loss': 6.4388, 'grad_norm': 3.6543848514556885, 'learning_rate': 8.737865845732193e-06, 'epoch': 1.27}
{'loss': 6.3968, 'grad_norm': 3.844977378845215, 'learning_rate': 8.726443628693914e-06, 'epoch': 1.28}
{'loss': 6.5858, 'grad_norm': 3.7024519443511963, 'learning_rate': 8.714977497017675e-06, 'epoch': 1.28}
{'loss': 6.6024, 'grad_norm': 4.099799156188965, 'learning_rate': 8.70346758582723e-06, 'epoch': 1.29}
{'loss': 6.3404, 'grad_norm': 4.438448905944824, 'learning_rate': 8.691914030762238e-06, 'epoch': 1.29}
{'loss': 6.3843, 'grad_norm': 4.025275707244873, 'learning_rate': 8.680316967976702e-06, 'epoch': 1.3}
{'loss': 6.5521, 'grad_norm': 4.203976154327393, 'learning_rate': 8.668676534137335e-06, 'epoch': 1.3}
{'loss': 6.6462, 'grad_norm': 3.4420244693756104, 'learning_rate': 8.656992866421963e-06, 'epoch': 1.31}
{'loss': 6.5364, 'grad_norm': 4.195858001708984, 'learning_rate': 8.645266102517909e-06, 'epoch': 1.31}
{'loss': 6.3839, 'grad_norm': 4.207982063293457, 'learning_rate': 8.633496380620362e-06, 'epoch': 1.32}
{'loss': 6.38, 'grad_norm': 4.2781805992126465, 'learning_rate': 8.62168383943076e-06, 'epoch': 1.32}
{'loss': 6.4022, 'grad_norm': 4.422223091125488, 'learning_rate': 8.609828618155141e-06, 'epoch': 1.33}
{'loss': 6.5803, 'grad_norm': 3.9238698482513428, 'learning_rate': 8.597930856502516e-06, 'epoch': 1.34}
{'loss': 6.4962, 'grad_norm': 3.9540951251983643, 'learning_rate': 8.585990694683218e-06, 'epoch': 1.34}
{'loss': 6.615, 'grad_norm': 4.329217433929443, 'learning_rate': 8.574008273407246e-06, 'epoch': 1.35}
{'loss': 6.3084, 'grad_norm': 3.7574896812438965, 'learning_rate': 8.56198373388261e-06, 'epoch': 1.35}
{'loss': 6.3158, 'grad_norm': 4.539099216461182, 'learning_rate': 8.549917217813671e-06, 'epoch': 1.36}
{'loss': 6.4299, 'grad_norm': 3.670349597930908, 'learning_rate': 8.537808867399458e-06, 'epoch': 1.36}
{'loss': 6.292, 'grad_norm': 3.935389280319214, 'learning_rate': 8.525658825332008e-06, 'epoch': 1.37}
{'loss': 6.6246, 'grad_norm': 4.2957682609558105, 'learning_rate': 8.513467234794677e-06, 'epoch': 1.37}
{'loss': 6.858, 'grad_norm': 4.171406269073486, 'learning_rate': 8.50123423946045e-06, 'epoch': 1.38}
{'loss': 6.4571, 'grad_norm': 3.6439156532287598, 'learning_rate': 8.488959983490252e-06, 'epoch': 1.38}
{'loss': 6.657, 'grad_norm': 4.392574787139893, 'learning_rate': 8.476644611531248e-06, 'epoch': 1.39}
{'loss': 6.3989, 'grad_norm': 3.5832161903381348, 'learning_rate': 8.464288268715141e-06, 'epoch': 1.39}
{'loss': 6.489, 'grad_norm': 4.772254467010498, 'learning_rate': 8.451891100656454e-06, 'epoch': 1.4}
{'loss': 6.5326, 'grad_norm': 4.3936448097229, 'learning_rate': 8.439453253450822e-06, 'epoch': 1.4}
{'loss': 6.4461, 'grad_norm': 3.83701491355896, 'learning_rate': 8.426974873673266e-06, 'epoch': 1.41}
{'loss': 6.7059, 'grad_norm': 4.412644863128662, 'learning_rate': 8.414456108376472e-06, 'epoch': 1.42}
{'loss': 6.4425, 'grad_norm': 3.9730429649353027, 'learning_rate': 8.401897105089045e-06, 'epoch': 1.42}
{'loss': 6.5235, 'grad_norm': 4.264787673950195, 'learning_rate': 8.389298011813784e-06, 'epoch': 1.43}
{'loss': 6.5288, 'grad_norm': 3.663318395614624, 'learning_rate': 8.376658977025934e-06, 'epoch': 1.43}
{'loss': 6.4418, 'grad_norm': 4.351901054382324, 'learning_rate': 8.363980149671427e-06, 'epoch': 1.44}
{'loss': 6.4602, 'grad_norm': 5.032154560089111, 'learning_rate': 8.35126167916514e-06, 'epoch': 1.44}
{'loss': 6.3486, 'grad_norm': 4.513311862945557, 'learning_rate': 8.33850371538913e-06, 'epoch': 1.45}
{'loss': 6.5311, 'grad_norm': 4.235100269317627, 'learning_rate': 8.325706408690863e-06, 'epoch': 1.45}
{'loss': 6.3726, 'grad_norm': 4.238891124725342, 'learning_rate': 8.312869909881447e-06, 'epoch': 1.46}
{'loss': 6.3832, 'grad_norm': 3.9024808406829834, 'learning_rate': 8.29999437023385e-06, 'epoch': 1.46}
{'loss': 6.4059, 'grad_norm': 4.562249183654785, 'learning_rate': 8.287079941481127e-06, 'epoch': 1.47}
{'loss': 6.5805, 'grad_norm': 3.789057493209839, 'learning_rate': 8.274126775814619e-06, 'epoch': 1.47}
{'loss': 6.4151, 'grad_norm': 3.9197933673858643, 'learning_rate': 8.26113502588217e-06, 'epoch': 1.48}
{'loss': 6.5903, 'grad_norm': 4.0246052742004395, 'learning_rate': 8.248104844786322e-06, 'epoch': 1.48}
{'loss': 6.4788, 'grad_norm': 4.165124893188477, 'learning_rate': 8.235036386082513e-06, 'epoch': 1.49}
{'loss': 6.443, 'grad_norm': 3.9405601024627686, 'learning_rate': 8.22192980377727e-06, 'epoch': 1.49}
{'loss': 6.5982, 'grad_norm': 4.176534175872803, 'learning_rate': 8.208785252326386e-06, 'epoch': 1.5}
{'loss': 6.2452, 'grad_norm': 3.9046828746795654, 'learning_rate': 8.195602886633108e-06, 'epoch': 1.51}
{'loss': 6.3649, 'grad_norm': 3.844703197479248, 'learning_rate': 8.182382862046312e-06, 'epoch': 1.51}
{'loss': 6.1972, 'grad_norm': 4.611677169799805, 'learning_rate': 8.169125334358663e-06, 'epoch': 1.52}
{'loss': 6.5997, 'grad_norm': 4.394063949584961, 'learning_rate': 8.15583045980479e-06, 'epoch': 1.52}
{'loss': 6.4346, 'grad_norm': 4.1324238777160645, 'learning_rate': 8.142498395059433e-06, 'epoch': 1.53}
{'loss': 6.3161, 'grad_norm': 4.06502103805542, 'learning_rate': 8.12912929723561e-06, 'epoch': 1.53}
{'loss': 6.5002, 'grad_norm': 4.40083646774292, 'learning_rate': 8.11572332388276e-06, 'epoch': 1.54}
{'loss': 6.5842, 'grad_norm': 5.2764716148376465, 'learning_rate': 8.102280632984876e-06, 'epoch': 1.54}
{'loss': 6.4769, 'grad_norm': 4.330339431762695, 'learning_rate': 8.088801382958661e-06, 'epoch': 1.55}
{'loss': 6.3781, 'grad_norm': 4.499543190002441, 'learning_rate': 8.075285732651652e-06, 'epoch': 1.55}
{'loss': 6.3078, 'grad_norm': 3.760270833969116, 'learning_rate': 8.061733841340345e-06, 'epoch': 1.56}
{'loss': 6.4755, 'grad_norm': 4.548635482788086, 'learning_rate': 8.048145868728324e-06, 'epoch': 1.56}
{'loss': 6.4778, 'grad_norm': 5.334836959838867, 'learning_rate': 8.034521974944372e-06, 'epoch': 1.57}
{'loss': 6.3703, 'grad_norm': 3.828137159347534, 'learning_rate': 8.020862320540597e-06, 'epoch': 1.57}
{'loss': 6.1173, 'grad_norm': 5.237795352935791, 'learning_rate': 8.007167066490526e-06, 'epoch': 1.58}
{'loss': 6.2668, 'grad_norm': 3.772153377532959, 'learning_rate': 7.993436374187215e-06, 'epoch': 1.58}
{'loss': 6.291, 'grad_norm': 4.495264530181885, 'learning_rate': 7.979670405441345e-06, 'epoch': 1.59}
{'loss': 6.5972, 'grad_norm': 4.337673664093018, 'learning_rate': 7.965869322479312e-06, 'epoch': 1.6}
{'loss': 6.3549, 'grad_norm': 4.343821048736572, 'learning_rate': 7.952033287941327e-06, 'epoch': 1.6}
{'loss': 6.5114, 'grad_norm': 4.5774993896484375, 'learning_rate': 7.938162464879487e-06, 'epoch': 1.61}
{'loss': 6.4692, 'grad_norm': 4.534322261810303, 'learning_rate': 7.924257016755854e-06, 'epoch': 1.61}
{'loss': 6.321, 'grad_norm': 4.455904960632324, 'learning_rate': 7.910317107440537e-06, 'epoch': 1.62}
{'loss': 6.3514, 'grad_norm': 3.930542230606079, 'learning_rate': 7.896342901209756e-06, 'epoch': 1.62}
{'loss': 6.388, 'grad_norm': 4.580833911895752, 'learning_rate': 7.88233456274391e-06, 'epoch': 1.63}
{'loss': 6.5266, 'grad_norm': 3.7175240516662598, 'learning_rate': 7.868292257125622e-06, 'epoch': 1.63}
{'loss': 6.4009, 'grad_norm': 4.208019256591797, 'learning_rate': 7.854216149837811e-06, 'epoch': 1.64}
{'loss': 6.4584, 'grad_norm': 5.5519118309021, 'learning_rate': 7.840106406761738e-06, 'epoch': 1.64}
{'loss': 6.4704, 'grad_norm': 4.413114547729492, 'learning_rate': 7.825963194175044e-06, 'epoch': 1.65}
{'loss': 6.512, 'grad_norm': 3.8769850730895996, 'learning_rate': 7.811786678749793e-06, 'epoch': 1.65}
{'loss': 6.4677, 'grad_norm': 4.508973121643066, 'learning_rate': 7.797577027550514e-06, 'epoch': 1.66}
{'loss': 6.4138, 'grad_norm': 4.44165563583374, 'learning_rate': 7.783334408032225e-06, 'epoch': 1.66}
{'loss': 6.4173, 'grad_norm': 4.24896764755249, 'learning_rate': 7.769058988038466e-06, 'epoch': 1.67}
{'loss': 6.3602, 'grad_norm': 4.590809345245361, 'learning_rate': 7.75475093579931e-06, 'epoch': 1.67}
{'loss': 6.0953, 'grad_norm': 4.374899864196777, 'learning_rate': 7.740410419929394e-06, 'epoch': 1.68}
{'loss': 6.3401, 'grad_norm': 4.791284084320068, 'learning_rate': 7.726037609425928e-06, 'epoch': 1.69}
{'loss': 6.3104, 'grad_norm': 4.054768085479736, 'learning_rate': 7.71163267366669e-06, 'epoch': 1.69}
{'loss': 6.3191, 'grad_norm': 4.539426803588867, 'learning_rate': 7.697195782408056e-06, 'epoch': 1.7}
{'loss': 6.454, 'grad_norm': 4.435790061950684, 'learning_rate': 7.682727105782974e-06, 'epoch': 1.7}
{'loss': 6.2901, 'grad_norm': 4.04224157333374, 'learning_rate': 7.668226814298976e-06, 'epoch': 1.71}
{'loss': 6.5382, 'grad_norm': 4.358019828796387, 'learning_rate': 7.653695078836154e-06, 'epoch': 1.71}
{'loss': 6.3375, 'grad_norm': 4.362654209136963, 'learning_rate': 7.639132070645165e-06, 'epoch': 1.72}
{'loss': 6.3148, 'grad_norm': 4.057913780212402, 'learning_rate': 7.624537961345195e-06, 'epoch': 1.72}
{'loss': 6.0909, 'grad_norm': 4.102101802825928, 'learning_rate': 7.6099129229219495e-06, 'epoch': 1.73}
{'loss': 6.1735, 'grad_norm': 4.697354793548584, 'learning_rate': 7.595257127725615e-06, 'epoch': 1.73}
{'loss': 6.4758, 'grad_norm': 4.520318508148193, 'learning_rate': 7.5805707484688404e-06, 'epoch': 1.74}
{'loss': 6.3299, 'grad_norm': 4.040170669555664, 'learning_rate': 7.565853958224694e-06, 'epoch': 1.74}
{'loss': 6.3448, 'grad_norm': 5.196842193603516, 'learning_rate': 7.551106930424618e-06, 'epoch': 1.75}
{'loss': 6.3078, 'grad_norm': 4.716385364532471, 'learning_rate': 7.5363298388564056e-06, 'epoch': 1.75}
{'loss': 6.2161, 'grad_norm': 4.073972225189209, 'learning_rate': 7.521522857662127e-06, 'epoch': 1.76}
{'loss': 6.2323, 'grad_norm': 4.350238800048828, 'learning_rate': 7.5066861613360965e-06, 'epoch': 1.76}
{'loss': 6.3378, 'grad_norm': 4.29862117767334, 'learning_rate': 7.491819924722806e-06, 'epoch': 1.77}
{'loss': 6.343, 'grad_norm': 3.782808780670166, 'learning_rate': 7.47692432301487e-06, 'epoch': 1.78}
{'loss': 6.3283, 'grad_norm': 4.934012413024902, 'learning_rate': 7.461999531750958e-06, 'epoch': 1.78}
{'loss': 6.2285, 'grad_norm': 4.942707061767578, 'learning_rate': 7.447045726813726e-06, 'epoch': 1.79}
{'loss': 6.3111, 'grad_norm': 4.3710198402404785, 'learning_rate': 7.432063084427746e-06, 'epoch': 1.79}
{'loss': 6.4183, 'grad_norm': 4.452266693115234, 'learning_rate': 7.417051781157425e-06, 'epoch': 1.8}
{'loss': 6.4294, 'grad_norm': 3.9995484352111816, 'learning_rate': 7.40201199390493e-06, 'epoch': 1.8}
{'loss': 6.2125, 'grad_norm': 4.062810897827148, 'learning_rate': 7.3869438999080986e-06, 'epoch': 1.81}
{'loss': 6.1296, 'grad_norm': 3.9967126846313477, 'learning_rate': 7.371847676738354e-06, 'epoch': 1.81}
{'loss': 6.2876, 'grad_norm': 4.047409534454346, 'learning_rate': 7.356723502298606e-06, 'epoch': 1.82}
{'loss': 6.2409, 'grad_norm': 4.863693714141846, 'learning_rate': 7.341571554821163e-06, 'epoch': 1.82}
{'loss': 6.2925, 'grad_norm': 3.9938430786132812, 'learning_rate': 7.326392012865626e-06, 'epoch': 1.83}
{'loss': 6.5495, 'grad_norm': 4.63430118560791, 'learning_rate': 7.311185055316785e-06, 'epoch': 1.83}
{'loss': 6.5158, 'grad_norm': 4.256169319152832, 'learning_rate': 7.2959508613825155e-06, 'epoch': 1.84}
{'loss': 6.2226, 'grad_norm': 4.3375468254089355, 'learning_rate': 7.2806896105916566e-06, 'epoch': 1.84}
{'loss': 6.2728, 'grad_norm': 4.490257740020752, 'learning_rate': 7.265401482791907e-06, 'epoch': 1.85}
{'loss': 6.4827, 'grad_norm': 4.102600574493408, 'learning_rate': 7.250086658147697e-06, 'epoch': 1.85}
{'loss': 6.2786, 'grad_norm': 4.251227378845215, 'learning_rate': 7.23474531713807e-06, 'epoch': 1.86}
37%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰ | 7028/18870 [44:42<2:03:45, 1.59it/s]
apt update && apt install -y git-lfs
git lfs install
git clone https://huggingface.co/Mindigenous/mindi-backup
cd mindi-backup
tar -xzvf backup_step3000.tar.gz