Yaning1001 committed on
Commit 4471134 · verified · 1 Parent(s): 66ab767

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
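For reference, the upload-large-folder tool named in the commit message is exposed by the huggingface_hub library; a minimal sketch of how a commit like this is typically produced is shown below. The repo id, repo type, and local folder path are illustrative placeholders, not values taken from this commit.

    # Sketch only: assumes huggingface_hub >= 0.25 and a token from `huggingface-cli login` or HF_TOKEN.
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_large_folder(
        repo_id="Yaning1001/Impossible_llm",  # placeholder target repository
        repo_type="model",                    # assumption; use "dataset" for a dataset repo
        folder_path="./train",                # placeholder local folder containing the wandb/ runs
    )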
Files changed (50)
  1. .gitattributes +2 -0
  2. wandb/run-20241030_112700-d8o4g13r/files/wandb-metadata.json +97 -0
  3. wandb/run-20241030_231835-no2y29q9/files/output.log +183 -0
  4. wandb/run-20241030_231835-no2y29q9/files/requirements.txt +147 -0
  5. wandb/run-20241030_231835-no2y29q9/files/wandb-metadata.json +97 -0
  6. wandb/run-20241030_231835-no2y29q9/logs/debug-internal.log +8 -0
  7. wandb/run-20241030_231835-no2y29q9/logs/debug.log +29 -0
  8. wandb/run-20241030_233740-a8ghkt6q/files/output.log +13 -0
  9. wandb/run-20241030_233740-a8ghkt6q/files/requirements.txt +147 -0
  10. wandb/run-20241030_233740-a8ghkt6q/files/wandb-metadata.json +97 -0
  11. wandb/run-20241030_233740-a8ghkt6q/logs/debug-internal.log +8 -0
  12. wandb/run-20241030_233740-a8ghkt6q/logs/debug.log +26 -0
  13. wandb/run-20241031_001055-dua2g15g/run-dua2g15g.wandb +3 -0
  14. wandb/run-20241031_002020-qq5oimta/files/wandb-summary.json +1 -0
  15. wandb/run-20241031_002020-qq5oimta/logs/debug-internal.log +20 -0
  16. wandb/run-20241031_002020-qq5oimta/logs/debug.log +33 -0
  17. wandb/run-20241101_012438-qowf210g/logs/debug-internal.log +16 -0
  18. wandb/run-20241101_012733-val6n9r9/files/output.log +16 -0
  19. wandb/run-20241101_012733-val6n9r9/files/requirements.txt +147 -0
  20. wandb/run-20241101_012733-val6n9r9/files/wandb-metadata.json +97 -0
  21. wandb/run-20241101_012733-val6n9r9/logs/debug-internal.log +8 -0
  22. wandb/run-20241101_012733-val6n9r9/logs/debug.log +26 -0
  23. wandb/run-20241101_093116-jo652wfc/files/output.log +13 -0
  24. wandb/run-20241101_093116-jo652wfc/files/requirements.txt +147 -0
  25. wandb/run-20241101_093116-jo652wfc/files/wandb-metadata.json +97 -0
  26. wandb/run-20241101_093116-jo652wfc/logs/debug-internal.log +8 -0
  27. wandb/run-20241101_093116-jo652wfc/logs/debug.log +26 -0
  28. wandb/run-20241101_093116-jo652wfc/run-jo652wfc.wandb +0 -0
  29. wandb/run-20241101_093116-w11cgu13/files/output.log +13 -0
  30. wandb/run-20241101_093116-w11cgu13/files/requirements.txt +147 -0
  31. wandb/run-20241101_093116-w11cgu13/files/wandb-metadata.json +97 -0
  32. wandb/run-20241101_093116-w11cgu13/logs/debug-internal.log +8 -0
  33. wandb/run-20241101_093116-w11cgu13/logs/debug.log +26 -0
  34. wandb/run-20241101_093116-w11cgu13/run-w11cgu13.wandb +0 -0
  35. wandb/run-20241101_200535-lnp8ii96/files/output.log +17 -0
  36. wandb/run-20241101_200535-lnp8ii96/files/requirements.txt +147 -0
  37. wandb/run-20241101_200535-lnp8ii96/files/wandb-metadata.json +97 -0
  38. wandb/run-20241101_200535-lnp8ii96/logs/debug-internal.log +8 -0
  39. wandb/run-20241101_200535-lnp8ii96/logs/debug.log +26 -0
  40. wandb/run-20241101_200535-xloij0da/run-xloij0da.wandb +3 -0
  41. wandb/run-20241105_155905-adxztc74/files/config.yaml +49 -0
  42. wandb/run-20241105_155905-adxztc74/files/output.log +19 -0
  43. wandb/run-20241105_155905-adxztc74/files/requirements.txt +147 -0
  44. wandb/run-20241105_155905-adxztc74/files/wandb-metadata.json +44 -0
  45. wandb/run-20241105_155905-adxztc74/files/wandb-summary.json +1 -0
  46. wandb/run-20241105_155905-adxztc74/logs/debug-internal.log +18 -0
  47. wandb/run-20241105_155905-adxztc74/logs/debug.log +27 -0
  48. wandb/run-20241105_155905-adxztc74/run-adxztc74.wandb +0 -0
  49. wandb/run-20241105_155954-daaq0lj0/files/config.yaml +49 -0
  50. wandb/run-20241105_155954-daaq0lj0/files/output.log +19 -0
.gitattributes CHANGED
@@ -67,3 +67,5 @@ wandb/run-20241031_114700-3cqkhntc/run-3cqkhntc.wandb filter=lfs diff=lfs merge=
  wandb/run-20241030_222932-sh4rlbgu/run-sh4rlbgu.wandb filter=lfs diff=lfs merge=lfs -text
  wandb/run-20241113_180154-b24s43he/run-b24s43he.wandb filter=lfs diff=lfs merge=lfs -text
  wandb/run-20241030_233740-0fagh7s8/run-0fagh7s8.wandb filter=lfs diff=lfs merge=lfs -text
+ wandb/run-20241031_001055-dua2g15g/run-dua2g15g.wandb filter=lfs diff=lfs merge=lfs -text
+ wandb/run-20241101_200535-xloij0da/run-xloij0da.wandb filter=lfs diff=lfs merge=lfs -text
wandb/run-20241030_112700-d8o4g13r/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
+ {
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
+ "python": "3.9.19",
+ "startedAt": "2024-10-30T15:27:00.858134Z",
+ "args": [
+ "--perturbation",
+ "reverse_control",
+ "--train_set",
+ "10M",
+ "--batch_size",
+ "3",
+ "--epoch",
+ "3",
+ "--seed",
+ "0"
+ ],
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
+ "codePath": "train/train_deep_wandb.py",
+ "git": {
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
+ },
+ "email": "yaning1001@gmail.com",
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
+ "host": "mms-large-2",
+ "username": "chunhui",
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
+ "codePathLocal": "train_deep_wandb.py",
+ "cpu_count": 32,
+ "cpu_count_logical": 64,
+ "gpu": "NVIDIA RTX A6000",
+ "gpu_count": 8,
+ "disk": {
+ "/": {
+ "total": "1888559353856",
+ "used": "1710831083520"
+ }
+ },
+ "memory": {
+ "total": "202617098240"
+ },
+ "cpu": {
+ "count": 32,
+ "countLogical": 64
+ },
+ "gpu_nvidia": [
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ }
+ ],
+ "cudaVersion": "11.8"
+ }
wandb/run-20241030_231835-no2y29q9/files/output.log ADDED
@@ -0,0 +1,183 @@
1
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.94s/it]
2
+ tokenized_valid: Dataset({
3
+ features: ['input_ids', 'attention_mask'],
4
+ num_rows: 600
5
+ })
6
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
7
+ warnings.warn(
8
+ [2024-10-30 23:18:43,562] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
9
+ [2024-10-30 23:18:53,300] [INFO] [comm.py:652:init_distributed] cdb=None
10
+ [2024-10-30 23:18:53,301] [INFO] [comm.py:683:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
11
+ Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
12
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
13
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
14
+ Loading extension module cpu_adam...
15
+ Time to load cpu_adam op: 5.888516664505005 seconds
16
+ wandb: WARNING The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.
17
+
18
+ {'loss': 2.7525, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.0}
19
+ {'loss': 2.7605, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.0}
20
+ {'loss': 2.8008, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.0}
21
+ {'loss': 2.809, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.0}
22
+ {'loss': 2.7841, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
23
+ {'loss': 2.7924, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
24
+ {'loss': 2.7732, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
25
+ {'loss': 2.7953, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
26
+ {'loss': 2.7747, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
27
+ {'loss': 2.7909, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
28
+
29
+ {'eval_loss': 2.8126132488250732, 'eval_runtime': 12.6414, 'eval_samples_per_second': 47.463, 'eval_steps_per_second': 1.028, 'epoch': 0.01}
30
+ {'loss': 2.7592, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
31
+ {'loss': 2.7394, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
32
+ {'loss': 2.8111, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
33
+ {'loss': 2.7786, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.01}
34
+ {'loss': 2.7711, 'grad_norm': 0.0, 'learning_rate': 5e-06, 'epoch': 0.02}
35
+ {'loss': 2.7517, 'grad_norm': 6.298451900482178, 'learning_rate': 4.998288843258043e-06, 'epoch': 0.02}
36
+ {'loss': 2.7259, 'grad_norm': 6.298451900482178, 'learning_rate': 4.998288843258043e-06, 'epoch': 0.02}
37
+ {'loss': 2.7971, 'grad_norm': 6.545077323913574, 'learning_rate': 4.996577686516086e-06, 'epoch': 0.02}
38
+ {'loss': 2.6019, 'grad_norm': 5.211835861206055, 'learning_rate': 4.994866529774127e-06, 'epoch': 0.02}
39
+ {'loss': 2.6075, 'grad_norm': 3.1549763679504395, 'learning_rate': 4.99315537303217e-06, 'epoch': 0.02}
40
+ {'eval_loss': 2.605424404144287, 'eval_runtime': 11.5255, 'eval_samples_per_second': 52.058, 'eval_steps_per_second': 1.128, 'epoch': 0.02}
41
+ {'loss': 2.5804, 'grad_norm': 3.6320786476135254, 'learning_rate': 4.991444216290213e-06, 'epoch': 0.02}
42
+ {'loss': 2.5365, 'grad_norm': 2.6124260425567627, 'learning_rate': 4.989733059548255e-06, 'epoch': 0.02}
43
+ {'loss': 2.4614, 'grad_norm': 2.2906017303466797, 'learning_rate': 4.988021902806298e-06, 'epoch': 0.02}
44
+ {'loss': 2.4587, 'grad_norm': 2.029607057571411, 'learning_rate': 4.9863107460643404e-06, 'epoch': 0.02}
45
+ {'loss': 2.4088, 'grad_norm': 2.137622594833374, 'learning_rate': 4.984599589322382e-06, 'epoch': 0.03}
46
+ {'loss': 2.4206, 'grad_norm': 3.2038962841033936, 'learning_rate': 4.982888432580425e-06, 'epoch': 0.03}
47
+ {'loss': 2.3714, 'grad_norm': 2.447030782699585, 'learning_rate': 4.9811772758384674e-06, 'epoch': 0.03}
48
+ {'loss': 2.3694, 'grad_norm': 2.107184648513794, 'learning_rate': 4.97946611909651e-06, 'epoch': 0.03}
49
+ {'loss': 2.3499, 'grad_norm': 2.287409543991089, 'learning_rate': 4.977754962354553e-06, 'epoch': 0.03}
50
+ {'loss': 2.3258, 'grad_norm': 2.332838535308838, 'learning_rate': 4.976043805612594e-06, 'epoch': 0.03}
51
+ {'eval_loss': 2.3948795795440674, 'eval_runtime': 11.5106, 'eval_samples_per_second': 52.126, 'eval_steps_per_second': 1.129, 'epoch': 0.03}
52
+ {'loss': 2.2726, 'grad_norm': 1.9339438676834106, 'learning_rate': 4.974332648870637e-06, 'epoch': 0.03}
53
+ {'loss': 2.3154, 'grad_norm': 1.7203729152679443, 'learning_rate': 4.972621492128679e-06, 'epoch': 0.03}
54
+ {'loss': 2.2666, 'grad_norm': 1.9621284008026123, 'learning_rate': 4.970910335386721e-06, 'epoch': 0.03}
55
+ {'loss': 2.2531, 'grad_norm': 2.7898294925689697, 'learning_rate': 4.969199178644764e-06, 'epoch': 0.03}
56
+ {'loss': 2.2254, 'grad_norm': 1.831383466720581, 'learning_rate': 4.967488021902807e-06, 'epoch': 0.04}
57
+ {'loss': 2.1198, 'grad_norm': 1.9499363899230957, 'learning_rate': 4.965776865160849e-06, 'epoch': 0.04}
58
+ {'loss': 2.2448, 'grad_norm': 1.9152458906173706, 'learning_rate': 4.964065708418892e-06, 'epoch': 0.04}
59
+ {'loss': 2.1506, 'grad_norm': 1.651543378829956, 'learning_rate': 4.962354551676934e-06, 'epoch': 0.04}
60
+ {'loss': 2.1617, 'grad_norm': 1.5792090892791748, 'learning_rate': 4.960643394934976e-06, 'epoch': 0.04}
61
+ {'loss': 2.2038, 'grad_norm': 2.3606202602386475, 'learning_rate': 4.958932238193019e-06, 'epoch': 0.04}
62
+ {'eval_loss': 2.2738945484161377, 'eval_runtime': 11.5159, 'eval_samples_per_second': 52.102, 'eval_steps_per_second': 1.129, 'epoch': 0.04}
63
+ {'loss': 2.1666, 'grad_norm': 2.259366989135742, 'learning_rate': 4.9572210814510614e-06, 'epoch': 0.04}
64
+ {'loss': 2.0839, 'grad_norm': 1.9008065462112427, 'learning_rate': 4.955509924709104e-06, 'epoch': 0.04}
65
+ {'loss': 2.1488, 'grad_norm': 2.3995864391326904, 'learning_rate': 4.953798767967146e-06, 'epoch': 0.04}
66
+ {'loss': 2.1045, 'grad_norm': 1.932542324066162, 'learning_rate': 4.952087611225188e-06, 'epoch': 0.05}
67
+ {'loss': 2.1207, 'grad_norm': 2.3346352577209473, 'learning_rate': 4.950376454483231e-06, 'epoch': 0.05}
68
+ {'loss': 2.0903, 'grad_norm': 2.1826212406158447, 'learning_rate': 4.948665297741274e-06, 'epoch': 0.05}
69
+ {'loss': 2.1428, 'grad_norm': 2.398021936416626, 'learning_rate': 4.946954140999316e-06, 'epoch': 0.05}
70
+ {'loss': 2.0761, 'grad_norm': 2.2599124908447266, 'learning_rate': 4.945242984257359e-06, 'epoch': 0.05}
71
+ {'loss': 2.0773, 'grad_norm': 2.1450204849243164, 'learning_rate': 4.943531827515401e-06, 'epoch': 0.05}
72
+ {'loss': 2.0209, 'grad_norm': 1.935485601425171, 'learning_rate': 4.941820670773443e-06, 'epoch': 0.05}
73
+ {'eval_loss': 2.1975936889648438, 'eval_runtime': 11.5223, 'eval_samples_per_second': 52.073, 'eval_steps_per_second': 1.128, 'epoch': 0.05}
74
+ {'loss': 2.0184, 'grad_norm': 2.629650831222534, 'learning_rate': 4.940109514031486e-06, 'epoch': 0.05}
75
+ {'loss': 2.1345, 'grad_norm': 2.674574851989746, 'learning_rate': 4.9383983572895284e-06, 'epoch': 0.05}
76
+ {'loss': 2.0042, 'grad_norm': 2.356196403503418, 'learning_rate': 4.936687200547571e-06, 'epoch': 0.05}
77
+ {'loss': 2.0616, 'grad_norm': 2.331747531890869, 'learning_rate': 4.934976043805613e-06, 'epoch': 0.06}
78
+ {'loss': 2.0188, 'grad_norm': 2.0535356998443604, 'learning_rate': 4.9332648870636554e-06, 'epoch': 0.06}
79
+ {'loss': 1.9852, 'grad_norm': 2.5634617805480957, 'learning_rate': 4.931553730321697e-06, 'epoch': 0.06}
80
+ {'loss': 2.0274, 'grad_norm': 2.2701539993286133, 'learning_rate': 4.92984257357974e-06, 'epoch': 0.06}
81
+ {'loss': 1.9411, 'grad_norm': 2.1275668144226074, 'learning_rate': 4.928131416837782e-06, 'epoch': 0.06}
82
+ {'loss': 2.0064, 'grad_norm': 2.1210527420043945, 'learning_rate': 4.926420260095825e-06, 'epoch': 0.06}
83
+ {'loss': 2.0139, 'grad_norm': 2.3929598331451416, 'learning_rate': 4.924709103353868e-06, 'epoch': 0.06}
84
+ {'eval_loss': 2.145512580871582, 'eval_runtime': 11.5261, 'eval_samples_per_second': 52.056, 'eval_steps_per_second': 1.128, 'epoch': 0.06}
85
+ {'loss': 1.9654, 'grad_norm': 1.9947539567947388, 'learning_rate': 4.92299794661191e-06, 'epoch': 0.06}
86
+ {'loss': 1.9792, 'grad_norm': 2.342836618423462, 'learning_rate': 4.921286789869952e-06, 'epoch': 0.06}
87
+ {'loss': 2.039, 'grad_norm': 2.3230276107788086, 'learning_rate': 4.919575633127995e-06, 'epoch': 0.06}
88
+ {'loss': 2.0071, 'grad_norm': 1.8526898622512817, 'learning_rate': 4.917864476386037e-06, 'epoch': 0.07}
89
+ {'loss': 1.8769, 'grad_norm': 2.4270002841949463, 'learning_rate': 4.91615331964408e-06, 'epoch': 0.07}
90
+ {'loss': 1.9785, 'grad_norm': 2.003593683242798, 'learning_rate': 4.9144421629021224e-06, 'epoch': 0.07}
91
+ {'loss': 1.9254, 'grad_norm': 2.6093783378601074, 'learning_rate': 4.912731006160164e-06, 'epoch': 0.07}
92
+ {'loss': 2.0027, 'grad_norm': 2.1653261184692383, 'learning_rate': 4.911019849418207e-06, 'epoch': 0.07}
93
+ {'loss': 1.9359, 'grad_norm': 2.6015000343322754, 'learning_rate': 4.9093086926762494e-06, 'epoch': 0.07}
94
+ {'loss': 2.0521, 'grad_norm': 2.019484043121338, 'learning_rate': 4.907597535934292e-06, 'epoch': 0.07}
95
+ {'eval_loss': 2.1111998558044434, 'eval_runtime': 11.5296, 'eval_samples_per_second': 52.04, 'eval_steps_per_second': 1.128, 'epoch': 0.07}
96
+ {'loss': 1.9439, 'grad_norm': 1.9294712543487549, 'learning_rate': 4.905886379192335e-06, 'epoch': 0.07}
97
+ {'loss': 1.8964, 'grad_norm': 2.253174066543579, 'learning_rate': 4.904175222450377e-06, 'epoch': 0.07}
98
+ {'loss': 1.9529, 'grad_norm': 2.0996475219726562, 'learning_rate': 4.902464065708419e-06, 'epoch': 0.07}
99
+ {'loss': 1.9224, 'grad_norm': 2.0278995037078857, 'learning_rate': 4.900752908966462e-06, 'epoch': 0.08}
100
+ {'loss': 1.9289, 'grad_norm': 2.3247783184051514, 'learning_rate': 4.899041752224504e-06, 'epoch': 0.08}
101
+ {'loss': 1.9101, 'grad_norm': 1.927708625793457, 'learning_rate': 4.897330595482547e-06, 'epoch': 0.08}
102
+ {'loss': 1.9442, 'grad_norm': 2.276090383529663, 'learning_rate': 4.8956194387405895e-06, 'epoch': 0.08}
103
+ {'loss': 1.9425, 'grad_norm': 2.2609736919403076, 'learning_rate': 4.893908281998632e-06, 'epoch': 0.08}
104
+ {'loss': 1.8732, 'grad_norm': 2.352919101715088, 'learning_rate': 4.892197125256674e-06, 'epoch': 0.08}
105
+ {'loss': 1.9449, 'grad_norm': 2.2031309604644775, 'learning_rate': 4.8904859685147164e-06, 'epoch': 0.08}
106
+ {'eval_loss': 2.078676462173462, 'eval_runtime': 11.5386, 'eval_samples_per_second': 51.999, 'eval_steps_per_second': 1.127, 'epoch': 0.08}
107
+ {'loss': 1.8836, 'grad_norm': 2.183171272277832, 'learning_rate': 4.888774811772759e-06, 'epoch': 0.08}
108
+ {'loss': 1.919, 'grad_norm': 2.177760124206543, 'learning_rate': 4.887063655030802e-06, 'epoch': 0.08}
109
+ {'loss': 1.921, 'grad_norm': 2.321638822555542, 'learning_rate': 4.8853524982888434e-06, 'epoch': 0.09}
110
+ {'loss': 1.8998, 'grad_norm': 2.097433567047119, 'learning_rate': 4.883641341546886e-06, 'epoch': 0.09}
111
+ {'loss': 1.8696, 'grad_norm': 2.15975022315979, 'learning_rate': 4.881930184804929e-06, 'epoch': 0.09}
112
+ {'loss': 1.9312, 'grad_norm': 2.3222997188568115, 'learning_rate': 4.88021902806297e-06, 'epoch': 0.09}
113
+ {'loss': 1.8807, 'grad_norm': 2.4326765537261963, 'learning_rate': 4.878507871321013e-06, 'epoch': 0.09}
114
+ {'loss': 1.8787, 'grad_norm': 2.2705607414245605, 'learning_rate': 4.876796714579056e-06, 'epoch': 0.09}
115
+ {'loss': 1.8955, 'grad_norm': 2.7113804817199707, 'learning_rate': 4.875085557837098e-06, 'epoch': 0.09}
116
+ {'loss': 1.8414, 'grad_norm': 2.274242401123047, 'learning_rate': 4.873374401095141e-06, 'epoch': 0.09}
117
+ {'eval_loss': 2.060840606689453, 'eval_runtime': 11.5241, 'eval_samples_per_second': 52.065, 'eval_steps_per_second': 1.128, 'epoch': 0.09}
118
+ {'loss': 1.8706, 'grad_norm': 2.546048879623413, 'learning_rate': 4.8716632443531835e-06, 'epoch': 0.09}
119
+ {'loss': 1.8896, 'grad_norm': 2.1173014640808105, 'learning_rate': 4.869952087611225e-06, 'epoch': 0.09}
120
+ {'loss': 1.8787, 'grad_norm': 2.4878427982330322, 'learning_rate': 4.868240930869268e-06, 'epoch': 0.1}
121
+ {'loss': 1.8756, 'grad_norm': 2.1240787506103516, 'learning_rate': 4.8665297741273105e-06, 'epoch': 0.1}
122
+ {'loss': 1.9245, 'grad_norm': 2.1906278133392334, 'learning_rate': 4.864818617385353e-06, 'epoch': 0.1}
123
+ {'loss': 1.8033, 'grad_norm': 2.197439432144165, 'learning_rate': 4.863107460643396e-06, 'epoch': 0.1}
124
+ {'loss': 1.8828, 'grad_norm': 2.3327462673187256, 'learning_rate': 4.8613963039014374e-06, 'epoch': 0.1}
125
+ {'loss': 1.7941, 'grad_norm': 2.095132827758789, 'learning_rate': 4.85968514715948e-06, 'epoch': 0.1}
126
+ {'loss': 1.8109, 'grad_norm': 2.401387929916382, 'learning_rate': 4.857973990417523e-06, 'epoch': 0.1}
127
+ {'loss': 1.8268, 'grad_norm': 2.195265769958496, 'learning_rate': 4.856262833675565e-06, 'epoch': 0.1}
128
+ {'eval_loss': 2.0458765029907227, 'eval_runtime': 11.5321, 'eval_samples_per_second': 52.029, 'eval_steps_per_second': 1.127, 'epoch': 0.1}
129
+ {'loss': 1.8518, 'grad_norm': 2.4990336894989014, 'learning_rate': 4.854551676933608e-06, 'epoch': 0.1}
130
+ {'loss': 1.8206, 'grad_norm': 2.077829122543335, 'learning_rate': 4.8528405201916505e-06, 'epoch': 0.1}
131
+ {'loss': 1.7893, 'grad_norm': 2.1873979568481445, 'learning_rate': 4.851129363449692e-06, 'epoch': 0.11}
132
+ {'loss': 1.8129, 'grad_norm': 2.1641716957092285, 'learning_rate': 4.849418206707735e-06, 'epoch': 0.11}
133
+ {'loss': 1.881, 'grad_norm': 2.187925100326538, 'learning_rate': 4.8477070499657775e-06, 'epoch': 0.11}
134
+ {'loss': 1.863, 'grad_norm': 2.3495638370513916, 'learning_rate': 4.84599589322382e-06, 'epoch': 0.11}
135
+ {'loss': 1.8643, 'grad_norm': 2.1752283573150635, 'learning_rate': 4.844284736481863e-06, 'epoch': 0.11}
136
+ {'loss': 1.849, 'grad_norm': 2.323124647140503, 'learning_rate': 4.8425735797399045e-06, 'epoch': 0.11}
137
+ {'loss': 1.7471, 'grad_norm': 2.360914945602417, 'learning_rate': 4.840862422997947e-06, 'epoch': 0.11}
138
+ {'loss': 1.8054, 'grad_norm': 2.29608416557312, 'learning_rate': 4.839151266255989e-06, 'epoch': 0.11}
139
+ {'eval_loss': 2.017833709716797, 'eval_runtime': 11.5312, 'eval_samples_per_second': 52.033, 'eval_steps_per_second': 1.127, 'epoch': 0.11}
140
+ {'loss': 1.738, 'grad_norm': 2.081592321395874, 'learning_rate': 4.8374401095140314e-06, 'epoch': 0.11}
141
+ {'loss': 1.8248, 'grad_norm': 2.419157028198242, 'learning_rate': 4.835728952772074e-06, 'epoch': 0.11}
142
+ {'loss': 1.7872, 'grad_norm': 2.393253803253174, 'learning_rate': 4.834017796030117e-06, 'epoch': 0.12}
143
+ {'loss': 1.815, 'grad_norm': 2.2206389904022217, 'learning_rate': 4.832306639288159e-06, 'epoch': 0.12}
144
+ {'loss': 1.7481, 'grad_norm': 2.660158634185791, 'learning_rate': 4.830595482546202e-06, 'epoch': 0.12}
145
+ {'loss': 1.8719, 'grad_norm': 2.1578104496002197, 'learning_rate': 4.828884325804244e-06, 'epoch': 0.12}
146
+ {'loss': 1.8006, 'grad_norm': 2.285301685333252, 'learning_rate': 4.827173169062286e-06, 'epoch': 0.12}
147
+ {'loss': 1.8335, 'grad_norm': 2.4699130058288574, 'learning_rate': 4.825462012320329e-06, 'epoch': 0.12}
148
+ {'loss': 1.8206, 'grad_norm': 2.3087847232818604, 'learning_rate': 4.8237508555783715e-06, 'epoch': 0.12}
149
+ {'loss': 1.9022, 'grad_norm': 2.329796075820923, 'learning_rate': 4.822039698836414e-06, 'epoch': 0.12}
150
+ {'eval_loss': 2.0053842067718506, 'eval_runtime': 11.5201, 'eval_samples_per_second': 52.083, 'eval_steps_per_second': 1.128, 'epoch': 0.12}
151
+ {'loss': 1.8257, 'grad_norm': 2.540306568145752, 'learning_rate': 4.820328542094456e-06, 'epoch': 0.12}
152
+ {'loss': 1.8087, 'grad_norm': 2.6038308143615723, 'learning_rate': 4.8186173853524985e-06, 'epoch': 0.13}
153
+ {'loss': 1.7559, 'grad_norm': 2.5904085636138916, 'learning_rate': 4.816906228610541e-06, 'epoch': 0.13}
154
+ {'loss': 1.7934, 'grad_norm': 2.345602512359619, 'learning_rate': 4.815195071868584e-06, 'epoch': 0.13}
155
+ {'loss': 1.7462, 'grad_norm': 2.185978651046753, 'learning_rate': 4.813483915126626e-06, 'epoch': 0.13}
156
+ {'loss': 1.8069, 'grad_norm': 2.1208553314208984, 'learning_rate': 4.811772758384669e-06, 'epoch': 0.13}
157
+ {'loss': 1.7291, 'grad_norm': 2.478742837905884, 'learning_rate': 4.810061601642711e-06, 'epoch': 0.13}
158
+ {'loss': 1.7905, 'grad_norm': 2.2552616596221924, 'learning_rate': 4.808350444900753e-06, 'epoch': 0.13}
159
+ {'loss': 1.8371, 'grad_norm': 2.503005266189575, 'learning_rate': 4.806639288158796e-06, 'epoch': 0.13}
160
+ {'loss': 1.8357, 'grad_norm': 2.340322256088257, 'learning_rate': 4.8049281314168385e-06, 'epoch': 0.13}
161
+ {'eval_loss': 1.9985140562057495, 'eval_runtime': 11.5253, 'eval_samples_per_second': 52.059, 'eval_steps_per_second': 1.128, 'epoch': 0.13}
162
+ {'loss': 1.7605, 'grad_norm': 2.6791834831237793, 'learning_rate': 4.803216974674881e-06, 'epoch': 0.13}
163
+ {'loss': 1.8254, 'grad_norm': 2.2488231658935547, 'learning_rate': 4.801505817932923e-06, 'epoch': 0.14}
164
+ {'loss': 1.8009, 'grad_norm': 2.2643635272979736, 'learning_rate': 4.7997946611909655e-06, 'epoch': 0.14}
165
+ {'loss': 1.7361, 'grad_norm': 2.151967763900757, 'learning_rate': 4.798083504449008e-06, 'epoch': 0.14}
166
+ {'loss': 1.7842, 'grad_norm': 2.318420648574829, 'learning_rate': 4.796372347707051e-06, 'epoch': 0.14}
167
+ {'loss': 1.8062, 'grad_norm': 2.2854278087615967, 'learning_rate': 4.7946611909650925e-06, 'epoch': 0.14}
168
+ {'loss': 1.7247, 'grad_norm': 2.5026426315307617, 'learning_rate': 4.792950034223135e-06, 'epoch': 0.14}
169
+ {'loss': 1.799, 'grad_norm': 2.449467182159424, 'learning_rate': 4.791238877481178e-06, 'epoch': 0.14}
170
+ {'loss': 1.765, 'grad_norm': 2.472188949584961, 'learning_rate': 4.78952772073922e-06, 'epoch': 0.14}
171
+ {'loss': 1.7534, 'grad_norm': 2.3607966899871826, 'learning_rate': 4.787816563997262e-06, 'epoch': 0.14}
172
+ {'eval_loss': 1.986325740814209, 'eval_runtime': 11.521, 'eval_samples_per_second': 52.079, 'eval_steps_per_second': 1.128, 'epoch': 0.14}
173
+ {'loss': 1.7835, 'grad_norm': 2.830254077911377, 'learning_rate': 4.786105407255305e-06, 'epoch': 0.14}
174
+ {'loss': 1.7279, 'grad_norm': 2.241579532623291, 'learning_rate': 4.784394250513347e-06, 'epoch': 0.15}
175
+ {'loss': 1.87, 'grad_norm': 2.860384941101074, 'learning_rate': 4.78268309377139e-06, 'epoch': 0.15}
176
+ {'loss': 1.7452, 'grad_norm': 2.333618640899658, 'learning_rate': 4.7809719370294325e-06, 'epoch': 0.15}
177
+ {'loss': 1.729, 'grad_norm': 2.3267569541931152, 'learning_rate': 4.779260780287474e-06, 'epoch': 0.15}
178
+ {'loss': 1.7302, 'grad_norm': 2.508273124694824, 'learning_rate': 4.777549623545517e-06, 'epoch': 0.15}
179
+ {'loss': 1.7606, 'grad_norm': 2.3898637294769287, 'learning_rate': 4.7758384668035595e-06, 'epoch': 0.15}
180
+ {'loss': 1.7658, 'grad_norm': 2.4441757202148438, 'learning_rate': 4.774127310061602e-06, 'epoch': 0.15}
181
+ {'loss': 1.8535, 'grad_norm': 2.4708995819091797, 'learning_rate': 4.772416153319645e-06, 'epoch': 0.15}
182
+ {'loss': 1.7782, 'grad_norm': 2.721252918243408, 'learning_rate': 4.770704996577687e-06, 'epoch': 0.15}
183
+ {'eval_loss': 1.980696201324463, 'eval_runtime': 11.5248, 'eval_samples_per_second': 52.062, 'eval_steps_per_second': 1.128, 'epoch': 0.15}
wandb/run-20241030_231835-no2y29q9/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241030_231835-no2y29q9/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
+ {
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
+ "python": "3.9.19",
+ "startedAt": "2024-10-31T03:18:35.473254Z",
+ "args": [
+ "--perturbation",
+ "reverse_full",
+ "--train_set",
+ "10M",
+ "--batch_size",
+ "3",
+ "--epoch",
+ "3",
+ "--seed",
+ "0"
+ ],
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
+ "codePath": "train/train_deep_wandb.py",
+ "git": {
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
+ },
+ "email": "yaning1001@gmail.com",
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
+ "host": "mms-large-2",
+ "username": "chunhui",
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
+ "codePathLocal": "train_deep_wandb.py",
+ "cpu_count": 32,
+ "cpu_count_logical": 64,
+ "gpu": "NVIDIA RTX A6000",
+ "gpu_count": 8,
+ "disk": {
+ "/": {
+ "total": "1888559353856",
+ "used": "1711064555520"
+ }
+ },
+ "memory": {
+ "total": "202617098240"
+ },
+ "cpu": {
+ "count": 32,
+ "countLogical": 64
+ },
+ "gpu_nvidia": [
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ }
+ ],
+ "cudaVersion": "11.8"
+ }
wandb/run-20241030_231835-no2y29q9/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
+ {"time":"2024-10-30T23:18:35.475047064-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-10-30T23:18:35.475059034-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_231835-no2y29q9/logs/debug-core.log"}
+ {"time":"2024-10-30T23:18:35.580787522-04:00","level":"INFO","msg":"created new stream","id":"no2y29q9"}
+ {"time":"2024-10-30T23:18:35.580820912-04:00","level":"INFO","msg":"stream: started","id":"no2y29q9"}
+ {"time":"2024-10-30T23:18:35.580831862-04:00","level":"INFO","msg":"sender: started","stream_id":"no2y29q9"}
+ {"time":"2024-10-30T23:18:35.580825052-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"no2y29q9"}}
+ {"time":"2024-10-30T23:18:35.580852152-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"no2y29q9"}}
+ {"time":"2024-10-30T23:18:35.772437179-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241030_231835-no2y29q9/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Configure stats pid to 457826
3
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_231835-no2y29q9/logs/debug.log
10
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_231835-no2y29q9/logs/debug-internal.log
11
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:init():621] calling init triggers
12
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:init():671] starting backend
15
+ 2024-10-30 23:18:35,471 INFO MainThread:457826 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-10-30 23:18:35,472 INFO MainThread:457826 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-10-30 23:18:35,473 INFO MainThread:457826 [wandb_init.py:init():688] backend started and connected
18
+ 2024-10-30 23:18:35,476 INFO MainThread:457826 [wandb_init.py:init():783] updated telemetry
19
+ 2024-10-30 23:18:35,498 INFO MainThread:457826 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-10-30 23:18:35,769 INFO MainThread:457826 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-10-30 23:18:35,880 INFO MainThread:457826 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-10-30 23:18:35,880 INFO MainThread:457826 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-10-30 23:18:35,880 INFO MainThread:457826 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-10-30 23:18:35,880 INFO MainThread:457826 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-10-30 23:18:35,882 INFO MainThread:457826 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-10-30 23:18:35,882 INFO MainThread:457826 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_full', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0}
27
+ 2024-10-30 23:19:28,559 INFO MainThread:457826 [wandb_run.py:_config_callback():1390] config_cb None None {'vocab_size': 128256, 'max_position_embeddings': 131072, 'hidden_size': 3072, 'intermediate_size': 8192, 'num_hidden_layers': 28, 'num_attention_heads': 24, 'num_key_value_heads': 8, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': True, 'rope_theta': 500000.0, 'rope_scaling': {'factor': 32.0, 'high_freq_factor': 4.0, 'low_freq_factor': 1.0, 'original_max_position_embeddings': 8192, 'rope_type': 'llama3'}, 'attention_bias': False, 'attention_dropout': 0.0, 'mlp_bias': False, 'head_dim': 128, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 128000, 'pad_token_id': None, 'eos_token_id': 128001, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'meta-llama/Llama-3.2-3B', 'transformers_version': '4.45.1', 'model_type': 'llama', 'output_dir': './checkpoints/Llama-3.2-3B/babylm_reverse_full_10M_seed0/runs', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 3, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-06, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 3, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': './logs', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 150, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 0, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': 
False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 10, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': './checkpoints/Llama-3.2-3B/babylm_reverse_full_10M_seed0/runs', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': 'deepspeed_config/train_dp_config.json', 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': 'steps', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False}
28
+ 2024-10-30 23:19:28,563 INFO MainThread:457826 [wandb_config.py:__setitem__():154] config set model/num_parameters = 3212749824 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7f2a7e632f10>>
29
+ 2024-10-30 23:19:28,563 INFO MainThread:457826 [wandb_run.py:_config_callback():1390] config_cb model/num_parameters 3212749824 None
wandb/run-20241030_233740-a8ghkt6q/files/output.log ADDED
@@ -0,0 +1,13 @@
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.95s/it]
+ tokenized_valid: Dataset({
+ features: ['input_ids', 'attention_mask'],
+ num_rows: 600
+ })
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
+ warnings.warn(
+ [2024-10-30 23:37:50,707] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+ [2024-10-30 23:37:58,435] [INFO] [comm.py:652:init_distributed] cdb=None
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
+ Loading extension module cpu_adam...
+ Time to load cpu_adam op: 4.681263446807861 seconds
wandb/run-20241030_233740-a8ghkt6q/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241030_233740-a8ghkt6q/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
+ {
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
+ "python": "3.9.19",
+ "startedAt": "2024-10-31T03:37:40.852967Z",
+ "args": [
+ "--perturbation",
+ "reverse_control",
+ "--train_set",
+ "10M",
+ "--batch_size",
+ "3",
+ "--epoch",
+ "3",
+ "--seed",
+ "0"
+ ],
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
+ "codePath": "train/train_deep_wandb.py",
+ "git": {
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
+ },
+ "email": "yaning1001@gmail.com",
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
+ "host": "mms-large-2",
+ "username": "chunhui",
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
+ "codePathLocal": "train_deep_wandb.py",
+ "cpu_count": 32,
+ "cpu_count_logical": 64,
+ "gpu": "NVIDIA RTX A6000",
+ "gpu_count": 8,
+ "disk": {
+ "/": {
+ "total": "1888559353856",
+ "used": "1711065919488"
+ }
+ },
+ "memory": {
+ "total": "202617098240"
+ },
+ "cpu": {
+ "count": 32,
+ "countLogical": 64
+ },
+ "gpu_nvidia": [
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ },
+ {
+ "name": "NVIDIA RTX A6000",
+ "memoryTotal": "51527024640",
+ "cudaCores": 10752,
+ "architecture": "Ampere"
+ }
+ ],
+ "cudaVersion": "11.8"
+ }
wandb/run-20241030_233740-a8ghkt6q/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
+ {"time":"2024-10-30T23:37:40.855273129-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-10-30T23:37:40.855285669-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_233740-a8ghkt6q/logs/debug-core.log"}
+ {"time":"2024-10-30T23:37:40.96763436-04:00","level":"INFO","msg":"created new stream","id":"a8ghkt6q"}
+ {"time":"2024-10-30T23:37:40.967695611-04:00","level":"INFO","msg":"stream: started","id":"a8ghkt6q"}
+ {"time":"2024-10-30T23:37:40.967745251-04:00","level":"INFO","msg":"sender: started","stream_id":"a8ghkt6q"}
+ {"time":"2024-10-30T23:37:40.967712521-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"a8ghkt6q"}}
+ {"time":"2024-10-30T23:37:40.967732341-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"a8ghkt6q"}}
+ {"time":"2024-10-30T23:37:41.220876038-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241030_233740-a8ghkt6q/logs/debug.log ADDED
@@ -0,0 +1,26 @@
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Configure stats pid to 464537
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
+ 2024-10-30 23:37:40,849 INFO MainThread:464537 [wandb_setup.py:_flush():79] Applying login settings: {}
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_233740-a8ghkt6q/logs/debug.log
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_233740-a8ghkt6q/logs/debug-internal.log
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:init():621] calling init triggers
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:init():671] starting backend
+ 2024-10-30 23:37:40,850 INFO MainThread:464537 [wandb_init.py:init():675] sending inform_init request
+ 2024-10-30 23:37:40,852 INFO MainThread:464537 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-10-30 23:37:40,852 INFO MainThread:464537 [wandb_init.py:init():688] backend started and connected
+ 2024-10-30 23:37:40,855 INFO MainThread:464537 [wandb_init.py:init():783] updated telemetry
+ 2024-10-30 23:37:40,883 INFO MainThread:464537 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
+ 2024-10-30 23:37:41,217 INFO MainThread:464537 [wandb_init.py:init():867] starting run threads in backend
+ 2024-10-30 23:37:42,837 INFO MainThread:464537 [wandb_run.py:_console_start():2463] atexit reg
+ 2024-10-30 23:37:42,837 INFO MainThread:464537 [wandb_run.py:_redirect():2311] redirect: wrap_raw
+ 2024-10-30 23:37:42,837 INFO MainThread:464537 [wandb_run.py:_redirect():2376] Wrapping output streams.
+ 2024-10-30 23:37:42,837 INFO MainThread:464537 [wandb_run.py:_redirect():2401] Redirects installed.
+ 2024-10-30 23:37:42,860 INFO MainThread:464537 [wandb_init.py:init():911] run started, returning control to user process
+ 2024-10-30 23:37:42,861 INFO MainThread:464537 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0}
wandb/run-20241031_001055-dua2g15g/run-dua2g15g.wandb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1cf00c20fe4ac81c7ab3f9adde203aab4ee8211db895fe69b24a47b6d5b260f
+ size 327680
wandb/run-20241031_002020-qq5oimta/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb":{"runtime":32219}}
wandb/run-20241031_002020-qq5oimta/logs/debug-internal.log ADDED
@@ -0,0 +1,20 @@
1
+ {"time":"2024-10-31T00:20:20.034540173-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-10-31T00:20:20.034551713-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_002020-qq5oimta/logs/debug-core.log"}
3
+ {"time":"2024-10-31T00:20:20.142123016-04:00","level":"INFO","msg":"created new stream","id":"qq5oimta"}
4
+ {"time":"2024-10-31T00:20:20.142164126-04:00","level":"INFO","msg":"stream: started","id":"qq5oimta"}
5
+ {"time":"2024-10-31T00:20:20.142182576-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"qq5oimta"}}
6
+ {"time":"2024-10-31T00:20:20.142443509-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"qq5oimta"}}
7
+ {"time":"2024-10-31T00:20:20.142465839-04:00","level":"INFO","msg":"sender: started","stream_id":"qq5oimta"}
8
+ {"time":"2024-10-31T00:20:21.404732185-04:00","level":"INFO","msg":"Starting system monitor"}
9
+ {"time":"2024-10-31T00:36:56.749060403-04:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
10
+ {"time":"2024-10-31T01:06:48.979811136-04:00","level":"INFO","msg":"api: retrying HTTP error","status":500,"url":"https://api.wandb.ai/files/yaning1001-dartmouth-college/impossible_llm_reverse/qq5oimta/file_stream"}
11
+ {"time":"2024-10-31T06:09:10.032616886-04:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/yaning1001-dartmouth-college/impossible_llm_reverse/qq5oimta/file_stream"}
12
+ {"time":"2024-10-31T09:17:19.827022088-04:00","level":"INFO","msg":"Stopping system monitor"}
13
+ {"time":"2024-10-31T09:17:19.904160799-04:00","level":"INFO","msg":"Stopped system monitor"}
14
+ {"time":"2024-10-31T09:17:20.764779005-04:00","level":"INFO","msg":"handler: operation stats","stats":{"operations":[{"desc":"saving job artifact","runtime_seconds":0.617493792,"subtasks":[{"desc":"wandb-job.json","runtime_seconds":0.043146083,"progress":"563B/563B"}]}],"total_operations":1}}
15
+ {"time":"2024-10-31T09:17:21.720810984-04:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
16
+ {"time":"2024-10-31T09:17:22.833725492-04:00","level":"INFO","msg":"stream: closing","id":"qq5oimta"}
17
+ {"time":"2024-10-31T09:17:22.833769292-04:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"qq5oimta"}}
18
+ {"time":"2024-10-31T09:17:22.833834722-04:00","level":"INFO","msg":"sender: closed","stream_id":"qq5oimta"}
19
+ {"time":"2024-10-31T09:17:22.833829992-04:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"qq5oimta"}}
20
+ {"time":"2024-10-31T09:17:22.833996253-04:00","level":"INFO","msg":"stream: closed","id":"qq5oimta"}
wandb/run-20241031_002020-qq5oimta/logs/debug.log ADDED
@@ -0,0 +1,33 @@
1
+ 2024-10-31 00:20:20,030 INFO MainThread:484459 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Configure stats pid to 484459
3
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_002020-qq5oimta/logs/debug.log
10
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_002020-qq5oimta/logs/debug-internal.log
11
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:init():621] calling init triggers
12
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:init():671] starting backend
15
+ 2024-10-31 00:20:20,031 INFO MainThread:484459 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-10-31 00:20:20,032 INFO MainThread:484459 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-10-31 00:20:20,032 INFO MainThread:484459 [wandb_init.py:init():688] backend started and connected
18
+ 2024-10-31 00:20:20,036 INFO MainThread:484459 [wandb_init.py:init():783] updated telemetry
19
+ 2024-10-31 00:20:20,065 INFO MainThread:484459 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-10-31 00:20:21,401 INFO MainThread:484459 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-10-31 00:20:21,535 INFO MainThread:484459 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-10-31 00:20:21,535 INFO MainThread:484459 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-10-31 00:20:21,535 INFO MainThread:484459 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-10-31 00:20:21,535 INFO MainThread:484459 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-10-31 00:20:21,537 INFO MainThread:484459 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-10-31 00:20:21,537 INFO MainThread:484459 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_full', 'train_set': '10M', 'batch_size': 3, 'epoch': 6, 'seed': 0, 'lr': 1e-05}
27
+ 2024-10-31 09:17:19,684 INFO MainThread:484459 [wandb_run.py:_finish():2158] finishing run yaning1001-dartmouth-college/impossible_llm_reverse/qq5oimta
28
+ 2024-10-31 09:17:19,700 INFO MainThread:484459 [wandb_run.py:_atexit_cleanup():2426] got exitcode: 0
29
+ 2024-10-31 09:17:19,748 INFO MainThread:484459 [wandb_run.py:_restore():2408] restore
30
+ 2024-10-31 09:17:19,749 INFO MainThread:484459 [wandb_run.py:_restore():2414] restore done
31
+ 2024-10-31 09:17:22,826 INFO MainThread:484459 [wandb_run.py:_footer_history_summary_info():3975] rendering history
32
+ 2024-10-31 09:17:22,827 INFO MainThread:484459 [wandb_run.py:_footer_history_summary_info():4007] rendering summary
33
+ 2024-10-31 09:17:22,833 INFO MainThread:484459 [wandb_run.py:_footer_sync_info():3934] logging synced files
wandb/run-20241101_012438-qowf210g/logs/debug-internal.log ADDED
@@ -0,0 +1,16 @@
1
+ {"time":"2024-11-01T01:24:38.220929393-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T01:24:38.220939703-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_012438-qowf210g/logs/debug-core.log"}
3
+ {"time":"2024-11-01T01:24:38.327424494-04:00","level":"INFO","msg":"created new stream","id":"qowf210g"}
4
+ {"time":"2024-11-01T01:24:38.327466724-04:00","level":"INFO","msg":"stream: started","id":"qowf210g"}
5
+ {"time":"2024-11-01T01:24:38.327501474-04:00","level":"INFO","msg":"sender: started","stream_id":"qowf210g"}
6
+ {"time":"2024-11-01T01:24:38.327480694-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"qowf210g"}}
7
+ {"time":"2024-11-01T01:24:38.327492874-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"qowf210g"}}
8
+ {"time":"2024-11-01T01:24:38.529752791-04:00","level":"INFO","msg":"Starting system monitor"}
9
+ {"time":"2024-11-01T01:24:38.641652449-04:00","level":"INFO","msg":"stream: closing","id":"qowf210g"}
10
+ {"time":"2024-11-01T01:24:38.64170344-04:00","level":"INFO","msg":"Stopping system monitor"}
11
+ {"time":"2024-11-01T01:24:38.661560348-04:00","level":"INFO","msg":"Stopped system monitor"}
12
+ {"time":"2024-11-01T01:24:39.235879964-04:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
13
+ {"time":"2024-11-01T01:24:39.374996001-04:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"qowf210g"}}
14
+ {"time":"2024-11-01T01:24:39.375064892-04:00","level":"INFO","msg":"sender: closed","stream_id":"qowf210g"}
15
+ {"time":"2024-11-01T01:24:39.375054422-04:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"qowf210g"}}
16
+ {"time":"2024-11-01T01:24:39.375140422-04:00","level":"INFO","msg":"stream: closed","id":"qowf210g"}
wandb/run-20241101_012733-val6n9r9/files/output.log ADDED
@@ -0,0 +1,16 @@
1
+ Downloading shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [02:32<00:00, 76.35s/it]
2
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:06<00:00, 3.42s/it]
3
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 16425/16425 [00:52<00:00, 310.38 examples/s]
4
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 17013/17013 [00:49<00:00, 341.92 examples/s]
5
+ tokenized_valid: Dataset({
6
+ features: ['input_ids', 'attention_mask'],
7
+ num_rows: 600
8
+ })
9
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
10
+ warnings.warn(
11
+ [2024-11-01 01:32:26,372] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
12
+ [2024-11-01 01:32:36,756] [INFO] [comm.py:652:init_distributed] cdb=None
13
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
14
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
15
+ Loading extension module cpu_adam...
16
+ Time to load cpu_adam op: 5.428602933883667 seconds
wandb/run-20241101_012733-val6n9r9/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_012733-val6n9r9/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-01T05:27:33.992750Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "shuffle_nondeterministic",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "6",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1753992269824"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_012733-val6n9r9/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-11-01T01:27:33.995250587-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T01:27:33.995267057-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_012733-val6n9r9/logs/debug-core.log"}
3
+ {"time":"2024-11-01T01:27:34.1039281-04:00","level":"INFO","msg":"created new stream","id":"val6n9r9"}
4
+ {"time":"2024-11-01T01:27:34.103978121-04:00","level":"INFO","msg":"stream: started","id":"val6n9r9"}
5
+ {"time":"2024-11-01T01:27:34.104094092-04:00","level":"INFO","msg":"sender: started","stream_id":"val6n9r9"}
6
+ {"time":"2024-11-01T01:27:34.104027291-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"val6n9r9"}}
7
+ {"time":"2024-11-01T01:27:34.104049701-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"val6n9r9"}}
8
+ {"time":"2024-11-01T01:27:34.310274622-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_012733-val6n9r9/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Configure stats pid to 678553
3
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_012733-val6n9r9/logs/debug.log
10
+ 2024-11-01 01:27:33,990 INFO MainThread:678553 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_012733-val6n9r9/logs/debug-internal.log
11
+ 2024-11-01 01:27:33,991 INFO MainThread:678553 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 01:27:33,991 INFO MainThread:678553 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 01:27:33,991 INFO MainThread:678553 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 01:27:33,991 INFO MainThread:678553 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 01:27:33,992 INFO MainThread:678553 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 01:27:33,992 INFO MainThread:678553 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 01:27:33,996 INFO MainThread:678553 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 01:27:34,020 INFO MainThread:678553 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 01:27:34,307 INFO MainThread:678553 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 01:27:34,397 INFO MainThread:678553 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 01:27:34,397 INFO MainThread:678553 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 01:27:34,397 INFO MainThread:678553 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 01:27:34,397 INFO MainThread:678553 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 01:27:34,399 INFO MainThread:678553 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 01:27:34,399 INFO MainThread:678553 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_nondeterministic', 'train_set': '10M', 'batch_size': 3, 'epoch': 6, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_093116-jo652wfc/files/output.log ADDED
@@ -0,0 +1,13 @@
1
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:18<00:00, 9.08s/it]
2
+ tokenized_valid: Dataset({
3
+ features: ['input_ids', 'attention_mask'],
4
+ num_rows: 600
5
+ })
6
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
7
+ warnings.warn(
8
+ [2024-11-01 09:31:37,056] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
9
+ [2024-11-01 09:31:45,780] [INFO] [comm.py:652:init_distributed] cdb=None
10
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
11
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
12
+ Loading extension module cpu_adam...
13
+ Time to load cpu_adam op: 4.828465461730957 seconds
wandb/run-20241101_093116-jo652wfc/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_093116-jo652wfc/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-01T13:31:16.509088Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754716262400"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_093116-jo652wfc/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-11-01T09:31:16.511149419-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T09:31:16.511162399-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-jo652wfc/logs/debug-core.log"}
3
+ {"time":"2024-11-01T09:31:16.618441589-04:00","level":"INFO","msg":"created new stream","id":"jo652wfc"}
4
+ {"time":"2024-11-01T09:31:16.618474009-04:00","level":"INFO","msg":"stream: started","id":"jo652wfc"}
5
+ {"time":"2024-11-01T09:31:16.618503669-04:00","level":"INFO","msg":"sender: started","stream_id":"jo652wfc"}
6
+ {"time":"2024-11-01T09:31:16.618487059-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"jo652wfc"}}
7
+ {"time":"2024-11-01T09:31:16.618502009-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"jo652wfc"}}
8
+ {"time":"2024-11-01T09:31:16.832438843-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_093116-jo652wfc/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Configure stats pid to 781949
3
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-jo652wfc/logs/debug.log
10
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-jo652wfc/logs/debug-internal.log
11
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 09:31:16,507 INFO MainThread:781949 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 09:31:16,508 INFO MainThread:781949 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 09:31:16,508 INFO MainThread:781949 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 09:31:16,511 INFO MainThread:781949 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 09:31:16,539 INFO MainThread:781949 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 09:31:16,829 INFO MainThread:781949 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 09:31:16,925 INFO MainThread:781949 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 09:31:16,925 INFO MainThread:781949 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 09:31:16,925 INFO MainThread:781949 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 09:31:16,925 INFO MainThread:781949 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 09:31:16,927 INFO MainThread:781949 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 09:31:16,927 INFO MainThread:781949 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 7, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_093116-jo652wfc/run-jo652wfc.wandb ADDED
Binary file (32.8 kB)
wandb/run-20241101_093116-w11cgu13/files/output.log ADDED
@@ -0,0 +1,13 @@
1
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:18<00:00, 9.04s/it]
2
+ tokenized_valid: Dataset({
3
+ features: ['input_ids', 'attention_mask'],
4
+ num_rows: 600
5
+ })
6
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
7
+ warnings.warn(
8
+ [2024-11-01 09:31:37,040] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
9
+ [2024-11-01 09:31:45,693] [INFO] [comm.py:652:init_distributed] cdb=None
10
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
11
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
12
+ Loading extension module cpu_adam...
13
+ Time to load cpu_adam op: 4.85305643081665 seconds
wandb/run-20241101_093116-w11cgu13/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_093116-w11cgu13/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-01T13:31:16.513694Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754716262400"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_093116-w11cgu13/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-11-01T09:31:16.515903771-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T09:31:16.515917391-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-w11cgu13/logs/debug-core.log"}
3
+ {"time":"2024-11-01T09:31:16.623731793-04:00","level":"INFO","msg":"created new stream","id":"w11cgu13"}
4
+ {"time":"2024-11-01T09:31:16.623771704-04:00","level":"INFO","msg":"stream: started","id":"w11cgu13"}
5
+ {"time":"2024-11-01T09:31:16.623799034-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"w11cgu13"}}
6
+ {"time":"2024-11-01T09:31:16.623856324-04:00","level":"INFO","msg":"sender: started","stream_id":"w11cgu13"}
7
+ {"time":"2024-11-01T09:31:16.623831334-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"w11cgu13"}}
8
+ {"time":"2024-11-01T09:31:16.852066303-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_093116-w11cgu13/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Configure stats pid to 781946
3
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 09:31:16,511 INFO MainThread:781946 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-w11cgu13/logs/debug.log
10
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-w11cgu13/logs/debug-internal.log
11
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 09:31:16,512 INFO MainThread:781946 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 09:31:16,513 INFO MainThread:781946 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 09:31:16,513 INFO MainThread:781946 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 09:31:16,516 INFO MainThread:781946 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 09:31:16,551 INFO MainThread:781946 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 09:31:16,849 INFO MainThread:781946 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 09:31:16,943 INFO MainThread:781946 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 09:31:16,943 INFO MainThread:781946 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 09:31:16,943 INFO MainThread:781946 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 09:31:16,943 INFO MainThread:781946 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 09:31:16,945 INFO MainThread:781946 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 09:31:16,945 INFO MainThread:781946 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 7, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_093116-w11cgu13/run-w11cgu13.wandb ADDED
Binary file (32.8 kB)
wandb/run-20241101_200535-lnp8ii96/files/output.log ADDED
@@ -0,0 +1,17 @@
1
+ model-00001-of-00002.safetensors: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4.97G/4.97G [01:51<00:00, 42.0MB/s]
2
+ model-00002-of-00002.safetensors: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1.46G/1.46G [00:34<00:00, 42.3MB/s]
3
+ Downloading shards: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [02:25<00:00, 72.94s/it]
4
+ Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.56s/it]
5
+ generation_config.json: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 185/185 [00:00<00:00, 66.7kB/s]
6
+ tokenized_valid: Dataset({
7
+ features: ['input_ids', 'attention_mask'],
8
+ num_rows: 600
9
+ })
10
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
11
+ warnings.warn(
12
+ [2024-11-01 20:08:09,443] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
13
+ [2024-11-01 20:08:19,367] [INFO] [comm.py:652:init_distributed] cdb=None
14
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
15
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
16
+ Loading extension module cpu_adam...
17
+ Time to load cpu_adam op: 5.52386474609375 seconds
wandb/run-20241101_200535-lnp8ii96/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_200535-lnp8ii96/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-02T00:05:35.623567Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "shuffle_nondeterministic",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "3",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754801684480"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_200535-lnp8ii96/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-11-01T20:05:35.626320342-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T20:05:35.626341822-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-lnp8ii96/logs/debug-core.log"}
3
+ {"time":"2024-11-01T20:05:35.736138328-04:00","level":"INFO","msg":"created new stream","id":"lnp8ii96"}
4
+ {"time":"2024-11-01T20:05:35.736185149-04:00","level":"INFO","msg":"stream: started","id":"lnp8ii96"}
5
+ {"time":"2024-11-01T20:05:35.736261459-04:00","level":"INFO","msg":"sender: started","stream_id":"lnp8ii96"}
6
+ {"time":"2024-11-01T20:05:35.736245139-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"lnp8ii96"}}
7
+ {"time":"2024-11-01T20:05:35.73631463-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"lnp8ii96"}}
8
+ {"time":"2024-11-01T20:05:35.90118916-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_200535-lnp8ii96/logs/debug.log ADDED
@@ -0,0 +1,26 @@
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Configure stats pid to 871229
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_setup.py:_flush():79] Applying login settings: {}
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-lnp8ii96/logs/debug.log
+ 2024-11-01 20:05:35,620 INFO MainThread:871229 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-lnp8ii96/logs/debug-internal.log
+ 2024-11-01 20:05:35,621 INFO MainThread:871229 [wandb_init.py:init():621] calling init triggers
+ 2024-11-01 20:05:35,621 INFO MainThread:871229 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-11-01 20:05:35,621 INFO MainThread:871229 [wandb_init.py:init():671] starting backend
+ 2024-11-01 20:05:35,621 INFO MainThread:871229 [wandb_init.py:init():675] sending inform_init request
+ 2024-11-01 20:05:35,622 INFO MainThread:871229 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-11-01 20:05:35,623 INFO MainThread:871229 [wandb_init.py:init():688] backend started and connected
+ 2024-11-01 20:05:35,626 INFO MainThread:871229 [wandb_init.py:init():783] updated telemetry
+ 2024-11-01 20:05:35,646 INFO MainThread:871229 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
+ 2024-11-01 20:05:35,897 INFO MainThread:871229 [wandb_init.py:init():867] starting run threads in backend
+ 2024-11-01 20:05:35,987 INFO MainThread:871229 [wandb_run.py:_console_start():2463] atexit reg
+ 2024-11-01 20:05:35,987 INFO MainThread:871229 [wandb_run.py:_redirect():2311] redirect: wrap_raw
+ 2024-11-01 20:05:35,987 INFO MainThread:871229 [wandb_run.py:_redirect():2376] Wrapping output streams.
+ 2024-11-01 20:05:35,987 INFO MainThread:871229 [wandb_run.py:_redirect():2401] Redirects installed.
+ 2024-11-01 20:05:35,989 INFO MainThread:871229 [wandb_init.py:init():911] run started, returning control to user process
+ 2024-11-01 20:05:35,989 INFO MainThread:871229 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_nondeterministic', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_200535-xloij0da/run-xloij0da.wandb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86f830d3dae48b66e8e3e2ca7fdbd7abc67f7ca270bb41d81aa5d822bd8fc2de
+ size 131072
wandb/run-20241105_155905-adxztc74/files/config.yaml ADDED
@@ -0,0 +1,49 @@
+ _wandb:
+ value:
+ cli_version: 0.18.5
+ m: []
+ python_version: 3.9.19
+ t:
+ "1":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "2":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "3":
+ - 13
+ - 23
+ - 55
+ "4": 3.9.19
+ "5": 0.18.5
+ "6": 4.45.1
+ "8":
+ - 5
+ "12": 0.18.5
+ "13": linux-x86_64
+ batch_size:
+ value: 3
+ epoch:
+ value: 3
+ lr:
+ value: 5e-06
+ perturbation:
+ value: shuffle_deterministic21
+ seed:
+ value: 0
+ train_set:
+ value: 10M
wandb/run-20241105_155905-adxztc74/files/output.log ADDED
@@ -0,0 +1,19 @@
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ FileNotFoundError: [Errno 2] No such file or directory: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0/0.0.0'
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py", line 165, in <module>
+ dataset = load_dataset('babylm_dataset_test.py', name=dataset_name, trust_remote_code=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/load.py", line 2096, in load_dataset
+ builder_instance.download_and_prepare(
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/builder.py", line 855, in download_and_prepare
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1327, in mkdir
+ self.parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ OSError: [Errno 28] No space left on device: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0'
wandb/run-20241105_155905-adxztc74/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
+ funcsigs==1.0.2
+ sentry-sdk==2.17.0
+ multiprocess==0.70.16
+ numpy==1.26.2
+ pluralizer==1.2.0
+ debugpy==1.6.7
+ nvidia-cudnn-cu11==8.5.0.96
+ deepspeed==0.15.2
+ data==0.4
+ pandas==2.1.3
+ tomli==2.0.1
+ charset-normalizer==3.3.2
+ attrs==24.2.0
+ aiosignal==1.3.1
+ fsspec==2023.10.0
+ nvidia-cusparse-cu11==11.7.4.91
+ zipp==3.12.0
+ mypy-extensions==1.0.0
+ datasets==3.0.1
+ joblib==1.3.2
+ hjson==3.1.0
+ traitlets==5.7.1
+ stack-data==0.6.0
+ transformers==4.45.1
+ sympy==1.11.1
+ Pygments==2.15.0
+ docker-pycreds==0.4.0
+ dill==0.3.8
+ wheel==0.44.0
+ prompt-toolkit==3.0.30
+ parso==0.8.3
+ ipykernel==6.23.1
+ pyarrow==17.0.0
+ certifi==2023.11.17
+ nvidia-cufft-cu11==10.9.0.58
+ six==1.16.0
+ pydantic==2.9.2
+ click==8.1.7
+ nest-asyncio==1.5.6
+ gmpy2==2.1.0
+ matplotlib==3.8.2
+ scipy==1.11.4
+ typing_extensions==4.12.2
+ statsmodels==0.14.0
+ huggingface-hub==0.25.0
+ frozenlist==1.4.1
+ gpustat==1.1.1
+ nvidia-nvtx-cu11==11.7.91
+ safetensors==0.4.5
+ stanza==1.9.2
+ decorator==5.1.1
+ seaborn==0.13.0
+ sentencepiece==0.2.0
+ PyYAML==6.0.1
+ black==24.8.0
+ protobuf==4.25.1
+ pickleshare==0.7.5
+ peft==0.13.0
+ triton==2.0.0
+ nvidia-cuda-runtime-cu11==11.7.99
+ Jinja2==3.1.2
+ nvidia-cusolver-cu11==11.4.0.1
+ executing==1.2.0
+ jupyter_client==8.1.0
+ pluggy==1.3.0
+ cmake==3.30.3
+ pytz==2023.3.post1
+ aiohappyeyeballs==2.4.2
+ kiwisolver==1.4.5
+ py-cpuinfo==9.0.0
+ Pillow==10.1.0
+ ptyprocess==0.7.0
+ importlib_resources==6.4.5
+ GitPython==3.1.43
+ importlib-metadata==6.0.0
+ iniconfig==2.0.0
+ scikit-learn==1.3.2
+ exceptiongroup==1.1.0
+ networkx==2.8.6
+ accelerate==1.0.0
+ nltk==3.8.1
+ shutilwhich==1.1.0
+ fonttools==4.45.1
+ future==0.18.3
+ aiohttp==3.10.6
+ wcwidth==0.2.5
+ idna==3.6
+ filelock==3.12.2
+ pathspec==0.12.1
+ jupyter_core==5.1.0
+ lit==18.1.8
+ nvidia-curand-cu11==10.2.10.91
+ nvidia-cublas-cu11==11.10.3.66
+ nvidia-ml-py==12.560.30
+ msgpack==1.1.0
+ python-dateutil==2.8.2
+ blessed==1.20.0
+ packaging==23.0
+ gitdb==4.0.11
+ yarl==1.13.0
+ emoji==2.8.0
+ tzdata==2023.3
+ cycler==0.12.1
+ tornado==6.2
+ backcall==0.2.0
+ plotnine==0.12.4
+ ninja==1.11.1.1
+ latex==0.7.0
+ wandb==0.18.5
+ setproctitle==1.3.3
+ threadpoolctl==3.2.0
+ requests==2.32.3
+ pyparsing==3.1.1
+ smmap==5.0.1
+ pyzmq==23.0.0
+ async-timeout==4.0.3
+ annotated-types==0.7.0
+ matplotlib-inline==0.1.6
+ latexcodec==1.0.0
+ ipython==8.0.0
+ patsy==0.5.3
+ contourpy==1.2.0
+ multidict==6.1.0
+ mizani==0.9.3
+ urllib3==2.1.0
+ tokenizers==0.20.0
+ MarkupSafe==2.1.2
+ pip==24.2
+ pexpect==4.8.0
+ tqdm==4.66.5
+ jedi==0.18.2
+ pydantic_core==2.23.4
+ tempdir==0.7.1
+ mpmath==1.2.1
+ setuptools==72.1.0
+ pytest==7.4.3
+ pure-eval==0.2.2
+ psutil==5.9.1
+ comm==0.1.2
+ nvidia-cuda-cupti-cu11==11.7.101
+ nvidia-cuda-nvrtc-cu11==11.7.99
+ regex==2023.10.3
+ platformdirs==2.5.2
+ asttokens==2.2.1
+ torch==2.0.0
+ nvidia-nccl-cu11==2.14.3
+ xxhash==3.5.0
wandb/run-20241105_155905-adxztc74/files/wandb-metadata.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
+ "python": "3.9.19",
+ "startedAt": "2024-11-05T20:59:05.914825Z",
+ "args": [
+ "--perturbation",
+ "shuffle_deterministic21",
+ "--train_set",
+ "10M",
+ "--batch_size",
+ "3",
+ "--epoch",
+ "3",
+ "--seed",
+ "0"
+ ],
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
+ "codePath": "train/train_deep_wandb.py",
+ "git": {
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
+ },
+ "email": "yaning1001@gmail.com",
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
+ "host": "mms-large-2",
+ "username": "chunhui",
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
+ "codePathLocal": "train_deep_wandb.py",
+ "cpu_count": 32,
+ "cpu_count_logical": 64,
+ "disk": {
+ "/": {
+ "total": "1888559353856",
+ "used": "1792550322176"
+ }
+ },
+ "memory": {
+ "total": "202617098240"
+ },
+ "cpu": {
+ "count": 32,
+ "countLogical": 64
+ }
+ }
wandb/run-20241105_155905-adxztc74/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb":{"runtime":8}}
wandb/run-20241105_155905-adxztc74/logs/debug-internal.log ADDED
@@ -0,0 +1,18 @@
+ {"time":"2024-11-05T15:59:05.91775728-05:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-11-05T15:59:05.91777683-05:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241105_155905-adxztc74/logs/debug-core.log"}
+ {"time":"2024-11-05T15:59:10.952154954-05:00","level":"INFO","msg":"created new stream","id":"adxztc74"}
+ {"time":"2024-11-05T15:59:10.952196724-05:00","level":"INFO","msg":"stream: started","id":"adxztc74"}
+ {"time":"2024-11-05T15:59:10.952239154-05:00","level":"INFO","msg":"sender: started","stream_id":"adxztc74"}
+ {"time":"2024-11-05T15:59:10.952206264-05:00","level":"INFO","msg":"handler: started","stream_id":{"value":"adxztc74"}}
+ {"time":"2024-11-05T15:59:10.952238964-05:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"adxztc74"}}
+ {"time":"2024-11-05T15:59:11.536374693-05:00","level":"INFO","msg":"api: retrying HTTP error","status":500,"url":"https://api.wandb.ai/graphql"}
+ {"time":"2024-11-05T15:59:14.076098382-05:00","level":"INFO","msg":"Starting system monitor"}
+ {"time":"2024-11-05T15:59:14.191332644-05:00","level":"INFO","msg":"stream: closing","id":"adxztc74"}
+ {"time":"2024-11-05T15:59:14.191368314-05:00","level":"INFO","msg":"Stopping system monitor"}
+ {"time":"2024-11-05T15:59:14.191421524-05:00","level":"INFO","msg":"Stopped system monitor"}
+ {"time":"2024-11-05T15:59:14.461833728-05:00","level":"ERROR","msg":"sender: sendDefer: failed to build job artifact","error":"failed to write data to file: write /tmp/tmpfile-240625076: no space left on device"}
+ {"time":"2024-11-05T15:59:14.724659519-05:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+ {"time":"2024-11-05T15:59:14.905874368-05:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"adxztc74"}}
+ {"time":"2024-11-05T15:59:14.905929738-05:00","level":"INFO","msg":"sender: closed","stream_id":"adxztc74"}
+ {"time":"2024-11-05T15:59:14.905921668-05:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"adxztc74"}}
+ {"time":"2024-11-05T15:59:14.906009548-05:00","level":"INFO","msg":"stream: closed","id":"adxztc74"}
wandb/run-20241105_155905-adxztc74/logs/debug.log ADDED
@@ -0,0 +1,27 @@
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Configure stats pid to 1768668
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_setup.py:_flush():79] Applying login settings: {}
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241105_155905-adxztc74/logs/debug.log
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241105_155905-adxztc74/logs/debug-internal.log
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:init():621] calling init triggers
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:init():671] starting backend
+ 2024-11-05 15:59:05,913 INFO MainThread:1768668 [wandb_init.py:init():675] sending inform_init request
+ 2024-11-05 15:59:05,914 INFO MainThread:1768668 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-11-05 15:59:05,914 INFO MainThread:1768668 [wandb_init.py:init():688] backend started and connected
+ 2024-11-05 15:59:05,917 INFO MainThread:1768668 [wandb_init.py:init():783] updated telemetry
+ 2024-11-05 15:59:05,944 INFO MainThread:1768668 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
+ 2024-11-05 15:59:14,072 INFO MainThread:1768668 [wandb_init.py:init():867] starting run threads in backend
+ 2024-11-05 15:59:14,167 INFO MainThread:1768668 [wandb_run.py:_console_start():2463] atexit reg
+ 2024-11-05 15:59:14,167 INFO MainThread:1768668 [wandb_run.py:_redirect():2311] redirect: wrap_raw
+ 2024-11-05 15:59:14,167 INFO MainThread:1768668 [wandb_run.py:_redirect():2376] Wrapping output streams.
+ 2024-11-05 15:59:14,167 INFO MainThread:1768668 [wandb_run.py:_redirect():2401] Redirects installed.
+ 2024-11-05 15:59:14,168 INFO MainThread:1768668 [wandb_init.py:init():911] run started, returning control to user process
+ 2024-11-05 15:59:14,169 INFO MainThread:1768668 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_deterministic21', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0, 'lr': 5e-06}
+ 2024-11-05 15:59:14,191 WARNING MsgRouterThr:1768668 [router.py:message_loop():77] message_loop has been closed
wandb/run-20241105_155905-adxztc74/run-adxztc74.wandb ADDED
Binary file (3.76 kB).
wandb/run-20241105_155954-daaq0lj0/files/config.yaml ADDED
@@ -0,0 +1,49 @@
+ _wandb:
+ value:
+ cli_version: 0.18.5
+ m: []
+ python_version: 3.9.19
+ t:
+ "1":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "2":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "3":
+ - 13
+ - 23
+ - 55
+ "4": 3.9.19
+ "5": 0.18.5
+ "6": 4.45.1
+ "8":
+ - 5
+ "12": 0.18.5
+ "13": linux-x86_64
+ batch_size:
+ value: 3
+ epoch:
+ value: 3
+ lr:
+ value: 5e-06
+ perturbation:
+ value: shuffle_deterministic21
+ seed:
+ value: 0
+ train_set:
+ value: 10M
wandb/run-20241105_155954-daaq0lj0/files/output.log ADDED
@@ -0,0 +1,19 @@
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ FileNotFoundError: [Errno 2] No such file or directory: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0/0.0.0'
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py", line 165, in <module>
+ dataset = load_dataset('babylm_dataset_test.py', name=dataset_name, trust_remote_code=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/load.py", line 2096, in load_dataset
+ builder_instance.download_and_prepare(
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/builder.py", line 855, in download_and_prepare
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1327, in mkdir
+ self.parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ OSError: [Errno 28] No space left on device: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0'