Yaning1001 committed on
Commit f421ba7 · verified · 1 Parent(s): e155a54

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +7 -0
  2. wandb/run-20241030_010305-y2ohxj86/logs/debug-internal.log +16 -0
  3. wandb/run-20241030_010641-fp8gbo2l/files/config.yaml +47 -0
  4. wandb/run-20241030_010641-fp8gbo2l/files/output.log +4 -0
  5. wandb/run-20241030_010641-fp8gbo2l/files/wandb-metadata.json +29 -0
  6. wandb/run-20241030_010641-fp8gbo2l/files/wandb-summary.json +1 -0
  7. wandb/run-20241030_010641-fp8gbo2l/logs/debug-internal.log +16 -0
  8. wandb/run-20241030_010641-fp8gbo2l/logs/debug.log +27 -0
  9. wandb/run-20241030_010641-fp8gbo2l/run-fp8gbo2l.wandb +0 -0
  10. wandb/run-20241030_011509-zmlu7388/files/output.log +15 -0
  11. wandb/run-20241030_011509-zmlu7388/files/requirements.txt +147 -0
  12. wandb/run-20241030_011509-zmlu7388/files/wandb-metadata.json +97 -0
  13. wandb/run-20241030_011509-zmlu7388/logs/debug-internal.log +8 -0
  14. wandb/run-20241030_011509-zmlu7388/logs/debug.log +26 -0
  15. wandb/run-20241031_002020-q6ot1vz6/run-q6ot1vz6.wandb +3 -0
  16. wandb/run-20241031_122006-f2ep45tp/files/config.yaml +49 -0
  17. wandb/run-20241031_122006-f2ep45tp/files/output.log +60 -0
  18. wandb/run-20241031_122006-f2ep45tp/files/requirements.txt +147 -0
  19. wandb/run-20241031_122006-f2ep45tp/files/wandb-metadata.json +97 -0
  20. wandb/run-20241031_122006-f2ep45tp/files/wandb-summary.json +1 -0
  21. wandb/run-20241031_122006-f2ep45tp/logs/debug-internal.log +11 -0
  22. wandb/run-20241031_122006-f2ep45tp/logs/debug.log +27 -0
  23. wandb/run-20241031_122006-f2ep45tp/run-f2ep45tp.wandb +0 -0
  24. wandb/run-20241031_122114-2k9672ya/run-2k9672ya.wandb +3 -0
  25. wandb/run-20241101_093116-8434p043/files/output.log +16 -0
  26. wandb/run-20241101_093116-8434p043/files/requirements.txt +147 -0
  27. wandb/run-20241101_093116-8434p043/files/wandb-metadata.json +97 -0
  28. wandb/run-20241101_093116-8434p043/logs/debug-internal.log +8 -0
  29. wandb/run-20241101_093116-8434p043/logs/debug.log +26 -0
  30. wandb/run-20241101_093116-8434p043/run-8434p043.wandb +0 -0
  31. wandb/run-20241101_094656-v2rxhny6/files/wandb-metadata.json +97 -0
  32. wandb/run-20241101_094656-v2rxhny6/run-v2rxhny6.wandb +0 -0
  33. wandb/run-20241101_200535-6xsf0vem/files/output.log +21 -0
  34. wandb/run-20241101_200535-6xsf0vem/files/requirements.txt +147 -0
  35. wandb/run-20241101_200535-6xsf0vem/files/wandb-metadata.json +97 -0
  36. wandb/run-20241101_200535-6xsf0vem/logs/debug-internal.log +8 -0
  37. wandb/run-20241101_200535-6xsf0vem/logs/debug.log +29 -0
  38. wandb/run-20241101_200535-hnfjoqai/files/output.log +14 -0
  39. wandb/run-20241101_200535-hnfjoqai/files/requirements.txt +147 -0
  40. wandb/run-20241101_200535-hnfjoqai/files/wandb-metadata.json +97 -0
  41. wandb/run-20241101_200535-hnfjoqai/logs/debug-internal.log +8 -0
  42. wandb/run-20241101_200535-hnfjoqai/logs/debug.log +26 -0
  43. wandb/run-20241101_201910-hnwfqg73/files/output.log +1 -0
  44. wandb/run-20241101_201910-hnwfqg73/files/requirements.txt +147 -0
  45. wandb/run-20241101_201910-hnwfqg73/files/wandb-metadata.json +97 -0
  46. wandb/run-20241101_201910-hnwfqg73/logs/debug-internal.log +8 -0
  47. wandb/run-20241101_201910-hnwfqg73/logs/debug.log +26 -0
  48. wandb/run-20241101_201910-hnwfqg73/run-hnwfqg73.wandb +0 -0
  49. wandb/run-20241105_160217-21j8oh7z/files/config.yaml +49 -0
  50. wandb/run-20241105_160217-21j8oh7z/files/output.log +19 -0
.gitattributes CHANGED
@@ -103,3 +103,10 @@ wandb/run-20241101_202058-jijqbvs1/run-jijqbvs1.wandb filter=lfs diff=lfs merge=
103
  wandb/run-20241129_083813-gsvlu1z8/run-gsvlu1z8.wandb filter=lfs diff=lfs merge=lfs -text
104
  wandb/run-20241030_231835-o1t74f3e/run-o1t74f3e.wandb filter=lfs diff=lfs merge=lfs -text
105
  wandb/run-20241030_233740-98qje3cr/run-98qje3cr.wandb filter=lfs diff=lfs merge=lfs -text
106
+ wandb/run-20241106_234348-zan8h57j/run-zan8h57j.wandb filter=lfs diff=lfs merge=lfs -text
107
+ wandb/run-20241105_163248-rhhc1g6i/run-rhhc1g6i.wandb filter=lfs diff=lfs merge=lfs -text
108
+ wandb/run-20241105_163029-2rkpz70q/run-2rkpz70q.wandb filter=lfs diff=lfs merge=lfs -text
109
+ wandb/run-20241105_163248-thalxhcd/run-thalxhcd.wandb filter=lfs diff=lfs merge=lfs -text
110
+ wandb/run-20241031_002020-q6ot1vz6/run-q6ot1vz6.wandb filter=lfs diff=lfs merge=lfs -text
111
+ wandb/run-20241106_234348-l3eig11b/run-l3eig11b.wandb filter=lfs diff=lfs merge=lfs -text
112
+ wandb/run-20241031_122114-2k9672ya/run-2k9672ya.wandb filter=lfs diff=lfs merge=lfs -text
wandb/run-20241030_010305-y2ohxj86/logs/debug-internal.log ADDED
@@ -0,0 +1,16 @@
1
+ {"time":"2024-10-30T01:03:05.8822364-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-10-30T01:03:05.88225013-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_010305-y2ohxj86/logs/debug-core.log"}
3
+ {"time":"2024-10-30T01:03:05.989239396-04:00","level":"INFO","msg":"created new stream","id":"y2ohxj86"}
4
+ {"time":"2024-10-30T01:03:05.989281006-04:00","level":"INFO","msg":"stream: started","id":"y2ohxj86"}
5
+ {"time":"2024-10-30T01:03:05.989342226-04:00","level":"INFO","msg":"sender: started","stream_id":"y2ohxj86"}
6
+ {"time":"2024-10-30T01:03:05.989341646-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"y2ohxj86"}}
7
+ {"time":"2024-10-30T01:03:05.989369606-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"y2ohxj86"}}
8
+ {"time":"2024-10-30T01:03:06.155395708-04:00","level":"INFO","msg":"Starting system monitor"}
9
+ {"time":"2024-10-30T01:03:06.261798859-04:00","level":"INFO","msg":"stream: closing","id":"y2ohxj86"}
10
+ {"time":"2024-10-30T01:03:06.26182351-04:00","level":"INFO","msg":"Stopping system monitor"}
11
+ {"time":"2024-10-30T01:03:06.262278953-04:00","level":"INFO","msg":"Stopped system monitor"}
12
+ {"time":"2024-10-30T01:03:07.554925721-04:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
13
+ {"time":"2024-10-30T01:03:07.677085611-04:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"y2ohxj86"}}
14
+ {"time":"2024-10-30T01:03:07.677137671-04:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"y2ohxj86"}}
15
+ {"time":"2024-10-30T01:03:07.677156552-04:00","level":"INFO","msg":"sender: closed","stream_id":"y2ohxj86"}
16
+ {"time":"2024-10-30T01:03:07.677223022-04:00","level":"INFO","msg":"stream: closed","id":"y2ohxj86"}
wandb/run-20241030_010641-fp8gbo2l/files/config.yaml ADDED
@@ -0,0 +1,47 @@
1
+ _wandb:
2
+ value:
3
+ cli_version: 0.18.5
4
+ m: []
5
+ python_version: 3.9.19
6
+ t:
7
+ "1":
8
+ - 1
9
+ - 5
10
+ - 11
11
+ - 49
12
+ - 51
13
+ - 53
14
+ - 55
15
+ - 71
16
+ - 98
17
+ "2":
18
+ - 1
19
+ - 5
20
+ - 11
21
+ - 49
22
+ - 51
23
+ - 53
24
+ - 55
25
+ - 71
26
+ - 98
27
+ "3":
28
+ - 13
29
+ - 23
30
+ - 55
31
+ "4": 3.9.19
32
+ "5": 0.18.5
33
+ "6": 4.45.1
34
+ "8":
35
+ - 5
36
+ "12": 0.18.5
37
+ "13": linux-x86_64
38
+ batch_size:
39
+ value: 3
40
+ epoch:
41
+ value: 7
42
+ perturbation:
43
+ value: reverse_control
44
+ seed:
45
+ value: 0
46
+ train_set:
47
+ value: 10M
wandb/run-20241030_010641-fp8gbo2l/files/output.log ADDED
@@ -0,0 +1,4 @@
1
+ Traceback (most recent call last):
2
+ File "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py", line 162, in <module>
3
+ dataset_name = f"babylm_{args.perturbation}_{args.train_zset}_seed{args.seed}"
4
+ AttributeError: 'Namespace' object has no attribute 'train_zset'
wandb/run-20241030_010641-fp8gbo2l/files/wandb-metadata.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-10-30T05:06:41.433074Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py"
29
+ }
wandb/run-20241030_010641-fp8gbo2l/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb":{"runtime":1}}
wandb/run-20241030_010641-fp8gbo2l/logs/debug-internal.log ADDED
@@ -0,0 +1,16 @@
1
+ {"time":"2024-10-30T01:06:41.435404628-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-10-30T01:06:41.435416958-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_010641-fp8gbo2l/logs/debug-core.log"}
3
+ {"time":"2024-10-30T01:06:41.543037882-04:00","level":"INFO","msg":"created new stream","id":"fp8gbo2l"}
4
+ {"time":"2024-10-30T01:06:41.543083813-04:00","level":"INFO","msg":"stream: started","id":"fp8gbo2l"}
5
+ {"time":"2024-10-30T01:06:41.543120933-04:00","level":"INFO","msg":"sender: started","stream_id":"fp8gbo2l"}
6
+ {"time":"2024-10-30T01:06:41.543122113-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"fp8gbo2l"}}
7
+ {"time":"2024-10-30T01:06:41.543101113-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"fp8gbo2l"}}
8
+ {"time":"2024-10-30T01:06:43.031925671-04:00","level":"INFO","msg":"Starting system monitor"}
9
+ {"time":"2024-10-30T01:06:43.129140054-04:00","level":"INFO","msg":"stream: closing","id":"fp8gbo2l"}
10
+ {"time":"2024-10-30T01:06:43.129196235-04:00","level":"INFO","msg":"Stopping system monitor"}
11
+ {"time":"2024-10-30T01:06:43.222777423-04:00","level":"INFO","msg":"Stopped system monitor"}
12
+ {"time":"2024-10-30T01:06:43.624872197-04:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
13
+ {"time":"2024-10-30T01:06:43.737064212-04:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"fp8gbo2l"}}
14
+ {"time":"2024-10-30T01:06:43.737102682-04:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"fp8gbo2l"}}
15
+ {"time":"2024-10-30T01:06:43.737133182-04:00","level":"INFO","msg":"sender: closed","stream_id":"fp8gbo2l"}
16
+ {"time":"2024-10-30T01:06:43.737155973-04:00","level":"INFO","msg":"stream: closed","id":"fp8gbo2l"}
wandb/run-20241030_010641-fp8gbo2l/logs/debug.log ADDED
@@ -0,0 +1,27 @@
1
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Configure stats pid to 321594
3
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_010641-fp8gbo2l/logs/debug.log
10
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_010641-fp8gbo2l/logs/debug-internal.log
11
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:init():621] calling init triggers
12
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:init():671] starting backend
15
+ 2024-10-30 01:06:41,431 INFO MainThread:321594 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-10-30 01:06:41,432 INFO MainThread:321594 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-10-30 01:06:41,432 INFO MainThread:321594 [wandb_init.py:init():688] backend started and connected
18
+ 2024-10-30 01:06:41,436 INFO MainThread:321594 [wandb_init.py:init():783] updated telemetry
19
+ 2024-10-30 01:06:41,456 INFO MainThread:321594 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-10-30 01:06:43,029 INFO MainThread:321594 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-10-30 01:06:43,125 INFO MainThread:321594 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-10-30 01:06:43,125 INFO MainThread:321594 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-10-30 01:06:43,125 INFO MainThread:321594 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-10-30 01:06:43,125 INFO MainThread:321594 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-10-30 01:06:43,128 INFO MainThread:321594 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-10-30 01:06:43,128 INFO MainThread:321594 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 7, 'seed': 0}
27
+ 2024-10-30 01:06:43,129 WARNING MsgRouterThr:321594 [router.py:message_loop():77] message_loop has been closed
wandb/run-20241030_010641-fp8gbo2l/run-fp8gbo2l.wandb ADDED
Binary file (1.57 kB).
 
wandb/run-20241030_011509-zmlu7388/files/output.log ADDED
@@ -0,0 +1,15 @@
1
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:04<00:00, 2.33s/it]
2
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 17519/17519 [00:52<00:00, 336.12 examples/s]
3
+ Map: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 18140/18140 [00:53<00:00, 341.91 examples/s]
4
+ tokenized_valid: Dataset({
5
+ features: ['input_ids', 'attention_mask'],
6
+ num_rows: 600
7
+ })
8
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
9
+ warnings.warn(
10
+ [2024-10-30 01:17:01,020] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
11
+ [2024-10-30 01:17:07,829] [INFO] [comm.py:652:init_distributed] cdb=None
12
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
13
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
14
+ Loading extension module cpu_adam...
15
+ Time to load cpu_adam op: 4.184397220611572 seconds
wandb/run-20241030_011509-zmlu7388/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241030_011509-zmlu7388/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-10-30T05:15:09.352977Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1719287033856"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241030_011509-zmlu7388/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-10-30T01:15:09.354988919-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-10-30T01:15:09.355002219-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_011509-zmlu7388/logs/debug-core.log"}
3
+ {"time":"2024-10-30T01:15:09.462920738-04:00","level":"INFO","msg":"created new stream","id":"zmlu7388"}
4
+ {"time":"2024-10-30T01:15:09.462953758-04:00","level":"INFO","msg":"stream: started","id":"zmlu7388"}
5
+ {"time":"2024-10-30T01:15:09.462991088-04:00","level":"INFO","msg":"sender: started","stream_id":"zmlu7388"}
6
+ {"time":"2024-10-30T01:15:09.462971648-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"zmlu7388"}}
7
+ {"time":"2024-10-30T01:15:09.463001818-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"zmlu7388"}}
8
+ {"time":"2024-10-30T01:15:09.699769799-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241030_011509-zmlu7388/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-10-30 01:15:09,350 INFO MainThread:324929 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-10-30 01:15:09,350 INFO MainThread:324929 [wandb_setup.py:_flush():79] Configure stats pid to 324929
3
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_011509-zmlu7388/logs/debug.log
10
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241030_011509-zmlu7388/logs/debug-internal.log
11
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:init():621] calling init triggers
12
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:init():671] starting backend
15
+ 2024-10-30 01:15:09,351 INFO MainThread:324929 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-10-30 01:15:09,352 INFO MainThread:324929 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-10-30 01:15:09,352 INFO MainThread:324929 [wandb_init.py:init():688] backend started and connected
18
+ 2024-10-30 01:15:09,355 INFO MainThread:324929 [wandb_init.py:init():783] updated telemetry
19
+ 2024-10-30 01:15:09,380 INFO MainThread:324929 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-10-30 01:15:09,696 INFO MainThread:324929 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-10-30 01:15:09,791 INFO MainThread:324929 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-10-30 01:15:09,791 INFO MainThread:324929 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-10-30 01:15:09,791 INFO MainThread:324929 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-10-30 01:15:09,791 INFO MainThread:324929 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-10-30 01:15:09,793 INFO MainThread:324929 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-10-30 01:15:09,794 INFO MainThread:324929 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 7, 'seed': 0}
wandb/run-20241031_002020-q6ot1vz6/run-q6ot1vz6.wandb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:950afa23825d7fc6e85fb932c976838964b187a182167c4b2250b3eff1d7f35e
3
+ size 19443127
wandb/run-20241031_122006-f2ep45tp/files/config.yaml ADDED
@@ -0,0 +1,49 @@
1
+ _wandb:
2
+ value:
3
+ cli_version: 0.18.5
4
+ m: []
5
+ python_version: 3.9.19
6
+ t:
7
+ "1":
8
+ - 1
9
+ - 5
10
+ - 11
11
+ - 49
12
+ - 51
13
+ - 53
14
+ - 55
15
+ - 71
16
+ - 98
17
+ "2":
18
+ - 1
19
+ - 5
20
+ - 11
21
+ - 49
22
+ - 51
23
+ - 53
24
+ - 55
25
+ - 71
26
+ - 98
27
+ "3":
28
+ - 13
29
+ - 23
30
+ - 55
31
+ "4": 3.9.19
32
+ "5": 0.18.5
33
+ "6": 4.45.1
34
+ "8":
35
+ - 5
36
+ "12": 0.18.5
37
+ "13": linux-x86_64
38
+ batch_size:
39
+ value: 3
40
+ epoch:
41
+ value: 6
42
+ lr:
43
+ value: 5e-06
44
+ perturbation:
45
+ value: reverse_full
46
+ seed:
47
+ value: 0
48
+ train_set:
49
+ value: 10M
wandb/run-20241031_122006-f2ep45tp/files/output.log ADDED
@@ -0,0 +1,60 @@
1
+ Downloading shards: 0%| | 0/2 [00:22<?, ?it/s]
2
+ Exception ignored in: <generator object tqdm.__iter__ at 0x7fbb506e9d60>
3
+ Traceback (most recent call last):
4
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/tqdm/std.py", line 1196, in __iter__
5
+ self.close()
6
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/tqdm/std.py", line 1303, in close
7
+ fp_write('\n')
8
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/tqdm/std.py", line 1287, in fp_write
9
+ self.fp.write(str(s))
10
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/tqdm/utils.py", line 196, in inner
11
+ return func(*args, **kwargs)
12
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/lib/redirect.py", line 648, in write
13
+ cb(data)
14
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 2386, in <lambda>
15
+ lambda data: self._console_raw_callback("stderr", data),
16
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 403, in wrapper_fn
17
+ return func(self, *args, **kwargs)
18
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/wandb_run.py", line 1547, in _console_raw_callback
19
+ self._backend.interface.publish_output_raw(name, data)
20
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/interface/interface.py", line 721, in publish_output_raw
21
+ self._publish_output_raw(o)
22
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/interface/interface_shared.py", line 79, in _publish_output_raw
23
+ self._publish(rec)
24
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/interface/interface_sock.py", line 51, in _publish
25
+ self._sock_client.send_record_publish(record)
26
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/lib/sock_client.py", line 225, in send_record_publish
27
+ self.send_server_request(server_req)
28
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/lib/sock_client.py", line 157, in send_server_request
29
+ self._send_message(msg)
30
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/lib/sock_client.py", line 154, in _send_message
31
+ self._sendall_with_error_handle(header + data)
32
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/wandb/sdk/lib/sock_client.py", line 132, in _sendall_with_error_handle
33
+ sent = self._sock.send(data)
34
+ KeyboardInterrupt:
35
+ Traceback (most recent call last):
36
+ File "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py", line 173, in <module>
37
+ model = AutoModelForCausalLM.from_pretrained(model_name,
38
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
39
+ return model_class.from_pretrained(
40
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/modeling_utils.py", line 3769, in from_pretrained
41
+ resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
42
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/utils/hub.py", line 1098, in get_checkpoint_shard_files
43
+ cached_filename = cached_file(
44
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/utils/hub.py", line 403, in cached_file
45
+ resolved_file = hf_hub_download(
46
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/huggingface_hub/utils/_deprecation.py", line 101, in inner_f
47
+ return f(*args, **kwargs)
48
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
49
+ return fn(*args, **kwargs)
50
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1232, in hf_hub_download
51
+ return _hf_hub_download_to_cache_dir(
52
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/huggingface_hub/file_download.py", line 1380, in _hf_hub_download_to_cache_dir
53
+ with WeakFileLock(lock_path):
54
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/contextlib.py", line 119, in __enter__
55
+ return next(self.gen)
56
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/huggingface_hub/utils/_fixes.py", line 98, in WeakFileLock
57
+ lock.acquire()
58
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/filelock/_api.py", line 225, in acquire
59
+ time.sleep(poll_interval)
60
+ KeyboardInterrupt
wandb/run-20241031_122006-f2ep45tp/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241031_122006-f2ep45tp/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-10-31T16:20:06.045293Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_full",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "6",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1753159847936"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241031_122006-f2ep45tp/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb":{"runtime":23}}
wandb/run-20241031_122006-f2ep45tp/logs/debug-internal.log ADDED
@@ -0,0 +1,11 @@
1
+ {"time":"2024-10-31T12:20:06.047606029-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-10-31T12:20:06.047621499-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_122006-f2ep45tp/logs/debug-core.log"}
3
+ {"time":"2024-10-31T12:20:06.154938982-04:00","level":"INFO","msg":"created new stream","id":"f2ep45tp"}
4
+ {"time":"2024-10-31T12:20:06.155004252-04:00","level":"INFO","msg":"stream: started","id":"f2ep45tp"}
5
+ {"time":"2024-10-31T12:20:06.155045152-04:00","level":"INFO","msg":"sender: started","stream_id":"f2ep45tp"}
6
+ {"time":"2024-10-31T12:20:06.155033412-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"f2ep45tp"}}
7
+ {"time":"2024-10-31T12:20:06.155042312-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"f2ep45tp"}}
8
+ {"time":"2024-10-31T12:20:06.357525131-04:00","level":"INFO","msg":"Starting system monitor"}
9
+ {"time":"2024-10-31T12:20:29.350768191-04:00","level":"INFO","msg":"stream: closing","id":"f2ep45tp"}
10
+ {"time":"2024-10-31T12:20:29.350824761-04:00","level":"INFO","msg":"Stopping system monitor"}
11
+ {"time":"2024-10-31T12:20:29.351697402-04:00","level":"INFO","msg":"Stopped system monitor"}
wandb/run-20241031_122006-f2ep45tp/logs/debug.log ADDED
@@ -0,0 +1,27 @@
1
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Configure stats pid to 557182
3
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_122006-f2ep45tp/logs/debug.log
10
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241031_122006-f2ep45tp/logs/debug-internal.log
11
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:init():621] calling init triggers
12
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:init():671] starting backend
15
+ 2024-10-31 12:20:06,042 INFO MainThread:557182 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-10-31 12:20:06,044 INFO MainThread:557182 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-10-31 12:20:06,045 INFO MainThread:557182 [wandb_init.py:init():688] backend started and connected
18
+ 2024-10-31 12:20:06,049 INFO MainThread:557182 [wandb_init.py:init():783] updated telemetry
19
+ 2024-10-31 12:20:06,079 INFO MainThread:557182 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-10-31 12:20:06,353 INFO MainThread:557182 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-10-31 12:20:06,461 INFO MainThread:557182 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-10-31 12:20:06,461 INFO MainThread:557182 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-10-31 12:20:06,461 INFO MainThread:557182 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-10-31 12:20:06,461 INFO MainThread:557182 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-10-31 12:20:06,463 INFO MainThread:557182 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-10-31 12:20:06,463 INFO MainThread:557182 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_full', 'train_set': '10M', 'batch_size': 3, 'epoch': 6, 'seed': 0, 'lr': 5e-06}
27
+ 2024-10-31 12:20:29,350 WARNING MsgRouterThr:557182 [router.py:message_loop():77] message_loop has been closed
wandb/run-20241031_122006-f2ep45tp/run-f2ep45tp.wandb ADDED
File without changes
wandb/run-20241031_122114-2k9672ya/run-2k9672ya.wandb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:861609c20aeebe97837b894194bce5f8603f3e17a86171d54909acd17db795e9
3
+ size 19251238
wandb/run-20241101_093116-8434p043/files/output.log ADDED
@@ -0,0 +1,16 @@
1
+ Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:18<00:00, 9.05s/it]
2
+ tokenized_valid: Dataset({
3
+ features: ['input_ids', 'attention_mask'],
4
+ num_rows: 600
5
+ })
6
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
7
+ warnings.warn(
8
+ [2024-11-01 09:31:36,808] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
9
+ [2024-11-01 09:31:44,772] [INFO] [comm.py:652:init_distributed] cdb=None
10
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
11
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
12
+ Emitting ninja build file /home/chunhui/.cache/torch_extensions/py39_cu117/cpu_adam/build.ninja...
13
+ Building extension module cpu_adam...
14
+ Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
15
+ Loading extension module cpu_adam...
16
+ Time to load cpu_adam op: 4.949513912200928 seconds
wandb/run-20241101_093116-8434p043/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_093116-8434p043/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-01T13:31:16.528969Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754716262400"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_093116-8434p043/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
1
+ {"time":"2024-11-01T09:31:16.530787979-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
2
+ {"time":"2024-11-01T09:31:16.530798049-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-8434p043/logs/debug-core.log"}
3
+ {"time":"2024-11-01T09:31:16.637062272-04:00","level":"INFO","msg":"created new stream","id":"8434p043"}
4
+ {"time":"2024-11-01T09:31:16.637096822-04:00","level":"INFO","msg":"stream: started","id":"8434p043"}
5
+ {"time":"2024-11-01T09:31:16.637119892-04:00","level":"INFO","msg":"sender: started","stream_id":"8434p043"}
6
+ {"time":"2024-11-01T09:31:16.637111572-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"8434p043"}}
7
+ {"time":"2024-11-01T09:31:16.637139322-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"8434p043"}}
8
+ {"time":"2024-11-01T09:31:16.85617314-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_093116-8434p043/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Configure stats pid to 781948
3
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-8434p043/logs/debug.log
10
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_093116-8434p043/logs/debug-internal.log
11
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 09:31:16,527 INFO MainThread:781948 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 09:31:16,528 INFO MainThread:781948 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 09:31:16,528 INFO MainThread:781948 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 09:31:16,531 INFO MainThread:781948 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 09:31:16,553 INFO MainThread:781948 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 09:31:16,853 INFO MainThread:781948 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 09:31:16,946 INFO MainThread:781948 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 09:31:16,946 INFO MainThread:781948 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 09:31:16,946 INFO MainThread:781948 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 09:31:16,946 INFO MainThread:781948 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 09:31:16,947 INFO MainThread:781948 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 09:31:16,948 INFO MainThread:781948 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'reverse_control', 'train_set': '10M', 'batch_size': 3, 'epoch': 7, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_093116-8434p043/run-8434p043.wandb ADDED
Binary file (32.8 kB). View file
wandb/run-20241101_094656-v2rxhny6/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-01T13:46:56.279635Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "reverse_control",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "7",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754695659520"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_094656-v2rxhny6/run-v2rxhny6.wandb ADDED
File without changes
wandb/run-20241101_200535-6xsf0vem/files/output.log ADDED
@@ -0,0 +1,21 @@
1
+ Downloading shards: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [02:25<00:00, 72.90s/it]
2
+ Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.67s/it]
3
+ tokenized_valid: Dataset({
4
+ features: ['input_ids', 'attention_mask'],
5
+ num_rows: 600
6
+ })
7
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
8
+ warnings.warn(
9
+ [2024-11-01 20:08:09,443] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
10
+ [2024-11-01 20:08:19,298] [INFO] [comm.py:652:init_distributed] cdb=None
11
+ [2024-11-01 20:08:19,298] [INFO] [comm.py:683:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
12
+ Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
13
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
14
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
15
+ Emitting ninja build file /home/chunhui/.cache/torch_extensions/py39_cu117/cpu_adam/build.ninja...
16
+ Building extension module cpu_adam...
17
+ Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
18
+ Loading extension module cpu_adam...
19
+ Time to load cpu_adam op: 5.526603698730469 seconds
20
+ wandb: WARNING The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.
21
+ 0%| | 0/2739 [00:00<?, ?it/s]
wandb/run-20241101_200535-6xsf0vem/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_200535-6xsf0vem/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-02T00:05:35.688297Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "shuffle_nondeterministic",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "3",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754801680384"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_200535-6xsf0vem/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
+ {"time":"2024-11-01T20:05:35.691088246-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-11-01T20:05:35.691098776-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-6xsf0vem/logs/debug-core.log"}
+ {"time":"2024-11-01T20:05:35.798275333-04:00","level":"INFO","msg":"created new stream","id":"6xsf0vem"}
+ {"time":"2024-11-01T20:05:35.798308073-04:00","level":"INFO","msg":"stream: started","id":"6xsf0vem"}
+ {"time":"2024-11-01T20:05:35.798366184-04:00","level":"INFO","msg":"sender: started","stream_id":"6xsf0vem"}
+ {"time":"2024-11-01T20:05:35.798363344-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"6xsf0vem"}}
+ {"time":"2024-11-01T20:05:35.798372334-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"6xsf0vem"}}
+ {"time":"2024-11-01T20:05:35.978338742-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_200535-6xsf0vem/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-11-01 20:05:35,685 INFO MainThread:871224 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Configure stats pid to 871224
3
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-6xsf0vem/logs/debug.log
10
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-6xsf0vem/logs/debug-internal.log
11
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 20:05:35,686 INFO MainThread:871224 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 20:05:35,687 INFO MainThread:871224 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 20:05:35,688 INFO MainThread:871224 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 20:05:35,693 INFO MainThread:871224 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 20:05:35,715 INFO MainThread:871224 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 20:05:35,975 INFO MainThread:871224 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 20:05:36,060 INFO MainThread:871224 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 20:05:36,060 INFO MainThread:871224 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 20:05:36,060 INFO MainThread:871224 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 20:05:36,060 INFO MainThread:871224 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 20:05:36,062 INFO MainThread:871224 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 20:05:36,062 INFO MainThread:871224 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_nondeterministic', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0, 'lr': 5e-06}
27
+ 2024-11-01 20:08:48,834 INFO MainThread:871224 [wandb_run.py:_config_callback():1390] config_cb None None {'vocab_size': 128256, 'max_position_embeddings': 131072, 'hidden_size': 3072, 'intermediate_size': 8192, 'num_hidden_layers': 28, 'num_attention_heads': 24, 'num_key_value_heads': 8, 'hidden_act': 'silu', 'initializer_range': 0.02, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': True, 'rope_theta': 500000.0, 'rope_scaling': {'factor': 32.0, 'high_freq_factor': 4.0, 'low_freq_factor': 1.0, 'original_max_position_embeddings': 8192, 'rope_type': 'llama3'}, 'attention_bias': False, 'attention_dropout': 0.0, 'mlp_bias': False, 'head_dim': 128, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 128000, 'pad_token_id': None, 'eos_token_id': 128001, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'meta-llama/Llama-3.2-3B', 'transformers_version': '4.45.1', 'model_type': 'llama', 'output_dir': './checkpoints/Llama-3.2-3B/babylm_shuffle_nondeterministic_10M_seed0/runs', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 3, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-06, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 3, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': './logs', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 150, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 0, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 
'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 10, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': './checkpoints/Llama-3.2-3B/babylm_shuffle_nondeterministic_10M_seed0/runs', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': 'deepspeed_config/train_dp_config.json', 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': 'steps', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False}
28
+ 2024-11-01 20:08:48,836 INFO MainThread:871224 [wandb_config.py:__setitem__():154] config set model/num_parameters = 3212749824 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7fe8fc302d90>>
29
+ 2024-11-01 20:08:48,837 INFO MainThread:871224 [wandb_run.py:_config_callback():1390] config_cb model/num_parameters 3212749824 None
wandb/run-20241101_200535-hnfjoqai/files/output.log ADDED
@@ -0,0 +1,14 @@
1
+ Downloading shards: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [02:25<00:00, 72.82s/it]
2
+ Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.68s/it]
3
+ tokenized_valid: Dataset({
4
+ features: ['input_ids', 'attention_mask'],
5
+ num_rows: 600
6
+ })
7
+ /mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/transformers/training_args.py:1545: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead
8
+ warnings.warn(
9
+ [2024-11-01 20:08:09,442] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
10
+ [2024-11-01 20:08:19,404] [INFO] [comm.py:652:init_distributed] cdb=None
11
+ Installed CUDA version 11.8 does not match the version torch was compiled with 11.7 but since the APIs are compatible, accepting this combination
12
+ Using /home/chunhui/.cache/torch_extensions/py39_cu117 as PyTorch extensions root...
13
+ Loading extension module cpu_adam...
14
+ Time to load cpu_adam op: 5.504069089889526 seconds
wandb/run-20241101_200535-hnfjoqai/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_200535-hnfjoqai/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-02T00:05:35.852878Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "shuffle_nondeterministic",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "3",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754801680384"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_200535-hnfjoqai/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
+ {"time":"2024-11-01T20:05:35.855602934-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-11-01T20:05:35.855625184-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-hnfjoqai/logs/debug-core.log"}
+ {"time":"2024-11-01T20:05:35.961440222-04:00","level":"INFO","msg":"created new stream","id":"hnfjoqai"}
+ {"time":"2024-11-01T20:05:35.961471872-04:00","level":"INFO","msg":"stream: started","id":"hnfjoqai"}
+ {"time":"2024-11-01T20:05:35.961498182-04:00","level":"INFO","msg":"sender: started","stream_id":"hnfjoqai"}
+ {"time":"2024-11-01T20:05:35.961486682-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"hnfjoqai"}}
+ {"time":"2024-11-01T20:05:35.961499202-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"hnfjoqai"}}
+ {"time":"2024-11-01T20:05:36.182162772-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_200535-hnfjoqai/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 20:05:35,849 INFO MainThread:871227 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 20:05:35,849 INFO MainThread:871227 [wandb_setup.py:_flush():79] Configure stats pid to 871227
3
+ 2024-11-01 20:05:35,849 INFO MainThread:871227 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 20:05:35,849 INFO MainThread:871227 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-hnfjoqai/logs/debug.log
10
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_200535-hnfjoqai/logs/debug-internal.log
11
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 20:05:35,850 INFO MainThread:871227 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 20:05:35,852 INFO MainThread:871227 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 20:05:35,852 INFO MainThread:871227 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 20:05:35,855 INFO MainThread:871227 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 20:05:35,877 INFO MainThread:871227 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 20:05:36,179 INFO MainThread:871227 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 20:05:36,268 INFO MainThread:871227 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 20:05:36,268 INFO MainThread:871227 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 20:05:36,268 INFO MainThread:871227 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 20:05:36,268 INFO MainThread:871227 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 20:05:36,269 INFO MainThread:871227 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 20:05:36,270 INFO MainThread:871227 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_nondeterministic', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_201910-hnwfqg73/files/output.log ADDED
@@ -0,0 +1 @@
+ Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]
wandb/run-20241101_201910-hnwfqg73/files/requirements.txt ADDED
@@ -0,0 +1,147 @@
1
+ funcsigs==1.0.2
2
+ sentry-sdk==2.17.0
3
+ multiprocess==0.70.16
4
+ numpy==1.26.2
5
+ pluralizer==1.2.0
6
+ debugpy==1.6.7
7
+ nvidia-cudnn-cu11==8.5.0.96
8
+ deepspeed==0.15.2
9
+ data==0.4
10
+ pandas==2.1.3
11
+ tomli==2.0.1
12
+ charset-normalizer==3.3.2
13
+ attrs==24.2.0
14
+ aiosignal==1.3.1
15
+ fsspec==2023.10.0
16
+ nvidia-cusparse-cu11==11.7.4.91
17
+ zipp==3.12.0
18
+ mypy-extensions==1.0.0
19
+ datasets==3.0.1
20
+ joblib==1.3.2
21
+ hjson==3.1.0
22
+ traitlets==5.7.1
23
+ stack-data==0.6.0
24
+ transformers==4.45.1
25
+ sympy==1.11.1
26
+ Pygments==2.15.0
27
+ docker-pycreds==0.4.0
28
+ dill==0.3.8
29
+ wheel==0.44.0
30
+ prompt-toolkit==3.0.30
31
+ parso==0.8.3
32
+ ipykernel==6.23.1
33
+ pyarrow==17.0.0
34
+ certifi==2023.11.17
35
+ nvidia-cufft-cu11==10.9.0.58
36
+ six==1.16.0
37
+ pydantic==2.9.2
38
+ click==8.1.7
39
+ nest-asyncio==1.5.6
40
+ gmpy2==2.1.0
41
+ matplotlib==3.8.2
42
+ scipy==1.11.4
43
+ typing_extensions==4.12.2
44
+ statsmodels==0.14.0
45
+ huggingface-hub==0.25.0
46
+ frozenlist==1.4.1
47
+ gpustat==1.1.1
48
+ nvidia-nvtx-cu11==11.7.91
49
+ safetensors==0.4.5
50
+ stanza==1.9.2
51
+ decorator==5.1.1
52
+ seaborn==0.13.0
53
+ sentencepiece==0.2.0
54
+ PyYAML==6.0.1
55
+ black==24.8.0
56
+ protobuf==4.25.1
57
+ pickleshare==0.7.5
58
+ peft==0.13.0
59
+ triton==2.0.0
60
+ nvidia-cuda-runtime-cu11==11.7.99
61
+ Jinja2==3.1.2
62
+ nvidia-cusolver-cu11==11.4.0.1
63
+ executing==1.2.0
64
+ jupyter_client==8.1.0
65
+ pluggy==1.3.0
66
+ cmake==3.30.3
67
+ pytz==2023.3.post1
68
+ aiohappyeyeballs==2.4.2
69
+ kiwisolver==1.4.5
70
+ py-cpuinfo==9.0.0
71
+ Pillow==10.1.0
72
+ ptyprocess==0.7.0
73
+ importlib_resources==6.4.5
74
+ GitPython==3.1.43
75
+ importlib-metadata==6.0.0
76
+ iniconfig==2.0.0
77
+ scikit-learn==1.3.2
78
+ exceptiongroup==1.1.0
79
+ networkx==2.8.6
80
+ accelerate==1.0.0
81
+ nltk==3.8.1
82
+ shutilwhich==1.1.0
83
+ fonttools==4.45.1
84
+ future==0.18.3
85
+ aiohttp==3.10.6
86
+ wcwidth==0.2.5
87
+ idna==3.6
88
+ filelock==3.12.2
89
+ pathspec==0.12.1
90
+ jupyter_core==5.1.0
91
+ lit==18.1.8
92
+ nvidia-curand-cu11==10.2.10.91
93
+ nvidia-cublas-cu11==11.10.3.66
94
+ nvidia-ml-py==12.560.30
95
+ msgpack==1.1.0
96
+ python-dateutil==2.8.2
97
+ blessed==1.20.0
98
+ packaging==23.0
99
+ gitdb==4.0.11
100
+ yarl==1.13.0
101
+ emoji==2.8.0
102
+ tzdata==2023.3
103
+ cycler==0.12.1
104
+ tornado==6.2
105
+ backcall==0.2.0
106
+ plotnine==0.12.4
107
+ ninja==1.11.1.1
108
+ latex==0.7.0
109
+ wandb==0.18.5
110
+ setproctitle==1.3.3
111
+ threadpoolctl==3.2.0
112
+ requests==2.32.3
113
+ pyparsing==3.1.1
114
+ smmap==5.0.1
115
+ pyzmq==23.0.0
116
+ async-timeout==4.0.3
117
+ annotated-types==0.7.0
118
+ matplotlib-inline==0.1.6
119
+ latexcodec==1.0.0
120
+ ipython==8.0.0
121
+ patsy==0.5.3
122
+ contourpy==1.2.0
123
+ multidict==6.1.0
124
+ mizani==0.9.3
125
+ urllib3==2.1.0
126
+ tokenizers==0.20.0
127
+ MarkupSafe==2.1.2
128
+ pip==24.2
129
+ pexpect==4.8.0
130
+ tqdm==4.66.5
131
+ jedi==0.18.2
132
+ pydantic_core==2.23.4
133
+ tempdir==0.7.1
134
+ mpmath==1.2.1
135
+ setuptools==72.1.0
136
+ pytest==7.4.3
137
+ pure-eval==0.2.2
138
+ psutil==5.9.1
139
+ comm==0.1.2
140
+ nvidia-cuda-cupti-cu11==11.7.101
141
+ nvidia-cuda-nvrtc-cu11==11.7.99
142
+ regex==2023.10.3
143
+ platformdirs==2.5.2
144
+ asttokens==2.2.1
145
+ torch==2.0.0
146
+ nvidia-nccl-cu11==2.14.3
147
+ xxhash==3.5.0
wandb/run-20241101_201910-hnwfqg73/files/wandb-metadata.json ADDED
@@ -0,0 +1,97 @@
1
+ {
2
+ "os": "Linux-5.4.0-162-generic-x86_64-with-glibc2.31",
3
+ "python": "3.9.19",
4
+ "startedAt": "2024-11-02T00:19:10.340985Z",
5
+ "args": [
6
+ "--perturbation",
7
+ "shuffle_nondeterministic",
8
+ "--train_set",
9
+ "10M",
10
+ "--batch_size",
11
+ "3",
12
+ "--epoch",
13
+ "3",
14
+ "--seed",
15
+ "0"
16
+ ],
17
+ "program": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py",
18
+ "codePath": "train/train_deep_wandb.py",
19
+ "git": {
20
+ "remote": "git@hf.co:Yaning1001/Impossible_llm.git",
21
+ "commit": "ed716cdcfcdea02b67f7ed0f3504c2b1c8b737c4"
22
+ },
23
+ "email": "yaning1001@gmail.com",
24
+ "root": "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train",
25
+ "host": "mms-large-2",
26
+ "username": "chunhui",
27
+ "executable": "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/bin/python",
28
+ "codePathLocal": "train_deep_wandb.py",
29
+ "cpu_count": 32,
30
+ "cpu_count_logical": 64,
31
+ "gpu": "NVIDIA RTX A6000",
32
+ "gpu_count": 8,
33
+ "disk": {
34
+ "/": {
35
+ "total": "1888559353856",
36
+ "used": "1754803580928"
37
+ }
38
+ },
39
+ "memory": {
40
+ "total": "202617098240"
41
+ },
42
+ "cpu": {
43
+ "count": 32,
44
+ "countLogical": 64
45
+ },
46
+ "gpu_nvidia": [
47
+ {
48
+ "name": "NVIDIA RTX A6000",
49
+ "memoryTotal": "51527024640",
50
+ "cudaCores": 10752,
51
+ "architecture": "Ampere"
52
+ },
53
+ {
54
+ "name": "NVIDIA RTX A6000",
55
+ "memoryTotal": "51527024640",
56
+ "cudaCores": 10752,
57
+ "architecture": "Ampere"
58
+ },
59
+ {
60
+ "name": "NVIDIA RTX A6000",
61
+ "memoryTotal": "51527024640",
62
+ "cudaCores": 10752,
63
+ "architecture": "Ampere"
64
+ },
65
+ {
66
+ "name": "NVIDIA RTX A6000",
67
+ "memoryTotal": "51527024640",
68
+ "cudaCores": 10752,
69
+ "architecture": "Ampere"
70
+ },
71
+ {
72
+ "name": "NVIDIA RTX A6000",
73
+ "memoryTotal": "51527024640",
74
+ "cudaCores": 10752,
75
+ "architecture": "Ampere"
76
+ },
77
+ {
78
+ "name": "NVIDIA RTX A6000",
79
+ "memoryTotal": "51527024640",
80
+ "cudaCores": 10752,
81
+ "architecture": "Ampere"
82
+ },
83
+ {
84
+ "name": "NVIDIA RTX A6000",
85
+ "memoryTotal": "51527024640",
86
+ "cudaCores": 10752,
87
+ "architecture": "Ampere"
88
+ },
89
+ {
90
+ "name": "NVIDIA RTX A6000",
91
+ "memoryTotal": "51527024640",
92
+ "cudaCores": 10752,
93
+ "architecture": "Ampere"
94
+ }
95
+ ],
96
+ "cudaVersion": "11.8"
97
+ }
wandb/run-20241101_201910-hnwfqg73/logs/debug-internal.log ADDED
@@ -0,0 +1,8 @@
+ {"time":"2024-11-01T20:19:10.34296008-04:00","level":"INFO","msg":"using version","core version":"0.18.5"}
+ {"time":"2024-11-01T20:19:10.34297059-04:00","level":"INFO","msg":"created symlink","path":"/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_201910-hnwfqg73/logs/debug-core.log"}
+ {"time":"2024-11-01T20:19:10.450869411-04:00","level":"INFO","msg":"created new stream","id":"hnwfqg73"}
+ {"time":"2024-11-01T20:19:10.450970742-04:00","level":"INFO","msg":"stream: started","id":"hnwfqg73"}
+ {"time":"2024-11-01T20:19:10.451217523-04:00","level":"INFO","msg":"sender: started","stream_id":"hnwfqg73"}
+ {"time":"2024-11-01T20:19:10.451185173-04:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"hnwfqg73"}}
+ {"time":"2024-11-01T20:19:10.451200543-04:00","level":"INFO","msg":"handler: started","stream_id":{"value":"hnwfqg73"}}
+ {"time":"2024-11-01T20:19:10.708980733-04:00","level":"INFO","msg":"Starting system monitor"}
wandb/run-20241101_201910-hnwfqg73/logs/debug.log ADDED
@@ -0,0 +1,26 @@
1
+ 2024-11-01 20:19:10,337 INFO MainThread:877653 [wandb_setup.py:_flush():79] Current SDK version is 0.18.5
2
+ 2024-11-01 20:19:10,337 INFO MainThread:877653 [wandb_setup.py:_flush():79] Configure stats pid to 877653
3
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Loading settings from /home/chunhui/.config/wandb/settings
4
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Loading settings from /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/settings
5
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': None, '_disable_service': None}
7
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train/train_deep_wandb.py', 'program_abspath': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py', 'program': '/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py'}
8
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_setup.py:_flush():79] Applying login settings: {}
9
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:_log_setup():534] Logging user logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_201910-hnwfqg73/logs/debug.log
10
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:_log_setup():535] Logging internal logs to /mnt/ssd3/chunhui/yaning/project/impossible_llm/train/wandb/run-20241101_201910-hnwfqg73/logs/debug-internal.log
11
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:init():621] calling init triggers
12
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:init():628] wandb.init called with sweep_config: {}
13
+ config: {}
14
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:init():671] starting backend
15
+ 2024-11-01 20:19:10,338 INFO MainThread:877653 [wandb_init.py:init():675] sending inform_init request
16
+ 2024-11-01 20:19:10,340 INFO MainThread:877653 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
+ 2024-11-01 20:19:10,340 INFO MainThread:877653 [wandb_init.py:init():688] backend started and connected
18
+ 2024-11-01 20:19:10,344 INFO MainThread:877653 [wandb_init.py:init():783] updated telemetry
19
+ 2024-11-01 20:19:10,368 INFO MainThread:877653 [wandb_init.py:init():816] communicating run to backend with 90.0 second timeout
20
+ 2024-11-01 20:19:10,706 INFO MainThread:877653 [wandb_init.py:init():867] starting run threads in backend
21
+ 2024-11-01 20:19:10,792 INFO MainThread:877653 [wandb_run.py:_console_start():2463] atexit reg
22
+ 2024-11-01 20:19:10,792 INFO MainThread:877653 [wandb_run.py:_redirect():2311] redirect: wrap_raw
23
+ 2024-11-01 20:19:10,792 INFO MainThread:877653 [wandb_run.py:_redirect():2376] Wrapping output streams.
24
+ 2024-11-01 20:19:10,792 INFO MainThread:877653 [wandb_run.py:_redirect():2401] Redirects installed.
25
+ 2024-11-01 20:19:10,793 INFO MainThread:877653 [wandb_init.py:init():911] run started, returning control to user process
26
+ 2024-11-01 20:19:10,794 INFO MainThread:877653 [wandb_run.py:_config_callback():1390] config_cb None None {'perturbation': 'shuffle_nondeterministic', 'train_set': '10M', 'batch_size': 3, 'epoch': 3, 'seed': 0, 'lr': 5e-06}
wandb/run-20241101_201910-hnwfqg73/run-hnwfqg73.wandb ADDED
File without changes
wandb/run-20241105_160217-21j8oh7z/files/config.yaml ADDED
@@ -0,0 +1,49 @@
+ _wandb:
+ value:
+ cli_version: 0.18.5
+ m: []
+ python_version: 3.9.19
+ t:
+ "1":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "2":
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ "3":
+ - 13
+ - 23
+ - 55
+ "4": 3.9.19
+ "5": 0.18.5
+ "6": 4.45.1
+ "8":
+ - 5
+ "12": 0.18.5
+ "13": linux-x86_64
+ batch_size:
+ value: 3
+ epoch:
+ value: 3
+ lr:
+ value: 5e-06
+ perturbation:
+ value: shuffle_deterministic21
+ seed:
+ value: 0
+ train_set:
+ value: 10M
wandb/run-20241105_160217-21j8oh7z/files/output.log ADDED
@@ -0,0 +1,19 @@
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ FileNotFoundError: [Errno 2] No such file or directory: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0/0.0.0'
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/mnt/ssd3/chunhui/yaning/project/impossible_llm/train/train_deep_wandb.py", line 165, in <module>
+ dataset = load_dataset('babylm_dataset_test.py', name=dataset_name, trust_remote_code=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/load.py", line 2096, in load_dataset
+ builder_instance.download_and_prepare(
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/site-packages/datasets/builder.py", line 855, in download_and_prepare
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1327, in mkdir
+ self.parent.mkdir(parents=True, exist_ok=True)
+ File "/mnt/ssd3/chunhui/miniconda/envs/impossible_llm/lib/python3.9/pathlib.py", line 1323, in mkdir
+ self._accessor.mkdir(self, mode)
+ OSError: [Errno 28] No space left on device: '/home/chunhui/.cache/huggingface/datasets/babylm_dataset_test/babylm_shuffle_deterministic21_10M_seed0'