W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803]
W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] *****************************************
W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0127 11:26:50.717000 3370624 site-packages/torch/distributed/run.py:803] *****************************************
2026-01-27 11:26:55,908 - WARNING - chunkwise_backward is incompatible with FSDP; disabling it.
2026-01-27 11:26:55,909 - INFO - ============================================================
2026-01-27 11:26:55,909 - INFO - Qwen3-4B + Titans training (DDP/FSDP)
2026-01-27 11:26:55,909 - INFO - ============================================================
2026-01-27 11:26:55,909 - INFO - distributed=True, world_size=4, use_fsdp=True
2026-01-27 11:26:55,909 - INFO - mode=TRAIN
2026-01-27 11:26:55,909 - INFO - model_path=/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554
2026-01-27 11:26:55,909 - INFO - data_path=/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json
2026-01-27 11:26:55,909 - INFO - output_dir=./outputs/qwen_babilong_no_memory
2026-01-27 11:26:55,909 - INFO - max_samples=2500
2026-01-27 11:26:55,909 - INFO - max_length=32768
2026-01-27 11:26:55,909 - INFO - chunk_size=4096
2026-01-27 11:26:55,909 - INFO - use_memory=False
2026-01-27 11:26:55,909 - INFO - chunkwise_backward=False
2026-01-27 11:26:55,909 - INFO - label_prefix_tokens=0
2026-01-27 11:26:55,909 - INFO - detach_mem_state=True
Loading checkpoint shards:   0%|          | 0/3 [00:00
Traceback (most recent call last):
    sys.exit(main())
             ^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 357, in wrapper
    @wraps(f)
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/run.py", line 936, in main
    if args.module:
    ^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/run.py", line 927, in run
    cmd_args.append(args.training_script)
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 156, in __call__
    # entrypoint is a command and ``script.py`` is the python module.
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 284, in launch_agent
    rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
                 ^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
    raise
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 717, in run
    """Restart (stops, rendezvous, starts) all local workers in the group."""
    ^^^^^^^^^^^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 882, in _invoke_run
    flakiness = 100.0
    ^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
    raise
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py", line 389, in _monitor_workers
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 537, in wait
    "TORCHELASTIC_SIGNALS_TO_HANDLE", "SIGTERM,SIGINT,SIGHUP,SIGQUIT"
    ^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 858, in _poll
    def pids(self) -> dict[int, int]:
    ^^^^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/subprocess.py", line 1236, in poll
    return self._internal_poll()
           ^^^^^^^^^^^^^^^^^^^^^
  File "/root/miniforge/lib/python3.12/subprocess.py", line 2004, in _internal_poll
    self._waitpid_lock.release()
  File "/root/miniforge/lib/python3.12/site-packages/torch/distributed/elastic/multiprocessing/api.py", line 85, in _terminate_process_handler
    sigval = signal.Signals(signum)
             ^^^^^^^^^^^^^^^^^^^^^^
torch.distributed.elastic.multiprocessing.api.SignalException: Process 3370624 got signal: 15
[W128 06:35:14.145474637 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
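
The OMP_NUM_THREADS banner at the top is benign but actionable: torchrun exports OMP_NUM_THREADS=1 for each worker unless the variable was already set in the launching shell. A minimal sketch of a runtime override follows; the cores-divided-by-local-workers heuristic is an assumption on my part, not something the log prescribes:

    import os
    import torch

    # torchrun sets OMP_NUM_THREADS=1 per worker by default. One common
    # heuristic (an assumption, not a recommendation from this log) is to
    # split the machine's cores across the local workers.
    local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE", "1"))
    threads = max(1, (os.cpu_count() or 1) // local_world_size)
    torch.set_num_threads(threads)  # adjusts torch's intra-op thread pool

Alternatively, exporting OMP_NUM_THREADS before invoking torchrun suppresses the warning entirely, since torchrun only applies its default when the variable is unset.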
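
The crash itself is not a bug in the training script: SignalException with signal 15 means the torchrun agent process (PID 3370624) received SIGTERM from outside, typically scheduler preemption, the OOM killer, or a manual kill, and the elastic agent then tore down its workers (note TORCHELASTIC_SIGNALS_TO_HANDLE in the trace). A minimal sketch of a worker-side handler that turns that teardown into a final checkpoint, with the training step stubbed out:

    import signal
    import time

    stop_requested = False

    def _on_sigterm(signum, frame):
        # The elastic agent signals its workers during teardown; record the
        # request and let the loop finish the current step instead of dying
        # mid-backward.
        global stop_requested
        stop_requested = True

    signal.signal(signal.SIGTERM, _on_sigterm)

    for step in range(10_000):
        time.sleep(0.01)  # stand-in for one training step
        if stop_requested:
            print(f"SIGTERM received; checkpointing at step {step} before exit")
            # the real script would torch.save(...) its model/optimizer here
            break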
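
The final allocator warning is a rename, not an error: this PyTorch build treats PYTORCH_CUDA_ALLOC_CONF as a deprecated alias for PYTORCH_ALLOC_CONF. A minimal migration sketch, done before torch is imported so the allocator sees the variable; the expandable_segments value is only an example, keep whatever options the run actually used:

    import os

    # Move any allocator options from the deprecated variable to the new one.
    # "expandable_segments:True" is an example value, not what this run used.
    old = os.environ.pop("PYTORCH_CUDA_ALLOC_CONF", None)
    os.environ["PYTORCH_ALLOC_CONF"] = old or "expandable_segments:True"

    import torch  # import after setting the variable so the allocator reads it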