diff --git "a/debug.log" "b/debug.log" new file mode 100644--- /dev/null +++ "b/debug.log" @@ -0,0 +1,1713 @@ +[2026-01-25 12:18:17,730] [DEBUG] [axolotl.utils.config.resolve_dtype:66] [PID:443] bf16 support detected, enabling for this configuration. +[2026-01-25 12:18:17,732] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:443] baseline 0.000GB () +[2026-01-25 12:18:17,733] [INFO] [axolotl.cli.config.load_cfg:256] [PID:443] config: +{ + "activation_offloading": true, + "adam_beta1": 0.9, + "adam_beta2": 0.95, + "axolotl_config_path": "/weka/oe-adapt-default/ethans/datagen/datagen/train/expt_yamls/axolotl/Qwen3-8B-r0.945_16000_stage2_scaling_final_glm45a_e2e_3ipf_resolved_soft_t0_ipf_1.yaml", + "base_model": "/weka/oe-adapt-default/ethans/llm-weights/Qwen3-8B", + "base_model_config": "/weka/oe-adapt-default/ethans/llm-weights/Qwen3-8B", + "batch_size": 32, + "bf16": true, + "capabilities": { + "bf16": true, + "compute_capability": "sm_90", + "fp8": false, + "n_gpu": 8, + "n_node": 1 + }, + "chat_template": "chatml", + "context_parallel_size": 1, + "cut_cross_entropy": true, + "dataloader_num_workers": 8, + "dataloader_pin_memory": true, + "dataloader_prefetch_factor": 256, + "dataset_num_proc": 192, + "dataset_prepared_path": "dataset_cache", + "datasets": [ + { + "chat_template": "tokenizer_default", + "ds_type": "json", + "field_messages": "messages", + "message_field_training": "train", + "message_property_mappings": { + "content": "content", + "role": "role" + }, + "path": "/weka/oe-adapt-default/ethans/tmp/copy_datasets/r0.945_16000_stage2_scaling_final_glm45a_e2e_3ipf_resolved_soft_t0_ipf_1_atk_rft-think_SYSTEM_SIMPLE_7971e8f.jsonl", + "trust_remote_code": false, + "type": "chat_template" + } + ], + "ddp": true, + "deepspeed": { + "bf16": { + "enabled": "auto" + }, + "fp16": { + "auto_cast": false, + "enabled": "auto", + "hysteresis": 2, + "initial_scale_power": 32, + "loss_scale": 0, + "loss_scale_window": 1000, + "min_loss_scale": 1 + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false, + "zero_optimization": { + "overlap_comm": true, + "stage": 1 + } + }, + "device": "cuda:0", + "device_map": { + "": 0 + }, + "dion_rank_fraction": 1.0, + "dion_rank_multiple_of": 1, + "env_capabilities": { + "torch_version": "2.6.0" + }, + "eval_batch_size": 1, + "eval_causal_lm_metrics": [ + "sacrebleu", + "comet", + "ter", + "chrf" + ], + "eval_max_new_tokens": 128, + "eval_table_size": 0, + "evals_per_epoch": 0, + "experimental_skip_move_to_device": true, + "flash_attention": true, + "fp16": false, + "gradient_accumulation_steps": 4, + "gradient_checkpointing": true, + "gradient_checkpointing_kwargs": { + "use_reentrant": true + }, + "include_tkps": true, + "learning_rate": 1e-05, + "lisa_layers_attribute": "model.layers", + "load_best_model_at_end": false, + "load_in_4bit": false, + "load_in_8bit": false, + "local_rank": 0, + "logging_steps": 1, + "lora_dropout": 0.0, + "loraplus_lr_embedding": 1e-06, + "loss_watchdog_patience": 3, + "loss_watchdog_threshold": 5.0, + "lr_scheduler": "cosine", + "mean_resizing_embeddings": false, + "micro_batch_size": 1, + "model_config_type": "qwen3", + "num_epochs": 3.0, + "optimizer": "adamw_torch", + "otel_metrics_host": "localhost", + "otel_metrics_port": 8000, + "output_dir": "/weka/oe-adapt-default/ethans/llm-weights/axolotl/Qwen3-8B-r0.945_16000_stage2_scaling_final_glm45a_e2e_3ipf_resolved_soft_t0_ipf_1", + 
"plugins": [ + "axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin" + ], + "pretrain_multipack_attn": true, + "profiler_steps_start": 0, + "qlora_sharded_model_loading": false, + "ray_num_workers": 1, + "resources_per_worker": { + "GPU": 1 + }, + "sample_packing_bin_size": 200, + "sample_packing_group_size": 100000, + "save_only_model": false, + "save_safetensors": true, + "save_strategy": "epoch", + "sequence_len": 32768, + "shuffle_before_merging_datasets": false, + "shuffle_merged_datasets": true, + "skip_prepare_dataset": false, + "streaming_multipack_buffer_size": 10000, + "strict": false, + "tensor_parallel_size": 1, + "tf32": false, + "tiled_mlp_use_original_mlp": true, + "tokenizer_config": "/weka/oe-adapt-default/ethans/llm-weights/Qwen3-8B", + "tokenizer_save_jinja_files": true, + "torch_dtype": "torch.bfloat16", + "train_on_inputs": false, + "trl": { + "log_completions": false, + "mask_truncated_completions": false, + "ref_model_mixup_alpha": 0.9, + "ref_model_sync_steps": 64, + "scale_rewards": true, + "sync_ref_model": false, + "use_vllm": false, + "vllm_server_host": "0.0.0.0", + "vllm_server_port": 8000 + }, + "use_otel_metrics": false, + "use_ray": false, + "use_wandb": true, + "val_set_size": 0.0, + "vllm": { + "device": "auto", + "dtype": "auto", + "gpu_memory_utilization": 0.9, + "host": "0.0.0.0", + "port": 8000 + }, + "wandb_entity": "allenai-team1", + "wandb_name": "Qwen3-8B-r0.945_16000_stage2_scaling_final_glm45a_e2e_3ipf_resolved_soft_t0_ipf_1", + "wandb_project": "sweagent", + "warmup_ratio": 0.032, + "weight_decay": 0.01, + "world_size": 8 +} +[2026-01-25 12:18:18,125] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:443] EOS: 151645 / <|im_end|> +[2026-01-25 12:18:18,125] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:443] BOS: None / None +[2026-01-25 12:18:18,126] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:443] PAD: 151643 / <|endoftext|> +[2026-01-25 12:18:18,126] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:443] UNK: None / None +[2026-01-25 12:27:05,390] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:475] [PID:443] Loading prepared dataset from disk at dataset_cache/e907e1ff7214ecee46c303e25abf5050... +[2026-01-25 12:27:06,473] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:406] [PID:443] total_num_tokens: 407_251_230 +[2026-01-25 12:27:09,219] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:424] [PID:443] `total_supervised_tokens: 137_262_072` +[2026-01-25 12:27:09,219] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:522] [PID:443] total_num_steps: 1500 +[2026-01-25 12:27:09,219] [INFO] [axolotl.utils.data.sft._prepare_standard_dataset:121] [PID:443] Maximum number of steps set at 1500 +[2026-01-25 12:27:09,247] [DEBUG] [axolotl.train.setup_model_and_tokenizer:70] [PID:443] loading tokenizer... 
/weka/oe-adapt-default/ethans/llm-weights/Qwen3-8B +[2026-01-25 12:27:09,513] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:443] EOS: 151645 / <|im_end|> +[2026-01-25 12:27:09,513] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:443] BOS: None / None +[2026-01-25 12:27:09,513] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:443] PAD: 151643 / <|endoftext|> +[2026-01-25 12:27:09,513] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:443] UNK: None / None +[2026-01-25 12:27:09,514] [DEBUG] [axolotl.train.setup_model_and_tokenizer:82] [PID:443] Loading model +[2026-01-25 12:27:09,522] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:443] Patched Trainer.evaluation_loop with nanmean loss calculation +[2026-01-25 12:27:09,524] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:443] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation +[2026-01-25 12:27:09,644] [INFO] [axolotl.integrations.cut_cross_entropy.pre_model_load:94] [PID:443] Applying Cut Cross Entropy to model type: qwen3 + Loading checkpoint shards: 0%| | 0/5 [00:00