[2025-10-25 17:49:53,747] [DEBUG] [axolotl.utils.config.resolve_dtype:66] [PID:4001] bf16 support detected, enabling for this configuration.
[2025-10-25 17:49:53,988] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:4001] baseline 0.000GB ()
[2025-10-25 17:49:53,988] [INFO] [axolotl.cli.config.load_cfg:248] [PID:4001] config: {
  "activation_offloading": true,
  "axolotl_config_path": "train.yml",
  "base_model": "Qwen/Qwen3-4B-Instruct-2507",
  "base_model_config": "Qwen/Qwen3-4B-Instruct-2507",
  "batch_size": 4,
  "bf16": true,
  "capabilities": {
    "bf16": true,
    "compute_capability": "sm_86",
    "fp8": false,
    "n_gpu": 1,
    "n_node": 1
  },
  "chat_template": "tokenizer_default",
  "context_parallel_size": 1,
  "cosine_min_lr_ratio": 0.1,
  "dataloader_num_workers": 1,
  "dataloader_pin_memory": true,
  "dataloader_prefetch_factor": 256,
  "dataset_num_proc": 16,
  "dataset_prepared_path": "last_run_prepared",
  "datasets": [
    {
      "chat_template": "tokenizer_default",
      "message_property_mappings": {
        "content": "content",
        "role": "role"
      },
      "path": "WokeAI/polititune-tankie-warmup",
      "split": "train",
      "trust_remote_code": false,
      "type": "chat_template"
    }
  ],
  "ddp": false,
  "device": "cuda:0",
  "dion_rank_fraction": 1.0,
  "dion_rank_multiple_of": 1,
  "env_capabilities": {
    "torch_version": "2.8.0"
  },
  "eval_batch_size": 1,
  "eval_causal_lm_metrics": [
    "sacrebleu",
    "comet",
    "ter",
    "chrf"
  ],
  "eval_max_new_tokens": 128,
  "eval_sample_packing": true,
  "eval_table_size": 0,
  "experimental_skip_move_to_device": true,
  "flash_attention": true,
  "fp16": false,
  "gradient_accumulation_steps": 4,
  "gradient_checkpointing": true,
  "gradient_checkpointing_kwargs": {
    "use_reentrant": true
  },
  "group_by_length": false,
  "include_tkps": true,
  "learning_rate": 1e-05,
  "lisa_layers_attribute": "model.layers",
  "load_best_model_at_end": false,
  "load_in_4bit": false,
  "load_in_8bit": false,
  "local_rank": 0,
  "logging_steps": 1,
  "lora_dropout": 0.0,
  "loraplus_lr_embedding": 1e-06,
  "lr_scheduler": "constant",
  "mean_resizing_embeddings": false,
  "micro_batch_size": 1,
  "model_config_type": "qwen3",
  "num_epochs": 2.0,
  "optimizer": "paged_ademamix_8bit",
  "otel_metrics_host": "localhost",
  "otel_metrics_port": 8000,
  "output_dir": "./model-output",
  "pad_to_sequence_len": true,
  "pretrain_multipack_attn": true,
  "profiler_steps_start": 0,
  "qlora_sharded_model_loading": false,
  "ray_num_workers": 1,
  "resources_per_worker": {
    "GPU": 1
  },
  "sample_packing": true,
  "sample_packing_bin_size": 200,
  "sample_packing_group_size": 100000,
  "save_only_model": true,
  "save_safetensors": true,
  "save_steps": 0.25,
  "saves_per_epoch": 2,
  "sequence_len": 2048,
  "shuffle_before_merging_datasets": false,
  "shuffle_merged_datasets": true,
  "skip_prepare_dataset": false,
  "special_tokens": {
    "eos_token": "<|im_end|>"
  },
  "streaming_multipack_buffer_size": 10000,
  "strict": false,
  "tensor_parallel_size": 1,
  "tiled_mlp_use_original_mlp": true,
  "tokenizer_config": "Qwen/Qwen3-4B-Instruct-2507",
  "tokenizer_save_jinja_files": true,
  "torch_dtype": "torch.bfloat16",
  "train_on_inputs": false,
  "trl": {
    "log_completions": false,
    "mask_truncated_completions": false,
    "ref_model_mixup_alpha": 0.9,
    "ref_model_sync_steps": 64,
    "scale_rewards": true,
    "sync_ref_model": false,
    "use_vllm": false,
    "vllm_server_host": "0.0.0.0",
    "vllm_server_port": 8000
  },
  "trust_remote_code": true,
  "use_otel_metrics": false,
  "use_ray": false,
  "use_wandb": true,
  "val_set_size": 0.0,
  "vllm": {
    "device": "auto",
    "dtype": "auto",
    "gpu_memory_utilization": 0.9,
    "host": "0.0.0.0",
    "port": 8000
  },
  "wandb_project": "polititune-q34b-warmup",
  "warmup_ratio": 0.05,
  "weight_decay": 0.01,
  "world_size": 1
}
[2025-10-25 17:49:53,990] [WARNING] [axolotl.cli.checks.check_user_token:46] [PID:4001] Error verifying HuggingFace token. Remember to log in using `huggingface-cli login` and get your access token from https://huggingface.co/settings/tokens if you want to use gated models or datasets.
[2025-10-25 17:49:54,857] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:278] [PID:4001] EOS: 151645 / <|im_end|>
[2025-10-25 17:49:54,857] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:279] [PID:4001] BOS: None / None
[2025-10-25 17:49:54,857] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:4001] PAD: 151643 / <|endoftext|>
[2025-10-25 17:49:54,857] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:4001] UNK: None / None
[2025-10-25 17:49:54,858] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:475] [PID:4001] Loading prepared dataset from disk at last_run_prepared/a9098d9a4841d51fd558499bade3d148...
[2025-10-25 17:49:54,863] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:406] [PID:4001] total_num_tokens: 88_397
[2025-10-25 17:49:54,864] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:424] [PID:4001] `total_supervised_tokens: 81_792`
[2025-10-25 17:49:54,866] [DEBUG] [axolotl.utils.samplers.multipack.pack_parallel:177] [PID:4001] Using single process for pack_parallel, running sequentially.
[2025-10-25 17:49:55,435] [DEBUG] [axolotl.utils.samplers.multipack.pack_parallel:177] [PID:4001] Using single process for pack_parallel, running sequentially.
[2025-10-25 17:49:55,587] [DEBUG] [axolotl.utils.samplers.multipack.__len__:462] [PID:4001] generate_batches time: 0.151627779006958
[2025-10-25 17:49:55,587] [DEBUG] [axolotl.utils.samplers.multipack.pack_parallel:177] [PID:4001] Using single process for pack_parallel, running sequentially.
[2025-10-25 17:49:55,736] [DEBUG] [axolotl.utils.samplers.multipack.__len__:462] [PID:4001] generate_batches time: 0.14948749542236328
[2025-10-25 17:49:55,737] [DEBUG] [axolotl.utils.samplers.multipack.pack_parallel:177] [PID:4001] Using single process for pack_parallel, running sequentially.
[2025-10-25 17:49:55,892] [DEBUG] [axolotl.utils.samplers.multipack.__len__:462] [PID:4001] generate_batches time: 0.15515494346618652
[2025-10-25 17:49:55,892] [DEBUG] [axolotl.utils.samplers.multipack.pack_parallel:177] [PID:4001] Using single process for pack_parallel, running sequentially.
[2025-10-25 17:49:56,073] [DEBUG] [axolotl.utils.samplers.multipack.__len__:462] [PID:4001] generate_batches time: 0.18137788772583008
[2025-10-25 17:49:56,094] [INFO] [axolotl.utils.samplers.multipack.calc_min_len:438] [PID:4001] gather_len_batches: [46]
[2025-10-25 17:49:56,094] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:483] [PID:4001] data_loader_len: 11
[2025-10-25 17:49:56,094] [INFO] [axolotl.utils.trainer.calc_sample_packing_eff_est:499] [PID:4001] sample_packing_eff_est across ranks: [0.9383173403532609]
[2025-10-25 17:49:56,094] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:511] [PID:4001] sample_packing_eff_est: 0.94
[2025-10-25 17:49:56,094] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:522] [PID:4001] total_num_steps: 22
[2025-10-25 17:49:56,094] [INFO] [axolotl.utils.data.sft._prepare_standard_dataset:121] [PID:4001] Maximum number of steps set at 22
[2025-10-25 17:49:56,115] [DEBUG] [axolotl.train.setup_model_and_tokenizer:65] [PID:4001] Loading tokenizer... Qwen/Qwen3-4B-Instruct-2507
[2025-10-25 17:49:56,797] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:278] [PID:4001] EOS: 151645 / <|im_end|>
[2025-10-25 17:49:56,798] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:279] [PID:4001] BOS: None / None
[2025-10-25 17:49:56,798] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:4001] PAD: 151643 / <|endoftext|>
[2025-10-25 17:49:56,798] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:4001] UNK: None / None
[2025-10-25 17:49:56,798] [DEBUG] [axolotl.train.setup_model_and_tokenizer:74] [PID:4001] Loading model
[2025-10-25 17:49:57,139] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:4001] Patched Trainer.evaluation_loop with nanmean loss calculation
[2025-10-25 17:49:57,140] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:4001] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation
[2025-10-25 17:49:57,140] [INFO] [axolotl.loaders.patch_manager._apply_multipack_patches:301] [PID:4001] Applying multipack dataloader patch for sample packing...
Loading checkpoint shards: 0%| | 0/3 [00:00
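
The special tokens logged twice above (EOS 151645 / <|im_end|>, PAD 151643 / <|endoftext|>, no BOS or UNK) can also be checked outside of Axolotl with the Hugging Face tokenizer directly. A minimal sketch, assuming `transformers` is installed and the model files are reachable:

```python
# Quick check of the special tokens reported in the log above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")

print(tok.eos_token, tok.eos_token_id)   # expect: <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)   # expect: <|endoftext|> 151643
print(tok.bos_token, tok.unk_token)      # expect: None None
```
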