[2026-01-24 13:25:03,029] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:9359] baseline 0.000GB ()
[2026-01-24 13:25:03,032] [INFO] [axolotl.cli.config.load_cfg:259] [PID:9359] config: {
  "activation_offloading": false,
  "adapter": "lora",
  "axolotl_config_path": "using_axolotl/lora.yml",
  "base_model": "microsoft/Phi-4-mini-instruct",
  "base_model_config": "microsoft/Phi-4-mini-instruct",
  "batch_size": 8,
  "bf16": true,
  "capabilities": {
    "bf16": true,
    "compute_capability": "sm_86",
    "fp8": false,
    "n_gpu": 1,
    "n_node": 1
  },
  "chat_template": "tokenizer_default",
  "context_parallel_size": 1,
  "dataloader_num_workers": 1,
  "dataloader_pin_memory": true,
  "dataloader_prefetch_factor": 256,
  "dataset_num_proc": 9,
  "datasets": [
    {
      "message_property_mappings": {
        "content": "content",
        "role": "role"
      },
      "path": "DannyAI/African-History-QA-Dataset",
      "split": "train",
      "trust_remote_code": false,
      "type": "alpaca_chat.load_qa"
    }
  ],
  "ddp": false,
  "device": "cuda:0",
  "dion_rank_fraction": 1.0,
  "dion_rank_multiple_of": 1,
  "env_capabilities": {
    "torch_version": "2.9.1"
  },
  "eval_batch_size": 2,
  "eval_causal_lm_metrics": [
    "sacrebleu",
    "comet",
    "ter",
    "chrf"
  ],
  "eval_max_new_tokens": 128,
  "eval_sample_packing": false,
  "eval_steps": 50,
  "eval_strategy": "steps",
  "eval_table_size": 0,
  "experimental_skip_move_to_device": true,
  "fp16": false,
  "gradient_accumulation_steps": 4,
  "gradient_checkpointing": false,
  "hub_model_id": "DannyAI/phi4_lora_axolotl",
  "include_tkps": true,
  "is_falcon_derived_model": false,
  "is_llama_derived_model": false,
  "is_mistral_derived_model": false,
  "learning_rate": 2e-05,
  "lisa_layers_attribute": "model.layers",
  "load_best_model_at_end": false,
  "load_in_4bit": false,
  "load_in_8bit": false,
  "local_rank": 0,
  "logging_steps": 5,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "lora_r": 8,
  "lora_target_modules": [
    "q_proj",
    "v_proj",
    "k_proj",
    "o_proj"
  ],
  "loraplus_lr_embedding": 1e-06,
  "lr_scheduler": "cosine",
  "max_steps": 650,
  "mean_resizing_embeddings": false,
  "micro_batch_size": 2,
  "model_config_type": "phi3",
  "num_epochs": 1.0,
  "optimizer": "adamw_torch",
  "otel_metrics_host": "localhost",
  "otel_metrics_port": 8000,
  "output_dir": "./phi4_african_history_lora_out",
  "pad_to_sequence_len": true,
  "pretrain_multipack_attn": true,
  "profiler_steps_start": 0,
  "qlora_sharded_model_loading": false,
  "ray_num_workers": 1,
  "remove_unused_columns": false,
  "resources_per_worker": {
    "GPU": 1
  },
  "sample_packing": true,
  "sample_packing_bin_size": 200,
  "sample_packing_group_size": 100000,
  "save_only_model": false,
  "save_safetensors": true,
  "save_steps": 100,
  "save_strategy": "steps",
  "sequence_len": 2048,
  "shuffle_before_merging_datasets": false,
  "shuffle_merged_datasets": true,
  "skip_prepare_dataset": false,
  "streaming_multipack_buffer_size": 10000,
  "strict": false,
  "tensor_parallel_size": 1,
  "test_datasets": [
    {
      "message_property_mappings": {
        "content": "content",
        "role": "role"
      },
      "path": "DannyAI/African-History-QA-Dataset",
      "split": "validation",
      "trust_remote_code": false,
      "type": "alpaca_chat.load_qa"
    }
  ],
  "tiled_mlp_use_original_mlp": true,
  "tokenizer_config": "microsoft/Phi-4-mini-instruct",
  "tokenizer_save_jinja_files": true,
  "tokenizer_type": "AutoTokenizer",
  "torch_dtype": "torch.bfloat16",
  "train_on_inputs": false,
  "trl": {
    "log_completions": false,
    "mask_truncated_completions": false,
    "ref_model_mixup_alpha": 0.9,
    "ref_model_sync_steps": 64,
    "scale_rewards": true,
    "sync_ref_model": false,
    "use_vllm": false,
    "vllm_server_host": "0.0.0.0",
    "vllm_server_port": 8000
  },
  "type_of_model": "AutoModelForCausalLM",
  "use_otel_metrics": false,
  "use_ray": false,
  "use_wandb": true,
  "val_set_size": 0.0,
  "vllm": {
    "device": "auto",
    "dtype": "auto",
    "gpu_memory_utilization": 0.9,
    "host": "0.0.0.0",
    "port": 8000
  },
  "wandb_name": "phi4_lora_axolotl",
  "wandb_project": "phi4_african_history",
  "warmup_steps": 20,
  "weight_decay": 0.0,
  "world_size": 1
}
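The resolved config above is what axolotl prints after merging `using_axolotl/lora.yml` with its defaults; the derived `batch_size` of 8 is simply `micro_batch_size` (2) × `gradient_accumulation_steps` (4) × `world_size` (1). For orientation, a source file along the following lines would resolve to this run. This is a hedged reconstruction from the resolved values above, not the actual contents of `lora.yml`, and it omits keys that axolotl fills in by default:

```yaml
# Hedged reconstruction of using_axolotl/lora.yml from the resolved config
# above; the real source file may set more or fewer keys explicitly.
base_model: microsoft/Phi-4-mini-instruct
adapter: lora
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - q_proj
  - v_proj
  - k_proj
  - o_proj

datasets:
  - path: DannyAI/African-History-QA-Dataset
    type: alpaca_chat.load_qa
    split: train
test_datasets:
  - path: DannyAI/African-History-QA-Dataset
    type: alpaca_chat.load_qa
    split: validation

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

micro_batch_size: 2
gradient_accumulation_steps: 4
num_epochs: 1
max_steps: 650
learning_rate: 2e-5
lr_scheduler: cosine
warmup_steps: 20
optimizer: adamw_torch
bf16: true

eval_steps: 50
save_steps: 100
logging_steps: 5
output_dir: ./phi4_african_history_lora_out
hub_model_id: DannyAI/phi4_lora_axolotl
wandb_project: phi4_african_history
wandb_name: phi4_lora_axolotl
```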
"AutoModelForCausalLM", "use_otel_metrics": false, "use_ray": false, "use_wandb": true, "val_set_size": 0.0, "vllm": { "device": "auto", "dtype": "auto", "gpu_memory_utilization": 0.9, "host": "0.0.0.0", "port": 8000 }, "wandb_name": "phi4_lora_axolotl", "wandb_project": "phi4_african_history", "warmup_steps": 20, "weight_decay": 0.0, "world_size": 1 } [2026-01-24 13:25:04,559] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:285] [PID:9359] EOS: 199999 / <|endoftext|> [2026-01-24 13:25:04,559] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:286] [PID:9359] BOS: 199999 / <|endoftext|> [2026-01-24 13:25:04,559] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:287] [PID:9359] PAD: 199999 / <|endoftext|> [2026-01-24 13:25:04,559] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:288] [PID:9359] UNK: 199999 / <|endoftext|> [2026-01-24 13:25:04,560] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:481] [PID:9359] Unable to find prepared dataset in last_run_prepared/89363fb9438bda5d225c172d067e1ebf [2026-01-24 13:25:04,560] [INFO] [axolotl.utils.data.sft._load_raw_datasets:320] [PID:9359] Loading raw datasets... [2026-01-24 13:25:04,560] [WARNING] [axolotl.utils.data.sft._load_raw_datasets:322] [PID:9359] Processing datasets during training can lead to VRAM instability. Please pre-process your dataset using `axolotl preprocess path/to/config.yml`. [2026-01-24 13:25:06,680] [INFO] [axolotl.utils.data.wrappers.get_dataset_wrapper:87] [PID:9359] Loading dataset: DannyAI/African-History-QA-Dataset with base_type: alpaca_chat.load_qa and prompt_style: None [2026-01-24 13:25:06,878] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:224] [PID:9359] min_input_len: 52 [2026-01-24 13:25:06,879] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:226] [PID:9359] max_input_len: 179 Dropping Long Sequences (>2048) (num_proc=9): 0%| | 0/2114 [00:002048) (num_proc=9): 11%| | 235/2114 [00:00<00: Dropping Long Sequences (>2048) (num_proc=9): 100%|█| 2114/2114 [00:00<00 Drop Samples with Zero Trainable Tokens (num_proc=9): 0%| | 0/2114 [00: Drop Samples with Zero Trainable Tokens (num_proc=9): 11%| | 235/2114 [0 Drop Samples with Zero Trainable Tokens (num_proc=9): 100%|█| 2114/2114 [ Drop Samples with Zero Trainable Tokens (num_proc=9): 100%|█| 2114/2114 [ Add position_id column (Sample Packing) (num_proc=9): 0%| | 0/2114 [00: Add position_id column (Sample Packing) (num_proc=9): 11%| | 235/2114 [0 Add position_id column (Sample Packing) (num_proc=9): 100%|█| 2114/2114 [ Saving the dataset (0/8 shards): 0%| | 0/2114 [00:002048) (num_proc=9): 0%| | 0/200 [00:002048) (num_proc=9): 12%| | 23/200 [00:00<00:01 Dropping Long Sequences (>2048) (num_proc=9): 100%|█| 200/200 [00:00<00:0 Drop Samples with Zero Trainable Tokens (num_proc=9): 0%| | 0/200 [00:0 Drop Samples with Zero Trainable Tokens (num_proc=9): 12%| | 23/200 [00: Drop Samples with Zero Trainable Tokens (num_proc=9): 100%|█| 200/200 [00 Add position_id column (Sample Packing) (num_proc=9): 0%| | 0/200 [00:0 Add position_id column (Sample Packing) (num_proc=9): 12%| | 23/200 [00: Add position_id column (Sample Packing) (num_proc=9): 100%|█| 200/200 [00 Saving the dataset (0/1 shards): 0%| | 0/200 [00:00 [2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:286] [PID:9359] BOS: 199999 / <|endoftext|> [2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:287] [PID:9359] PAD: 199999 / <|endoftext|> [2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:288] 
[2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:285] [PID:9359] EOS: 199999 / <|endoftext|>
[2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:286] [PID:9359] BOS: 199999 / <|endoftext|>
[2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:287] [PID:9359] PAD: 199999 / <|endoftext|>
[2026-01-24 13:25:15,574] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:288] [PID:9359] UNK: 199999 / <|endoftext|>
[2026-01-24 13:25:15,574] [DEBUG] [axolotl.train.setup_model_and_tokenizer:82] [PID:9359] Loading model
[2026-01-24 13:25:15,776] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:9359] Patched Trainer.evaluation_loop with nanmean loss calculation
[2026-01-24 13:25:15,781] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:9359] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation
[2026-01-24 13:25:15,782] [INFO] [axolotl.loaders.patch_manager._apply_multipack_patches:345] [PID:9359] Applying multipack dataloader patch for sample packing...
Loading checkpoint shards: 0%| | 0/2 [00:00