[2025-10-22 16:34:24,607] [DEBUG] [axolotl.utils.config.resolve_dtype:66] [PID:2199] bf16 support detected, enabling for this configuration.
[2025-10-22 16:34:24,749] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:2199] baseline 0.000GB ()
[2025-10-22 16:34:24,749] [INFO] [axolotl.cli.config.load_cfg:248] [PID:2199] config: {
  "activation_offloading": false,
  "adapter": "lora",
  "axolotl_config_path": "config.yaml",
  "base_model": "Qwen/Qwen2.5-7B-Instruct",
  "base_model_config": "Qwen/Qwen2.5-7B-Instruct",
  "batch_size": 16,
  "bf16": true,
  "capabilities": {
    "bf16": true,
    "compute_capability": "sm_90",
    "fp8": false,
    "n_gpu": 1,
    "n_node": 1
  },
  "context_parallel_size": 1,
  "dataloader_num_workers": 1,
  "dataloader_pin_memory": true,
  "dataloader_prefetch_factor": 256,
  "dataset_processes": 32,
  "datasets": [
    {
      "message_property_mappings": {
        "content": "content",
        "role": "role"
      },
      "path": "/workspace/fine-tuning/data/trump.json",
      "trust_remote_code": false,
      "type": "alpaca"
    }
  ],
  "ddp": false,
  "device": "cuda:0",
  "dion_rank_fraction": 1.0,
  "dion_rank_multiple_of": 1,
  "env_capabilities": {
    "torch_version": "2.7.1"
  },
  "eval_batch_size": 16,
  "eval_causal_lm_metrics": [
    "sacrebleu",
    "comet",
    "ter",
    "chrf"
  ],
  "eval_max_new_tokens": 128,
  "eval_table_size": 0,
  "experimental_skip_move_to_device": true,
  "fp16": false,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": false,
  "include_tkps": true,
  "learning_rate": 0.0002,
  "lisa_layers_attribute": "model.layers",
  "load_best_model_at_end": false,
  "load_in_4bit": false,
  "load_in_8bit": true,
  "local_rank": 0,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "lora_r": 8,
  "lora_target_modules": [
    "q_proj",
    "v_proj",
    "k_proj",
    "o_proj",
    "gate_proj",
    "down_proj",
    "up_proj"
  ],
  "loraplus_lr_embedding": 1e-06,
  "lr_scheduler": "cosine",
  "mean_resizing_embeddings": false,
  "micro_batch_size": 16,
  "model_config_type": "qwen2",
  "num_epochs": 1.0,
  "optimizer": "adamw_bnb_8bit",
  "output_dir": "./outputs/thoth_text_v2",
  "pretrain_multipack_attn": true,
  "profiler_steps_start": 0,
  "qlora_sharded_model_loading": false,
  "ray_num_workers": 1,
  "resources_per_worker": {
    "GPU": 1
  },
  "sample_packing_bin_size": 200,
  "sample_packing_group_size": 100000,
  "save_only_model": false,
  "save_safetensors": true,
  "sequence_len": 4096,
  "shuffle_before_merging_datasets": false,
  "shuffle_merged_datasets": true,
  "skip_prepare_dataset": false,
  "streaming_multipack_buffer_size": 10000,
  "strict": false,
  "tensor_parallel_size": 1,
  "tiled_mlp_use_original_mlp": true,
  "tokenizer_config": "Qwen/Qwen2.5-7B-Instruct",
  "tokenizer_save_jinja_files": true,
  "torch_dtype": "torch.bfloat16",
  "train_on_inputs": false,
  "trl": {
    "log_completions": false,
    "mask_truncated_completions": false,
    "ref_model_mixup_alpha": 0.9,
    "ref_model_sync_steps": 64,
    "scale_rewards": true,
    "sync_ref_model": false,
    "use_vllm": false,
    "vllm_server_host": "0.0.0.0",
    "vllm_server_port": 8000
  },
  "use_ray": false,
  "val_set_size": 0.0,
  "vllm": {
    "device": "auto",
    "dtype": "auto",
    "gpu_memory_utilization": 0.9,
    "host": "0.0.0.0",
    "port": 8000
  },
  "weight_decay": 0.0,
  "world_size": 1
}
[2025-10-22 16:34:25,444] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:278] [PID:2199] EOS: 151645 / <|im_end|>
[2025-10-22 16:34:25,444] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:279] [PID:2199] BOS: None / None
[2025-10-22 16:34:25,444] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:2199] PAD: 151643 / <|endoftext|>
[2025-10-22 16:34:25,444] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:2199] UNK: None / None
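The DEBUG lines above show the special tokens axolotl resolved for Qwen/Qwen2.5-7B-Instruct: EOS is <|im_end|> (id 151645), PAD is <|endoftext|> (id 151643), and BOS/UNK are unset. A minimal sketch for reproducing this check outside the trainer, assuming only the transformers library (this is not part of axolotl itself):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")

    # Expected per the log: EOS 151645 / <|im_end|>, PAD 151643 / <|endoftext|>,
    # BOS and UNK both None for this tokenizer.
    print("EOS:", tok.eos_token_id, "/", tok.eos_token)
    print("BOS:", tok.bos_token_id, "/", tok.bos_token)
    print("PAD:", tok.pad_token_id, "/", tok.pad_token)
    print("UNK:", tok.unk_token_id, "/", tok.unk_token)

Note also that batch_size 16 in the resolved config is consistent with micro_batch_size 16 x gradient_accumulation_steps 1 x world_size 1.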
[2025-10-22 16:34:25,445] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:476] [PID:2199] Unable to find prepared dataset in last_run_prepared/8eaeba3b90268710df560cd4dfe04e2d
[2025-10-22 16:34:25,445] [INFO] [axolotl.utils.data.sft._load_raw_datasets:320] [PID:2199] Loading raw datasets...
[2025-10-22 16:34:25,445] [WARNING] [axolotl.utils.data.sft._load_raw_datasets:322] [PID:2199] Processing datasets during training can lead to VRAM instability. Please pre-process your dataset using `axolotl preprocess path/to/config.yml`.
[2025-10-22 16:34:25,754] [INFO] [axolotl.utils.data.wrappers.get_dataset_wrapper:87] [PID:2199] Loading dataset: /workspace/fine-tuning/data/trump.json with base_type: alpaca and prompt_style: None
[2025-10-22 16:34:25,754] [WARNING] [datasets.arrow_dataset.map:3100] [PID:2199] num_proc must be <= 15. Reducing num_proc to 15 for dataset of size 15.
Tokenizing Prompts (num_proc=15): 0%| | 0/15 [00:00<?, ? examples/s]
Dropping Long Sequences (>4096) (num_proc=15): 0%| | 0/15 [00:00<?, ? examples/s]
Dropping Long Sequences (>4096) (num_proc=15): 7%|████▏ | 1/15 [00:00<00:03, 4.29 examples/s]
Dropping Long Sequences (>4096) (num_proc=15): 100%|██████████████████████████████████████████████████████████████| 15/15 [00:00<00:00, 34.65 examples/s]
Saving the dataset (0/1 shards): 0%| | 0/15 [00:00<?, ? examples/s]
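The WARNING above is actionable: running the suggested `axolotl preprocess` step against this run's config.yaml writes the tokenized dataset to last_run_prepared/, so subsequent runs find it instead of tokenizing during training. Separately, the raw file can be sanity-checked before a run. A small sketch assuming the datasets library is available; the field names are an assumption based on the alpaca dataset type, not taken from this log:

    from datasets import load_dataset

    # Load the same raw JSON the run consumed. The log reports 15 examples,
    # none of which were dropped by the 4096-token sequence_len filter.
    ds = load_dataset("json",
                      data_files="/workspace/fine-tuning/data/trump.json",
                      split="train")
    print(len(ds))         # expected: 15
    print(ds[0].keys())    # alpaca-style fields, e.g. instruction/input/output (assumed)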