[2025-10-22 17:36:09,304] [DEBUG] [axolotl.utils.config.resolve_dtype:66] [PID:1768] bf16 support detected, enabling for this configuration.
[2025-10-22 17:36:09,599] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:1768] baseline 0.000GB ()
[2025-10-22 17:36:09,602] [INFO] [axolotl.cli.config.load_cfg:248] [PID:1768] config:
{
  "activation_offloading": false,
  "adapter": "lora",
  "axolotl_config_path": "config.yaml",
  "base_model": "Qwen/Qwen2.5-7B-Instruct",
  "base_model_config": "Qwen/Qwen2.5-7B-Instruct",
  "batch_size": 16,
  "bf16": true,
  "capabilities": {
    "bf16": true,
    "compute_capability": "sm_90",
    "fp8": false,
    "n_gpu": 1,
    "n_node": 1
  },
  "context_parallel_size": 1,
  "dataloader_num_workers": 1,
  "dataloader_pin_memory": true,
  "dataloader_prefetch_factor": 256,
  "dataset_processes": 16,
  "datasets": [
    {
      "message_property_mappings": {
        "content": "content",
        "role": "role"
      },
      "path": "/workspace/fine-tuning/data/injaz.json",
      "trust_remote_code": false,
      "type": "alpaca"
    }
  ],
  "ddp": false,
  "device": "cuda:0",
  "dion_rank_fraction": 1.0,
  "dion_rank_multiple_of": 1,
  "env_capabilities": {
    "torch_version": "2.7.1"
  },
  "eval_batch_size": 16,
  "eval_causal_lm_metrics": [
    "sacrebleu",
    "comet",
    "ter",
    "chrf"
  ],
  "eval_max_new_tokens": 128,
  "eval_table_size": 0,
  "experimental_skip_move_to_device": true,
  "fp16": false,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": false,
  "include_tkps": true,
  "learning_rate": 0.0001,
  "lisa_layers_attribute": "model.layers",
  "load_best_model_at_end": false,
  "load_in_4bit": false,
  "load_in_8bit": true,
  "local_rank": 0,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "lora_model_dir": "injazsmart/thoth_text_v2",
  "lora_r": 8,
  "lora_target_modules": [
    "q_proj",
    "v_proj",
    "k_proj",
    "o_proj",
    "gate_proj",
    "down_proj",
    "up_proj"
  ],
  "loraplus_lr_embedding": 1e-06,
  "lr_scheduler": "cosine",
  "mean_resizing_embeddings": false,
  "micro_batch_size": 16,
  "model_config_type": "qwen2",
  "num_epochs": 2.0,
  "optimizer": "adamw_bnb_8bit",
  "output_dir": "./outputs/thoth_text_v3",
  "pretrain_multipack_attn": true,
  "profiler_steps_start": 0,
  "qlora_sharded_model_loading": false,
  "ray_num_workers": 1,
  "resources_per_worker": {
    "GPU": 1
  },
  "sample_packing_bin_size": 200,
  "sample_packing_group_size": 100000,
  "save_only_model": false,
  "save_safetensors": true,
  "sequence_len": 4096,
  "shuffle_before_merging_datasets": false,
  "shuffle_merged_datasets": true,
  "skip_prepare_dataset": false,
  "streaming_multipack_buffer_size": 10000,
  "strict": false,
  "tensor_parallel_size": 1,
  "tiled_mlp_use_original_mlp": true,
  "tokenizer_config": "Qwen/Qwen2.5-7B-Instruct",
  "tokenizer_save_jinja_files": true,
  "torch_dtype": "torch.bfloat16",
  "train_on_inputs": false,
  "trl": {
    "log_completions": false,
    "mask_truncated_completions": false,
    "ref_model_mixup_alpha": 0.9,
    "ref_model_sync_steps": 64,
    "scale_rewards": true,
    "sync_ref_model": false,
    "use_vllm": false,
    "vllm_server_host": "0.0.0.0",
    "vllm_server_port": 8000
  },
  "use_ray": false,
  "val_set_size": 0.0,
  "vllm": {
    "device": "auto",
    "dtype": "auto",
    "gpu_memory_utilization": 0.9,
    "host": "0.0.0.0",
    "port": 8000
  },
  "weight_decay": 0.0,
  "world_size": 1
}
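
The dump above is Axolotl's fully resolved configuration, most of which is framework defaults. As a rough sketch (not the author's actual file), a minimal config.yaml that would resolve to these settings could look like the following; field names follow Axolotl's documented YAML schema:

    base_model: Qwen/Qwen2.5-7B-Instruct
    load_in_8bit: true
    bf16: true

    adapter: lora
    lora_model_dir: injazsmart/thoth_text_v2  # continue from an existing adapter
    lora_r: 8
    lora_alpha: 16
    lora_dropout: 0.05
    lora_target_modules:
      - q_proj
      - v_proj
      - k_proj
      - o_proj
      - gate_proj
      - down_proj
      - up_proj

    datasets:
      - path: /workspace/fine-tuning/data/injaz.json
        type: alpaca

    sequence_len: 4096
    micro_batch_size: 16
    gradient_accumulation_steps: 1
    num_epochs: 2
    learning_rate: 0.0001
    lr_scheduler: cosine
    optimizer: adamw_bnb_8bit
    val_set_size: 0.0
    output_dir: ./outputs/thoth_text_v3
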
[2025-10-22 17:36:10,629] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:278] [PID:1768] EOS: 151645 / <|im_end|>
[2025-10-22 17:36:10,631] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:279] [PID:1768] BOS: None / None
[2025-10-22 17:36:10,633] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:1768] PAD: 151643 / <|endoftext|>
[2025-10-22 17:36:10,634] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:1768] UNK: None / None
[2025-10-22 17:36:10,635] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:476] [PID:1768] Unable to find prepared dataset in last_run_prepared/b658425644378172b1cc57b059c9f7e7
[2025-10-22 17:36:10,638] [INFO] [axolotl.utils.data.sft._load_raw_datasets:320] [PID:1768] Loading raw datasets...
[2025-10-22 17:36:10,639] [WARNING] [axolotl.utils.data.sft._load_raw_datasets:322] [PID:1768] Processing datasets during training can lead to VRAM instability. Please pre-process your dataset using `axolotl preprocess path/to/config.yml`.
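
As the warning says, tokenization can be run ahead of time so training starts from the cached Arrow dataset under last_run_prepared/ (the hash-named directory from the line above). For this run that would be:

    axolotl preprocess config.yaml
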
[2025-10-22 17:36:10,934] [INFO] [axolotl.utils.data.wrappers.get_dataset_wrapper:87] [PID:1768] Loading dataset: /workspace/fine-tuning/data/injaz.json with base_type: alpaca and prompt_style: None
[2025-10-22 17:36:10,936] [WARNING] [datasets.arrow_dataset.map:3100] [PID:1768] num_proc must be <= 10. Reducing num_proc to 10 for dataset of size 10.
[2025-10-22 17:36:11,344] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:218] [PID:1768] min_input_len: 84
[2025-10-22 17:36:11,347] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:220] [PID:1768] max_input_len: 120
[2025-10-22 17:36:11,350] [WARNING] [datasets.arrow_dataset.map:3100] [PID:1768] num_proc must be <= 10. Reducing num_proc to 10 for dataset of size 10.
Dropping Long Sequences (>4096) (num_proc=10): 0%| | 0/10 [00:00<?, ? examples/s]
Dropping Long Sequences (>4096) (num_proc=10): 10%|███████ | 1/10 [00:00<00:03, 2.54 examples/s]
Dropping Long Sequences (>4096) (num_proc=10): 100%|██████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 15.08 examples/s]
Saving the dataset (0/1 shards): 0%| | 0/10 [00:00<?, ? examples/s]
Saving the dataset (1/1 shards): 100%|███████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 510.88 examples/s]
Saving the dataset (1/1 shards): 100%|███████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 454.54 examples/s]
[2025-10-22 17:36:12,409] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:404] [PID:1768] total_num_tokens: 1_010
[2025-10-22 17:36:12,419] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:422] [PID:1768] `total_supervised_tokens: 452`
[2025-10-22 17:36:12,421] [DEBUG] [axolotl.utils.trainer.calculate_total_num_steps:520] [PID:1768] total_num_steps: 2
[2025-10-22 17:36:12,423] [INFO] [axolotl.utils.data.sft._prepare_standard_dataset:121] [PID:1768] Maximum number of steps set at 2
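
The step count follows from the batch math: ceil(10 examples / (micro_batch_size 16 × gradient_accumulation_steps 1 × world_size 1)) = 1 optimizer step per epoch, times num_epochs 2.0, gives total_num_steps = 2. The token counts are likewise consistent with the lengths logged above: 10 examples averaging ~101 tokens each yields total_num_tokens = 1,010, of which 452 are supervised, i.e. contribute to the loss (with train_on_inputs: false, prompt tokens are label-masked and only response tokens are trained on).
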
[2025-10-22 17:36:12,470] [DEBUG] [axolotl.train.setup_model_and_tokenizer:65] [PID:1768] Loading tokenizer... Qwen/Qwen2.5-7B-Instruct
[2025-10-22 17:36:13,242] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:278] [PID:1768] EOS: 151645 / <|im_end|>
[2025-10-22 17:36:13,244] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:279] [PID:1768] BOS: None / None
[2025-10-22 17:36:13,244] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:1768] PAD: 151643 / <|endoftext|>
[2025-10-22 17:36:13,246] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:1768] UNK: None / None
[2025-10-22 17:36:13,248] [DEBUG] [axolotl.train.setup_model_and_tokenizer:74] [PID:1768] Loading model
[2025-10-22 17:36:13,447] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:1768] Patched Trainer.evaluation_loop with nanmean loss calculation
[2025-10-22 17:36:13,451] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:1768] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation
Loading checkpoint shards: 0%| | 0/4 [00:00<?, ?it/s]
Loading checkpoint shards: 25%|███████████████████████ | 1/4 [00:04<00:14, 4.82s/it]
Loading checkpoint shards: 50%|██████████████████████████████████████████████ | 2/4 [00:11<00:11, 5.83s/it]
Loading checkpoint shards: 75%|█████████████████████████████████████████████████████████████████████ | 3/4 [00:17<00:05, 6.00s/it]
Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:21<00:00, 5.30s/it]
Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:21<00:00, 5.45s/it]
[2025-10-22 17:36:36,036] [INFO] [axolotl.loaders.model._prepare_model_for_quantization:863] [PID:1768] converting PEFT model w/ prepare_model_for_kbit_training
[2025-10-22 17:36:36,040] [INFO] [axolotl.loaders.model._configure_embedding_dtypes:345] [PID:1768] Converting modules to torch.bfloat16
[2025-10-22 17:36:36,044] [DEBUG] [axolotl.loaders.model.log_gpu_memory_usage:127] [PID:1768] Memory usage after model load 11.676GB (+11.676GB allocated, +13.172GB reserved)
[2025-10-22 17:36:36,047] [DEBUG] [axolotl.loaders.adapter.load_lora:143] [PID:1768] Loading pretrained PEFT - LoRA
adapter_config.json: 0%| | 0.00/932 [00:00<?, ?B/s]
adapter_config.json: 100%|██████████████████████████████████████████████████████████████████████████████████████████████| 932/932 [00:00<00:00, 2.55MB/s]
adapter_model.safetensors: 0%| | 0.00/80.8M [00:00<?, ?B/s]
adapter_model.safetensors: 0%| | 45.5k/80.8M [00:01<53:09, 25.3kB/s]
adapter_model.safetensors: 17%|███████████████ | 13.8M/80.8M [00:02<00:09, 7.34MB/s]
adapter_model.safetensors: 100%|████████████████████████████████████████████████████████████████████████████████████| 80.8M/80.8M [00:03<00:00, 27.8MB/s]
adapter_model.safetensors: 100%|████████████████████████████████████████████████████████████████████████████████████| 80.8M/80.8M [00:03<00:00, 21.0MB/s]
trainable params: 20,185,088 || all params: 7,635,801,600 || trainable%: 0.2643
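
The PEFT summary checks out against Qwen2.5-7B's published dimensions (hidden size 3584, 28 layers, 512-dim GQA k/v projections, 18944 intermediate size). Each LoRA adapter on a d_in→d_out projection adds r·(d_in + d_out) parameters, so per layer with r = 8:

    q_proj + o_proj:     2 × 8 × (3584 + 3584)  = 114,688
    k_proj + v_proj:     2 × 8 × (3584 + 512)   =  65,536
    gate/up/down_proj:   3 × 8 × (3584 + 18944) = 540,672
    per-layer total:                              720,896

28 layers × 720,896 = 20,185,088 trainable parameters, and 20,185,088 / 7,635,801,600 ≈ 0.2643%, matching the summary line above.
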
[2025-10-22 17:36:41,269] [DEBUG] [axolotl.loaders.model.log_gpu_memory_usage:127] [PID:1768] after adapters 8.642GB (+8.642GB allocated, +13.324GB reserved)
[2025-10-22 17:36:48,570] [INFO] [axolotl.train.save_initial_configs:398] [PID:1768] Pre-saving adapter config to ./outputs/thoth_text_v3...
[2025-10-22 17:36:48,596] [INFO] [axolotl.train.save_initial_configs:402] [PID:1768] Pre-saving tokenizer to ./outputs/thoth_text_v3...
[2025-10-22 17:36:49,249] [INFO] [axolotl.train.save_initial_configs:407] [PID:1768] Pre-saving model config to ./outputs/thoth_text_v3...
[2025-10-22 17:36:49,268] [INFO] [axolotl.train.execute_training:196] [PID:1768] Starting trainer...
0%| | 0/2 [00:00<?, ?it/s][2025-10-22 17:36:50,364] [WARNING] [py.warnings._showwarnmsg:110] [PID:1768] /root/miniconda3/envs/py3.11/lib/python3.11/site-packages/bitsandbytes/autograd/_functions.py:186: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization
warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
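
This warning is expected when load_in_8bit is combined with bf16: bitsandbytes' LLM.int8() matmul (MatMul8bitLt) computes in float16 internally, so bfloat16 activations are cast on the fly at quantization time. It is benign for training, though it means the 8-bit path does not run purely in bfloat16.
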
50%|███████████████████████████████████████████████████████████ | 1/2 [00:01<00:01, 1.57s/it]
{'loss': 2.6929, 'grad_norm': 1.6138832569122314, 'learning_rate': 0.0001, 'memory/max_active (GiB)': 22.98, 'memory/max_allocated (GiB)': 22.98, 'memory/device_reserved (GiB)': 23.52, 'tokens_per_second_per_gpu': 360.33, 'epoch': 1.0}
50%|███████████████████████████████████████████████████████████ | 1/2 [00:01<00:01, 1.57s/it][2025-10-22 17:36:51,326] [INFO] [axolotl.core.trainers.base._save:671] [PID:1768] Saving model checkpoint to ./outputs/thoth_text_v3/checkpoint-1
[2025-10-22 17:36:54,243] [WARNING] [py.warnings._showwarnmsg:110] [PID:1768] /root/miniconda3/envs/py3.11/lib/python3.11/site-packages/bitsandbytes/autograd/_functions.py:186: UserWarning: MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization
warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.70s/it]
{'loss': 2.4928, 'grad_norm': 1.62943696975708, 'learning_rate': 5e-05, 'memory/max_active (GiB)': 23.05, 'memory/max_allocated (GiB)': 23.05, 'memory/device_reserved (GiB)': 23.96, 'tokens_per_second_per_gpu': 795.39, 'epoch': 2.0}
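
The logged learning rates match a cosine schedule with no warmup: lr(t) = lr_max · ½(1 + cos(π·t/T)) with lr_max = 1e-4 and T = 2 total steps. Step 1 trains at the peak lr(0) = 1e-4 and step 2 at lr(1) = ½ · 1e-4 · (1 + cos(π/2)) = 5e-5, exactly the values in the two metric dicts above.
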
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:05<00:00, 2.70s/it][2025-10-22 17:36:54,799] [INFO] [axolotl.core.trainers.base._save:671] [PID:1768] Saving model checkpoint to ./outputs/thoth_text_v3/checkpoint-2
{'train_runtime': 7.7547, 'train_samples_per_second': 4.127, 'train_steps_per_second': 0.258, 'train_loss': 2.5928467512130737, 'memory/max_active (GiB)': 8.67, 'memory/max_allocated (GiB)': 8.67, 'memory/device_reserved (GiB)': 23.96, 'epoch': 2.0}
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:07<00:00, 2.70s/it]
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:07<00:00, 3.88s/it]
[2025-10-22 17:36:57,557] [INFO] [axolotl.train.save_trained_model:218] [PID:1768] Training completed! Saving trained model to ./outputs/thoth_text_v3.
[2025-10-22 17:36:58,688] [INFO] [axolotl.train.save_trained_model:336] [PID:1768] Model successfully saved to ./outputs/thoth_text_v3