diff --git "a/debug.log" "b/debug.log" --- "a/debug.log" +++ "b/debug.log" @@ -1,12 +1,12 @@ -[2025-12-28 11:04:35,744] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:42410] baseline 0.000GB () -[2025-12-28 11:04:35,746] [INFO] [axolotl.cli.config.load_cfg:256] [PID:42410] config: +[2025-12-29 02:49:24,896] [DEBUG] [axolotl.utils.config.log_gpu_memory_usage:127] [PID:3751] baseline 0.000GB () +[2025-12-29 02:49:24,896] [INFO] [axolotl.cli.config.load_cfg:256] [PID:3751] config: { "activation_offloading": false, "adapter": "lora", "axolotl_config_path": "tuner.yaml", "base_model": "codellama/CodeLlama-7b-hf", "base_model_config": "codellama/CodeLlama-7b-hf", - "batch_size": 8, + "batch_size": 5, "bf16": true, "capabilities": { "bf16": true, @@ -17,10 +17,9 @@ }, "chat_template": "llama3", "context_parallel_size": 1, - "dataloader_num_workers": 1, + "dataloader_num_workers": 2, "dataloader_pin_memory": true, - "dataloader_prefetch_factor": 256, - "dataset_num_proc": 384, + "dataset_num_proc": 96, "datasets": [ { "chat_template": "tokenizer_default", @@ -36,42 +35,13 @@ } ], "ddp": false, - "deepspeed": { - "bf16": { - "enabled": true - }, - "fp16": { - "enabled": false - }, - "gradient_accumulation_steps": "auto", - "gradient_clipping": 1.0, - "steps_per_print": 2000, - "train_micro_batch_size_per_gpu": "auto", - "zero_optimization": { - "contiguous_gradients": true, - "gather_16bit_weights_on_model_save": true, - "offload_optimizer": { - "device": "cpu", - "pin_memory": true - }, - "offload_param": { - "device": "cpu", - "pin_memory": true - }, - "overlap_comm": true, - "reduce_bucket_size": "auto", - "stage": 3, - "stage3_param_persistence_threshold": "auto", - "stage3_prefetch_bucket_size": "auto" - } - }, "device": "cuda:0", "dion_rank_fraction": 1.0, "dion_rank_multiple_of": 1, "env_capabilities": { "torch_version": "2.8.0" }, - "eval_batch_size": 4, + "eval_batch_size": 5, "eval_causal_lm_metrics": [ "sacrebleu", "comet", @@ -80,18 +50,16 @@ ], "eval_max_new_tokens": 128, "eval_sample_packing": true, - "eval_steps": 100, + "eval_steps": 1000, "eval_table_size": 0, "experimental_skip_move_to_device": true, "fp16": false, - "gradient_accumulation_steps": 2, + "gradient_accumulation_steps": 1, "gradient_checkpointing": true, "gradient_checkpointing_kwargs": { "use_reentrant": true }, "group_by_length": true, - "hub_model_id": "darwinkernelpanic/luau-codellama-7b-reasoning", - "hub_strategy": "every_save", "include_tkps": true, "is_falcon_derived_model": false, "is_llama_derived_model": true, @@ -102,26 +70,26 @@ "load_in_4bit": false, "load_in_8bit": false, "local_rank": 0, - "logging_steps": 1, + "logging_steps": 25, "lora_alpha": 32, "lora_dropout": 0.05, "lora_r": 16, "lora_target_modules": [ "q_proj", - "v_proj", "k_proj", + "v_proj", "o_proj" ], "loraplus_lr_embedding": 1e-06, "lr_scheduler": "cosine", "mean_resizing_embeddings": false, - "micro_batch_size": 4, + "micro_batch_size": 5, "model_config_type": "llama", "num_epochs": 3.0, "optimizer": "adamw_torch", "otel_metrics_host": "localhost", "otel_metrics_port": 8000, - "output_dir": "./outputs/luau-codellama-h200", + "output_dir": "./outputs/luau-codellama-h200-fast", "pad_to_sequence_len": true, "pretrain_multipack_attn": true, "profiler_steps_start": 0, @@ -135,7 +103,7 @@ "sample_packing_group_size": 100000, "save_only_model": false, "save_safetensors": true, - "save_steps": 200, + "save_steps": 1000, "save_strategy": "steps", "save_total_limit": 3, "seed": 42, @@ -175,19 +143,19 @@ "host": "0.0.0.0", "port": 
     },
-    "warmup_steps": 10,
+    "warmup_steps": 100,
     "weight_decay": 0.0,
     "world_size": 1
 }
-[2025-12-28 11:04:36,377] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:42410] EOS: 2 / </s>
-[2025-12-28 11:04:36,378] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:42410] BOS: 1 / <s>
-[2025-12-28 11:04:36,378] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:42410] PAD: 2 / </s>
-[2025-12-28 11:04:36,378] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:42410] UNK: 0 / <unk>
-[2025-12-28 11:04:36,378] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:481] [PID:42410] Unable to find prepared dataset in last_run_prepared/b7c17715ff7f64badeb455c51ab5d648
-[2025-12-28 11:04:36,378] [INFO] [axolotl.utils.data.sft._load_raw_datasets:320] [PID:42410] Loading raw datasets...
-[2025-12-28 11:04:36,378] [WARNING] [axolotl.utils.data.sft._load_raw_datasets:322] [PID:42410] Processing datasets during training can lead to VRAM instability. Please pre-process your dataset using `axolotl preprocess path/to/config.yml`.
-[2025-12-28 11:04:38,127] [INFO] [axolotl.utils.data.wrappers.get_dataset_wrapper:87] [PID:42410] Loading dataset: darwinkernelpanic/luau-reasoning-normalized with base_type: chat_template and prompt_style: None
-[2025-12-28 11:04:38,130] [INFO] [axolotl.prompt_strategies.chat_template.__call__:996] [PID:42410] Using chat template:
+[2025-12-29 02:49:25,389] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:280] [PID:3751] EOS: 2 / </s>
+[2025-12-29 02:49:25,389] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:3751] BOS: 1 / <s>
+[2025-12-29 02:49:25,389] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:3751] PAD: 2 / </s>
+[2025-12-29 02:49:25,389] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:3751] UNK: 0 / <unk>
+[2025-12-29 02:49:25,390] [INFO] [axolotl.utils.data.shared.load_preprocessed_dataset:481] [PID:3751] Unable to find prepared dataset in last_run_prepared/b7c17715ff7f64badeb455c51ab5d648
+[2025-12-29 02:49:25,390] [INFO] [axolotl.utils.data.sft._load_raw_datasets:320] [PID:3751] Loading raw datasets...
+[2025-12-29 02:49:25,390] [WARNING] [axolotl.utils.data.sft._load_raw_datasets:322] [PID:3751] Processing datasets during training can lead to VRAM instability. Please pre-process your dataset using `axolotl preprocess path/to/config.yml`.
+[2025-12-29 02:49:26,885] [INFO] [axolotl.utils.data.wrappers.get_dataset_wrapper:87] [PID:3751] Loading dataset: darwinkernelpanic/luau-reasoning-normalized with base_type: chat_template and prompt_style: None
+[2025-12-29 02:49:26,887] [INFO] [axolotl.prompt_strategies.chat_template.__call__:996] [PID:3751] Using chat template:
 ---
 {% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
 
@@ -196,1771 +164,561 @@
 ' }}{% endif %}
 ---
-[2025-12-28 11:04:38,137] [WARNING] [axolotl.prompt_strategies.chat_template._validate_eot_and_eos_tokens:337] [PID:42410] EOS token '</s>' not found in chat_template. Please check if your template/EOS token is correct.
-[2025-12-28 11:04:38,508] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:218] [PID:42410] min_input_len: 636
-[2025-12-28 11:04:38,508] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:220] [PID:42410] max_input_len: 12839
-[2025-12-28 11:04:41,234] [WARNING] [axolotl.utils.data.utils.handle_long_seq_in_dataset:260] [PID:42410] Dropped 755 samples from dataset
- Saving the dataset (0/56 shards): 0%| | 0/14586 [00:00
-[2025-12-28 11:04:52,785] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:42410] BOS: 1 / <s>
-[2025-12-28 11:04:52,785] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:42410] PAD: 2 / </s>
-[2025-12-28 11:04:52,785] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:42410] UNK: 0 / <unk>
-[2025-12-28 11:04:52,785] [DEBUG] [axolotl.train.setup_model_and_tokenizer:82] [PID:42410] Loading model
-[2025-12-28 11:04:52,926] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:42410] Patched Trainer.evaluation_loop with nanmean loss calculation
-[2025-12-28 11:04:52,927] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:42410] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation
-[2025-12-28 11:04:52,927] [INFO] [axolotl.loaders.patch_manager._apply_multipack_patches:301] [PID:42410] Applying multipack dataloader patch for sample packing...
-[2025-12-28 11:04:52,927] [INFO] [axolotl.loaders.patch_manager._patch_llama_sample_packing:430] [PID:42410] Patching llama _prepare_4d_causal_attention_mask*...
- Loading checkpoint shards: 0%| | 0/2 [00:00
' not found in chat_template. Please check if your template/EOS token is correct.
+[2025-12-29 02:49:27,110] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:218] [PID:3751] min_input_len: 636
+[2025-12-29 02:49:27,110] [INFO] [axolotl.utils.data.utils.handle_long_seq_in_dataset:220] [PID:3751] max_input_len: 12839
+[2025-12-29 02:49:27,405] [WARNING] [axolotl.utils.data.utils.handle_long_seq_in_dataset:260] [PID:3751] Dropped 755 samples from dataset
+ Saving the dataset (0/56 shards): 0%| | 0/14586 [00:00
+[2025-12-29 02:49:32,522] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:281] [PID:3751] BOS: 1 / <s>
+[2025-12-29 02:49:32,522] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:282] [PID:3751] PAD: 2 / </s>
+[2025-12-29 02:49:32,522] [DEBUG] [axolotl.loaders.tokenizer.load_tokenizer:283] [PID:3751] UNK: 0 / <unk>
+[2025-12-29 02:49:32,522] [DEBUG] [axolotl.train.setup_model_and_tokenizer:82] [PID:3751] Loading model
+[2025-12-29 02:49:32,640] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_evaluation_loop:87] [PID:3751] Patched Trainer.evaluation_loop with nanmean loss calculation
+[2025-12-29 02:49:32,642] [DEBUG] [axolotl.monkeypatch.transformers.trainer_loss_calc.patch_maybe_log_save_evaluate:138] [PID:3751] Patched Trainer._maybe_log_save_evaluate with nanmean loss calculation
+[2025-12-29 02:49:32,642] [INFO] [axolotl.loaders.patch_manager._apply_multipack_patches:301] [PID:3751] Applying multipack dataloader patch for sample packing...
+[2025-12-29 02:49:32,643] [INFO] [axolotl.loaders.patch_manager._patch_llama_sample_packing:430] [PID:3751] Patching llama _prepare_4d_causal_attention_mask*...
+ Loading checkpoint shards: 0%| | 0/2 [00:00
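For reference, the resolved-config diff above corresponds roughly to the following edit of the Axolotl config referenced as tuner.yaml. This is only a sketch: the YAML file itself is not part of this log, so the key names and layout are assumed to mirror the resolved JSON keys, and the values are taken from the '+' side of the diff.

    # Hypothetical tuner.yaml delta implied by the resolved-config diff (assumed keys)
    micro_batch_size: 5              # was 4; with gradient_accumulation_steps 1, resolved batch_size drops from 8 to 5
    gradient_accumulation_steps: 1   # was 2
    eval_batch_size: 5               # was 4
    dataloader_num_workers: 2        # was 1
    dataset_num_proc: 96             # was 384; dataloader_prefetch_factor (256) no longer set
    logging_steps: 25                # was 1
    eval_steps: 1000                 # was 100
    save_steps: 1000                 # was 200
    warmup_steps: 100                # was 10
    output_dir: ./outputs/luau-codellama-h200-fast
    lora_target_modules:             # same four projections as before, only the listed order changed
      - q_proj
      - k_proj
      - v_proj
      - o_proj
    # The DeepSpeed ZeRO-3 CPU-offload block and the hub_model_id/hub_strategy push settings
    # from the old run are absent from the new resolved config.

Per the WARNING at 02:49:25,390, the dataset can also be prepared ahead of time with `axolotl preprocess tuner.yaml` so tokenization does not have to run at the start of training.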