| """ |
| E2E tests for custom optimizers using Llama |
| """ |
|
|
| import logging |
| import os |
| import unittest |
|
|
| from axolotl.cli.args import TrainerCliArgs |
| from axolotl.common.datasets import load_datasets |
| from axolotl.train import train |
| from axolotl.utils.config import normalize_config, validate_config |
| from axolotl.utils.dict import DictDefault |
|
|
| from .utils import check_model_output_exists, require_torch_2_5_1, with_temp_dir |
|
|
| LOG = logging.getLogger("axolotl.tests.e2e") |
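# disable W&B logging so these CI test runs don't try to create remote runs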
| os.environ["WANDB_DISABLED"] = "true" |
|
|
| class TestCustomOptimizers(unittest.TestCase): |
| """ |
| Test case for Llama models using LoRA |
| """ |
|
|
| @with_temp_dir |
    def test_optimi_adamw(self, temp_dir):
        """LoRA fine-tune with the optimi_adamw optimizer."""
| cfg = DictDefault( |
| { |
| "base_model": "JackFram/llama-68m", |
| "tokenizer_type": "LlamaTokenizer", |
| "sequence_len": 1024, |
| "load_in_8bit": True, |
| "adapter": "lora", |
| "lora_r": 8, |
| "lora_alpha": 16, |
| "lora_dropout": 0.05, |
| "lora_target_linear": True, |
| "val_set_size": 0.1, |
| "special_tokens": { |
| "unk_token": "<unk>", |
| "bos_token": "<s>", |
| "eos_token": "</s>", |
| }, |
| "datasets": [ |
| { |
| "path": "mhenrichsen/alpaca_2k_test", |
| "type": "alpaca", |
| }, |
| ], |
| "num_epochs": 1, |
| "micro_batch_size": 8, |
| "gradient_accumulation_steps": 1, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "optimi_adamw", |
| "max_steps": 5, |
| "lr_scheduler": "cosine", |
| } |
| ) |
|
|
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
|
| _, _, trainer = train(cfg=cfg, dataset_meta=dataset_meta) |
| check_model_output_exists(temp_dir, cfg) |
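        # the Trainer's optimizer is typically wrapped (e.g. by accelerate), so unwrap
        # one level to reach the underlying optimi AdamW instance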
| assert trainer.optimizer.optimizer.__class__.__name__ == "AdamW" |
|
|
| @with_temp_dir |
| @require_torch_2_5_1 |
    def test_adopt_adamw(self, temp_dir):
        """LoRA fine-tune with the adopt_adamw optimizer (requires torch >= 2.5.1)."""
| cfg = DictDefault( |
| { |
| "base_model": "JackFram/llama-68m", |
| "tokenizer_type": "LlamaTokenizer", |
| "sequence_len": 1024, |
| "load_in_8bit": True, |
| "adapter": "lora", |
| "lora_r": 8, |
| "lora_alpha": 16, |
| "lora_dropout": 0.05, |
| "lora_target_linear": True, |
| "val_set_size": 0.1, |
| "special_tokens": { |
| "unk_token": "<unk>", |
| "bos_token": "<s>", |
| "eos_token": "</s>", |
| }, |
| "datasets": [ |
| { |
| "path": "mhenrichsen/alpaca_2k_test", |
| "type": "alpaca", |
| }, |
| ], |
| "num_epochs": 1, |
| "max_steps": 5, |
| "micro_batch_size": 8, |
| "gradient_accumulation_steps": 1, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "adopt_adamw", |
| "lr_scheduler": "cosine", |
| } |
| ) |
|
|
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
|
| _, _, trainer = train(cfg=cfg, dataset_meta=dataset_meta) |
| check_model_output_exists(temp_dir, cfg) |
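        # the exact ADOPT class name can vary, so match on the substring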
| assert "ADOPT" in trainer.optimizer.optimizer.__class__.__name__ |
|
|
| @with_temp_dir |
| @require_torch_2_5_1 |
    def test_muon(self, temp_dir):
        """LoRA fine-tune with the muon optimizer (requires torch >= 2.5.1)."""
| cfg = DictDefault( |
| { |
| "base_model": "JackFram/llama-68m", |
| "tokenizer_type": "LlamaTokenizer", |
| "sequence_len": 1024, |
| "load_in_8bit": True, |
| "adapter": "lora", |
| "lora_r": 8, |
| "lora_alpha": 16, |
| "lora_dropout": 0.05, |
| "lora_target_linear": True, |
| "val_set_size": 0.1, |
| "special_tokens": { |
| "unk_token": "<unk>", |
| "bos_token": "<s>", |
| "eos_token": "</s>", |
| }, |
| "datasets": [ |
| { |
| "path": "mhenrichsen/alpaca_2k_test", |
| "type": "alpaca", |
| }, |
| ], |
| "num_epochs": 1, |
| "max_steps": 5, |
| "micro_batch_size": 8, |
| "gradient_accumulation_steps": 1, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "muon", |
| "lr_scheduler": "cosine", |
| "weight_decay": 0.01, |
| } |
| ) |
|
|
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
|
| _, _, trainer = train(cfg=cfg, dataset_meta=dataset_meta) |
| check_model_output_exists(temp_dir, cfg) |
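        # as with ADOPT, match on the substring rather than an exact Muon class name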
| assert "Muon" in trainer.optimizer.optimizer.__class__.__name__ |
|
|
| @with_temp_dir |
    def test_fft_schedule_free_adamw(self, temp_dir):
        """Full fine-tune of SmolLM2-135M with the schedule_free_adamw optimizer."""
| cfg = DictDefault( |
| { |
| "base_model": "HuggingFaceTB/SmolLM2-135M", |
| "sequence_len": 1024, |
| "val_set_size": 0.01, |
| "special_tokens": { |
| "pad_token": "<|endoftext|>", |
| }, |
| "datasets": [ |
| { |
| "path": "mhenrichsen/alpaca_2k_test", |
| "type": "alpaca", |
| }, |
| ], |
| "num_epochs": 1, |
| "micro_batch_size": 2, |
| "gradient_accumulation_steps": 2, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "schedule_free_adamw", |
| "lr_scheduler": "constant", |
| "save_safetensors": True, |
| "max_steps": 10, |
| } |
| ) |
|
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
|
| train(cfg=cfg, dataset_meta=dataset_meta) |
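        # unlike the tests above there is no optimizer-class assertion here; this test
        # only verifies that training completes and the safetensors output is written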
| check_model_output_exists(temp_dir, cfg) |