| """ |
| E2E tests for gemma2 |
| """ |
|
| import logging |
| import os |
| from pathlib import Path |
|
| import pytest |
|
| from axolotl.cli.args import TrainerCliArgs |
| from axolotl.common.datasets import load_datasets |
| from axolotl.train import train |
| from axolotl.utils.config import normalize_config, validate_config |
| from axolotl.utils.dict import DictDefault |
|
| LOG = logging.getLogger("axolotl.tests.e2e") |
| os.environ["WANDB_DISABLED"] = "true" |
|
|
| class TestGemma2: |
| """ |
| Test case for Gemma2 models |
| """ |
|
| @pytest.mark.parametrize( |
| "sample_packing", |
| [True, False], |
| ) |
| def test_lora_gemma2(self, temp_dir, sample_packing): |
        # minimal LoRA run against the tiny 33M Gemma2 test model
| cfg = DictDefault( |
| { |
| "base_model": "axolotl-ai-co/gemma-2-33M", |
| "trust_remote_code": True, |
| "sample_packing": sample_packing, |
| "flash_attention": True, |
| "sequence_len": 2048, |
| "adapter": "lora", |
| "lora_r": 8, |
| "lora_alpha": 16, |
| "lora_dropout": 0.05, |
| "lora_target_linear": True, |
| "val_set_size": 0, |
| "datasets": [ |
| { |
| "path": "mlabonne/FineTome-100k", |
| "type": "chat_template", |
| "field_messages": "conversations", |
| "message_property_mappings": { |
| "role": "from", |
| "content": "value", |
| }, |
| "drop_system_message": True, |
| "split": "train[:1%]", |
| }, |
| ], |
| "special_tokens": { |
| "bos_token": "<bos>", |
| "eos_token": "<eos>", |
| }, |
| "chat_template": "gemma", |
| "num_epochs": 1, |
| "micro_batch_size": 1, |
| "gradient_accumulation_steps": 4, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "adamw_bnb_8bit", |
| "lr_scheduler": "cosine", |
| "max_steps": 5, |
| "save_safetensors": True, |
| "bf16": True, |
| } |
| ) |
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
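        # load and tokenize the dataset using default CLI args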
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
        # a LoRA run should write adapter weights, not a full model checkpoint
        train(cfg=cfg, dataset_meta=dataset_meta)
        assert (Path(temp_dir) / "adapter_model.safetensors").exists()
|
| @pytest.mark.parametrize( |
| "sample_packing", |
| [True, False], |
| ) |
| def test_fft_gemma2(self, temp_dir, sample_packing): |
        # full fine-tune (no adapter) with otherwise the same config
| cfg = DictDefault( |
| { |
| "base_model": "axolotl-ai-co/gemma-2-33M", |
| "trust_remote_code": True, |
| "sample_packing": sample_packing, |
| "flash_attention": True, |
| "sequence_len": 2048, |
| "val_set_size": 0, |
| "datasets": [ |
| { |
| "path": "mlabonne/FineTome-100k", |
| "type": "chat_template", |
| "field_messages": "conversations", |
| "message_property_mappings": { |
| "role": "from", |
| "content": "value", |
| }, |
| "split": "train[:1%]", |
| "drop_system_message": True, |
| }, |
| ], |
| "chat_template": "gemma", |
| "special_tokens": { |
| "bos_token": "<bos>", |
| "eos_token": "<eos>", |
| }, |
| "num_epochs": 1, |
| "micro_batch_size": 1, |
| "gradient_accumulation_steps": 4, |
| "output_dir": temp_dir, |
| "learning_rate": 0.00001, |
| "optimizer": "adamw_bnb_8bit", |
| "lr_scheduler": "cosine", |
| "max_steps": 5, |
| "save_safetensors": True, |
| "bf16": True, |
| } |
| ) |
| cfg = validate_config(cfg) |
| normalize_config(cfg) |
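        # dataset preparation mirrors the LoRA test above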
| cli_args = TrainerCliArgs() |
| dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args) |
|
        # a full fine-tune should write the complete model weights
        train(cfg=cfg, dataset_meta=dataset_meta)
        assert (Path(temp_dir) / "model.safetensors").exists()