# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from pathlib import Path
from unittest.mock import patch

from llamafactory.v1.config.arg_parser import get_args


def test_get_args_from_yaml(tmp_path: Path):
    """End-to-end check of ``get_args`` driven by a YAML config file.

    Writes a representative config into ``tmp_path``, fakes ``sys.argv`` so
    the parser discovers it, then verifies that every section (model, data,
    training, sample) round-trips into the expected argument objects.
    """
    yaml_text = """
        ### model
        model: llamafactory/tiny-random-qwen3
        trust_remote_code: true
        model_class: llm
        kernel_config:
            name: auto
            include_kernels: auto # choice: null/true/false/auto/kernel_id1,kernel_id2,kernel_id3, default is null
        peft_config:
            name: lora
            lora_rank: 0.8
        quant_config: null

        ### data
        train_dataset: llamafactory/v1-sft-demo

        ### training
        output_dir: outputs/test_run
        micro_batch_size: 1
        global_batch_size: 1
        cutoff_len: 2048
        learning_rate: 1.0e-4
        bf16: false
        dist_config: null

        ### sample
        sample_backend: hf
        max_new_tokens: 128
    """

    cfg_path = tmp_path / "config.yaml"
    cfg_path.write_text(yaml_text, encoding="utf-8")

    # The parser reads the config path from argv[1], so fake the CLI call.
    fake_argv = ["test_args_parser.py", str(cfg_path)]

    with patch.object(sys, "argv", fake_argv):
        model_args, data_args, training_args, sample_args = get_args()

        # model section
        assert model_args.model == "llamafactory/tiny-random-qwen3"
        assert model_args.kernel_config.name == "auto"
        assert model_args.kernel_config.get("include_kernels") == "auto"
        assert model_args.peft_config.name == "lora"
        assert model_args.peft_config.get("lora_rank") == 0.8

        # data section
        assert data_args.train_dataset == "llamafactory/v1-sft-demo"

        # training section
        assert training_args.output_dir == "outputs/test_run"
        assert training_args.micro_batch_size == 1
        assert training_args.global_batch_size == 1
        assert training_args.learning_rate == 1.0e-4
        assert training_args.bf16 is False
        assert training_args.dist_config is None

        # sample section
        assert sample_args.sample_backend == "hf"


if __name__ == "__main__":
    # Allow running standalone: python -m tests_v1.config.test_args_parser
    import tempfile

    with tempfile.TemporaryDirectory() as scratch_dir:
        test_get_args_from_yaml(tmp_path=Path(scratch_dir))