# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.7.0", "trackio", "datasets", "transformers", "accelerate", "jinja2"]
# ///
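
# Fine-tunes google/functiongemma-270m-it with LoRA (TRL's SFTTrainer) on the
# epinfomax/vn-function-calling-dataset, logs metrics via trackio, and pushes
# checkpoints to the Hugging Face Hub. The "diagnostic" checks below probe
# which max-length parameter the installed TRL version accepts.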

from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
from transformers import AutoTokenizer
import trl
import transformers
import trackio
import inspect

print("πŸš€ Starting FunctionGemma 270M Fine-tuning (V4 - Diagnostic)")
print(f"πŸ“¦ TRL Version: {trl.__version__}")
print(f"πŸ“¦ Transformers Version: {transformers.__version__}")

model_id = "google/functiongemma-270m-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Load dataset
dataset = load_dataset("epinfomax/vn-function-calling-dataset", split="train")

def format_conversation(example):
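    # Render the conversation (messages plus its tool definitions) into a
    # single prompt string with the model's chat template; SFTTrainer will
    # train on the resulting "text" column.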
    text = tokenizer.apply_chat_template(
        example["messages"],
        tools=example["tools"],
        tokenize=False,
        add_generation_prompt=False
    )
    return {"text": text}

print("πŸ”„ Pre-processing dataset with chat template...")
dataset = dataset.map(format_conversation, remove_columns=dataset.column_names)

# Training configuration
# Try max_seq_length again, but first check whether SFTConfig actually exposes that field
sft_config_args = {
    "dataset_text_field": "text",
    "output_dir": "vn-function-gemma-270m-finetuned",
    "push_to_hub": True,
    "hub_model_id": "epinfomax/vn-function-gemma-270m-finetuned",
    "hub_strategy": "every_save",
    "num_train_epochs": 5,
    "per_device_train_batch_size": 4,
    "gradient_accumulation_steps": 4,
    "learning_rate": 5e-5,
    "logging_steps": 5,
    "save_strategy": "steps",
    "save_steps": 50,
    "report_to": "trackio",
    "project": "vn-function-calling",
    "run_name": "function-gemma-270m-v4-diag"
}

# Check which parameter to use
sft_fields = SFTConfig.__dataclass_fields__
if "max_seq_length" in sft_fields:
    print("βœ… Using max_seq_length in SFTConfig")
    sft_config_args["max_seq_length"] = 1024
elif "max_length" in sft_fields:
    print("βœ… Using max_length in SFTConfig")
    sft_config_args["max_length"] = 1024
else:
    print("⚠️ Neither max_seq_length nor max_length found in SFTConfig fields!")
    print("Fields:", list(sft_fields.keys()))

config = SFTConfig(**sft_config_args)

# Initialize and train
print("🎯 Initializing SFTTrainer...")
trainer_kwargs = {
    "model": model_id,
    "train_dataset": dataset,
    "args": config,
}

# Check SFTTrainer init signature
trainer_params = inspect.signature(SFTTrainer.__init__).parameters
if "max_seq_length" in trainer_params and "max_seq_length" not in sft_config_args:
    print("βœ… Adding max_seq_length to SFTTrainer")
    trainer_kwargs["max_seq_length"] = 1024

# LoRA adapter: low-rank updates (r=16, alpha=32) on the attention projections
# keep the number of trainable parameters small for the 270M model.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
trainer_kwargs["peft_config"] = peft_config

trainer = SFTTrainer(**trainer_kwargs)

trainer.train()
trainer.push_to_hub()
print("βœ… Training complete and pushed to Hub!")