Rick-AdaptKey committed on
Commit c8e75a1 · verified · 1 Parent(s): f14dcd4

Upload configs/finetune_teleyaml.py with huggingface_hub

Files changed (1)
  1. configs/finetune_teleyaml.py +119 -0
configs/finetune_teleyaml.py ADDED
@@ -0,0 +1,119 @@
+ #!/usr/bin/env python3
+ """
+ Custom fine-tuning script for TeleYAML models.
+ Wraps the standard Nemotron-3-Nano finetune with custom LoRA parameters.
+
+ Usage:
+     torchrun --nproc-per-node=2 /scripts/nemo-configs/finetune_teleyaml.py \
+         --lora-dim 64 --lora-alpha 128 --lora-dropout 0.05 \
+         --config-file /scripts/nemo-configs/teleyaml-v3.yaml
+ """
+
+ import argparse
+ import logging
+ import os
+ import sys
+
+ import torch
+ from omegaconf import OmegaConf
+
+ from megatron.bridge.peft.lora import LoRA
+ from megatron.bridge.recipes.nemotronh.nemotron_3_nano import (
+     nemotron_3_nano_finetune_config as finetune_config,
+ )
+ from megatron.bridge.training.finetune import finetune
+ from megatron.bridge.training.gpt_step import forward_step
+ from megatron.bridge.training.utils.omegaconf_utils import (
+     apply_overrides,
+     create_omegaconf_dict_config,
+     parse_hydra_overrides,
+ )
+
+ # Import custom processor directly
+ from megatron.bridge.data.hf_processors.teleyaml import process_teleyaml_example
+
+ logger = logging.getLogger(__name__)
+
+ # Target modules for Nemotron-3-Nano (Mamba + MLP layers)
+ MAMBA_TARGET_MODULES = [
+     "linear_qkv",
+     "linear_proj",
+     "linear_fc1",
+     "linear_fc2",
+     "in_proj",
+     "out_proj",
+ ]
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description="TeleYAML Fine-tuning with Custom LoRA")
+
+     # LoRA parameters
+     parser.add_argument("--lora-dim", type=int, default=32, help="LoRA rank dimension (default: 32)")
+     parser.add_argument("--lora-alpha", type=int, default=32, help="LoRA alpha scaling (default: 32)")
+     parser.add_argument("--lora-dropout", type=float, default=0.0, help="LoRA dropout rate (default: 0.0)")
+
+     # Standard args from original script
+     parser.add_argument("--config-file", type=str, help="Path to YAML config file")
+     parser.add_argument("--packed-sequence", action="store_true", help="Use sequence packing")
+     parser.add_argument("--seq-length", type=int, default=2048, help="Sequence length")
+
+     args, cli_overrides = parser.parse_known_args()
+     return args, cli_overrides
+
+
+ def main():
+     args, cli_overrides = parse_args()
+
+     # Build custom LoRA config with our parameters
+     lora_config = LoRA(
+         target_modules=MAMBA_TARGET_MODULES,
+         dim=args.lora_dim,
+         alpha=args.lora_alpha,
+         dropout=args.lora_dropout,
+     )
+
+     print(f"LoRA Config: dim={args.lora_dim}, alpha={args.lora_alpha}, dropout={args.lora_dropout}")
+
+     # Get base config, passing our custom LoRA object instead of "lora" string
+     cfg = finetune_config(
+         seq_length=args.seq_length,
+         peft=lora_config,  # Pass the LoRA object, not the "lora" string
+         packed_sequence=args.packed_sequence,
+     )
+     cfg.model.seq_length = args.seq_length
+
+     # Convert to OmegaConf for merging
+     merged_omega_conf, excluded_fields = create_omegaconf_dict_config(cfg)
+
+     # Load YAML config if provided
+     if args.config_file:
+         if not os.path.exists(args.config_file):
+             print(f"ERROR: Config file not found: {args.config_file}")
+             sys.exit(1)
+         yaml_overrides = OmegaConf.load(args.config_file)
+         merged_omega_conf = OmegaConf.merge(merged_omega_conf, yaml_overrides)
+         print(f"Loaded config from: {args.config_file}")
+
+     # Apply CLI overrides
+     if cli_overrides:
+         merged_omega_conf = parse_hydra_overrides(merged_omega_conf, cli_overrides)
+
+     # Apply merged config back to ConfigContainer
+     final_overrides = OmegaConf.to_container(merged_omega_conf, resolve=True)
+     apply_overrides(cfg, final_overrides, excluded_fields)
+
+     # CRITICAL: Set the processor function directly (bypasses Hydra _target_ issue)
+     cfg.dataset.process_example_fn = process_teleyaml_example
+     print(f"Using custom processor: {process_teleyaml_example.__name__}")
+
+     # Start training
+     print("Starting fine-tuning...")
+     finetune(config=cfg, forward_step_func=forward_step)
+
+     if torch.distributed.is_initialized():
+         torch.distributed.destroy_process_group()
+
+
+ if __name__ == "__main__":
+     main()
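
For reference, the config layering that main() performs (recipe defaults, then the YAML file passed via --config-file, then any extra Hydra-style CLI overrides) can be sketched with plain OmegaConf. This is a minimal illustration only: the field names (train.train_iters, optimizer.lr) are hypothetical stand-ins, not confirmed fields of the Megatron Bridge ConfigContainer, and OmegaConf.from_dotlist is a simplified stand-in for parse_hydra_overrides, which accepts richer Hydra override syntax.

from omegaconf import OmegaConf

# Stand-ins for a couple of the fields that finetune_config() would produce
# (hypothetical names, for illustration only).
base = OmegaConf.create({"train": {"train_iters": 1000}, "optimizer": {"lr": 1e-4}})

# Values a file such as teleyaml-v3.yaml might carry (illustrative).
yaml_overrides = OmegaConf.create({"optimizer": {"lr": 5e-5}})

# Dotlist overrides standing in for the unknown CLI args the script forwards.
cli_overrides = OmegaConf.from_dotlist(["train.train_iters=2000"])

# Later sources win: lr comes from the YAML file, train_iters from the CLI.
merged = OmegaConf.merge(base, yaml_overrides, cli_overrides)
print(OmegaConf.to_yaml(merged))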
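The --lora-dim / --lora-alpha pair controls how strongly the adapter update is weighted. Under the usual LoRA convention from the original paper, the low-rank update is scaled by alpha / rank, so the defaults above give a factor of 1.0 and the values in the usage docstring give 2.0. Whether megatron.bridge's LoRA class applies exactly this factor is an assumption not confirmed by this file; the sketch below only illustrates the convention.

def lora_scale(alpha: int, dim: int) -> float:
    # Common LoRA convention: the update B @ A is scaled by alpha / rank.
    return alpha / dim

print(lora_scale(32, 32))   # default --lora-dim/--lora-alpha -> 1.0
print(lora_scale(128, 64))  # values from the usage example   -> 2.0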