#!/usr/bin/env python3
"""

Codette Shared-Model Batch Adapter Training

--------------------------------------------

Loads the base model ONCE and trains multiple LoRA adapters

sequentially without reloading the 8B model.



Major benefits

--------------

* Eliminates repeated model loads

* Prevents GPU memory fragmentation

* Speeds multi-adapter training

* More stable on 8GB GPUs

"""
import argparse
import json
import logging
import os
import sys
import time
from datetime import datetime
from pathlib import Path

import torch
import yaml

os.environ["TOKENIZERS_PARALLELISM"] = "false"


# ---------------------------------------------------------
# Logging
# ---------------------------------------------------------

def setup_logging():
    log_dir = Path("logs")
    log_dir.mkdir(exist_ok=True)
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_file = log_dir / f"shared_training_{ts}.log"

    logger = logging.getLogger("codette.shared_train")
    logger.setLevel(logging.DEBUG)
    logger.handlers.clear()

    fh = logging.FileHandler(log_file)
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    fmt = logging.Formatter(
        "%(asctime)s | %(levelname)-8s | %(message)s",
        "%H:%M:%S"
    )
    fh.setFormatter(fmt)
    ch.setFormatter(fmt)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger


# ---------------------------------------------------------
# Device Detection
# ---------------------------------------------------------

def detect_device():
    if torch.cuda.is_available():
        return "cuda"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"


# ---------------------------------------------------------
# Config Loaders
# ---------------------------------------------------------

def load_adapter_registry(path):
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    return cfg["adapters"]


def load_training_defaults(path=None):
    if path is None:
        path = Path("configs/default_training.yaml")
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
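# Illustrative default_training.yaml, listing only the keys this script reads;
# the values shown are placeholders, not tuned recommendations:
#
#   model:
#     name: <hf-model-id-of-the-8B-base>
#   lora:
#     rank: 16
#     alpha: 32
#     dropout: 0.05
#     target_modules: [q_proj, k_proj, v_proj, o_proj]
#   training:
#     epochs: 3
#     batch_size: 1
#     gradient_accumulation_steps: 8
#     learning_rate: 2.0e-4
#     warmup_ratio: 0.03
#     logging_steps: 10
#     save_steps: 200
#     max_seq_length: 2048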


# ---------------------------------------------------------
# Dataset Loader
# ---------------------------------------------------------

def load_jsonl_dataset(path):
    from datasets import Dataset
    rows = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines / trailing newline
            obj = json.loads(line)
            if "messages" in obj:
                rows.append(obj)
    return Dataset.from_list(rows)
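# Each JSONL line is expected to hold a chat transcript under a "messages" key
# compatible with tokenizer.apply_chat_template; an illustrative line:
#
#   {"messages": [{"role": "user", "content": "Hi"},
#                 {"role": "assistant", "content": "Hello!"}]}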


def format_chat_messages(example, tokenizer):
    text = tokenizer.apply_chat_template(
        example["messages"],
        tokenize=False,
        add_generation_prompt=False,
    )
    return {"text": text}


# ---------------------------------------------------------
# Base Model Loader
# ---------------------------------------------------------

def load_base_model(model_name, device, logger):
    from transformers import AutoModelForCausalLM, AutoTokenizer

    logger.info("Loading tokenizer")
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True
    )
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # --- XPU: streaming file I/O loading (no mmap, avoids OOM) ---
    if device == "xpu":
        logger.info("Intel Arc — streaming CPU load (no mmap, minimal peak memory)")

        import ctypes
        import gc
        import struct as _struct
        from accelerate import init_empty_weights
        from accelerate.utils import set_module_tensor_to_device
        from huggingface_hub import snapshot_download
        from transformers import AutoConfig

        checkpoint_dir = snapshot_download(model_name)
        logger.info(f"Checkpoint: {checkpoint_dir}")
        gc.collect()

        model_config = AutoConfig.from_pretrained(
            model_name, trust_remote_code=True
        )
        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(
                model_config, trust_remote_code=True
            )

        _dt = {
            "BF16": torch.bfloat16, "F16": torch.float16,
            "F32": torch.float32, "F64": torch.float64,
            "I64": torch.int64, "I32": torch.int32,
            "I16": torch.int16, "I8": torch.int8,
            "U8": torch.uint8, "BOOL": torch.bool,
        }

        shard_files = sorted(Path(checkpoint_dir).glob("*.safetensors"))
        logger.info(f"Loading {len(shard_files)} shards via streaming I/O")

        for i, shard_file in enumerate(shard_files):
            logger.info(f"  Shard {i+1}/{len(shard_files)}: {shard_file.name}")
            with open(shard_file, "rb") as fp:
                header_size = _struct.unpack("<Q", fp.read(8))[0]
                header = json.loads(fp.read(header_size))
                data_start = 8 + header_size
                for name, meta in header.items():
                    if name == "__metadata__":
                        continue
                    start, end = meta["data_offsets"]
                    nbytes = end - start
                    buf = bytearray(nbytes)
                    fp.seek(data_start + start)
                    fp.readinto(buf)
                    tensor = torch.frombuffer(
                        buf, dtype=_dt[meta["dtype"]]
                    ).reshape(meta["shape"])
                    set_module_tensor_to_device(
                        model, name, "cpu",
                        value=tensor, dtype=torch.bfloat16,
                    )
                    del buf, tensor
            gc.collect()
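            # On Windows, ask the OS to trim this process's working set so the
            # freed shard buffers are released promptly; harmless no-op on
            # other platforms (ctypes.windll does not exist there).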
            try:
                k32 = ctypes.windll.kernel32
                k32.SetProcessWorkingSetSize(k32.GetCurrentProcess(), -1, -1)
            except Exception:
                pass
            logger.info(f"  Shard {i+1}/{len(shard_files)}: done")

        model.tie_weights()
        model.gradient_checkpointing_enable()
        return model, tokenizer

    # --- All other devices ---
    logger.info("Loading base model (once)")
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
    )

    model.gradient_checkpointing_enable()
    return model, tokenizer


# ---------------------------------------------------------
# Apply LoRA
# ---------------------------------------------------------

def attach_lora(model, lora_cfg, logger):
    from peft import LoraConfig, get_peft_model, TaskType

    config = LoraConfig(
        r=lora_cfg["rank"],
        lora_alpha=lora_cfg["alpha"],
        lora_dropout=lora_cfg["dropout"],
        target_modules=lora_cfg["target_modules"],
        task_type=TaskType.CAUSAL_LM,
        bias="none"
    )
    logger.info("Attaching LoRA adapter")
    model = get_peft_model(model, config)
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    logger.info(
        f"Trainable params: {trainable:,}/{total:,}"
    )
    return model


def detach_lora(model):
    # Remove the injected LoRA modules (without merging) and return the clean
    # base model so the next adapter starts from the original weights.
    return model.unload()


# ---------------------------------------------------------
# Training
# ---------------------------------------------------------

def train_adapter(model, tokenizer, dataset, train_cfg, output_dir):
    from transformers import TrainingArguments
    from trl import SFTTrainer

    device = next(model.parameters()).device
    use_bf16 = device.type in ("cuda", "xpu")
    use_fp16 = device.type == "mps"

    args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=train_cfg["epochs"],
        per_device_train_batch_size=train_cfg["batch_size"],
        gradient_accumulation_steps=train_cfg["gradient_accumulation_steps"],
        learning_rate=train_cfg["learning_rate"],
        warmup_ratio=train_cfg.get("warmup_ratio", 0.03),
        logging_steps=train_cfg["logging_steps"],
        save_steps=train_cfg["save_steps"],
        fp16=use_fp16,
        bf16=use_bf16,
        report_to="none",
    )
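    # NOTE: passing tokenizer, dataset_text_field and max_seq_length straight
    # to SFTTrainer matches older trl releases; newer trl moves these settings
    # into SFTConfig. This code assumes the older API.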

    trainer = SFTTrainer(
        model=model,
        args=args,
        train_dataset=dataset,
        tokenizer=tokenizer,
        dataset_text_field="text",
        max_seq_length=train_cfg["max_seq_length"],
    )

    result = trainer.train()
    trainer.save_model(output_dir)
    return result


# ---------------------------------------------------------
# Main Training Loop
# ---------------------------------------------------------

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--registry",
        default="configs/adapter_registry.yaml"
    )
    args = parser.parse_args()

    logger = setup_logging()
    registry = load_adapter_registry(args.registry)
    defaults = load_training_defaults()
    device = detect_device()
    logger.info(f"Device detected: {device}")

    model_cfg = defaults["model"]
    lora_cfg = defaults["lora"]
    train_cfg = defaults["training"]

    # -----------------------------------------------------
    # Load model ONCE
    # -----------------------------------------------------
    model, tokenizer = load_base_model(
        model_cfg["name"],
        device,
        logger
    )

    # -----------------------------------------------------
    # Train adapters sequentially
    # -----------------------------------------------------
    for name, cfg in registry.items():
        logger.info("")
        logger.info(f"===== TRAINING ADAPTER: {name} =====")

        dataset_path = cfg["dataset"]
        raw_dataset = load_jsonl_dataset(dataset_path)

        cpu_workers = max(1, (os.cpu_count() or 2) - 1)
        logger.info(f"Formatting chat templates with {cpu_workers} workers")
        dataset = raw_dataset.map(
            lambda ex: format_chat_messages(ex, tokenizer),
            remove_columns=raw_dataset.column_names,
            num_proc=cpu_workers,
            desc=f"Formatting {name}",
        )

        model = attach_lora(model, lora_cfg, logger)

        out_dir = Path("adapters") / name
        out_dir.mkdir(parents=True, exist_ok=True)

        start = time.time()
        result = train_adapter(
            model,
            tokenizer,
            dataset,
            train_cfg,
            str(out_dir)
        )
        elapsed = time.time() - start

        logger.info(
            f"{name} complete — loss {result.training_loss:.4f} "
            f"in {elapsed:.1f}s"
        )

        model = detach_lora(model)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            torch.xpu.empty_cache()


if __name__ == "__main__":
    main()