import os
import random

import numpy as np
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    EarlyStoppingCallback,
)
from trl import SFTConfig, SFTTrainer

BASE_MODEL = os.environ.get("BASE_MODEL", "D:\\aspetos\\DeepSeek-Coder-V2-Lite-Instruct")
OUTPUT_DIR = os.environ.get("OUTPUT_DIR", "outputs\\zenith-lora")
DATA_PATH = os.environ.get("DATA_PATH", "data\\zenith.jsonl")
VAL_PATH = os.environ.get("VAL_PATH")
MAX_STEPS = int(os.environ.get("STEPS", 200))
USE_4BIT = os.environ.get("USE_4BIT", "1") == "1"
SEED = int(os.environ.get("SEED", 42))

os.makedirs(OUTPUT_DIR, exist_ok=True)

# Seed every RNG source so runs are reproducible.
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)

print(f"Loading tokenizer and model from: {BASE_MODEL}")
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Prefer bfloat16 on GPUs with compute capability >= 8.0 (Ampere or newer).
compute_dtype = torch.float16
if torch.cuda.is_available():
    device_cap = torch.cuda.get_device_capability(0)
    if device_cap[0] >= 8:
        print("Using bfloat16 (compute capability >= 8.0)")
        compute_dtype = torch.bfloat16

# Honor the USE_4BIT flag: only build a quantization config when it is enabled.
bnb_config = None
if USE_4BIT:
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=compute_dtype,
        bnb_4bit_use_double_quant=True,
        llm_int8_enable_fp32_cpu_offload=True,
    )

print(f"Loading model{' with 4-bit quantization' if USE_4BIT else ''}...")
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# Disable the KV cache during training; it conflicts with gradient checkpointing.
model.config.use_cache = False

data_files = [DATA_PATH, "data\\training_data_v2.jsonl"]
print(f"Loading datasets: {data_files}")
raw_train = load_dataset("json", data_files=data_files, split="train")

# Use a dedicated validation file if provided; otherwise hold out 5% of the training data.
if VAL_PATH:
    print(f"Loading validation dataset: {VAL_PATH}")
    raw_val = load_dataset("json", data_files=VAL_PATH, split="train")
else:
    split = raw_train.train_test_split(test_size=0.05, seed=SEED)
    raw_train, raw_val = split["train"], split["test"]

MAX_SEQ_LEN = int(os.environ.get("MAX_SEQ_LEN", 2048))

def _valid(example):
    """Keep only records with a non-empty list of {role, content} messages."""
    msgs = example.get("messages")
    if not isinstance(msgs, list) or not msgs:
        return False
    for m in msgs:
        if not isinstance(m, dict) or "role" not in m or "content" not in m:
            return False
    return True


def _to_text(example):
    """Render a messages list into a single training string via the chat template."""
    try:
        text = tokenizer.apply_chat_template(
            example["messages"], tokenize=False, add_generation_prompt=False
        )
        return {"text": text}
    except Exception:
        return {"text": ""}

train_ds = raw_train.filter(_valid)
val_ds = raw_val.filter(_valid)

train_ds = train_ds.map(_to_text, remove_columns=train_ds.column_names)
val_ds = val_ds.map(_to_text, remove_columns=val_ds.column_names)

# Drop any records whose chat-template rendering failed (empty "text").
train_ds = train_ds.filter(lambda x: isinstance(x.get("text"), str) and len(x["text"]) > 0)
val_ds = val_ds.filter(lambda x: isinstance(x.get("text"), str) and len(x["text"]) > 0)
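
# Illustrative shape of one input record (one JSON object per line in the JSONL
# files above). Only the "messages"/"role"/"content" fields are assumed by the
# filters here; the example contents are made up:
# {"messages": [
#     {"role": "user", "content": "Write a function that reverses a string."},
#     {"role": "assistant", "content": "def reverse(s):\n    return s[::-1]"}
# ]}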

peft_config = LoraConfig(
    r=int(os.environ.get("LORA_R", 16)),
    lora_alpha=int(os.environ.get("LORA_ALPHA", 32)),
    lora_dropout=float(os.environ.get("LORA_DROPOUT", 0.05)),
    bias="none",
    task_type="CAUSAL_LM",
    # target_modules is left unset; if peft cannot infer defaults for this
    # architecture, it may need to be specified explicitly.
)

# fp16 and bf16 are mutually exclusive, so pick exactly one based on the GPU.
use_bf16 = torch.cuda.is_available() and torch.cuda.get_device_capability(0)[0] >= 8

training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    max_steps=MAX_STEPS,
    per_device_train_batch_size=int(os.environ.get("BATCH", 2)),
    gradient_accumulation_steps=int(os.environ.get("GRAD_ACC", 2)),
    learning_rate=float(os.environ.get("LR", 1e-4)),
    lr_scheduler_type=os.environ.get("LR_SCHED", "cosine"),
    warmup_ratio=float(os.environ.get("WARMUP_RATIO", 0.05)),
    weight_decay=float(os.environ.get("WEIGHT_DECAY", 0.01)),
    max_grad_norm=float(os.environ.get("MAX_GRAD_NORM", 1.0)),
    logging_steps=int(os.environ.get("LOG_STEPS", 10)),
    save_steps=int(os.environ.get("SAVE_STEPS", 50)),
    save_total_limit=int(os.environ.get("SAVE_LIMIT", 3)),
    evaluation_strategy="steps",
    eval_steps=int(os.environ.get("EVAL_STEPS", 50)),
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    greater_is_better=False,
    bf16=use_bf16,
    fp16=torch.cuda.is_available() and not use_bf16,
    packing=False,
    max_seq_length=MAX_SEQ_LEN,
    dataloader_drop_last=True,
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
    report_to=os.environ.get("REPORT_TO", "none"),
    seed=SEED,
)
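
# With the defaults above, the effective per-device batch size is
# per_device_train_batch_size * gradient_accumulation_steps = 2 * 2 = 4
# sequences per optimizer step.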

print(f"Starting SFT training for {MAX_STEPS} steps...")
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    peft_config=peft_config,
    args=training_args,
    dataset_text_field="text",
    callbacks=[
        EarlyStoppingCallback(
            early_stopping_patience=int(os.environ.get("EARLY_STOP_PATIENCE", 3))
        )
    ],
)

trainer.train()

print("Saving LoRA adapter...")
trainer.model.save_pretrained(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)

print(f"✅ ZENITH LoRA adapter saved to: {OUTPUT_DIR}")
print("🎯 World's most advanced autonomous AI development partner ready!")
print("🚀 Ready for Aspetos platform integration!")
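
# A minimal sketch (not run as part of this script) of how the saved adapter
# could later be loaded for inference, assuming the same peft/transformers
# stack; the variable names below are illustrative only:
#
#   from peft import PeftModel
#   base = AutoModelForCausalLM.from_pretrained(
#       BASE_MODEL, device_map="auto", trust_remote_code=True
#   )
#   inference_model = PeftModel.from_pretrained(base, OUTPUT_DIR)
#   inference_model.eval()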