"""
OPUS-CANDID V2 - QWEN3.5-35B-A3B (MoE)
Lessons applied:
- FastModel (NOT FastLanguageModel) for MoE per Unsloth docs
- dataset_num_proc=1 (multiprocessing deadlocks on the pod)
- bf16 LoRA only (QLoRA breaks MoE with BitsAndBytes)
- lora_dropout=0 (incompatible with ParamWrapper)
- use_gradient_checkpointing="unsloth" for VRAM savings
Needs ~74GB VRAM; the H200 (141GB) has plenty of headroom.
"""
import os, json, torch, random
|
|
print("=" * 60)
print("OPUS-CANDID V2 - QWEN3.5-35B-A3B (MoE)")
print("=" * 60)
|
|
if torch.cuda.is_available():
    gpu = torch.cuda.get_device_name(0)
    vram = torch.cuda.get_device_properties(0).total_memory / 1024**3
    print(f"GPU: {gpu} | VRAM: {vram:.1f} GB")
    if vram < 70:
        print("WARNING: This model needs ~74GB VRAM. You may OOM.")
|
|
MODEL = "unsloth/Qwen3.5-35B-A3B"
MAX_SEQ = 8192
LORA_R = 16
LORA_ALPHA = 16
LR = 1e-5
EPOCHS = 2
BATCH = 1
GRAD_ACCUM = 16
OUTPUT = "/workspace/opus_candid_qwen35_moe_lora"
DATASET = "/workspace/opus_candid_v2_dataset.json"
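# Effective batch size is BATCH * GRAD_ACCUM = 16 sequences per optimizer step; the 1e-5
# learning rate is on the conservative end for an r=16 LoRA run.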
|
|
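# Load the dataset JSON, shuffle it deterministically, and hold out ~2% (at least one
# example) as the eval split.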
print(f"\nLoading {DATASET}...")
with open(DATASET) as f:
    data = json.load(f)
random.seed(42)
random.shuffle(data)
split = max(1, int(len(data) * 0.02))
eval_data, train_data = data[:split], data[split:]
print(f"Total: {len(data)} | Train: {len(train_data)} | Eval: {len(eval_data)}")
|
|
print(f"\nLoading {MODEL}...")
from unsloth import FastModel
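# Load the MoE base in bf16 (roughly 2 bytes/param, so on the order of 70 GB of weights
# before activations); 4-bit loading stays off because QLoRA via BitsAndBytes breaks this
# MoE, per the notes in the module docstring.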
|
|
model, tokenizer = FastModel.from_pretrained(
    model_name=MODEL,
    max_seq_length=MAX_SEQ,
    load_in_4bit=False,
    load_in_16bit=True,
    full_finetuning=False,
)
|
|
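# Attach LoRA adapters to the attention and MLP projection layers. Dropout must stay at 0
# on this model (ParamWrapper limitation, see docstring).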
model = FastModel.get_peft_model(
    model,
    r=LORA_R,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=LORA_ALPHA,
    lora_dropout=0,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    max_seq_length=MAX_SEQ,
)
|
|
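# The records are assumed to be ShareGPT-style, e.g.
#   {"conversations": [{"from": "human", "value": "..."}, {"from": "gpt", "value": "..."}]}
# fmt() below renders each conversation through the tokenizer's chat template into a single
# "text" field. Note that any role other than "human" (including "system") is mapped to
# "assistant".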
from datasets import Dataset
|
|
def fmt(examples):
    texts = []
    for convos in examples["conversations"]:
        msgs = []
        for m in convos:
            role = "user" if m.get("from") == "human" else "assistant"
            content = m.get("value") or m.get("content") or ""
            if content:
                msgs.append({"role": role, "content": content})
        if msgs:
            texts.append(tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=False))
        else:
            texts.append("")
    return {"text": texts}
|
|
train_ds = Dataset.from_list(train_data).map(fmt, batched=True, remove_columns=["conversations"])
eval_ds = Dataset.from_list(eval_data).map(fmt, batched=True, remove_columns=["conversations"])
print(f"Formatted: train {len(train_ds)} | eval {len(eval_ds)}")
|
|
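# Training: estimate total optimizer steps as examples * epochs / effective batch size,
# and warm up over 5% of them.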
from trl import SFTTrainer, SFTConfig
|
|
steps = (len(train_ds) * EPOCHS) // (BATCH * GRAD_ACCUM)
warmup = max(1, int(steps * 0.05))
|
|
print(f"\n{'='*60}")
print(f"TRAINING: {EPOCHS}ep | bs {BATCH}x{GRAD_ACCUM}={BATCH*GRAD_ACCUM} | lr {LR}")
print(f"Steps: ~{steps} | Warmup: {warmup}")
print(f"{'='*60}")
|
|
trainer = SFTTrainer(
    model=model, tokenizer=tokenizer,
    train_dataset=train_ds, eval_dataset=eval_ds,
    args=SFTConfig(
        max_seq_length=MAX_SEQ,
        per_device_train_batch_size=BATCH,
        gradient_accumulation_steps=GRAD_ACCUM,
        warmup_steps=warmup,
        num_train_epochs=EPOCHS,
        learning_rate=LR,
        lr_scheduler_type="cosine",
        logging_steps=5,
        eval_strategy="steps",
        eval_steps=50,
        save_strategy="steps",
        save_steps=50,
        output_dir=OUTPUT,
        optim="adamw_8bit",
        bf16=True,
        seed=3407,
        dataset_num_proc=1,
    ),
)
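# Eval loss and checkpoints are both written every 50 optimizer steps, so an interrupted
# run should be resumable with trainer.train(resume_from_checkpoint=True).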
|
|
trainer.train()
|
|
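# Persist the LoRA adapters and tokenizer; save_pretrained on a PEFT model writes only the
# adapter weights, not the full base model. The stats JSON keeps the config and log history
# for later comparison between runs.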
loss = trainer.state.log_history[-1].get("train_loss", "N/A")
print(f"\nDONE - Final loss: {loss}")
|
|
model.save_pretrained(OUTPUT)
tokenizer.save_pretrained(OUTPUT)
|
|
with open(os.path.join(OUTPUT, "training_stats.json"), "w") as f:
    json.dump({"model": MODEL, "dataset_size": len(train_data), "epochs": EPOCHS,
               "lora_r": LORA_R, "lora_alpha": LORA_ALPHA, "learning_rate": LR,
               "batch_size": BATCH * GRAD_ACCUM, "max_seq_length": MAX_SEQ,
               "final_loss": loss, "log_history": trainer.state.log_history}, f, indent=2)
|
|
print(f"Adapters: {OUTPUT}")
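
# Rough sketch of how the adapters could be loaded back for a smoke test in a separate
# process (plain PEFT rather than Unsloth; paths and dtype assumed to match the run above):
#
#   import torch
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from peft import PeftModel
#
#   base = AutoModelForCausalLM.from_pretrained(
#       "unsloth/Qwen3.5-35B-A3B", torch_dtype=torch.bfloat16, device_map="auto")
#   tok = AutoTokenizer.from_pretrained("/workspace/opus_candid_qwen35_moe_lora")
#   model = PeftModel.from_pretrained(base, "/workspace/opus_candid_qwen35_moe_lora")
#   prompt = tok.apply_chat_template(
#       [{"role": "user", "content": "Hello"}], tokenize=False, add_generation_prompt=True)
#   out = model.generate(**tok(prompt, return_tensors="pt").to(model.device), max_new_tokens=64)
#   print(tok.decode(out[0], skip_special_tokens=True))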
|
|