Verdugie committed
Commit d3bcad0 · verified · 1 Parent(s): ac319ac

Upload train_qwen35_27b.py with huggingface_hub

Files changed (1)
  1. train_qwen35_27b.py +141 -0
train_qwen35_27b.py ADDED
@@ -0,0 +1,141 @@
#!/usr/bin/env python3
"""
OPUS-CANDID V2 — QWEN3.5-27B (DENSE)
Uses FastLanguageModel (dense model, per Unsloth docs).
Same safety fixes as MoE script: num_proc=1, bf16, dropout=0.
Needs ~56GB VRAM — runs on H200 or A100 80GB.
"""
import os, json, torch, random

print("=" * 60)
print("OPUS-CANDID V2 — QWEN3.5-27B (DENSE)")
print("=" * 60)

if torch.cuda.is_available():
    gpu = torch.cuda.get_device_name(0)
    vram = torch.cuda.get_device_properties(0).total_memory / 1024**3
    print(f"GPU: {gpu} | VRAM: {vram:.1f} GB")
    if vram < 55:
        print("WARNING: This model needs ~56GB VRAM. You may OOM.")

# === Config ===
MODEL = "unsloth/Qwen3.5-27B"
MAX_SEQ = 8192
LORA_R = 16
LORA_ALPHA = 16
LR = 1e-5
EPOCHS = 2
BATCH = 1
GRAD_ACCUM = 16
OUTPUT = "/workspace/opus_candid_qwen35_27b_lora"
DATASET = "/workspace/opus_candid_v2_dataset.json"

# === Dataset ===
print(f"\nLoading {DATASET}...")
with open(DATASET) as f:
    data = json.load(f)
random.seed(42)
random.shuffle(data)
split = max(1, int(len(data) * 0.02))
eval_data, train_data = data[:split], data[split:]
print(f"Total: {len(data)} | Train: {len(train_data)} | Eval: {len(eval_data)}")

# === Model — FastLanguageModel for dense ===
print(f"\nLoading {MODEL}...")
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=MODEL,
    max_seq_length=MAX_SEQ,
    load_in_4bit=False,
    load_in_16bit=True,
    full_finetuning=False,
)

# === LoRA ===
model = FastLanguageModel.get_peft_model(
    model,
    r=LORA_R,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=LORA_ALPHA,
    lora_dropout=0,
    bias="none",
    use_gradient_checkpointing="unsloth",
    random_state=3407,
    max_seq_length=MAX_SEQ,
)

# === Format dataset ===
from datasets import Dataset

def fmt(examples):
    texts = []
    for convos in examples["conversations"]:
        msgs = []
        for m in convos:
            role = "user" if m.get("from") == "human" else "assistant"
            content = m.get("value") or m.get("content") or ""
            if content:
                msgs.append({"role": role, "content": content})
        if msgs:
            texts.append(tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=False))
        else:
            texts.append("")
    return {"text": texts}

train_ds = Dataset.from_list(train_data).map(fmt, batched=True, remove_columns=["conversations"])
eval_ds = Dataset.from_list(eval_data).map(fmt, batched=True, remove_columns=["conversations"])
print(f"Formatted: train {len(train_ds)} | eval {len(eval_ds)}")

# === Train ===
from trl import SFTTrainer, SFTConfig

steps = (len(train_ds) * EPOCHS) // (BATCH * GRAD_ACCUM)
warmup = max(1, int(steps * 0.05))

print(f"\n{'='*60}")
print(f"TRAINING: {EPOCHS}ep | bs {BATCH}x{GRAD_ACCUM}={BATCH*GRAD_ACCUM} | lr {LR}")
print(f"Steps: ~{steps} | Warmup: {warmup}")
print(f"{'='*60}")

trainer = SFTTrainer(
    model=model, tokenizer=tokenizer,
    train_dataset=train_ds, eval_dataset=eval_ds,
    args=SFTConfig(
        max_seq_length=MAX_SEQ,
        per_device_train_batch_size=BATCH,
        gradient_accumulation_steps=GRAD_ACCUM,
        warmup_steps=warmup,
        num_train_epochs=EPOCHS,
        learning_rate=LR,
        lr_scheduler_type="cosine",
        logging_steps=5,
        eval_strategy="steps",
        eval_steps=50,
        save_strategy="steps",
        save_steps=50,
        output_dir=OUTPUT,
        optim="adamw_8bit",
        bf16=True,
        seed=3407,
        dataset_num_proc=1,
    ),
)

trainer.train()

# === Save ===
loss = trainer.state.log_history[-1].get("train_loss", "N/A")
print(f"\nDONE — Final loss: {loss}")

model.save_pretrained(OUTPUT)
tokenizer.save_pretrained(OUTPUT)

with open(os.path.join(OUTPUT, "training_stats.json"), "w") as f:
    json.dump({"model": MODEL, "dataset_size": len(train_data), "epochs": EPOCHS,
               "lora_r": LORA_R, "lora_alpha": LORA_ALPHA, "learning_rate": LR,
               "batch_size": BATCH * GRAD_ACCUM, "max_seq_length": MAX_SEQ,
               "final_loss": loss, "log_history": trainer.state.log_history}, f, indent=2)

print(f"Adapters: {OUTPUT}")
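Not part of this commit, but for context: a minimal sketch of loading the saved adapters back with Unsloth for a quick generation check after training finishes. The adapter path mirrors OUTPUT above; the prompt and generation settings are placeholder assumptions.

# Minimal inference sketch (assumes the adapter dir produced by the script above).
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="/workspace/opus_candid_qwen35_27b_lora",  # OUTPUT dir from the training script
    max_seq_length=8192,
    load_in_4bit=False,
)
FastLanguageModel.for_inference(model)  # switch Unsloth to inference mode

# Placeholder prompt; any chat-style message works here.
messages = [{"role": "user", "content": "Hello!"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to("cuda")
outputs = model.generate(input_ids=inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))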