#!/usr/bin/env python3
"""
QWEN2.5-3B MULTI-HEAD BEHAVIORAL TRAINING (CLEAN)
==================================================
Uses EXACT methodology from 07b_qwen3b_repetition_FIXED.py that achieved 73.1x
Author: Logan Napolitano / Proprioception AI
Date: February 2026
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, PeftModel
from datasets import load_dataset
import os
import time
import random
import json
from dataclasses import dataclass, field
from typing import Tuple, List
# Checkpoint to continue from (73.1x repetition)
CHECKPOINT_DIR = "/home/programmer/Desktop/Claude_and_me/results/qwen3b_continued_from_19x/best"
OUTPUT_BASE = "/home/programmer/Desktop/Claude_and_me/results/qwen3b_multihead_clean"
@dataclass
class Config:
model_path: str = "Qwen/Qwen2.5-3B"
probe_layers: List[int] = field(default_factory=lambda: [9, 18, 27])
d_fiber: int = 16
d_control: int = 64
# EXACT same as original 07b
lr_lora: float = 2e-5
lr_predictor: float = 1e-4
batch_size: int = 1
grad_accum: int = 8
max_length: int = 256
weight_decay: float = 0.01
rep_window: int = 32
log_every: int = 100
save_every: int = 5000
eval_every: int = 1000
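# RiskPredictor: a lightweight probe head. Hidden states from each probe layer
# are projected into a small d_fiber space, combined with a learned softmax
# weighting over layers, and passed through a two-layer MLP that emits one
# risk logit per token.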
class RiskPredictor(nn.Module):
def __init__(self, d_model: int, probe_layers: List[int], d_fiber: int = 16, d_control: int = 64):
super().__init__()
self.probe_layers = probe_layers
n_probes = len(probe_layers)
self.fiber_projs = nn.ModuleList([
nn.Linear(d_model, d_fiber, bias=False) for _ in range(n_probes)
])
self.layer_weights = nn.Parameter(torch.ones(n_probes) / n_probes)
self.predictor = nn.Sequential(
nn.Linear(d_fiber, d_control), nn.GELU(),
nn.Linear(d_control, d_control), nn.GELU(),
nn.Linear(d_control, 1)
)
for proj in self.fiber_projs:
nn.init.normal_(proj.weight, std=0.02)
def forward(self, hidden_states: Tuple[torch.Tensor, ...]) -> torch.Tensor:
fibers = []
for i, layer_idx in enumerate(self.probe_layers):
if layer_idx < len(hidden_states):
fiber = self.fiber_projs[i](hidden_states[layer_idx].float())
fibers.append(fiber)
weights = F.softmax(self.layer_weights[:len(fibers)], dim=0)
aggregated = sum(w * f for w, f in zip(weights, fibers))
return self.predictor(aggregated).squeeze(-1)
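# Repetition labels: a token is positive (1.0) if it equals any token that
# appeared within the previous `window` positions. The comparison is vectorized
# over positions; only the offset loop runs in Python.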
def compute_repetition_labels(input_ids: torch.Tensor, window: int = 32) -> torch.Tensor:
B, S = input_ids.shape
labels = torch.zeros(B, S, device=input_ids.device)
for offset in range(1, min(window + 1, S)):
if offset < S:
matches = (input_ids[:, offset:] == input_ids[:, :-offset]).float()
labels[:, offset:] = torch.maximum(labels[:, offset:], matches)
return labels
# ============== BEHAVIORAL LABELING ==============
HEDGE_PATTERNS = [
"I think", "maybe", "perhaps", "possibly", "probably", "might", "could be",
"it seems", "apparently", "generally", "usually", "often", "sometimes",
"in my opinion", "I believe", "I feel", "somewhat", "relatively",
"to some extent", "more or less", "kind of", "sort of", "arguably",
"it appears", "presumably", "supposedly", "allegedly", "reportedly"
]
SYCOPHANCY_PATTERNS = [
"great question", "excellent point", "you're right", "absolutely",
"I agree", "that's correct", "good thinking", "well said", "exactly",
"you're absolutely right", "that's a great", "wonderful", "fantastic",
"brilliant", "perfect", "I couldn't agree more", "you make a great point"
]
VERBOSE_THRESHOLD = 50
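# Hedging labels: each HEDGE_PATTERNS phrase found in the decoded text is mapped
# back to token positions by accumulating per-token string lengths. This
# character-to-token alignment is approximate (decoding and per-token strings can
# disagree around special tokens and whitespace), so it provides soft rather than
# exact supervision for the probe.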
def compute_hedging_labels(input_ids: torch.Tensor, tokenizer) -> torch.Tensor:
B, S = input_ids.shape
labels = torch.zeros(B, S, device=input_ids.device)
for b in range(B):
text = tokenizer.decode(input_ids[b], skip_special_tokens=True).lower()
tokens = tokenizer.convert_ids_to_tokens(input_ids[b])
for pattern in HEDGE_PATTERNS:
start = 0
while True:
idx = text.find(pattern, start)
if idx == -1:
break
char_pos = idx
token_pos = 0
current_char = 0
for t_idx, token in enumerate(tokens):
token_text = tokenizer.convert_tokens_to_string([token])
if current_char + len(token_text) > char_pos:
token_pos = t_idx
break
current_char += len(token_text)
pattern_tokens = len(tokenizer.encode(pattern, add_special_tokens=False))
for t in range(token_pos, min(token_pos + pattern_tokens, S)):
labels[b, t] = 1.0
start = idx + len(pattern)
return labels
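# Sycophancy labels reuse the same phrase-matching and char-to-token alignment
# as the hedging labeler, just with the SYCOPHANCY_PATTERNS list.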
def compute_sycophancy_labels(input_ids: torch.Tensor, tokenizer) -> torch.Tensor:
B, S = input_ids.shape
labels = torch.zeros(B, S, device=input_ids.device)
for b in range(B):
text = tokenizer.decode(input_ids[b], skip_special_tokens=True).lower()
tokens = tokenizer.convert_ids_to_tokens(input_ids[b])
for pattern in SYCOPHANCY_PATTERNS:
start = 0
while True:
idx = text.find(pattern.lower(), start)
if idx == -1:
break
char_pos = idx
token_pos = 0
current_char = 0
for t_idx, token in enumerate(tokens):
token_text = tokenizer.convert_tokens_to_string([token])
if current_char + len(token_text) > char_pos:
token_pos = t_idx
break
current_char += len(token_text)
pattern_tokens = len(tokenizer.encode(pattern, add_special_tokens=False))
for t in range(token_pos, min(token_pos + pattern_tokens, S)):
labels[b, t] = 1.0
start = idx + len(pattern)
return labels
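# Verbosity labels: the first VERBOSE_THRESHOLD tokens are labeled 0; beyond
# that, labels ramp linearly from 0.3 to 1.0 at the end of the sequence, so
# later tokens in long outputs carry higher verbosity risk.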
def compute_verbosity_labels(input_ids: torch.Tensor, tokenizer) -> torch.Tensor:
B, S = input_ids.shape
labels = torch.zeros(B, S, device=input_ids.device)
for b in range(B):
if S > VERBOSE_THRESHOLD:
labels[b, VERBOSE_THRESHOLD:] = torch.linspace(0.3, 1.0, S - VERBOSE_THRESHOLD, device=input_ids.device)
return labels
def get_label_fn(behavior: str, tokenizer):
if behavior == "repetition":
return lambda ids, tok: compute_repetition_labels(ids, 32)
elif behavior == "hedging":
return lambda ids, tok: compute_hedging_labels(ids, tok)
elif behavior == "sycophancy":
return lambda ids, tok: compute_sycophancy_labels(ids, tok)
elif behavior == "verbosity":
return lambda ids, tok: compute_verbosity_labels(ids, tok)
else:
raise ValueError(f"Unknown behavior: {behavior}")
# ============== EVALUATION (EXACT SAME AS 07b) ==============
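# Separation metric: mean sigmoid(risk) over positively-labeled tokens divided
# by the mean over negatively-labeled tokens, measured on freshly sampled
# generations. Higher is better; 1.0x means the probe carries no signal.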
def compute_separation(predictor, model, tokenizer, device, config, label_fn, behavior, n_samples=30):
"""EXACT same eval as 07b - uses do_sample=True, temperature=0.9"""
model.eval()
predictor.eval()
pos_scores, neg_scores = [], []
prompts = [
"The meaning of life according to philosophy is",
"In the year 2050, technology will",
"The history of mathematics begins with",
"Climate change affects the planet by",
"Neural networks learn patterns through",
"The ocean contains many species of",
"Music has evolved significantly since",
"Economic theories suggest that markets",
"The human brain processes information",
"Ancient civilizations developed writing",
]
with torch.no_grad():
for i in range(n_samples):
prompt = prompts[i % len(prompts)]
inp = tokenizer(prompt, return_tensors='pt')
input_ids = inp['input_ids'].to(device)
attn = inp['attention_mask'].to(device)
# EXACT same generation params as 07b
out = model.generate(
input_ids, attention_mask=attn, max_new_tokens=80,
do_sample=True, temperature=0.9, top_p=0.95,
pad_token_id=tokenizer.eos_token_id
)
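            # Re-run the generated sequence through the model with hidden states
            # exposed, then convert the probe's logits to per-token risk in [0, 1].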
outputs = model(out, output_hidden_states=True)
risk = torch.sigmoid(predictor(outputs.hidden_states))[0].cpu().numpy()
if behavior == "repetition":
labels = compute_repetition_labels(out, 32)[0].cpu().numpy()
else:
labels = label_fn(out, tokenizer)[0].cpu().numpy()
for t in range(len(risk)):
(pos_scores if labels[t] > 0.5 else neg_scores).append(float(risk[t]))
if pos_scores and neg_scores:
p_pos = sum(pos_scores) / len(pos_scores)
p_neg = sum(neg_scores) / len(neg_scores)
return p_pos, p_neg, p_pos / max(p_neg, 1e-8), len(pos_scores), len(neg_scores)
return 0, 0, 0, 0, 0
# ============== TRAINING FUNCTION ==============
def train_behavior(model, tokenizer, texts, device, d_model, config, behavior,
max_steps, output_dir, start_predictor=None, start_step=0):
"""Train a single behavioral head using EXACT 07b methodology."""
os.makedirs(output_dir, exist_ok=True)
print(f"\n{'='*70}")
print(f"TRAINING: {behavior.upper()}")
print(f"{'='*70}")
print(f"Steps: {max_steps} (starting from step {start_step})")
print(f"LR: LoRA={config.lr_lora}, Predictor={config.lr_predictor}")
print(f"Output: {output_dir}")
print()
# Initialize or load predictor
if start_predictor is not None:
predictor = start_predictor
print("Continuing from checkpoint...")
else:
predictor = RiskPredictor(d_model, config.probe_layers, config.d_fiber, config.d_control)
predictor = predictor.to(device).float()
print("Fresh predictor initialized")
label_fn = get_label_fn(behavior, tokenizer)
# Setup optimizer - EXACT same as 07b
lora_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.AdamW([
{'params': lora_params, 'lr': config.lr_lora},
{'params': predictor.parameters(), 'lr': config.lr_predictor}
], weight_decay=config.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=max_steps, eta_min=1e-6
)
log = {"behavior": behavior, "start_step": start_step, "steps": [], "separations": []}
model.train()
predictor.train()
step = 0
total_step = start_step
data_idx = 0
acc_loss, acc_lm, acc_risk = 0, 0, 0
best_sep = 0
start_time = time.time()
while step < max_steps:
batch = [texts[(data_idx + i) % len(texts)] for i in range(config.batch_size)]
data_idx += config.batch_size
enc = tokenizer(batch, truncation=True, max_length=config.max_length,
padding='max_length', return_tensors='pt')
input_ids = enc['input_ids'].to(device)
attention_mask = enc['attention_mask'].to(device)
outputs = model(input_ids=input_ids, attention_mask=attention_mask,
labels=input_ids, output_hidden_states=True)
lm_loss = outputs.loss
risk_logits = predictor(outputs.hidden_states)
if behavior == "repetition":
labels = compute_repetition_labels(input_ids, config.rep_window)
else:
labels = label_fn(input_ids, tokenizer)
# Class-weighted loss - EXACT same as 07b
mask = attention_mask.float()
n_pos = (labels * mask).sum().clamp(min=1)
n_neg = ((1 - labels) * mask).sum().clamp(min=1)
pos_weight = (n_neg / n_pos).clamp(max=10.0)
bce = F.binary_cross_entropy_with_logits(
risk_logits, labels,
pos_weight=torch.ones_like(labels) * pos_weight, reduction='none')
risk_loss = (bce * mask).sum() / mask.sum()
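        # Joint objective: the LM loss keeps the LoRA-adapted model a competent
        # language model while the probe trains with class-weighted BCE
        # (positives up-weighted by the neg/pos token ratio, capped at 10x).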
loss = lm_loss + risk_loss
(loss / config.grad_accum).backward()
acc_loss += loss.item()
acc_lm += lm_loss.item()
acc_risk += risk_loss.item()
step += 1
total_step += 1
if step % config.grad_accum == 0:
torch.nn.utils.clip_grad_norm_(list(lora_params) + list(predictor.parameters()), 1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if step % config.log_every == 0:
eta = (max_steps - step) / (step / (time.time() - start_time)) / 60
print(f"[{behavior}] Step {total_step:5d} | Loss: {acc_loss/config.log_every:.3f} | "
f"LM: {acc_lm/config.log_every:.3f} | Risk: {acc_risk/config.log_every:.3f} | "
f"Best: {best_sep:.1f}x | ETA: {eta:.1f}m")
log["steps"].append({"step": total_step, "loss": acc_loss/config.log_every})
acc_loss, acc_lm, acc_risk = 0, 0, 0
if step % config.eval_every == 0:
print(f"\n{'='*50}")
print(f"[{behavior}] SEPARATION EVAL @ Step {total_step}")
print(f"{'='*50}")
p_pos, p_neg, sep, n_p, n_n = compute_separation(
predictor, model, tokenizer, device, config, label_fn, behavior, n_samples=30)
print(f" P(+) = {p_pos:.4f} (n={n_p})")
print(f" P(-) = {p_neg:.4f} (n={n_n})")
print(f" SEPARATION = {sep:.1f}x")
log["separations"].append({"step": total_step, "separation": sep, "p_pos": p_pos, "p_neg": p_neg})
if sep > best_sep:
best_sep = sep
print(f" 🎯 NEW BEST!")
best_dir = os.path.join(output_dir, "best")
os.makedirs(best_dir, exist_ok=True)
model.save_pretrained(best_dir)
torch.save({
'predictor': predictor.state_dict(),
'step': total_step, 'separation': sep, 'p_pos': p_pos, 'p_neg': p_neg
}, os.path.join(best_dir, "predictor.pt"))
print(f"{'='*50}\n")
model.train()
predictor.train()
if step % config.save_every == 0:
ckpt_dir = os.path.join(output_dir, f"ckpt_{total_step}")
os.makedirs(ckpt_dir, exist_ok=True)
model.save_pretrained(ckpt_dir)
torch.save({'predictor': predictor.state_dict(), 'step': total_step, 'separation': best_sep},
os.path.join(ckpt_dir, "predictor.pt"))
print(f">>> Checkpoint: {ckpt_dir}")
# Final eval
print(f"\n{'='*50}")
print(f"[{behavior}] FINAL RESULTS @ Step {total_step}")
print(f"{'='*50}")
p_pos, p_neg, final_sep, _, _ = compute_separation(
predictor, model, tokenizer, device, config, label_fn, behavior, n_samples=50)
print(f" Final separation: {final_sep:.1f}x")
print(f" Best separation: {best_sep:.1f}x")
log["final"] = {"separation": final_sep, "best": best_sep}
with open(os.path.join(output_dir, "log.json"), 'w') as f:
json.dump(log, f, indent=2)
# Save final
final_dir = os.path.join(output_dir, "final")
os.makedirs(final_dir, exist_ok=True)
model.save_pretrained(final_dir)
torch.save({
'predictor': predictor.state_dict(),
'step': total_step, 'separation': final_sep, 'best': best_sep
}, os.path.join(final_dir, "predictor.pt"))
return predictor, best_sep, final_sep
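# Illustrative sketch (not executed here): loading a trained head from the
# "best" checkpoint written above and scoring a sequence. The paths/keys below
# assume the save format used in train_behavior.
#
#   predictor = RiskPredictor(d_model, config.probe_layers, config.d_fiber, config.d_control)
#   ckpt = torch.load(os.path.join(output_dir, "best", "predictor.pt"), map_location=device)
#   predictor.load_state_dict(ckpt['predictor'])
#   predictor.to(device).eval()
#   with torch.no_grad():
#       out = model(input_ids, output_hidden_states=True)
#       token_risk = torch.sigmoid(predictor(out.hidden_states))  # [B, S] per-token risk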
# ============== MAIN ==============
def main():
config = Config()
os.makedirs(OUTPUT_BASE, exist_ok=True)
print("=" * 70)
print("QWEN2.5-3B MULTI-HEAD TRAINING (CLEAN - EXACT 07b METHODOLOGY)")
print("=" * 70)
print(f"LR LoRA: {config.lr_lora} (same as 07b)")
print(f"LR Predictor: {config.lr_predictor} (same as 07b)")
print(f"Eval: do_sample=True, temperature=0.9 (same as 07b)")
print()
tokenizer = AutoTokenizer.from_pretrained(config.model_path)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
print("Loading Qwen2.5-3B...")
bnb = BitsAndBytesConfig(
load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4")
base_model = AutoModelForCausalLM.from_pretrained(
config.model_path, quantization_config=bnb, device_map='auto', torch_dtype=torch.float16)
base_model = prepare_model_for_kbit_training(base_model, use_gradient_checkpointing=True)
print("Loading LoRA weights from 73.1x checkpoint...")
model = PeftModel.from_pretrained(base_model, CHECKPOINT_DIR)
model.train()
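    # PEFT loads adapters with requires_grad=False by default (is_trainable=False),
    # so gradients on the LoRA parameters are re-enabled before continuing training.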
for name, param in model.named_parameters():
if 'lora' in name.lower():
param.requires_grad = True
device = next(model.parameters()).device
d_model = model.config.hidden_size
print("Loading training data...")
ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
texts = [ex['text'] for ex in ds if len(ex['text']) > 50]
random.shuffle(texts)
print(f"Loaded {len(texts)} samples")
results = {}
# ============================================================
# HEAD 1: REPETITION (continue from 73.1x checkpoint @ step 10000)
# ============================================================
print("\n" + "=" * 70)
print("HEAD 1: REPETITION (continuing from checkpoint)")
print("=" * 70)
rep_predictor = RiskPredictor(d_model, config.probe_layers, config.d_fiber, config.d_control)
rep_predictor = rep_predictor.to(device).float()
ckpt = torch.load(os.path.join(CHECKPOINT_DIR, "risk_predictor.pt"), map_location=device)
rep_predictor.load_state_dict(ckpt['risk_predictor'])
start_step = ckpt.get('step', 10000)
start_sep = ckpt.get('separation', 73.1)
print(f"Loaded predictor: step={start_step}, separation={start_sep:.1f}x")
_, rep_best, rep_final = train_behavior(
model, tokenizer, texts, device, d_model, config,
behavior="repetition", max_steps=25000,
output_dir=os.path.join(OUTPUT_BASE, "repetition"),
start_predictor=rep_predictor,
start_step=start_step
)
results["repetition"] = {"best": rep_best, "final": rep_final}
# ============================================================
# HEAD 2: HEDGING (fresh from repetition-trained LoRA)
# ============================================================
_, hedge_best, hedge_final = train_behavior(
model, tokenizer, texts, device, d_model, config,
behavior="hedging", max_steps=25000,
output_dir=os.path.join(OUTPUT_BASE, "hedging"),
start_step=0
)
results["hedging"] = {"best": hedge_best, "final": hedge_final}
# ============================================================
# HEAD 3: VERBOSITY
# ============================================================
_, verb_best, verb_final = train_behavior(
model, tokenizer, texts, device, d_model, config,
behavior="verbosity", max_steps=25000,
output_dir=os.path.join(OUTPUT_BASE, "verbosity"),
start_step=0
)
results["verbosity"] = {"best": verb_best, "final": verb_final}
# ============================================================
# HEAD 4: SYCOPHANCY
# ============================================================
_, syco_best, syco_final = train_behavior(
model, tokenizer, texts, device, d_model, config,
behavior="sycophancy", max_steps=25000,
output_dir=os.path.join(OUTPUT_BASE, "sycophancy"),
start_step=0
)
results["sycophancy"] = {"best": syco_best, "final": syco_final}
# ============================================================
# FINAL SUMMARY
# ============================================================
print("\n" + "=" * 70)
print("FINAL SUMMARY: QWEN2.5-3B MULTI-HEAD RESULTS")
print("=" * 70)
llama_baselines = {
"repetition": 125,
"hedging": 168,
"verbosity": 272,
"sycophancy": 218
}
print(f"""
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
β”‚ QWEN2.5-3B vs LLaMA-3.1-8B COMPARISON β”‚
β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€
β”‚ Behavior β”‚ Qwen-3B (Best) β”‚ LLaMA-8B β”‚ Ratio β”‚
β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€""")
for behavior in ["repetition", "hedging", "verbosity", "sycophancy"]:
qwen = results[behavior]["best"]
llama = llama_baselines[behavior]
ratio = qwen / llama * 100
print(f"β”‚ {behavior:<13} β”‚ {qwen:>6.1f}x β”‚ {llama:>5}x β”‚ {ratio:>5.1f}% β”‚")
print(f"""β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€
β”‚ Methodology: EXACT same as 07b (lr=2e-5/1e-4, do_sample=True) β”‚
β”‚ Architecture: Qwen2 (2048d, 36L) vs LLaMA (4096d, 32L) β”‚
β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
""")
with open(os.path.join(OUTPUT_BASE, "final_results.json"), 'w') as f:
json.dump({
"model": "Qwen2.5-3B",
"results": results,
"llama_baselines": llama_baselines,
"methodology": "exact_07b"
}, f, indent=2)
print(f"Results saved to {OUTPUT_BASE}/final_results.json")
print("\nDONE!")
if __name__ == "__main__":
main()