Upload train_eval_upload_v11.py with huggingface_hub
train_eval_upload_v11.py  ·  ADDED  ·  +127 −0
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.36.0",
#     "accelerate>=0.24.0",
#     "datasets",
#     "torch",
#     "huggingface_hub",
# ]
# ///
import os
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model
from trl import SFTConfig, SFTTrainer
from huggingface_hub import login

BASE_MODEL = "Qwen/Qwen3-0.6B"
REPO_ID = "passagereptile455/qwen3-codeforces-humaneval-v2"
MAX_STEPS = 150
LEARNING_RATE = 5e-6
NUM_TRAIN_EXAMPLES = 500

def authenticate():
    token = os.environ.get("HF_TOKEN")
    if not token:
        raise ValueError("HF_TOKEN not set")
    login(token=token)
    print("Authenticated")

def load_humaneval():
    return list(load_dataset("openai/openai_humaneval", split="test"))

def extract_code(full_text, prompt):
    generated = full_text[len(prompt):] if full_text.startswith(prompt) else full_text
    for stop in ["\n\n\n", "\ndef ", "\nclass ", "\n#", "```", "<|"]:
        if stop in generated:
            generated = generated.split(stop)[0]
    return (prompt + generated).strip()

def test_solution(code, test_code, entry_point):
    try:
        ns = {}
        exec(code, ns)
        if entry_point not in ns:
            return False
        exec(test_code, ns)
        exec(f"check({entry_point})", ns)
        return True
    except:
        return False

def evaluate_model(model, tokenizer, problems, desc):
    correct = 0
    model.eval()
    for i, p in enumerate(problems):
        inputs = tokenizer(p["prompt"], return_tensors="pt").to(model.device)
        with torch.no_grad():
            out = model.generate(**inputs, max_new_tokens=256, temperature=0.1, do_sample=True, pad_token_id=tokenizer.eos_token_id)
        full_text = tokenizer.decode(out[0], skip_special_tokens=True)
        if test_solution(extract_code(full_text, p["prompt"]), p["test"], p["entry_point"]):
            correct += 1
        if (i+1) % 40 == 0:
            print(f"{desc}: {i+1}/{len(problems)}, {correct} correct ({correct/(i+1)*100:.1f}%)")
    score = correct / len(problems) * 100
    print(f"{desc} FINAL: {correct}/{len(problems)} = {score:.2f}%")
    return score

def format_example(ex):
    # FIXED: proper closing tag
    return {"text": "<|im_start|>user\n" + ex['prompt'] + "\n<|im_end|>\n<|im_start|>assistant\n" + ex['generation'] + "<|im_end|>"}

def main():
    print("=" * 60)
    print("Qwen3-0.6B Fine-tuning v11")
    print("=" * 60)

    authenticate()
    problems = load_humaneval()
    print(f"Loaded {len(problems)} problems")

    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token

    print("\n[1/4] BASE eval...")
    model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)
    base_score = evaluate_model(model, tokenizer, problems, "BASE")

    print("\n[2/4] Training...")
    train_ds = load_dataset("open-r1/codeforces-cots", split="train", streaming=True)
    train_examples = [format_example(ex) for i, ex in enumerate(train_ds) if i < NUM_TRAIN_EXAMPLES]
    from datasets import Dataset
    train_dataset = Dataset.from_list(train_examples)
    print(f"Prepared {len(train_dataset)} examples")

    model = get_peft_model(model, LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj","k_proj","v_proj","o_proj"], task_type="CAUSAL_LM"))
    model.print_trainable_parameters()

    training_args = SFTConfig(output_dir="./ft", max_steps=MAX_STEPS, learning_rate=LEARNING_RATE, per_device_train_batch_size=2, gradient_accumulation_steps=4, logging_steps=10, save_steps=9999, bf16=True, optim="adamw_torch", warmup_steps=10, dataset_text_field="text")
    trainer = SFTTrainer(model=model, args=training_args, train_dataset=train_dataset, processing_class=tokenizer)
    trainer.train()
    print("Training done!")

    model = model.merge_and_unload()

    print("\n[3/4] FINE-TUNED eval...")
    ft_score = evaluate_model(model, tokenizer, problems, "FT")

    print("\n[4/4] Results")
    print("=" * 60)
    print(f"BASE: {base_score:.2f}% | FT: {ft_score:.2f}% | CHANGE: {ft_score - base_score:+.2f}%")
    print("=" * 60)

    if ft_score > base_score:
        print("\nWIN! Uploading...")
        model.push_to_hub(REPO_ID)
        tokenizer.push_to_hub(REPO_ID)
        print("Done!")
    else:
        print("\nNo win. Try again.")

if __name__ == "__main__":
    main()
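Because the file starts with an inline `# /// script` metadata block (PEP 723), it can be launched with a metadata-aware runner such as `uv run train_eval_upload_v11.py` after exporting `HF_TOKEN`, without installing the dependencies by hand. If the fine-tuned score beats the base score and the `push_to_hub` calls go through, the merged checkpoint should load like any other Hub model. The snippet below is a minimal sketch of that, assuming the upload to `passagereptile455/qwen3-codeforces-humaneval-v2` succeeded and the repo is readable with your token; the example prompt is illustrative only.

# Minimal sketch: load the pushed checkpoint and complete a HumanEval-style prompt.
# Assumes the upload above succeeded and the repo is accessible to your HF token.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "passagereptile455/qwen3-codeforces-humaneval-v2"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16, device_map="auto")

# Hypothetical prompt in the same prompt-continuation style the eval loop uses.
prompt = 'def add(a: int, b: int) -> int:\n    """Return the sum of a and b."""\n'
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64, do_sample=False, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))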