"""
Step 2: Sample N=16 solutions per problem and score with Skywork PRM.
This script:
1. Loads the filtered problems from Step 1
2. Generates N=16 solutions per problem using temperature sampling
3. Loads the Skywork-o1-Open-PRM and scores each solution (last step prediction)
4. Saves all solutions + scores for the Best-of-N computation
The Skywork PRM is loaded using its custom PRM_MODEL class, which wraps
AutoModelForCausalLM with a ValueHead (linear projection to scalar).
The model outputs a sigmoid-normalized score in [0,1] at each step boundary.
Co-authored with Claude (Anthropic). I can explain all code logic.
"""
import json
import os
import sys
import torch
import subprocess
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import Optional
# ──────────────────────────────────────────────────────────────────────────────
# Helper functions
# ──────────────────────────────────────────────────────────────────────────────
def extract_boxed_solution(text: str) -> Optional[str]:
    """Extract the content of the last \\boxed{} in text."""
    try:
        start_index = text.rindex("\\boxed{")
        content_start = start_index + 7
        bracket_count = 1
        current_pos = content_start
        while bracket_count > 0 and current_pos < len(text):
            if text[current_pos] == "{":
                bracket_count += 1
            elif text[current_pos] == "}":
                bracket_count -= 1
            current_pos += 1
        if bracket_count == 0:
            return text[content_start : current_pos - 1].strip()
        return None
    except ValueError:
        # rindex raises ValueError when no \boxed{} is present
        return None
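# Example (hypothetical inputs, for illustration only):
#   extract_boxed_solution("So the result is \\boxed{\\frac{1}{2}}.") -> "\\frac{1}{2}"
#   extract_boxed_solution("No boxed answer here.") -> None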
# ──────────────────────────────────────────────────────────────────────────────
# Load filtered problems
# ──────────────────────────────────────────────────────────────────────────────
print("=" * 70)
print("STEP 2a: Loading problems and generating N=16 solutions per problem")
print("=" * 70)
with open("/Users/cmpatino/Projects/ml-intern/exercise/outputs/filtered_problems.json") as f:
problems_data = json.load(f)
print(f"Loaded {len(problems_data)} problems")
# ──────────────────────────────────────────────────────────────────────────────
# Generate N=16 solutions per problem with temperature sampling
# ──────────────────────────────────────────────────────────────────────────────
MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
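# bfloat16 halves weight memory relative to fp32; device_map="auto" lets
# accelerate decide placement (GPU if available, otherwise CPU).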
SYSTEM_PROMPT = (
    "You are a helpful math assistant. Solve the problem step by step, "
    "showing your reasoning clearly. Put your final answer inside "
    "\\boxed{answer} at the end of your solution."
)
N = 16 # Number of solutions per problem
TEMPERATURE = 0.7 # Sampling temperature — balances diversity vs quality
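# Total generations for this run: len(problems_data) * N, each up to 2048 new
# tokens, so sampling is expected to dominate the script's runtime.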
all_results = []
for i, p in enumerate(problems_data):
    print(f"\n Problem {i+1}/{len(problems_data)}: {p['unique_id']} (Level {p['level']})")
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": p["problem"]},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
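    # 'prompt' is now a single chat-formatted string (Qwen2.5 uses a ChatML-style
    # layout of system/user turns); add_generation_prompt=True appends the open
    # assistant turn. The exact token layout depends on the model's chat template.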
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    solutions = []
    for j in range(N):
        with torch.no_grad():
            output = model.generate(
                **inputs,
                max_new_tokens=2048,
                do_sample=True,
                temperature=TEMPERATURE,
                top_p=0.95,
            )
        generated = output[0][inputs["input_ids"].shape[1]:]
        solution_text = tokenizer.decode(generated, skip_special_tokens=True)
        solutions.append(solution_text)
        if (j + 1) % 4 == 0:
            print(f" Generated {j+1}/{N} solutions")
    result = {**p, "sampled_solutions": solutions}
    all_results.append(result)
# Save solutions before scoring (in case PRM loading takes time or fails)
with open("/Users/cmpatino/Projects/ml-intern/exercise/outputs/sampled_solutions.json", "w") as f:
json.dump(all_results, f, indent=2)
print(f"\nSaved {N} solutions per problem to outputs/sampled_solutions.json")
# Free LLM memory before loading PRM
del model
if torch.cuda.is_available():
    torch.cuda.empty_cache()
print("Freed LLM memory.")
# ──────────────────────────────────────────────────────────────────────────────
# Score solutions with Skywork PRM
# ──────────────────────────────────────────────────────────────────────────────
print("\n" + "=" * 70)
print("STEP 2b: Scoring solutions with Skywork-o1-Open-PRM")
print("=" * 70)
# Clone the Skywork PRM inference repo for the custom model class
PRM_REPO_PATH = "/Users/cmpatino/Projects/ml-intern/exercise/skywork-o1-prm-inference"
if not os.path.exists(PRM_REPO_PATH):
    print("Cloning Skywork PRM inference repo...")
    subprocess.run(
        ["git", "clone", "https://github.com/SkyworkAI/skywork-o1-prm-inference.git", PRM_REPO_PATH],
        check=True,
    )
sys.path.insert(0, PRM_REPO_PATH)
from model_utils.prm_model import PRM_MODEL
from model_utils.io_utils import prepare_input, prepare_batch_input_for_model, derive_step_rewards
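# As used below: prepare_input tokenizes problem + solution and flags the token
# positions that end each "\n"-delimited step; prepare_batch_input_for_model pads
# a list of such samples into batch tensors; derive_step_rewards reads the model's
# rewards out at the flagged positions. (Summarized from how they are called here,
# not from the repo's documentation.)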
PRM_MODEL_ID = "Skywork/Skywork-o1-Open-PRM-Qwen-2.5-1.5B"
prm_tokenizer = AutoTokenizer.from_pretrained(PRM_MODEL_ID, trust_remote_code=True)
prm_model = PRM_MODEL.from_pretrained(PRM_MODEL_ID, device_map="auto").eval()
print("PRM model loaded successfully.")
def score_solution(problem: str, solution: str) -> list[float]:
    """
    Score a single solution using the PRM.
    Returns a list of per-step scores (sigmoid-normalized, [0,1]).
    The last element is the 'last step prediction', our final reward.

    The PRM splits the solution by newlines ("\\n") and assigns a score
    at the end of each step. These scores represent the model's estimate
    of correctness probability at each reasoning step.
    """
    input_ids, steps, reward_flags = prepare_input(problem, solution, prm_tokenizer, step_token="\n")
    # Prepare batch of size 1
    input_ids_t, attention_mask_t, reward_flags_t = prepare_batch_input_for_model(
        [input_ids], [reward_flags], prm_tokenizer.pad_token_id
    )
    # Move to model device
    device = next(prm_model.parameters()).device
    input_ids_t = input_ids_t.to(device)
    attention_mask_t = attention_mask_t.to(device)
    reward_flags_t = reward_flags_t.to(device)
    with torch.no_grad():
        # return_probs=True applies sigmoid internally
        _, _, rewards = prm_model(
            input_ids=input_ids_t,
            attention_mask=attention_mask_t,
            return_probs=True,
        )
    step_rewards = derive_step_rewards(rewards, reward_flags_t)
    return step_rewards[0]  # Return the single sample's step scores
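# Example shape of the output (hypothetical numbers): a 3-step solution
# "step one\nstep two\nfinal answer" might yield [0.74, 0.81, 0.62], and 0.62
# would then be taken as the solution-level reward below.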
# Score all solutions
print("\nScoring all solutions...")
for i, result in enumerate(all_results):
    print(f"\n Scoring problem {i+1}/{len(all_results)}: {result['unique_id']}")
    scores = []
    extracted_answers = []
    for j, solution in enumerate(result["sampled_solutions"]):
        # Get PRM score
        step_scores = score_solution(result["problem"], solution)
        # Use last step prediction as the final reward (per DeepMind Appendix E)
        final_score = step_scores[-1] if step_scores else 0.0
        scores.append(final_score)
        # Extract the final answer from \boxed{}
        answer = extract_boxed_solution(solution)
        extracted_answers.append(answer)
        if (j + 1) % 4 == 0:
            print(f" Scored {j+1}/{N} solutions (last score: {final_score:.4f})")
    result["prm_scores"] = scores
    result["extracted_answers"] = extracted_answers
# Save scored results
with open("/Users/cmpatino/Projects/ml-intern/exercise/outputs/scored_results.json", "w") as f:
json.dump(all_results, f, indent=2)
print("\nSaved scored results to outputs/scored_results.json")
print("Ready for Step 3 (Best-of-N computation).")