amanwithaplan committed
Commit 8a498fc · verified · 1 Parent(s): d97df5d

Upload train_reranker.py with huggingface_hub

Files changed (1)
  1. train_reranker.py +360 -0
train_reranker.py ADDED
@@ -0,0 +1,360 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "sentence-transformers[train]>=4.0",
#     "datasets",
#     "torch>=2.4",
#     "transformers>=4.48",
#     "trackio",
#     "scipy",
#     "numpy",
# ]
# ///
"""
Soft-Label Cross-Encoder Reranker Training

Trains a reranker using continuous relevance scores (soft labels).
Dataset format: {"query": "...", "text": "...", "score": 0.0-1.0}
"""
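# Illustrative record in the format described above (made-up values, not drawn from
# the actual dataset):
#   {"query": "how do I reset my password?",
#    "text": "Go to Settings > Account > Reset password and follow the emailed link.",
#    "score": 0.9}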

import logging
import os
import math
from collections import defaultdict
import trackio
import numpy as np
from datasets import load_dataset
from sentence_transformers.cross_encoder import (
    CrossEncoder,
    CrossEncoderTrainer,
    CrossEncoderTrainingArguments,
)
from sentence_transformers.cross_encoder.evaluation import CrossEncoderNanoBEIREvaluator
from scipy.stats import spearmanr
from transformers import TrainerCallback, EarlyStoppingCallback

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configuration
DATASET_NAME = os.environ.get("DATASET_NAME", "amanwithaplan/arcade-reranker-data")
HUB_MODEL_ID = os.environ.get("HUB_MODEL_ID", "idqo/arcade-reranker")
BASE_MODEL = os.environ.get("BASE_MODEL", "Alibaba-NLP/gte-reranker-modernbert-base")
NUM_EPOCHS = int(os.environ.get("NUM_EPOCHS", "3"))
BATCH_SIZE = int(os.environ.get("BATCH_SIZE", "16"))
LEARNING_RATE = float(os.environ.get("LEARNING_RATE", "1e-5"))
MAX_SEQ_LENGTH = int(os.environ.get("MAX_SEQ_LENGTH", "1024"))
RUN_NAME = os.environ.get("RUN_NAME", "reranker-1024-v1")
SPACE_ID = os.environ.get("TRACKIO_SPACE_ID", "amanwithaplan/trackio")

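# All of the settings above are read from environment variables, e.g. (a sketch,
# assuming `uv` is used to run this PEP 723 inline-metadata script):
#   BATCH_SIZE=32 LEARNING_RATE=2e-5 RUN_NAME=reranker-1024-v2 uv run train_reranker.py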

def dcg_at_k(relevances, k):
    """Compute DCG@k."""
    relevances = np.array(relevances)[:k]
    if len(relevances) == 0:
        return 0.0
    # DCG = sum of rel_i / log2(i+2) for i in 0..k-1
    discounts = np.log2(np.arange(len(relevances)) + 2)
    return np.sum(relevances / discounts)


def ndcg_at_k(predicted_order, true_relevances, k):
    """
    Compute NDCG@k.

    predicted_order: indices of docs sorted by model score (descending)
    true_relevances: ground truth relevance scores for each doc
    """
    # Get relevances in predicted order
    predicted_relevances = [true_relevances[i] for i in predicted_order]

    # Ideal order: sort by true relevance descending
    ideal_relevances = sorted(true_relevances, reverse=True)

    dcg = dcg_at_k(predicted_relevances, k)
    idcg = dcg_at_k(ideal_relevances, k)

    if idcg == 0:
        return 0.0
    return dcg / idcg
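# Worked example (illustrative numbers): with true_relevances = [0.2, 1.0, 0.5] and a
# model that ranks the docs as predicted_order = [1, 0, 2]:
#   DCG@3  = 1.0/log2(2) + 0.2/log2(3) + 0.5/log2(4) ≈ 1.376
#   IDCG@3 = 1.0/log2(2) + 0.5/log2(3) + 0.2/log2(4) ≈ 1.415
#   NDCG@3 ≈ 1.376 / 1.415 ≈ 0.97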


def mrr(predicted_order, true_relevances, threshold=0.5):
    """
    Compute the reciprocal rank for a single query; the mean over queries
    (MRR, Mean Reciprocal Rank) is taken by the caller.

    Returns 1/rank of the first relevant doc (relevance > threshold), or 0.0 if none.
    """
    for rank, idx in enumerate(predicted_order, start=1):
        if true_relevances[idx] > threshold:
            return 1.0 / rank
    return 0.0
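# Worked example (illustrative numbers): true_relevances = [0.9, 0.1, 0.3] ranked as
# predicted_order = [2, 0, 1] -> the first doc above the 0.5 threshold appears at
# rank 2, so the reciprocal rank is 1/2 = 0.5.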


def evaluate_ranking(model, eval_dataset):
    """
    Proper ranking evaluation: group by query, compute NDCG and MRR.

    This measures what we actually care about:
    "Given a query with multiple docs, does the model rank them correctly?"
    """
    # Group samples by query
    query_groups = defaultdict(list)
    for item in eval_dataset:
        query_groups[item["sentence1"]].append({
            "text": item["sentence2"],
            "label": item["label"]
        })

    # Filter to queries with multiple docs (need at least 2 to rank)
    query_groups = {q: docs for q, docs in query_groups.items() if len(docs) >= 2}

    if not query_groups:
        return {"ndcg@3": 0.0, "ndcg@5": 0.0, "mrr": 0.0, "rank_corr": 0.0, "n_queries": 0}

    ndcg_3_scores = []
    ndcg_5_scores = []
    mrr_scores = []
    rank_correlations = []

    for query, docs in query_groups.items():
        # Get model predictions for this query's docs
        pairs = [(query, d["text"]) for d in docs]
        predictions = model.predict(pairs, show_progress_bar=False)

        true_relevances = [d["label"] for d in docs]

        # Get predicted order: indices sorted by prediction descending
        predicted_order = np.argsort(predictions)[::-1].tolist()

        # Compute metrics
        ndcg_3_scores.append(ndcg_at_k(predicted_order, true_relevances, k=3))
        ndcg_5_scores.append(ndcg_at_k(predicted_order, true_relevances, k=5))
        mrr_scores.append(mrr(predicted_order, true_relevances, threshold=0.5))

        # Rank correlation within this query
        if len(set(true_relevances)) > 1:  # Need variance
            corr = spearmanr(predictions, true_relevances).correlation
            if not math.isnan(corr):
                rank_correlations.append(corr)

    return {
        "ndcg@3": np.mean(ndcg_3_scores),
        "ndcg@5": np.mean(ndcg_5_scores),
        "mrr": np.mean(mrr_scores),
        "rank_corr": np.mean(rank_correlations) if rank_correlations else 0.0,
        "n_queries": len(query_groups),
    }
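# evaluate_ranking only iterates its `eval_dataset` argument, so any iterable of dicts
# with "sentence1", "sentence2" and "label" keys works, e.g. (illustrative):
#   evaluate_ranking(model, [
#       {"sentence1": "q1", "sentence2": "relevant doc", "label": 1.0},
#       {"sentence1": "q1", "sentence2": "off-topic doc", "label": 0.1},
#   ])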


class DomainEvalCallback(TrainerCallback):
    """Callback to log proper ranking metrics during training."""

    def __init__(self, model, eval_dataset_full):
        self.model = model
        self.eval_dataset_full = eval_dataset_full

    def on_evaluate(self, args, state, control, **kwargs):
        """Run after each evaluation step."""
        metrics = evaluate_ranking(self.model, self.eval_dataset_full)

        # Log to trackio
        trackio.log({
            "domain/ndcg@3": metrics["ndcg@3"],
            "domain/ndcg@5": metrics["ndcg@5"],
            "domain/mrr": metrics["mrr"],
            "domain/rank_corr": metrics["rank_corr"],
        })

        logger.info(
            f"Domain eval - NDCG@3: {metrics['ndcg@3']:.4f}, "
            f"NDCG@5: {metrics['ndcg@5']:.4f}, "
            f"MRR: {metrics['mrr']:.4f}, "
            f"RankCorr: {metrics['rank_corr']:.4f} "
            f"(n={metrics['n_queries']} queries)"
        )
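# Note: on_evaluate runs each time the Trainer evaluates (every `eval_steps` steps with
# the training arguments below), so these domain metrics are logged alongside eval_loss.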


def evaluate_by_type(model, eval_dataset, type_column="type"):
    """Evaluate ranking metrics per content type."""
    if type_column not in eval_dataset.column_names:
        return {}

    # Group by type first
    by_type = defaultdict(list)
    for item in eval_dataset:
        by_type[item[type_column]].append(item)

    results = {}
    for content_type, items in by_type.items():
        # Create a mini dataset for this type
        class TypeDataset:
            def __init__(self, items):
                self.items = items
            def __iter__(self):
                return iter(self.items)
            @property
            def column_names(self):
                return ["sentence1", "sentence2", "label"]

        type_metrics = evaluate_ranking(model, TypeDataset(items))

        if type_metrics["n_queries"] >= 2:
            results[f"{content_type}_ndcg@5"] = type_metrics["ndcg@5"]
            results[f"{content_type}_mrr"] = type_metrics["mrr"]
            results[f"{content_type}_n_queries"] = type_metrics["n_queries"]

    return results
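# With a hypothetical content type "faq", the returned dict would look like
# {"faq_ndcg@5": 0.81, "faq_mrr": 0.75, "faq_n_queries": 12} (illustrative numbers).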


def main():
    # Initialize trackio with full config
    trackio.init(
        project="arcade-reranker",
        name=RUN_NAME,
        space_id=SPACE_ID,
        config={
            "model": BASE_MODEL,
            "dataset": DATASET_NAME,
            "learning_rate": LEARNING_RATE,
            "num_epochs": NUM_EPOCHS,
            "batch_size": BATCH_SIZE,
            "max_seq_length": MAX_SEQ_LENGTH,
        }
    )

    logger.info("Configuration:")
    logger.info(f"  Dataset: {DATASET_NAME}")
    logger.info(f"  Base model: {BASE_MODEL}")
    logger.info(f"  Epochs: {NUM_EPOCHS}")
    logger.info(f"  Run name: {RUN_NAME}")
    logger.info(f"  Trackio space: {SPACE_ID}")

    model = CrossEncoder(BASE_MODEL, max_length=MAX_SEQ_LENGTH)

    logger.info(f"Loading dataset: {DATASET_NAME}")
    dataset = load_dataset(DATASET_NAME, split="train")

    # Log dataset composition
    type_counts = defaultdict(int)
    if "type" in dataset.column_names:
        for item in dataset:
            type_counts[item["type"]] += 1
        logger.info(f"Dataset composition: {dict(type_counts)}")

    # Log to trackio
    for content_type, count in type_counts.items():
        trackio.log({f"data/{content_type}_count": count})

    trackio.log({"data/total_examples": len(dataset)})
    logger.info(f"Total examples: {len(dataset)}")

    # Rename columns for CrossEncoderTrainer
    dataset = dataset.rename_columns({
        "query": "sentence1",
        "text": "sentence2",
        "score": "label"
    })

    # Split for evaluation (before removing extra columns so we keep type for eval)
    eval_size = min(400, int(len(dataset) * 0.15))
    splits = dataset.train_test_split(test_size=eval_size, seed=42)

    # Keep full eval dataset with type column for per-type evaluation
    eval_dataset_full = splits["test"]

    # Remove extra columns for training (CrossEncoderTrainer only wants sentence1, sentence2, label)
    train_dataset = splits["train"].select_columns(["sentence1", "sentence2", "label"])
    eval_dataset = splits["test"].select_columns(["sentence1", "sentence2", "label"])

    trackio.log({
        "data/train_size": len(train_dataset),
        "data/eval_size": len(eval_dataset),
    })
    logger.info(f"Train: {len(train_dataset)}, Eval: {len(eval_dataset)}")

    # Evaluate base model before training with proper ranking metrics
    logger.info("Evaluating base model on eval set...")
    base_metrics = evaluate_ranking(model, eval_dataset_full)
    for key, value in base_metrics.items():
        trackio.log({f"base_model/{key}": value})
    logger.info(f"Base model metrics: {base_metrics}")

    # NanoBEIR for benchmark comparison
    evaluator = CrossEncoderNanoBEIREvaluator(
        dataset_names=["msmarco", "nfcorpus", "nq"],
        batch_size=BATCH_SIZE,
    )

    args = CrossEncoderTrainingArguments(
        output_dir="models/reranker",
        num_train_epochs=NUM_EPOCHS,
        per_device_train_batch_size=BATCH_SIZE,
        per_device_eval_batch_size=BATCH_SIZE,
        learning_rate=LEARNING_RATE,
        warmup_ratio=0.1,
        bf16=True,
        eval_strategy="steps",
        eval_steps=25,
        save_strategy="steps",
        save_steps=25,
        save_total_limit=5,
        logging_steps=25,
        logging_first_step=True,
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        push_to_hub=True,
        hub_model_id=HUB_MODEL_ID,
        hub_strategy="every_save",
        report_to="trackio",
        run_name=RUN_NAME,
    )

    # Custom callback to log domain-specific ranking metrics during training
    domain_callback = DomainEvalCallback(model, eval_dataset_full)

    # Early stopping to prevent overfitting
    early_stopping = EarlyStoppingCallback(early_stopping_patience=3)

    trainer = CrossEncoderTrainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        evaluator=evaluator,
        callbacks=[domain_callback, early_stopping],
    )
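    # Note: no `loss` is passed to the trainer, so it falls back to its default
    # (BinaryCrossEntropyLoss in recent sentence-transformers releases), which accepts
    # the continuous 0.0-1.0 labels used here.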

    logger.info("Starting training...")
    trainer.train()

    # Final evaluation with proper ranking metrics
    logger.info("Running final ranking evaluation...")
    final_metrics = evaluate_ranking(model, eval_dataset_full)
    for key, value in final_metrics.items():
        trackio.log({f"final/{key}": value})
    logger.info(f"Final metrics: {final_metrics}")

    # Per-type evaluation
    logger.info("Evaluating by content type...")
    type_metrics = evaluate_by_type(model, eval_dataset_full)
    for key, value in type_metrics.items():
        trackio.log({f"final/by_type/{key}": value})
    logger.info(f"Per-type metrics: {type_metrics}")

    # Log improvement
    trackio.log({
        "improvement/ndcg5_delta": final_metrics["ndcg@5"] - base_metrics["ndcg@5"],
        "improvement/mrr_delta": final_metrics["mrr"] - base_metrics["mrr"],
    })

    logger.info(f"Pushing final model to {HUB_MODEL_ID}")
    model.push_to_hub(HUB_MODEL_ID, exist_ok=True)

    trackio.finish()
    logger.info("Done!")


if __name__ == "__main__":
    main()