Upload mining_hardnegatives_bge3.py
mining_hardnegatives_bge3.py · ADDED · +187 −0
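In brief: the script mines BM25 hard negatives per (query, positive) pair from the WebFAQ retrieval datasets, scores the positive and all negative candidates with the BAAI/bge-reranker-v2-m3 cross-encoder teacher, and writes one MarginMSE-ready JSONL file per language.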
import json
import random
import os
import argparse
import torch
import numpy as np
from pathlib import Path
from datasets import load_dataset
from tqdm import tqdm
from pyserini.search.lucene import LuceneSearcher
from sentence_transformers import CrossEncoder

# ==========================================
# CONFIGURATION
# ==========================================

# OPTION A: The Modern SOTA (Recommended) - ~2.2GB VRAM
CROSS_ENCODER_MODEL = "BAAI/bge-reranker-v2-m3"

# Target samples per language
MARGINMSE_SAMPLES = {
    "ara": 32_000, "dan": 32_000, "deu": 96_000, "eng": 128_000,
    "fas": 64_000, "fra": 96_000, "hin": 32_000, "ind": 32_000,
    "ita": 96_000, "jpn": 96_000, "kor": 32_000, "nld": 96_000,
    "pol": 64_000, "por": 64_000, "rus": 96_000, "spa": 96_000,
    "swe": 32_000, "tur": 32_000, "vie": 32_000, "zho": 32_000,
}

# Pyserini Language Codes
LANG_MAP = {
    "ara": "ar", "dan": "da", "deu": "de", "eng": "en", "fas": "fa",
    "fra": "fr", "hin": "hi", "ind": "id", "ita": "it", "jpn": "ja",
    "kor": "ko", "nld": "nl", "pol": "pl", "por": "pt", "rus": "ru",
    "spa": "es", "swe": "sv", "tur": "tr", "vie": "vi", "zho": "zh"
}
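# Note: the per-language budgets above total ~1.28M (query, positive)
# pairs across the 20 languages (9 x 32k + 3 x 64k + 7 x 96k + 1 x 128k).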
def build_index(language: str, corpus: dict, temp_dir: Path):
    """
    Builds a temporary Lucene index for BM25 retrieval.
    """
    iso_lang = LANG_MAP.get(language, "en")
    input_dir = temp_dir / language / "corpus_jsonl"
    index_dir = temp_dir / language / "index"

    if index_dir.exists():
        return index_dir

    print(f" [{language}] Preparing docs for indexing...")
    input_dir.mkdir(parents=True, exist_ok=True)
    jsonl_file = input_dir / "docs.jsonl"

    with open(jsonl_file, "w", encoding="utf-8") as f:
        for cid, text in corpus.items():
            f.write(json.dumps({"id": str(cid), "contents": text}, ensure_ascii=False) + "\n")

    print(f" [{language}] Building BM25 Index (Analyzer: {iso_lang})...")
    # Using os.system to call the Pyserini JVM wrapper
    cmd = (f"python -m pyserini.index.lucene "
           f"--collection JsonCollection "
           f"--input {input_dir} "
           f"--index {index_dir} "
           f"--generator DefaultLuceneDocumentGenerator "
           f"--threads 8 "
           f"--language {iso_lang} "
           f"--storeRaw")
    os.system(cmd)

    return index_dir
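# For reference, for German ("deu" -> analyzer "de") the f-string above
# expands to:
#   python -m pyserini.index.lucene --collection JsonCollection \
#     --input temp_indices/deu/corpus_jsonl --index temp_indices/deu/index \
#     --generator DefaultLuceneDocumentGenerator --threads 8 \
#     --language de --storeRaw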
def process_language_mining_and_scoring(lang, n_samples, output_path, repo_id, k_negatives, scorer_model, batch_size):
    print(f"\n{'='*60}\nProcessing {lang} (Samples: {n_samples})\n{'='*60}")

    # 1. Load Data
    try:
        q_ds = load_dataset(repo_id, f"{lang}-queries", split='train')
        c_ds = load_dataset(repo_id, f"{lang}-corpus", split='corpus')
        qr_ds = load_dataset(repo_id, f"{lang}-qrels", split='train')
    except Exception as e:
        print(f" [ERROR] Could not load {lang}: {e}")
        return

    queries = {item['_id']: item['text'] for item in q_ds}
    corpus = {item['_id']: item['text'] for item in c_ds}
    qrels_all = [(item['query-id'], item['corpus-id']) for item in qr_ds]

    # 2. Sample Subset
    random.seed(42)
    if len(qrels_all) > n_samples:
        sampled_qrels = random.sample(qrels_all, n_samples)
    else:
        sampled_qrels = qrels_all

    # 3. Build/Load Index
    idx_path = build_index(lang, corpus, Path("./temp_indices"))
    searcher = LuceneSearcher(str(idx_path))
    searcher.set_language(LANG_MAP.get(lang, "en"))

    # 4. Mine AND Score
    final_output = []

    print(f" [{lang}] Mining {k_negatives} negatives & Reranking with {CROSS_ENCODER_MODEL}...")

    for qid, pos_doc_id in tqdm(sampled_qrels, desc=f" Distilling {lang}"):
        query_text = queries.get(qid)
        pos_text = corpus.get(pos_doc_id)

        if not query_text or not pos_text:
            continue

        # A. BM25 Retrieval
        hits = searcher.search(query_text, k=k_negatives + 20)
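        # (+20 over-retrieves so that the positive itself and any hits
        # missing from the corpus dict can be dropped below while still
        # leaving up to k_negatives candidates.)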

        neg_candidates = []
        for hit in hits:
            if hit.docid != str(pos_doc_id) and len(neg_candidates) < k_negatives:
                neg_text = corpus.get(hit.docid)
                if neg_text:
                    neg_candidates.append(neg_text)

        # B. Cross-Encoder Scoring
        # We score [Positive, Neg1, Neg2, ..., Neg200]
        texts_to_score = [pos_text] + neg_candidates
        pairs = [[query_text, doc] for doc in texts_to_score]

        # Predict returns raw logits
        scores = scorer_model.predict(pairs, batch_size=batch_size, show_progress_bar=False)

        pos_score = float(scores[0])
        neg_scores = [float(s) for s in scores[1:]]

        final_output.append({
            "query": query_text,
            "positive": pos_text,
            "positive_score": pos_score,
            "negatives": neg_candidates,
            "negative_scores": neg_scores
        })
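    # Each record is one MarginMSE training group. Illustrative shape
    # (scores here are made-up numbers, not real model outputs):
    #   {"query": "...", "positive": "...", "positive_score": 7.9,
    #    "negatives": ["...", ...], "negative_scores": [2.1, ...]}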
    # 5. Save
    save_dir = Path(output_path) / lang
    save_dir.mkdir(parents=True, exist_ok=True)
    outfile = save_dir / "train_marginmse.jsonl"

    with open(outfile, "w", encoding="utf-8") as f:
        for item in final_output:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")

    print(f" [{lang}] Saved {len(final_output)} examples to {outfile}")
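# Resulting layout, with the default --output-dir:
#   ./data/distilled_data/<lang>/train_marginmse.jsonl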
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--repo-id", default="PaDaS-Lab/webfaq-retrieval")
    parser.add_argument("--output-dir", default="./data/distilled_data")
    parser.add_argument("--k-negatives", type=int, default=200)
    parser.add_argument("--batch-size", type=int, default=16, help="Lower this if OOM")
    args = parser.parse_args()

    # Initialize Teacher
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Loading Teacher: {CROSS_ENCODER_MODEL}")
    print(f"Device: {device}")

    scorer_model = CrossEncoder(
        CROSS_ENCODER_MODEL,
        device=device,
        automodel_args={"torch_dtype": torch.float16} if device == "cuda" else {}
    )
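    # Loading the teacher in fp16 is what keeps it near the ~2.2GB VRAM
    # noted in the configuration above; on CPU it falls back to fp32.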
    for lang, n_samples in MARGINMSE_SAMPLES.items():
        # === RESUME LOGIC ===
        # If the output file already exists, we assume this language is done.
        output_file = Path(args.output_dir) / lang / "train_marginmse.jsonl"

        if output_file.exists():
            # Optional: Check file size to ensure it's not empty/corrupted
            if output_file.stat().st_size > 1000:
                print(f" [RESUME] Output found for {lang}. Skipping...")
                continue
            else:
                print(f" [RESUME] Found empty file for {lang}. Re-processing...")

        process_language_mining_and_scoring(
            lang, n_samples, args.output_dir, args.repo_id, args.k_negatives, scorer_model, args.batch_size
        )

if __name__ == "__main__":
    main()
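The per-language files are drop-in MarginMSE distillation data. A minimal consumption sketch, assuming the default --output-dir and the field names written by the script (the training loop itself is out of scope here):

import json

# Read one language's distilled file and recover the teacher margins
# used as MarginMSE targets.
with open("./data/distilled_data/eng/train_marginmse.jsonl", encoding="utf-8") as f:
    for line in f:
        ex = json.loads(line)
        # One margin per negative: the student is trained so that
        # score(q, pos) - score(q, neg_i) matches margins[i] under MSE.
        margins = [ex["positive_score"] - s for s in ex["negative_scores"]]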