import pandas as pd
import random
import copy
from tqdm import tqdm
import time
import json
from openai import OpenAI
import os
import dotenv
import tempfile
import numpy as np
import pytrec_eval
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor
dotenv.load_dotenv()
@dataclass
class RankingResult:
    """A single query's ranking outcome, as serialized to/from JSON.

    Used by ``Evaluator.write_trec_files`` to emit TREC qrels/run lines and
    round-tripped by ``load_results``.
    """
    query: str              # the question text
    correct_passage: str    # text of the relevant passage
    ranking: str            # raw ranking response string
    correct_idx: int        # number of the relevant passage; used verbatim in the qrels docid
    passages: List[str]     # candidate passages shown for this query
    ranks: List[int]        # passage numbers in ranked order (may contain duplicates;
                            # de-duplicated downstream in write_trec_files)
class Evaluator:
    """Static helpers that score ranking results with pytrec_eval."""

    @staticmethod
    def clean_ranking_response(response: str) -> List[int]:
        """Extract every integer from *response* in order of appearance.

        Any non-digit character acts as a separator, so e.g.
        ``"[3] > [1] > [2]"`` yields ``[3, 1, 2]``.
        """
        digits_only = ''.join(c if c.isdigit() else ' ' for c in response)
        return [int(token) for token in digits_only.split()]

    @staticmethod
    def write_trec_files(results: "List[RankingResult]") -> tuple[str, str]:
        """Write TREC-format qrels and run files for *results*.

        Returns ``(qrels_path, run_path)``; the caller owns both temp files
        and must delete them (``main`` does via ``os.unlink``).
        """
        # mkstemp instead of NamedTemporaryFile(delete=False).name: the
        # original leaked the still-open NamedTemporaryFile handles.
        run_fd, run_file = tempfile.mkstemp()
        os.close(run_fd)
        qrels_fd, qrels_file = tempfile.mkstemp()
        os.close(qrels_fd)
        with open(run_file, 'w') as f_run, open(qrels_file, 'w') as f_qrel:
            for i, result in enumerate(results):
                qid = str(i)
                correct_docid = f"passage_{result.correct_idx}"
                f_qrel.write(f"{qid} 0 {correct_docid} 1\n")
                # De-duplicate ranks: a repeated rank is bumped to the next
                # free integer so every docid keeps a distinct position.
                seen_ranks = set()
                adjusted_ranks = []
                for rank in result.ranks:
                    while rank in seen_ranks:
                        rank += 1
                    seen_ranks.add(rank)
                    adjusted_ranks.append(rank)
                for rank_position, passage_num in enumerate(adjusted_ranks, 1):
                    # NOTE(review): the run docid is shifted (+1, "convert to
                    # 1-based") but the qrels docid above uses correct_idx
                    # unshifted. If both come from the same numbering scheme
                    # this is an off-by-one and every metric scores 0 —
                    # confirm against the JSON producer before trusting scores.
                    docid = f"passage_{passage_num+1}"  # Convert to 1-based passage numbering
                    score = 1.0 / rank_position  # monotonically decreasing with rank
                    f_run.write(f"{qid} Q0 {docid} {rank_position} {score:.4f} run\n")
        return qrels_file, run_file

    @staticmethod
    def calculate_metrics(qrels_file: str, run_file: str) -> Dict[str, float]:
        """Average NDCG@{1,5,10}, MRR and Recall@5 over all queries.

        Reads the TREC files produced by :meth:`write_trec_files` and returns
        a dict of metric name -> mean score rounded to 4 decimals.
        """
        with open(qrels_file) as f_qrel, open(run_file) as f_run:
            qrel = pytrec_eval.parse_qrel(f_qrel)
            run = pytrec_eval.parse_run(f_run)
        evaluator = pytrec_eval.RelevanceEvaluator(
            qrel,
            {'ndcg_cut.1', 'ndcg_cut.5', 'ndcg_cut.10', 'recip_rank', 'recall.5'}
        )
        scores = evaluator.evaluate(run)
        # Map of report label -> pytrec_eval per-query key.
        metric_keys = {
            'NDCG@1': 'ndcg_cut_1',
            'NDCG@5': 'ndcg_cut_5',
            'NDCG@10': 'ndcg_cut_10',
            'MRR': 'recip_rank',
            'Recall@5': 'recall_5',
        }
        if not scores:
            # Guard: the original divided by len(scores) and raised
            # ZeroDivisionError on an empty run.
            return {name: 0.0 for name in metric_keys}
        num_queries = len(scores)
        return {
            name: round(sum(q[key] for q in scores.values()) / num_queries, 4)
            for name, key in metric_keys.items()
        }
def load_results(filename: str) -> List[RankingResult]:
    """Deserialize a list of RankingResult records from a JSON file.

    Expects the file to contain a JSON array of objects with the keys
    query, correct_passage, ranking, correct_idx, passages, ranks.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        raw_records = json.load(f)
    # Pick the expected keys explicitly so extra keys in the JSON are ignored.
    return [
        RankingResult(
            query=record['query'],
            correct_passage=record['correct_passage'],
            ranking=record['ranking'],
            correct_idx=record['correct_idx'],
            passages=record['passages'],
            ranks=record['ranks'],
        )
        for record in raw_records
    ]
def main():
    """Score saved QA-ranking output: print TREC files, metrics, and dump a CSV."""
    # Point this at the JSON produced by the QA ranking step.
    loaded_results = load_results('./your-output.json')

    qrels_path, run_path = Evaluator.write_trec_files(loaded_results)

    # Echo both intermediate TREC files for manual inspection.
    for header, path in (("\nQRELS file contents:", qrels_path),
                         ("\nRun file contents:", run_path)):
        print(header)
        with open(path, 'r') as fh:
            print(fh.read())

    metrics = Evaluator.calculate_metrics(qrels_path, run_path)
    print("\nEvaluation Results:")
    for metric, score in metrics.items():
        print(f"{metric}: {score:.4f}")

    # The temp files are owned by this caller — clean them up.
    os.unlink(qrels_path)
    os.unlink(run_path)

    # Persist the raw per-query results alongside the printed metrics.
    results_frame = pd.DataFrame([vars(r) for r in loaded_results])
    results_frame.to_csv('ranking-results.csv', index=False)
# Guard the entry point so importing this module does not trigger the run.
if __name__ == "__main__":
    main()