# Standard library
import argparse
import json
import random

# Third-party
import ftfy
import ir_datasets
from tqdm.auto import tqdm

# Fixed seed so the shuffled sample of training pairs is reproducible
# across runs of this script.
random.seed(1)
def load_examples(base_dataset):
    """Build (query_text, document_text) pairs from an ir_datasets dataset.

    Loads every document and query, cleans the text with ``ftfy.fix_text``,
    then emits one pair per qrel judgment.

    Args:
        base_dataset: ir_datasets identifier, e.g. ``"beir/msmarco/train"``.

    Returns:
        list[tuple[str, str]]: one ``(query, document)`` pair per qrel whose
        query id and doc id were both present in the dataset. Qrels that
        reference a missing query/document are skipped instead of raising
        ``KeyError`` (can happen with subset/filtered datasets).
    """
    dataset = ir_datasets.load(base_dataset)

    # Hoisted out of the loop: the doc class (and hence whether it has a
    # title field) is the same for every document.
    has_title = "title" in dataset.docs_cls()._fields

    documents = {}
    for doc in tqdm(
        dataset.docs_iter(), total=dataset.docs_count(), desc="Loading documents"
    ):
        text = f"{doc.title} {doc.text}" if has_title else doc.text
        documents[doc.doc_id] = ftfy.fix_text(text)

    queries = {
        query.query_id: ftfy.fix_text(query.text)
        for query in dataset.queries_iter()
    }

    # One training pair per qrel; guard against qrels pointing at ids we
    # never loaded rather than crashing mid-export.
    return [
        (queries[qrel.query_id], documents[qrel.doc_id])
        for qrel in dataset.qrels_iter()
        if qrel.query_id in queries and qrel.doc_id in documents
    ]
def main():
    """Export a random sample of qrel (query, document) pairs as JSON lines.

    Writes one JSON object per line: ``{"query": ..., "document": ...}``.
    """
    parser = argparse.ArgumentParser()
    # BUG FIX: without type=int, a value passed on the command line is a
    # str, and examples[:args.num_examples] would raise TypeError.
    parser.add_argument('--num_examples', type=int, default=500_000)
    parser.add_argument('--dataset', default='beir/msmarco/train')
    parser.add_argument('--output', required=True)
    args = parser.parse_args()

    examples = load_examples(args.dataset)
    random.shuffle(examples)  # deterministic: random.seed(1) set at import

    # Slice first so tqdm reports the true count even when the dataset
    # yields fewer than --num_examples pairs.
    sample = examples[:args.num_examples]
    with open(args.output, 'w', encoding='utf-8') as f:
        for query, document in tqdm(sample, desc='Writing'):
            f.write(json.dumps({'query': query, 'document': document}) + '\n')


if __name__ == "__main__":
    main()