|
|
import os |
|
|
import random |
|
|
import json |
|
|
import shutil |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# MIRACL language codes to process (18 languages, including the surprise
# languages de/yo).  NOTE(review): "avaliable" is a typo for "available",
# kept as-is because the loop below references this exact name.
avaliable_languages = ['ar', 'bn', 'en', 'es', 'fa', 'fi', 'fr', 'hi', 'id', 'ja', 'ko', 'ru', 'sw', 'te', 'th', 'zh', 'de', 'yo']

# Source MIRACL data: expects <base_dir>/<lang>/{dev_qrels,dev_queries,corpus}.jsonl
base_dir = '/share_2/chaofan/dataset/miracl/data'

# Destination for the subsampled per-language datasets (mirrors the source layout).
new_dir = '/share/chaofan/code/bge_demo/data'

# Embedding output directory.  NOTE(review): not used anywhere in this
# visible chunk — presumably consumed by a later step; confirm before removing.
new_emb_dir = '/share/chaofan/code/bge_demo/emb'
|
|
|
|
|
# For each language: keep every corpus doc referenced by the dev qrels, pad the
# set with up to 1,000,000 random distractor docs, write the reduced corpus,
# and copy the qrels/queries files over unchanged.
for lang in tqdm(avaliable_languages, desc='language'):
    qrels_path = os.path.join(base_dir, lang, 'dev_qrels.jsonl')
    queries_path = os.path.join(base_dir, lang, 'dev_queries.jsonl')
    corpus_path = os.path.join(base_dir, lang, 'corpus.jsonl')

    os.makedirs(os.path.join(new_dir, lang), exist_ok=True)

    new_qrels_path = os.path.join(new_dir, lang, 'dev_qrels.jsonl')
    new_queries_path = os.path.join(new_dir, lang, 'dev_queries.jsonl')
    new_corpus_path = os.path.join(new_dir, lang, 'corpus.jsonl')

    # Docs referenced by a dev qrel must always be kept so evaluation is lossless.
    # Explicit utf-8: the corpora are multilingual (ar/zh/ja/...), so relying on
    # the locale's default encoding is a latent bug on non-UTF-8 systems.
    useful_corpus = set()
    with open(qrels_path, encoding='utf-8') as f:
        for line in f:
            useful_corpus.add(json.loads(line)['docid'])

    # Load the full corpus keyed by doc id (one JSON object per line).
    corpus = {}
    with open(corpus_path, encoding='utf-8') as f:
        for line in f:
            data = json.loads(line)
            corpus[data['id']] = data

    # Pad with up to 1M randomly chosen doc ids; set.update makes the original
    # "if id not in set" pre-check unnecessary, and overlap with qrel docs is
    # harmless.  NOTE(review): random is unseeded, so the sample (and hence the
    # output corpus) differs between runs — seed it if reproducibility matters.
    corpus_ids = list(corpus)
    random.shuffle(corpus_ids)
    useful_corpus.update(corpus_ids[:1000000])

    print(f'language {lang}, all corpus {len(corpus_ids)}, use corpus {len(useful_corpus)}')

    # ensure_ascii=False writes the non-Latin text verbatim instead of \uXXXX
    # escapes — equivalent JSON, much smaller and human-readable output.
    # NOTE(review): set iteration order is not stable across runs; sort
    # useful_corpus here if a deterministic file order is ever needed.
    with open(new_corpus_path, 'w', encoding='utf-8') as f:
        for doc_id in useful_corpus:
            f.write(json.dumps(corpus[doc_id], ensure_ascii=False) + '\n')

    # Qrels and queries pass through unchanged.
    shutil.copy(qrels_path, new_qrels_path)
    shutil.copy(queries_path, new_queries_path)