File size: 3,395 Bytes
bf1497a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
import os
from collections import defaultdict
def stat_size(fnames):
    """Return the total number of lines across all files in *fnames*.

    Used as a proxy for dataset size when choosing between sources.
    """
    total = 0
    for path in fnames:
        with open(path, 'r') as fh:
            total += sum(1 for _ in fh)
    return total
# Two-level index: dataset name -> source collection -> list of .jsonl paths.
dataset2fname = defaultdict(lambda :defaultdict(list))
# Final selection: one source's files per dataset (filled by the loops below).
final_task_files = []
# Source collections to scan under the all_collect data root.
srcs = [
    'bge-m3',
    'medi',
    'mteb-Classification',
    'mteb-Clustering',
    'mteb-PairClassification',
    'mteb-Reranking',
    'mteb-Retrieval',
    'mteb-Retrieval_aug',
    'mteb-STS',
]
# Index every .jsonl data file by (dataset, source collection).
for src in srcs:
    root = f'/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/{src}'
    for entry in os.listdir(root):
        if not entry.endswith('.jsonl'):
            continue
        # File names appear to follow <prefix>_<dataset>_<lang>.jsonl
        # (exactly two underscores) — TODO confirm against the data dir.
        _, dataset, lang = entry[:-6].split('_')
        # For mteb-* sources keep only language-neutral or English splits.
        if src.startswith('mteb') and not (lang == 'default' or lang.startswith('en')):
            continue
        dataset2fname[dataset][src].append(os.path.join(root, entry))
# For each dataset keep the files of exactly one source: the sole source when
# only one provides it, otherwise the source with the largest total line count.
for dataset, by_src in dataset2fname.items():
    if len(by_src) == 1:
        chosen = next(iter(by_src.values()))
    else:
        # max() keeps the first maximal entry, matching a strict-greater scan.
        best_src = max(by_src, key=lambda s: stat_size(by_src[s]))
        chosen = by_src[best_src]
    final_task_files.extend(chosen)
# with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt', 'w') as f:
# for task_file in final_task_files:
# if 'Classification' in task_file:
# line = f'{task_file}\tclassification'
# else:
# line = f'{task_file}\tdefault'
# f.write(line + '\n')
# from tqdm import tqdm
# cnt = 0
# for task_file in tqdm(final_task_files):
# with open(task_file, 'r') as f:
# for line in f:
# cnt += 1
# fnames = []
# with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt', 'r') as f:
# for line in f:
# fname, task_type = line.strip().split('\t')
# fnames.append(fname)
# Write the selected file list; each output line is "<path>\t-1".
with open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_aug_en_task_files.txt', 'w') as out_f:
    for task_file in final_task_files:
        lowered = task_file.lower()
        # Derive a task type from path markers, first match wins.
        # NOTE(review): task_type is not written in the active output line —
        # it only feeds the commented-out variant kept below.
        for marker, label in (
            ('classification', 'classification'),
            ('medi_task', 'super-NI'),
            ('clustering', 'clustering'),
            ('sts', 'sts'),
        ):
            if marker in lowered:
                task_type = label
                break
        else:
            task_type = 'default'
        # out_f.write(f'{task_file}\t{task_type}\t-1\n')
        out_f.write(f'{task_file}\t-1\n')
# def stat_size(fnames):
# cnt = 0
# for fname in fnames:
# with open(fname, 'r') as f:
# for line in f:
# cnt += 1
# return cnt
# f_in = open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_task_files.txt', 'r')
# f_out = open('/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/p+a_task_files_en.txt', 'w')
# for line in f_in:
# file, type_, size = line.strip().split('\t')
# lang = file[:-6].split('_')[-1]
# if lang == 'default' or lang.startswith('en'):
# f_out.write(line)
# f_in.close()
# f_out.close() |