import os
from tqdm import tqdm
import json
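# Load per-task metadata (maps task_name -> attributes such as task_type).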
meta_file = '/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/task_meta/casual/all.json'
with open(meta_file, 'r') as f:
    meta_info = json.load(f)
task_file = "/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt"
unsample_to_be_divisible_by = None
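# First pass: verify that every data file listed in the manifest exists on disk.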
with open(task_file, 'r') as f:
    for line in f:
        file, _ = line.strip().split('\t')
        if not os.path.exists(file):
            print(file)
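# Batch size per task type; each file's train size is later rounded down to a
# multiple of its batch size so every batch is full.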
tasktype2batchsize = {
    'classification': 2048,
    'clustering': 2048,
    'duplication': 512,
    'nli': 512,
    'retrieval': 512,
    'sts': 256,
    'super-ni': 2048,
    'unk': 2048,
}
cnt = 0
type2cnt = dict()
steps = 0
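# Second pass: measure each file's effective size. Retrieval files are counted
# by unique query; all other files are counted by raw line count.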
with open(task_file, 'r') as f:
    for line in tqdm(f):
        file, train_size = line.strip().split('\t')
        # File names look like "<prefix>_<task_name>_<suffix>".
        _, task_name, _ = os.path.split(file)[-1].split('_')
        task_type = meta_info[task_name]['task_type']
        if task_type != 'retrieval':
            total_size = 0
            with open(file, 'r') as f_file:
                for _ in f_file:
                    total_size += 1
        else:
            # Deduplicate retrieval data by query before counting.
            queries = set()
            with open(file, 'r') as f_file:
                for data_line in f_file:
                    queries.add(json.loads(data_line)['query'])
            total_size = len(queries)
        train_size = int(train_size)
        if train_size > 0:
            if total_size < train_size:
                raise ValueError(file)
        else:
            # A non-positive train size in the manifest means "use the whole file".
            train_size = total_size
        batch_size = tasktype2batchsize[task_type]
        # Round down to a whole number of batches.
        train_size = train_size // batch_size * batch_size
        steps += train_size // batch_size
        # print(file, train_size)
        cnt += train_size
        type2cnt[task_type] = type2cnt.get(task_type, 0) + train_size
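# Summary: total optimizer steps, per-type example counts, and total examples.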
print('steps', steps)
print(type2cnt)
# {
# 'retrieval': 2489655,
# 'sts': 140819,
# 'nli': 324951,
# 'super-ni': 180000,
# 'classification': 677510,
# 'clustering': 452224,
# 'duplication': 31409
# }
print(cnt)  # 3825445