"""Count per-task-type training examples and steps, given a list of task data
files and per-task-type batch sizes."""

import os
import json

from tqdm import tqdm

# Load task metadata (maps each task name to its task type).
meta_file = '/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/task_meta/casual/all.json'
with open(meta_file, 'r') as f:
    meta_info = json.load(f)

# Each line of task_files.txt is expected to be "<data_file_path>\t<train_size>".
# First pass: report any listed data file that does not exist on disk.
task_file = "/etc/ssd1/jiangzhongtao/baai_embedding_tune/data/all_collect/task_files.txt"
unsample_to_be_divisible_by = None

with open(task_file, 'r') as f:
    for line in f:
        file, _ = line.strip().split('\t')
        if not os.path.exists(file):
            print(file)

tasktype2batchsize = {
    'classification': 2048,
    'clustering': 2048,
    'duplication': 512,
    'nli': 512,
    'retrieval': 512,
    'sts': 256,
    'super-ni': 2048,
    'unk': 2048,
}

# Second pass: count usable training examples per task type and the total
# number of steps implied by the per-type batch sizes.
cnt = 0
type2cnt = dict()
steps = 0

with open(task_file, 'r') as f:
    for line in tqdm(f):
        file, train_size = line.strip().split('\t')
        # File names are expected to look like "<prefix>_<task_name>_<suffix>".
        _, task_name, _ = os.path.split(file)[-1].split('_')
        task_type = meta_info[task_name]['task_type']

        if task_type != 'retrieval':
            # Non-retrieval tasks: the dataset size is the number of lines.
            total_size = 0
            with open(file, 'r') as f_file:
                for _ in f_file:
                    total_size += 1
        else:
            # Retrieval tasks: count unique queries, since the same query can
            # appear on multiple lines.
            queries = set()
            with open(file, 'r') as f_file:
                for sample in f_file:
                    queries.add(json.loads(sample)['query'])
            total_size = len(queries)

        train_size = int(train_size)

        if train_size > 0:
            # An explicit train size was requested; it must not exceed the
            # number of available examples.
            if total_size < train_size:
                raise ValueError(file)
        else:
            # No explicit size: use the full dataset.
            train_size = total_size

        # Truncate to a whole number of batches and accumulate the step count.
        batch_size = tasktype2batchsize[task_type]
        train_size = train_size // batch_size * batch_size
        steps += train_size // batch_size

        cnt += train_size
        type2cnt[task_type] = type2cnt.get(task_type, 0) + train_size

print('steps', steps)
print(type2cnt)
print(cnt)