code stringlengths 17 6.64M |
|---|
def ground_program2(scene, unique_filters):
    """Yield grounded 2-hop referring expressions: "the OBJ1 that is R1 the OBJ2".

    Builds on ground_program1's unique single-object groundings: for each
    uniquely-identified anchor object, tries every filter/relation pair and
    keeps, per target object, the shortest sentence that uniquely picks it out.

    Yields:
        (sentence, program, slot_program, slot_dict, obj) tuples, one per
        uniquely-referred target object.
    """
    # Slot-level program template for this expression category.
    program = 'S OBJ2 R1 OBJ1'
    program1 = ground_program1(scene, unique_filters)
    # Maps target object -> (sentence, program, slot_dict, obj, sentence_len);
    # the trailing length lets us keep only the shortest sentence per object.
    sentence_for_x = {}
    for (_, _, _, slot_dict1, obj2) in program1:
        for f in g_all_filter_ops:
            for r in g_all_relate_ops:
                # Execute with the concrete anchor object id to ground OBJ1.
                slot_dict = {'OBJ1': f, 'OBJ2': obj2, 'R1': r}
                try:
                    obj = execute(scene, 'OBJ2 R1 OBJ1', slot_dict)
                except ValueError:
                    # execute raises when the grounding is not unique/valid.
                    continue
                template = random.choice(g_templates_2)
                # Rebuild the slot dict with the anchor's *filter spec* (not
                # its object id) so the sentence reads naturally.
                slot_dict = {'OBJ1': f, 'OBJ2': slot_dict1['OBJ1'], 'R1': r}
                sentence = template.format(**slot_dict)
                sentence_len = len(sentence.split())
                if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                    sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        # Render the logical-form program string from the chosen slots.
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        program = f'point(Object, lambda x: {obj1_string} and {r}(x, iota(Object, lambda y: {obj2_string})))'
        (yield (sentence, program, slot_program, slot_dict, obj))
|
def ground_program3(scene, unique_filters):
    """Yield grounded conjunctive referring expressions:
    "the OBJ1 that is R1 the OBJ2 and R2 the OBJ3".

    For every ordered pair of distinct uniquely-identified anchors, tries all
    filter/relation combinations and keeps, per target object, the shortest
    sentence that uniquely refers to it.

    Yields:
        (sentence, program, slot_program, slot_dict, obj) tuples.
    """
    program = 'S OBJ3 R2 S OBJ2 R1 AND OBJ1'
    # Bug fix: the anchors are iterated in a *nested* loop; if ground_program1
    # is a generator (like its siblings here), the inner loop would exhaust it
    # after the first outer iteration. Materialize it once up front.
    program1 = list(ground_program1(scene, unique_filters))
    # Maps target object -> (sentence, program, slot_dict, obj, sentence_len).
    sentence_for_x = {}
    for (_, _, _, slot_dict1, obj2) in program1:
        for (_, _, _, slot_dict2, obj3) in program1:
            if (obj2 == obj3):
                continue
            for f in g_all_filter_ops:
                for r1 in g_all_relate_ops:
                    for r2 in g_all_relate_ops:
                        # Ground with concrete anchor object ids first.
                        slot_dict = {'OBJ1': f, 'OBJ2': obj2, 'R1': r1, 'OBJ3': obj3, 'R2': r2}
                        try:
                            obj = execute(scene, 'OBJ3 R2 OBJ2 R1 AND OBJ1', slot_dict)
                        except ValueError:
                            continue
                        template = random.choice(g_templates_3)
                        # Swap in the anchors' filter specs for sentence rendering.
                        slot_dict = {'OBJ1': f, 'OBJ2': slot_dict1['OBJ1'], 'R1': r1, 'OBJ3': slot_dict2['OBJ1'], 'R2': r2}
                        sentence = template.format(**slot_dict)
                        sentence_len = len(sentence.split())
                        if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                            sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r1 = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        r2 = slot_dict['R2']
        obj3_string = gen_filter_string(slot_dict['OBJ3'], 'z')
        program = f'point(Object, lambda x: {obj1_string} and {r1}(x, iota(Object, lambda y: {obj2_string})) and {r2}(x, iota(Object, lambda z: {obj3_string})))'
        (yield (sentence, program, slot_program, slot_dict, obj))
    # Bug fix: the original ended with `return [x[:-1] for x in ...]`. Inside a
    # generator that value is only reachable via StopIteration.value, which no
    # caller inspects (siblings ground_program2/4 have no such return) — dead
    # code, removed.
|
def ground_program4(scene, unique_filters):
    """Yield grounded nested referring expressions:
    "the OBJ1 that is R1 the OBJ2 that is R2 the OBJ3".

    Builds on ground_program2: each 2-hop grounding becomes the inner clause,
    and a new filter/relation pair is wrapped around it. Per target object,
    only the shortest uniquely-referring sentence is kept.

    Yields:
        (sentence, program, slot_program, slot_dict, obj) tuples.
    """
    program = 'S OBJ3 R2 S OBJ2 R1 OBJ1'
    # ground_program2 is consumed once by a single loop, so the generator can
    # be iterated directly.
    program2 = ground_program2(scene, unique_filters)
    # Maps target object -> (sentence, program, slot_dict, obj, sentence_len).
    sentence_for_x = {}
    for (_, _, _, slot_dict2, obj2) in program2:
        for f in g_all_filter_ops:
            for r1 in g_all_relate_ops:
                # Ground with the concrete inner object id first.
                slot_dict = {'OBJ1': f, 'R1': r1, 'OBJ2': obj2}
                try:
                    obj = execute(scene, 'OBJ2 R1 OBJ1', slot_dict)
                except ValueError:
                    # Grounding was not unique/valid; skip this combination.
                    continue
                template = random.choice(g_templates_4)
                # Re-expand the inner 2-hop grounding's slots (filter specs,
                # not object ids) so the sentence and logical form read fully.
                slot_dict = {'OBJ1': f, 'R1': r1, 'OBJ2': slot_dict2['OBJ1'], 'OBJ3': slot_dict2['OBJ2'], 'R2': slot_dict2['R1']}
                sentence = template.format(**slot_dict)
                sentence_len = len(sentence.split())
                if ((obj not in sentence_for_x) or (sentence_len < sentence_for_x[obj][(- 1)])):
                    sentence_for_x[obj] = (sentence, program, slot_dict, obj, sentence_len)
    for (sentence, slot_program, slot_dict, obj, _) in sentence_for_x.values():
        # Render the nested logical-form program string.
        obj1_string = gen_filter_string(slot_dict['OBJ1'], 'x')
        r1 = slot_dict['R1']
        obj2_string = gen_filter_string(slot_dict['OBJ2'], 'y')
        r2 = slot_dict['R2']
        obj3_string = gen_filter_string(slot_dict['OBJ3'], 'z')
        program = f'point(Object, lambda x: {obj1_string} and {r1}(x, iota(Object, lambda y: {obj2_string} and {r2}(y, iota(Object, lambda z: {obj3_string})))))'
        (yield (sentence, program, slot_program, slot_dict, obj))
|
def random_sample_and_post(scene):
    """Sample one grounded referring expression for `scene` and post-process it.

    Picks one of ground_program1..4 at random, then retries up to 4 times to
    obtain a non-empty solution set.

    Returns:
        A (sentence, program, slot_program, slot_dict, obj) tuple, or None
        (implicit) when every attempt yields no solutions — callers check for
        None explicitly.
    """
    unique_filters = [f for f in g_all_filter_ops if check_filter_unique(scene, f)]
    # Uniformly pick an expression category 1..4 and resolve its generator.
    cat = (random.choice(range(4)) + 1)
    func = globals()[f'ground_program{cat}']
    for i in range(4):
        sols = list(func(scene, unique_filters))
        if (len(sols) == 0):
            continue
        (sentence, program, slot_program, slot_dict, obj) = random.choice(sols)
        # Rewrite relation words into natural phrasing.
        # NOTE(review): assumes templates emit bare 'left'/'right'; a template
        # already containing 'left of' would become 'left of of' — confirm
        # against g_templates_*.
        sentence = sentence.replace('left', 'left of')
        sentence = sentence.replace('right', 'right of')
        sentence = sentence.replace('-', '')
        return (sentence, program, slot_program, slot_dict, obj)
    # All retries failed; fall through returns None, which the caller skips.
    print('Really bad...', scene['image_filename'])
|
def main():
    """Sample one referring expression per scene (first 150) and dump up to
    100 of them to args.output as JSON.

    Scenes for which sampling fails (random_sample_and_post returns None) are
    skipped.
    """
    scenes = jacinle.load_json(args.scenes_json)['scenes']
    refexps = list()
    for (scene_index, scene) in enumerate(jacinle.tqdm(scenes[:150])):
        rv = random_sample_and_post(scene)
        if (rv is None):
            continue
        (sentence, program, slot_program, slot_dict, obj) = rv
        # Bug fix: 'slot_program' previously stored `program`, duplicating the
        # 'program' field and dropping the slot-level program entirely.
        refexps.append({'scene_index': scene_index, 'image_filename': scene['image_filename'], 'question': sentence, 'program': program, 'slot_program': slot_program, 'slot_dict': slot_dict, 'answer': obj})
    jacinle.dump_json(args.output, {'refexps': refexps[:100]})
    print('Saved: "{}".'.format(args.output))
|
def filter(scene, name, input_):
    """Return the object indices from `input_` whose attribute value equals `name`.

    The special concept 'object' matches every object, so `input_` is returned
    unchanged. Otherwise the concept name (e.g. 'red') is mapped to its
    attribute category (e.g. 'color') via g_concept2attribute and the scene's
    objects are matched against it.
    """
    if (name == 'object'):
        return input_
    # Resolve which attribute category this concept word describes.
    attribute = g_concept2attribute[name]
    selected = set()
    for index in input_:
        if (scene['objects'][index][attribute] == name):
            selected.add(index)
    return selected
|
def multi_filter(scene, names, input_):
    """Apply `filter` once per whitespace-separated concept in `names`,
    threading the shrinking candidate set through each step."""
    current = input_
    for concept in names.split():
        current = filter(scene, concept, current)
    return current
|
def gen_description(rule1_cat, d1, rule2_cat, d2):
    """Compose an English noun phrase from two attribute words.

    Args:
        rule1_cat, rule2_cat: attribute categories ('size', 'color',
            'material', 'shape') of the two descriptors.
        d1, d2: the descriptor words themselves.

    Returns:
        A phrase like 'a red cube' or 'an enormous red object', with the words
        ordered canonically (size < color < material < shape) and 'object'
        appended when no shape word is present.
    """
    cat_order = ['size', 'color', 'material', 'shape']
    # Canonicalize adjective order: swap so rule1 precedes rule2.
    if (cat_order.index(rule1_cat) > cat_order.index(rule2_cat)):
        (rule1_cat, rule2_cat) = (rule2_cat, rule1_cat)
        (d1, d2) = (d2, d1)
    d = ((d1 + ' ') + d2)
    if (rule2_cat != 'shape'):
        # No shape word present, so add a generic head noun.
        d += ' object'
    # Bug fix: the original `d.startswith('aeiou')` tested for the literal
    # five-character prefix "aeiou"; pass a tuple so each vowel is checked.
    if d.startswith(('a', 'e', 'i', 'o', 'u')):
        d = ('an ' + d)
    else:
        d = ('a ' + d)
    return d
|
def main():
    """Generate 100 Raven's-style (RPM) 3x3 grid questions over CLEVR scenes
    and dump them to args.output as JSON."""
    scenes = jacinle.load_json(args.scenes_json)['scenes']
    def find_scene_matching(name, answer):
        """Randomly probe up to 1000 scenes for one that does (answer=True)
        or does not (answer=False) contain an object matching the
        space-separated concept string `name`.

        NOTE(review): returns None implicitly when no scene matches within
        1000 tries, which would raise TypeError at the caller's unpacking —
        confirm this is acceptable for the data being used.
        """
        for i in range(1000):
            scene_index = random.randint(0, (len(scenes) - 1))
            scene = scenes[scene_index]
            res = multi_filter(scene, name, range(len(scene['objects'])))
            if ((answer is True) and (len(res) > 0)):
                return (scene_index, scene)
            if ((answer is False) and (len(res) == 0)):
                return (scene_index, scene)
    rpms = list()
    for i in range(100):
        # Pick two distinct attribute categories and one 3-value rule from each.
        (rule1_cat, rule2_cat) = random.sample(list(g_all_rules.keys()), 2)
        rule1 = random.choice(g_all_rules[rule1_cat])
        rule2 = random.choice(g_all_rules[rule2_cat])
        print(rule1, rule2)
        desired_answer = random.choice([True, False])
        # The missing (row 3, col 3) cell is described by the rules' third
        # entries; find a scene that does/doesn't contain such an object.
        (scene_index, scene) = find_scene_matching(f'{rule1[2]} {rule2[2]}', desired_answer)
        question = 'There are 9 objects, ordered in a 3x3 grid: '
        for i in range(3):
            for j in range(3):
                # Describe all cells except the bottom-right one being asked about.
                if ((i == 2) and (j == 2)):
                    continue
                question += f'row {(i + 1)} col {(j + 1)} is {gen_description(rule1_cat, rule1[i], rule2_cat, rule2[j])}; '
        question += 'I am missing one object at row 3 col 3. Can you find an object in the scene that can fit there?'
        rpm = {'rule1_cat': rule1_cat, 'rule1': rule1, 'rule2_cat': rule2_cat, 'rule2': rule2, 'answer': desired_answer, 'scene_index': scene_index, 'image_filename': scene['image_filename'], 'target_object': f'{rule1[2]} {rule2[2]}', 'question': question, 'program': f'exists(Object, lambda x: {rule1[2]}(x) and {rule2[2]}(x))'}
        rpms.append(rpm)
    jacinle.dump_json(args.output, {'rpms': rpms})
    print(f'Saved: "{args.output}".')
|
def main():
    """Load the dataset selected by args.dataset, print basic statistics and
    one example, then drop into an interactive IPython shell."""
    loader_name = g_dataset_loaders[args.dataset]
    dataset = globals()[loader_name](args.data_dir)
    print('Dataset statistics:')
    print(' Length:', len(dataset))
    print('Dataset examples:')
    jacinle.stprint(dataset[0], 'dataset[0]', max_depth=1)
    # Interactive inspection of the loaded dataset.
    from IPython import embed
    embed()
|
def load_CLEVR(data_dir: str):
    """Build the CLEVR dataset rooted at `data_dir`.

    Expects the standard CLEVR layout: scenes.json, questions.json, images/
    and vocab.json directly under `data_dir`.

    Bug fix: the original ignored the `data_dir` parameter and read the global
    `args.data_dir` instead; use the parameter so the loader works for any
    directory it is given (callers already pass args.data_dir).
    """
    from concepts.benchmark.clevr.dataset import make_dataset
    return make_dataset(scenes_json=osp.join(data_dir, 'scenes.json'), questions_json=osp.join(data_dir, 'questions.json'), image_root=osp.join(data_dir, 'images'), vocab_json=osp.join(data_dir, 'vocab.json'))
|
@dataclass
class FunctionGroupSummary(object):
    """Aggregated statistics for all parsed functions sharing one type signature."""
    # Canonical "(arg_types) -> return_type" string identifying the group.
    signature: str
    # Total number of function applications observed with this signature.
    count: int = 0
    # Maps function name -> list of example records (prompt / raw code /
    # HTML-highlighted parsed expression).
    examples: dict = field(default_factory=dict)
|
def main():
    """Parse every generated code string and emit CSV/HTML parsing reports.

    While parsing (which also grows `domain` as a side effect), three
    aggregations are built:
      - all_rows: one record per (prompt, code) with parse success/failure;
      - all_function_groups: per-type-signature counts plus HTML examples;
      - all_types: example expressions for each variable typename.
    Writes summary.csv, parsing.html, parsing.csv, function_groups.html and
    types.html under args.output_dir.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    all_codes = io.load_pkl(args.parsed_filename)
    all_rows = list()
    all_function_groups: dict[(str, FunctionGroupSummary)] = dict()
    all_types: dict[(str, list)] = dict()
    for (prompt, codes) in jacinle.tqdm_gofor(all_codes, desc='Creating domain from parsings'):
        # Normalize a single string into a one-element list (and write it
        # back so later consumers of all_codes see the normalized form).
        if isinstance(codes, str):
            codes = [codes]
            all_codes[prompt] = codes
        for code in codes:
            exception = ''
            parsed_expression = None
            parsed_expression_str = ''
            try:
                parsed_expression = parser.parse_expression(code)
                parsed_expression_str = str(parsed_expression)
            except Exception:
                # Bug fix: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit during long parsing runs.
                exception = traceback.format_exc()
            all_rows.append({'prompt': prompt, 'raw_code': code, 'parse_success': (parsed_expression is not None), 'parsed_expression': (parsed_expression_str if (parsed_expression is not None) else exception)})
            if (parsed_expression is not None):
                # Walk the parsed tree to collect function/type statistics.
                for expr in iter_exprs(parsed_expression):
                    if isinstance(expr, FunctionApplicationExpression):
                        function = expr.function
                        signature = get_function_signature(function)
                        if (signature not in all_function_groups):
                            all_function_groups[signature] = FunctionGroupSummary(signature)
                        if (function.name not in all_function_groups[signature].examples):
                            all_function_groups[signature].examples[function.name] = list()
                        all_function_groups[signature].count += 1
                        # Keep at most 3 highlighted examples per function name.
                        if (len(all_function_groups[signature].examples[function.name]) < 3):
                            all_function_groups[signature].examples[function.name].append({'prompt': prompt, 'raw_code': code, 'parsed_expression': (('<pre>' + parsed_expression_str.replace((function.name + '('), f'<span style="color:red">{function.name}</span>(')) + '</pre>')})
                    elif isinstance(expr, VariableExpression):
                        typename = expr.return_type.typename
                        if (typename not in all_types):
                            all_types[typename] = list()
                        # Keep at most 5 highlighted examples per typename.
                        if (len(all_types[typename]) < 5):
                            all_types[typename].append({'prompt': prompt, 'raw_code': code, 'parsed_expression': (('<pre>' + parsed_expression_str.replace(typename, f'<span style="color:red">{typename}</span>')) + '</pre>')})
    io.mkdir(args.output_dir)
    # --- summary.csv: global counts plus per-signature totals. ---
    with open(f'{args.output_dir}/summary.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['# of prompts:', len(all_codes)])
        writer.writerow(['# of codes:', sum((len(x) for x in all_codes.values()))])
        writer.writerow(['# of parsed codes:', sum((1 for x in all_rows if x['parse_success']))])
        writer.writerow(['# of parsed types:', len(domain.types)])
        writer.writerow(['# of parsed functions:', len(domain.functions)])
        writer.writerow(['# of parsed function groups:', len(all_function_groups)])
        writer.writerow([])
        for function_group in sorted(all_function_groups.values(), key=(lambda x: x.count), reverse=True):
            writer.writerow([f'{function_group.signature}:', function_group.count])
    print('Summary written to', f'{args.output_dir}/summary.csv')
    # --- parsing.html: per-code parse outcomes with the summary on top. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/parsing.html', 'Parsing Results')
    with visualizer.html():
        with visualizer.table('Result Summary', [HTMLTableColumnDesc('summary', 'Summary', 'code')]):
            string = ''
            string += f'''# of prompts: {len(all_codes)}
'''
            string += f'''# of codes: {sum((len(x) for x in all_codes.values()))}
'''
            string += f'''# of parsed codes: {sum((1 for x in all_rows if x['parse_success']))}
'''
            string += f'''# of parsed types: {len(domain.types)}
'''
            string += f'''# of parsed functions: {len(domain.functions)}
'''
            string += f'''# of parsed function groups: {len(all_function_groups)}
'''
            visualizer.row(summary=string)
        with visualizer.table('Parsing Results', [HTMLTableColumnDesc('index', 'Index', 'text'), HTMLTableColumnDesc('prompt', 'Prompt', 'text', {}, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', {}, {'width': '20%'}), HTMLTableColumnDesc('parse_success', 'Parse Success', 'code', {'width': '50px'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'code', {}, {'width': '50%'})]):
            for (i, row) in enumerate(all_rows):
                visualizer.row(**row, index=i)
    print('Parsing results written to', f'{args.output_dir}/parsing.html')
    # --- parsing.csv: same rows in machine-readable form. ---
    with open(f'{args.output_dir}/parsing.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['prompt', 'raw_code', 'parse_success', 'parsed_expression'])
        for row in all_rows:
            writer.writerow([row['prompt'], row['raw_code'], row['parse_success'], row['parsed_expression']])
    print('Parsing results written to', f'{args.output_dir}/parsing.csv')
    # --- function_groups.html: examples per signature, most frequent first. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/function_groups.html', 'Function Groups')
    with visualizer.html():
        for function_group in sorted(all_function_groups.values(), key=(lambda x: x.count), reverse=True):
            with visualizer.table(f'{function_group.signature} (count = {function_group.count})', [HTMLTableColumnDesc('prompt', 'Prompt', 'text', None, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', None, {'width': '30%'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'raw', None, {'width': '50%'})]):
                for example_list in function_group.examples.values():
                    for example in example_list:
                        visualizer.row(**example)
    print('Function groups written to', f'{args.output_dir}/function_groups.html')
    # --- types.html: example expressions per variable typename. ---
    visualizer = HTMLTableVisualizer(f'{args.output_dir}/types.html', 'Types')
    with visualizer.html():
        for (typename, examples) in all_types.items():
            with visualizer.table(f'{typename}', [HTMLTableColumnDesc('prompt', 'Prompt', 'text', None, {'width': '20%'}), HTMLTableColumnDesc('raw_code', 'Raw Code', 'code', None, {'width': '30%'}), HTMLTableColumnDesc('parsed_expression', 'Parsed Expression', 'raw', None, {'width': '50%'})]):
                for example in examples:
                    visualizer.row(**example)
    print('Types written to', f'{args.output_dir}/types.html')
|
def get_function_signature(function):
    """Render a function's type as a canonical "(arg_types) -> return_type" string."""
    ftype = function.ftype
    arg_typenames = tuple((arg.typename for arg in ftype.argument_types))
    return '{} -> {}'.format(arg_typenames, ftype.return_type.typename)
|
def main():
    """Summarize the domain built from args.parsed_filename, grouping its
    functions by type signature and printing counts per group."""
    domain = make_domain(args.parsed_filename)
    domain.print_summary()
    print('Summary:')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    function_groups = dict()
    for function in domain.functions.values():
        # Consistency: reuse the shared get_function_signature helper instead
        # of re-deriving the "(args) -> return" key inline (the original
        # duplicated that logic verbatim).
        key = get_function_signature(function)
        function_groups.setdefault(key, []).append(function)
    print(' - # of function groups: {}'.format(len(function_groups)))
    # Largest groups first.
    for (key, functions) in sorted(function_groups.items(), key=(lambda x: len(x[1])), reverse=True):
        print(' - {}: {}'.format(key, len(functions)))
|
@dataclass
class FunctionGroupSummary(object):
    """Aggregated statistics for all parsed functions sharing one type signature."""
    # Canonical "(arg_types) -> return_type" string identifying the group.
    signature: str
    # Total number of function applications observed with this signature.
    count: int = 0
    # Maps function name -> list of example records (prompt / raw code /
    # HTML-highlighted parsed expression).
    examples: dict[(str, list[dict])] = field(default_factory=dict)
|
def main():
    """Grow a bare domain by parsing every code string (parse results are
    discarded; only the domain side effects matter), then prune the domain
    and print before/after summaries."""
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    all_codes = io.load_pkl(args.parsed_filename)
    for (prompts, codes) in jacinle.tqdm_gofor(all_codes):
        # Consistency/bug fix: the sibling loaders in this file wrap a bare
        # string into a one-element list; without this, iterating a str would
        # feed single characters to the parser.
        if isinstance(codes, str):
            codes = [codes]
        for code in codes:
            try:
                # Parsing is only for its side effect of registering new
                # types/functions on `domain`; the expression itself is unused.
                _ = parser.parse_expression(code)
            except Exception:
                pass
    print('Summary (before pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    domain = prune_domain(domain)
    print('Summary (after pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(('-' * 80))
    domain.print_summary()
|
def main2():
    """Parse, validity-check, and re-parse generated code strings, then
    optionally dump the surviving prompt->codes mapping to args.output.

    Three phases:
      1. Parse everything against a bare domain (growing it as a side effect).
      2. Filter parsed expressions through check_expr_validity.
      3. Re-parse only the surviving codes against a *fresh* bare domain so the
         final domain contains exactly the vocabulary of valid expressions.
    """
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    if args.parsed_filename.endswith('.json'):
        all_codes = io.load_json(args.parsed_filename)
    else:
        all_codes = io.load_pkl(args.parsed_filename)
    expressions = list()
    for (prompt, codes) in jacinle.tqdm_gofor(all_codes, leave=False, desc='Parsing'):
        if isinstance(codes, str):
            codes = [codes]
        for code in codes:
            code = code.strip()
            try:
                expr = parser.parse_expression(code)
                expressions.append((prompt, code, expr))
            except Exception:
                # Unparseable codes are simply dropped from the pipeline.
                pass
    print('Summary (before pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(' - # of input sentences: {}'.format(len(all_codes)))
    print(' - # of input expressions: {}'.format(sum((len(c) for c in all_codes.values()))))
    print(' - # of parsed sentences: {}'.format(len({x[0] for x in expressions})))
    print(' - # of parsed expressions: {}'.format(len(expressions)))
    checked_expressions = list()
    for (prompt, code, expr) in jacinle.tqdm(expressions, leave=False, desc='Checking expressions'):
        if args.debug_checker:
            print(expr)
        try:
            check_expr_validity(expr)
            if args.debug_checker:
                print(' - OK')
            checked_expressions.append((prompt, code, expr))
        except Exception:
            # Invalid expressions are dropped; in debug mode, pause so the
            # failure can be inspected interactively.
            if args.debug_checker:
                print(' - failed')
                traceback.print_exc()
                input('Press any key to continue...')
    domain = create_bare_domain()
    parser = create_default_parser(domain)
    for (_, code, _) in jacinle.tqdm(checked_expressions, leave=False, desc='Re-parsing'):
        try:
            parser.parse_expression(code)
        except Exception:
            pass
    print('Summary (after pruning):')
    print(' - # of types: {}'.format(len(domain.types)))
    print(' - # of functions: {}'.format(len(domain.functions)))
    print(' - # of parsed sentences: {}'.format(len({s[0] for s in checked_expressions})))
    print(' - # of parsed expressions: {}'.format(len(checked_expressions)))
    print(('-' * 80))
    domain.print_summary()
    if (args.output is not None):
        # Fix: local variable was misspelled `prmopt` in the original.
        expressions = dict()
        for (prompt, code, expr) in checked_expressions:
            if (prompt not in expressions):
                expressions[prompt] = list()
            expressions[prompt].append(code)
        io.dump(args.output, expressions)
        print(f'Output to {args.output}.')
|
def prune_domain(old_domain: FunctionDomain) -> FunctionDomain:
    """Copy into a fresh bare domain only the bool-valued multi-argument
    functions whose arguments (except possibly the first) are all Object.

    Returns:
        A new domain containing the surviving functions and the argument
        types they reference. Functions already present in a bare domain
        are skipped.
    """
    new_domain = create_bare_domain()
    for (name, function) in old_domain.functions.items():
        if (name in new_domain.functions):
            continue
        print('Checking function: {} {}'.format(name, function))
        ftype = function.ftype
        argument_types = [x.typename for x in ftype.argument_types]
        return_type = ftype.return_type.typename
        # Simplification: the original tested two conditions, but "all args
        # are Object" strictly implies "all args after the first are Object",
        # so the single weaker check below is equivalent.
        # NOTE(review): one-argument Object -> bool predicates are pruned by
        # the len > 1 requirement — confirm that is intentional.
        pass_test = ((len(argument_types) > 1) and all(((x == 'Object') for x in argument_types[1:])) and (return_type == 'bool'))
        if pass_test:
            print(f' Pass test: {name} {argument_types} -> {return_type}')
            new_domain.functions[name] = function
            # Carry over any argument types the bare domain lacks.
            for t in ftype.argument_types:
                if (t.typename not in new_domain.types):
                    new_domain.types[t.typename] = t
        else:
            print(f' Prune {name}')
    return new_domain
|
def check_expr_validity(expression: E.Expression):
    """Validate a parsed expression against the allowed query grammar.

    The top-level expression may be a generalized quantification only with op
    'describe' or 'count'. The recursive walk then enforces where query-like
    ops ('point', 'describe') may appear, rejects 'view', and restricts
    function applications to bool-valued comparisons or 1/2-argument
    Object predicates.

    Raises:
        ValueError: on any construct outside the allowed grammar.
        AssertionError: when a query op appears where queries are disallowed.
    """
    if isinstance(expression, E.GeneralizedQuantificationExpression):
        if (expression.quantification_op in ('describe', 'count')):
            pass
        else:
            raise ValueError('Invalid quantification op: {}'.format(expression.quantification_op))
    def dfs(expr: E.Expression, allow_queries: bool=False):
        # `allow_queries` is True only at the outermost level and below
        # 'count'/'iota'; 'point' and 'describe' reset it to False so query
        # ops cannot nest inside each other.
        if isinstance(expr, E.GeneralizedQuantificationExpression):
            if (expr.quantification_op == 'iota'):
                dfs(expr.expression, allow_queries=allow_queries)
            elif (expr.quantification_op == 'point'):
                assert allow_queries
                dfs(expr.expression, allow_queries=False)
            elif (expr.quantification_op == 'view'):
                raise ValueError(f'Invalid view: {repr(expr)}.')
            elif (expr.quantification_op == 'describe'):
                assert allow_queries
                if isinstance(expr.expression, E.FunctionApplicationExpression):
                    # A describe body must quantify over an Object or Action
                    # variable, or be a 2-argument application whose first
                    # argument is the bound variable and whose second returns
                    # Object/Action (in which case only that argument is
                    # validated further).
                    if (expr.variable.dtype.typename == 'Object'):
                        pass
                    elif (expr.variable.dtype.typename == 'Action'):
                        pass
                    elif ((len(expr.expression.arguments) == 2) and isinstance(expr.expression.arguments[0], E.VariableExpression) and (expr.expression.arguments[0].variable.name == expr.variable.name) and (expr.expression.arguments[1].return_type.typename in ['Object', 'Action'])):
                        return dfs(expr.expression.arguments[1], allow_queries=allow_queries)
                    else:
                        raise ValueError(f'Invalid describe: {repr(expr)}.')
                else:
                    raise ValueError(f'Invalid describe: {repr(expr)}.')
                dfs(expr.expression, allow_queries=False)
            elif (expr.quantification_op == 'count'):
                dfs(expr.expression, allow_queries=allow_queries)
        elif isinstance(expr, FunctionApplicationExpression):
            if (expr.return_type in (BOOL,)):
                function = expr.function
                # Comparisons are always allowed; otherwise only 1- or
                # 2-argument all-Object predicates pass.
                if (function.name in ('equal', 'less_than', 'greater_than')):
                    pass
                elif ((len(function.ftype.argument_types) > 0) and all(((x.typename == 'Object') for x in function.ftype.argument_types))):
                    # NOTE(review): this reads `function.ftype.arguments`
                    # while every other check uses `ftype.argument_types` —
                    # confirm `arguments` exists on ftype and has the same
                    # length, else this raises AttributeError instead of the
                    # intended ValueError.
                    if (len(function.ftype.arguments) in (1, 2)):
                        pass
                    else:
                        raise ValueError(f'Invalid function: {repr(expr)}.')
                else:
                    raise ValueError('Invalid function: {}'.format(function))
            else:
                raise ValueError('Invalid return type: {}'.format(expr.return_type))
            for arg in expr.arguments:
                dfs(arg, allow_queries=allow_queries)
        elif isinstance(expr, E.VariableExpression):
            pass
        elif isinstance(expr, E.ConstantExpression):
            pass
        elif isinstance(expr, E.BoolExpression):
            # All operands of a boolean connective must themselves be bool.
            for arg in expr.arguments:
                if (arg.return_type != BOOL):
                    raise ValueError('Invalid bool expression: {}'.format(arg))
            for arg in expr.arguments:
                dfs(arg, allow_queries=allow_queries)
        elif isinstance(expr, E.QuantificationExpression):
            dfs(expr.expression, allow_queries=allow_queries)
        else:
            raise ValueError('Invalid expression: {}'.format(repr(expr)))
    dfs(expression, allow_queries=True)
|
def main():
    """End-to-end training/evaluation driver: set up output dirs and logging,
    load parses and datasets, build the model/optimizer, then either run a
    single evaluation pass or the full training loop with periodic validation
    and checkpointing."""
    # --- Output directories and log files (skipped entirely in debug mode). ---
    if (not args.debug):
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None
    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False
    # --- Default dataset paths derived from args.data_dir. ---
    if (args.data_questions_json is None):
        args.data_questions_json = osp.join(args.data_dir, 'questions.json')
    if (args.data_scenes_json is None):
        args.data_scenes_json = osp.join(args.data_dir, 'scenes.json')
    if (args.data_image_root is None):
        args.data_image_root = osp.join(args.data_dir, 'images')
    if (args.data_vocab_json is None):
        args.data_vocab_json = osp.join(args.data_dir, 'vocab.json')
    if (args.data_output_vocab_json is None):
        args.data_output_vocab_json = osp.join(args.data_dir, 'output-vocab.json')
    if (args.validation_data_dir is not None):
        args.validation_data_questions_json = osp.join(args.validation_data_dir, 'questions.json')
        args.validation_data_scenes_json = osp.join(args.validation_data_dir, 'scenes.json')
        args.validation_data_image_root = osp.join(args.validation_data_dir, 'images')
    # --- Load pre-computed parses and build the domain from them. ---
    all_parses = dict()
    if (args.data_parses is not None):
        for filename in args.data_parses:
            # Bug fix: the assertion message was the placeholder literal
            # 'File (unknown) does not exist.'; interpolate the filename.
            assert osp.isfile(filename), f'File {filename} does not exist.'
            logger.info('Loading parses from {}.'.format(filename))
            if filename.endswith('.p'):
                content = io.load_pkl(filename)
            else:
                content = io.load(filename)
            all_parses.update(content)
    from left.domain import create_domain_from_parsing
    domain = create_domain_from_parsing(all_parses)
    # --- Optional word->concept alignment table. ---
    if (args.data_concept_match is not None):
        import pandas as pd
        df = pd.read_csv(args.data_concept_match)
        concept_mapping = dict()
        for (i, row) in df.iterrows():
            if row['align']:
                concept_mapping[row['word']] = row['mapped']
        logger.critical(f'Loaded {len(concept_mapping)} concept matches from {args.data_concept_match}.')
    else:
        concept_mapping = None
    from jacinle.config.g import g
    g.concept_mapping = concept_mapping
    logger.critical('Total parsed sentences: {}.'.format(len(all_parses)))
    logger.critical('Domain: {}'.format(domain))
    logger.info('Number of types: {}'.format(len(domain.types)))
    logger.info('Number of functions: {}'.format(len(domain.functions)))
    # --- Datasets: standard CLEVR split or a custom transfer dataset. ---
    logger.critical('Loading the dataset.')
    if (args.evaluate_custom is None):
        from concepts.benchmark.clevr.dataset import make_dataset
        if (args.validation_data_dir is None):
            dataset = make_dataset(args.data_scenes_json, args.data_questions_json, args.data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
            (train_dataset, validation_dataset) = dataset.split_trainval(args.data_tvsplit)
        else:
            train_dataset = make_dataset(args.data_scenes_json, args.data_questions_json, args.data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
            validation_dataset = make_dataset(args.validation_data_scenes_json, args.validation_data_questions_json, args.validation_data_image_root, vocab_json=args.data_vocab_json, output_vocab_json=args.data_output_vocab_json)
    else:
        from left.clevr_custom_transfer import make_dataset
        dataset = make_dataset(args.evaluate_custom, args.data_scenes_json, args.data_questions_json, args.data_image_root, args.data_output_vocab_json)
        train_dataset = validation_dataset = dataset
    # --- Model and optimizer. ---
    logger.critical('Building the model.')
    model = desc.make_model(args, domain, all_parses, (train_dataset.output_vocab if hasattr(train_dataset, 'output_vocab') else train_dataset.unwrapped.output_vocab), custom_transfer=args.evaluate_custom)
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        cudnn.benchmark = True
    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)
    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))
    trainer = TrainerEnv(model, optimizer)
    # --- Resume / load pretrained weights. ---
    parent_meta_file = None
    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        raw = trainer.load_weights(args.load)
        if (raw is not None):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))
            parent_meta_file = raw['extra']['meta_file']
    # --- Meters (tensorboard-backed or plain). ---
    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()
    # --- MLDash experiment tracking (metainfo is re-written here because
    # args may have been mutated above). ---
    if (not args.debug):
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)
        if (parent_meta_file is not None):
            try:
                parent_run = io.load(parent_meta_file)['args']['run_name']
                logger.critical('Setting parent run: {}.'.format(parent_run))
                if args.evaluate:
                    mldash.update_parent(parent_run, is_master=False)
                else:
                    mldash.update_parent(parent_run, is_master=True)
            except Exception:
                # Bug fix: was a bare `except:`; keep the best-effort behavior
                # but stop swallowing KeyboardInterrupt/SystemExit.
                logger.exception('Exception occurred during loading metainfo.')
    if args.embed:
        from IPython import embed
        embed()
    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)
    # --- Data loaders. ---
    logger.critical('Building the data loader.')
    train_dataloader = train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)
    validation_dataloader = validation_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=args.data_workers)
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)
    undefined_configs = configs.find_undefined_values('configs')
    if (len(undefined_configs) > 0):
        logger.warning('Undefined configs: {}'.format(undefined_configs))
        if (not yes_or_no('Continue the script?', default='no')):
            return
    # --- Evaluation-only modes: run one validation pass and return. ---
    if (args.evaluate_custom is not None):
        epoch = 0
        model.eval()
        validate_epoch_custom(epoch, trainer, validation_dataloader, meters)
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return
    if args.evaluate:
        epoch = 0
        model.eval()
        if args.evaluate_on_train:
            validate_epoch(epoch, trainer, train_dataloader, meters)
        else:
            validate_epoch(epoch, trainer, validation_dataloader, meters)
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return
    # --- Training loop with optional curriculum, validation and checkpoints. ---
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        if (args.curriculum != 'none'):
            (this_train_dataset, this_validation_dataset) = get_curriculum_dataset(epoch, train_dataset, validation_dataset)
            train_dataloader = this_train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=args.data_workers)
        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters)
        if (((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)) or (epoch == 1)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters)
            latest_parses = model.parses
            if (not args.debug):
                fname = osp.join(args.dump_dir, 'latest_parses.pkl')
                io.dump(fname, latest_parses)
                logger.critical(f'Latest parses saved to "{fname}".')
        if (not args.debug):
            meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
|
def get_curriculum_dataset(epoch, train_dataset, validation_dataset):
    """Return datasets filtered for the curriculum stage containing `epoch`.

    Scans g_curriculum_strategy for the stage whose epoch interval contains
    `epoch`, then (depending on args.curriculum) filters by max scene size
    and/or max program size. Returns (train_dataset, validation_dataset),
    unfiltered when no stage matches.
    """
    for (si, s) in enumerate(g_curriculum_strategy):
        # Stage si applies for epochs in (strategy[si][0], strategy[si+1][0]].
        # NOTE(review): `si + 1` assumes the strategy list ends with a sentinel
        # stage whose threshold exceeds the final epoch; otherwise this indexes
        # past the end on the last entry — confirm against its definition.
        if (g_curriculum_strategy[si][0] < epoch <= g_curriculum_strategy[(si + 1)][0]):
            (max_scene_size, max_program_size) = s[1:]
            if (args.curriculum in ('scene', 'all')):
                train_dataset = train_dataset.filter_scene_size(max_scene_size)
                validation_dataset = validation_dataset.filter_scene_size(max_scene_size)
            if (args.curriculum in ('program', 'all')):
                train_dataset = train_dataset.filter_program_size_raw(max_program_size)
                validation_dataset = validation_dataset.filter_program_size_raw(max_program_size)
            logger.critical('Building the data loader. Curriculum = {}/{}, length = {}.'.format(*s[1:], len(train_dataset)))
            break
    return (train_dataset, validation_dataset)
|
def train_epoch(epoch, trainer, train_dataloader, meters):
    """Run one training epoch: step the trainer over ``nr_iters`` batches and
    record loss/monitor/timing metrics into ``meters``.

    NOTE(review): ``train_epoch`` is redefined later in this file (with extra
    parameters); at runtime only the last definition is in effect.
    """
    nr_iters = args.iters_per_epoch
    # iters_per_epoch == 0 means "one full pass over the dataloader".
    if (nr_iters == 0):
        nr_iters = len(train_dataloader)
    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            if args.use_gpu:
                # Under gpu_parallel the dataloader wrapper scatters to devices.
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            # `end` is the timestamp of the previous phase boundary; the deltas
            # below split wall time into data-loading vs. optimizer-step time.
            data_time = (time.time() - end)
            end = time.time()
            (loss, monitors, output_dict, extra_info) = trainer.step(feed_dict)
            step_time = (time.time() - end)
            end = time.time()
            meters.update(loss=loss)
            update_meters(meters, monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            if args.use_tb:
                meters.flush()
            # Show only training-side, low-depth metric keys in the progress bar.
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), {k: v for (k, v) in meters.val.items() if ((not k.startswith('validation')) and (k.count('/') <= 1))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    trainer.trigger_event('epoch:after', trainer, epoch)
|
@jactorch.no_grad_func
def validate_epoch(epoch, trainer, val_dataloader, meters):
    """Evaluate the model over ``val_dataloader``, recording metrics under the
    ``validation/`` prefix and, in (non-debug) evaluation runs, dumping an HTML
    visualization of predictions and per-step execution traces.

    NOTE(review): ``validate_epoch`` is redefined later in this file; at
    runtime only the last definition is in effect.
    """
    end = time.time()
    # The visualizer only runs for `--evaluate` runs outside debug mode.
    run_visualizer = False
    if (args.evaluate and (not args.debug)):
        run_visualizer = True
        # Heavy visualization dependencies are imported lazily, only when needed.
        import matplotlib.pyplot as plt
        from PIL import Image
        from jaclearn.visualize.html_table import HTMLTableColumnDesc, HTMLTableVisualizer
        from jaclearn.visualize.box import vis_bboxes
        from concepts.dsl.tensor_value import TensorValue
    if run_visualizer:
        visualizer = HTMLTableVisualizer(osp.join(args.vis_dir, 'evaluation'), 'Evaluation')
        visualizer.begin_html()
        visualizer_index = 0
        # Cap on the number of questions rendered into the HTML report.
        visualizer_total = 30
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])
            step_time = (time.time() - end)
            end = time.time()
            update_meters(meters, monitors, prefix='validation/')
            meters.update({'time/data': data_time, 'time/step': step_time})
            if (run_visualizer and (visualizer_index < visualizer_total)):
                for i in range(len(feed_dict['question_index'])):
                    # Image root differs when a separate validation data dir is configured.
                    if (args.validation_data_dir is None):
                        image_filename = osp.join(args.data_image_root, feed_dict['image_filename'][i])
                    else:
                        image_filename = osp.join(args.validation_data_image_root, feed_dict['image_filename'][i])
                    image = Image.open(image_filename)
                    # One table per question: image + question/answer/prediction/program.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]}", [HTMLTableColumnDesc('image', 'Image', 'figure', {'width': '600px'}), HTMLTableColumnDesc('question', 'Question', 'text', {'width': '200px'}), HTMLTableColumnDesc('answer', 'Answer', 'text'), HTMLTableColumnDesc('prediction', 'Prediction', 'text'), HTMLTableColumnDesc('program', 'Program', 'code', {'width': '600px'})]):
                        (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 'Object', add_text=False)
                        visualizer.row(**{'image': fig, 'question': feed_dict['question_raw'][i], 'answer': feed_dict['answer'][i], 'prediction': output_dict['pred_answers'][i], 'program': str(output_dict['parsings'][i])})
                        plt.close()
                    # Second table: per-step execution traces rendered over object boxes.
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]} (Program)", [HTMLTableColumnDesc('id', 'ID', 'text', {'width': '50px'}), HTMLTableColumnDesc('visualization', 'Visualization', 'figure', {'width': '600px'}), HTMLTableColumnDesc('program_and_output', 'Program and Output', 'code', {'width': '600px'})]):
                        for (j, (program, output)) in enumerate(output_dict['execution_traces'][i]):
                            # Only per-object scores (one batch variable, one score per box) are drawable.
                            if (isinstance(output, TensorValue) and (output.dtype.typename in ('bool', 'Object')) and (len(output.batch_variables) == 1) and (output.tensor.shape[0] == len(feed_dict['objects_raw'][i]))):
                                (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 's:', add_text=True, legends=[str(round(x, 3)) for x in output.tensor.detach().cpu().tolist()])
                                visualizer.row(**{'id': j, 'visualization': fig, 'program_and_output': ((str(program) + '\n\n') + str(output))})
                                plt.close()
                    print('Visualized', visualizer_index)
                    visualizer_index += 1
                    if (visualizer_index >= visualizer_total):
                        break
            # Once the visualization quota is met (or the visualizer is off),
            # a visualization-only evaluation stops iterating over batches.
            elif args.evaluate_visualization_only:
                break
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    if run_visualizer:
        visualizer.end_html()
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(visualizer.visdir, visualizer.visdir)
        mldash.update(run_description=link)
|
def update_meters(meters, monitors, prefix: str=None):
    """Push ``monitors`` into ``meters``, honoring paired ``<key>/n`` counts.

    A key ``k`` accompanied by ``k + '/n'`` is recorded with that count as its
    sample weight; both entries are removed from ``monitors`` (which is
    mutated in place). All remaining entries are recorded with the default
    weight in a single final call.
    """
    weighted_keys = [key for key in monitors if ((key + '/n') in monitors)]
    for key in weighted_keys:
        count = monitors.pop(key + '/n')
        meters.update({key: monitors.pop(key)}, n=count, prefix=prefix)
    meters.update(monitors, prefix=prefix)
|
@jactorch.no_grad_func
def validate_epoch_custom(epoch, trainer, val_dataloader, meters):
    """Variant of ``validate_epoch`` that additionally respects
    ``args.validation_visualize``: visualization can be switched off even in
    evaluation runs. Otherwise the body mirrors ``validate_epoch`` above
    (metrics under the ``validation/`` prefix, optional HTML report).
    """
    end = time.time()
    run_visualizer = False
    if (args.evaluate and (not args.debug)):
        run_visualizer = True
        # Extra gate compared with `validate_epoch`: explicit opt-out flag.
        if (args.validation_visualize is False):
            run_visualizer = False
        # Heavy visualization dependencies are imported lazily.
        import matplotlib.pyplot as plt
        from PIL import Image
        from jaclearn.visualize.html_table import HTMLTableColumnDesc, HTMLTableVisualizer
        from jaclearn.visualize.box import vis_bboxes
        from concepts.dsl.tensor_value import TensorValue
    if run_visualizer:
        visualizer = HTMLTableVisualizer(osp.join(args.vis_dir, 'evaluation'), 'Evaluation')
        visualizer.begin_html()
        visualizer_index = 0
        # Cap on the number of questions rendered into the HTML report.
        visualizer_total = 30
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])
            step_time = (time.time() - end)
            end = time.time()
            update_meters(meters, monitors, prefix='validation/')
            meters.update({'time/data': data_time, 'time/step': step_time})
            if (run_visualizer and (visualizer_index < visualizer_total)):
                for i in range(len(feed_dict['question_index'])):
                    # Image root differs when a separate validation data dir is configured.
                    if (args.validation_data_dir is None):
                        image_filename = osp.join(args.data_image_root, feed_dict['image_filename'][i])
                    else:
                        image_filename = osp.join(args.validation_data_image_root, feed_dict['image_filename'][i])
                    image = Image.open(image_filename)
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]}", [HTMLTableColumnDesc('image', 'Image', 'figure', {'width': '600px'}), HTMLTableColumnDesc('question', 'Question', 'text', {'width': '200px'}), HTMLTableColumnDesc('answer', 'Answer', 'text'), HTMLTableColumnDesc('prediction', 'Prediction', 'text'), HTMLTableColumnDesc('program', 'Program', 'code', {'width': '600px'})]):
                        (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 'Object', add_text=False)
                        visualizer.row(**{'image': fig, 'question': feed_dict['question_raw'][i], 'answer': feed_dict['answer'][i], 'prediction': output_dict['pred_answers'][i], 'program': str(output_dict['parsings'][i])})
                        plt.close()
                    with visualizer.table(f"Question #{feed_dict['question_index'][i]} (Program)", [HTMLTableColumnDesc('id', 'ID', 'text', {'width': '50px'}), HTMLTableColumnDesc('visualization', 'Visualization', 'figure', {'width': '600px'}), HTMLTableColumnDesc('program_and_output', 'Program and Output', 'code', {'width': '600px'})]):
                        for (j, (program, output)) in enumerate(output_dict['execution_traces'][i]):
                            # Only per-object scores (one batch variable, one score per box) are drawable.
                            if (isinstance(output, TensorValue) and (output.dtype.typename in ('bool', 'Object')) and (len(output.batch_variables) == 1) and (output.tensor.shape[0] == len(feed_dict['objects_raw'][i]))):
                                (fig, ax) = vis_bboxes(image, feed_dict['objects_raw'][i], 's:', add_text=True, legends=[str(round(x, 3)) for x in output.tensor.detach().cpu().tolist()])
                                visualizer.row(**{'id': j, 'visualization': fig, 'program_and_output': ((str(program) + '\n\n') + str(output))})
                                plt.close()
                    print('Visualized', visualizer_index)
                    visualizer_index += 1
                    if (visualizer_index >= visualizer_total):
                        break
            # Visualization-only evaluation stops once visualization is done/off.
            elif args.evaluate_visualization_only:
                break
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    if run_visualizer:
        visualizer.end_html()
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(visualizer.visdir, visualizer.visdir)
        mldash.update(run_description=link)
|
def main():
    """Entry point for the BabelQA/human-motion training variant: set up dump
    directories, build model/optimizer/trainer, construct dataloaders, then
    run evaluation or the train/validate loop.

    NOTE(review): ``main`` is redefined later in this file (ReferIt3D
    variant); at runtime only the last definition is in effect.
    """
    # --- Dump/checkpoint/visualization directory layout (skipped in debug). ---
    if (not args.debug):
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None
    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        import jacinle
        jacinle.set_logger_output_file(args.log_file)
        jacinle.git_guard()
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
    # Tensorboard is force-disabled in debug and evaluation modes.
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False
    # --- Model construction. ---
    from concepts.benchmark.common.vocab import Vocab
    output_vocab = Vocab.from_json(args.vocab)
    logger.critical('Building the model.')
    model = desc.make_model(args.parsed_train_path, args.parsed_test_path, output_vocab)
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        cudnn.benchmark = False
    # --- Optimizer: description-provided, or AdamW over trainable params. ---
    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)
    print(('LR ' + str(args.lr)))
    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))
    trainer = TrainerEnv(model, optimizer)
    # --- Checkpoint resume / pretrained weight loading. ---
    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))
    # --- Meters: tensorboard-backed or plain in-memory. ---
    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()
    if (not args.debug):
        # NOTE(review): metainfo is written a second time here (also written above).
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)
    if args.embed:
        from IPython import embed
        embed()
    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)
    logger.critical('Building the data loader.')
    def build_human_motion_dataset(data_dir, data_split_file, split, data_source, no_gt_segments=False, filter_supervision=False):
        """Construct a BabelQA dataset for the given split/source."""
        from concepts.benchmark.vision_language.babel_qa.dataset import BabelQADataset
        dataset = BabelQADataset(data_dir, data_split_file, split, data_source, no_gt_segments, filter_supervision)
        return dataset
    train_dataset = build_human_motion_dataset(args.datadir, args.data_split_file, 'train', 'humanml3d', no_gt_segments=args.no_gt_segments, filter_supervision=args.filter_supervision)
    val_dataset = build_human_motion_dataset(args.datadir, args.data_split_file, 'val', 'humanml3d', no_gt_segments=args.no_gt_segments, filter_supervision=args.filter_supervision)
    train_dataloader = train_dataset.make_dataloader(args.batch_size, shuffle=True, drop_last=True, nr_workers=2)
    validation_dataloader = val_dataset.make_dataloader(args.batch_size, shuffle=False, drop_last=False, nr_workers=2)
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)
    # --- Evaluation-only mode: a single validation pass, then return. ---
    # NOTE(review): these calls pass `output_vocab` as a 5th argument, matching
    # the 5-argument train_epoch/validate_epoch definitions below, not the
    # 4-argument ones above.
    if args.evaluate:
        epoch = 0
        model.eval()
        validate_epoch(epoch, trainer, validation_dataloader, meters, output_vocab)
        if (not args.debug):
            meters.dump(args.meter_file)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return
    # --- Main train/validate loop. ---
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters, output_vocab)
        if ((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters, output_vocab)
            if (not args.debug):
                meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            # Losses are tracked as running minima, accuracies as running maxima.
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc') or key.startswith('train/acc') or key.startswith('validation/percent') or key.startswith('train/percent')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
|
def train_epoch(epoch, trainer, train_dataloader, meters, output_vocab):
    """Run one training epoch (BabelQA variant): step the trainer and track a
    'scored' accuracy computed by decoding argmax predictions through
    ``output_vocab`` and comparing to the string answers in ``feed_dict``.

    NOTE(review): ``train_epoch`` is redefined again later in this file; at
    runtime only the last definition is in effect.
    """
    nr_iters = args.iters_per_epoch
    # iters_per_epoch == 0 means "one full pass over the dataloader".
    if (nr_iters == 0):
        nr_iters = len(train_dataloader)
    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (loss, monitors, output_dict, extra_info) = trainer.step(feed_dict)
            step_time = (time.time() - end)
            end = time.time()
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            executions = output_dict['executions']
            predictions = []
            # NOTE(review): the inner loops below reuse `i`, shadowing the
            # outer iteration variable. Harmless in Python (`range` is not
            # affected), but confusing to read.
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            scored_accs = []
            # Accuracy is only accumulated over samples flagged as 'scored'.
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    pred_answer = output_vocab.idx2word[int(predictions[i].cpu())]
                    scored_accs.append((pred_answer == feed_dict['answer'][i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)))
                meters.update({'train/acc_scored': scored_avg_acc}, n=len(scored_accs))
                meters.update({'train/percent_scored': (len(scored_accs) / len(executions))})
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), {k: v for (k, v) in meters.val.items() if ((not k.startswith('validation')) and (k.count('/') <= 1))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    trainer.trigger_event('epoch:after', trainer, epoch)
|
def validate_epoch(epoch, trainer, val_dataloader, meters, output_vocab):
    """Evaluate one epoch (BabelQA variant): prefix monitors with
    'validation/', track scored accuracy via ``output_vocab`` decoding, and
    (outside debug mode) dump an HTML table of utterances/parses.

    NOTE(review): ``validate_epoch`` is redefined again later in this file;
    at runtime only the last definition is in effect.
    """
    if (not args.debug):
        from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
        vis = HTMLTableVisualizer(osp.join(args.vis_dir, f'episode_{epoch}'), f'Left @ Epoch {epoch}')
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(vis.visdir, vis.visdir)
        columns = [HTMLTableColumnDesc('id', 'Index', 'text', {'width': '40px'}), HTMLTableColumnDesc('utterance', 'Utterance', 'code', {'width': '1000px'}), HTMLTableColumnDesc('correctness', 'Accurate', 'text', {'width': '40px'}), HTMLTableColumnDesc('attr_cls_acc', 'Attr Cls Accuracy', 'text', {'width': '150px'}), HTMLTableColumnDesc('attr_cls_pred', 'Attr Cls Preds', 'text', {'width': '200px'}), HTMLTableColumnDesc('tree', 'Parsing Tree', 'code', {'width': '500px'})]
    # this_count limits HTML rows to max_log_count; max_viz_count is unused here.
    (this_count, max_viz_count, max_log_count) = (0, 5, 20)
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        if (not args.debug):
            vis.begin_html()
            vis.begin_table('Left', columns)
        accuracy = []
        for feed_dict in val_dataloader:
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            # Re-key all monitors under the 'validation/' prefix.
            val_monitors = {}
            for k in output_dict['monitors'].keys():
                val_monitors[('validation/' + k)] = output_dict['monitors'][k]
            output_dict['monitors'] = val_monitors
            monitors = as_float(output_dict['monitors'])
            step_time = (time.time() - end)
            end = time.time()
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            executions = output_dict['executions']
            predictions = []
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            scored_accs = []
            # Accuracy is only accumulated over samples flagged as 'scored'.
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    pred_answer = output_vocab.idx2word[int(predictions[i].cpu())]
                    scored_accs.append((pred_answer == feed_dict['answer'][i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)))
                meters.update({'validation/acc_scored': scored_avg_acc}, n=len(scored_accs))
                meters.update({'validation/percent_scored': (len(scored_accs) / len(executions))})
            # Log the first sample of each batch into the HTML table, capped.
            if ((not args.debug) and (this_count < max_log_count)):
                idx = 0
                utterance = feed_dict['question_text'][idx]
                parsing = output_dict['parsing'][idx]
                tree = ''
                if parsing:
                    tree = str(parsing)
                # NOTE(review): this compares the raw argmax index against the
                # stored answer, while the accuracy above first decodes the
                # index through output_vocab — confirm the answers here are
                # indices rather than words.
                correctness = (predictions[idx].cpu() == feed_dict['answer'][idx])
                if ('concepts_to_accs' in output_dict):
                    concepts_to_accs = str(output_dict['concepts_to_accs'][idx])
                    concepts_to_pred_concepts = str(output_dict['concepts_to_pred_concepts'][idx])
                else:
                    (concepts_to_accs, concepts_to_pred_concepts) = ('', '')
                vis.row(id=this_count, utterance=utterance, correctness=correctness, attr_cls_acc=concepts_to_accs, attr_cls_pred=concepts_to_pred_concepts, tree=tree)
                this_count += 1
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
        # NOTE(review): this re-applies the LAST batch's monitors a second
        # time (and would raise NameError on an empty dataloader) — confirm
        # whether it is intentional.
        meters.update(monitors)
    if (not args.debug):
        vis.end_table()
        vis.end_html()
    if (not args.debug):
        if args.evaluate:
            mldash.update(run_description=link)
        with mldash.update_extra_info():
            mldash.extra_info_dict.setdefault('visualizations', []).append(f'Epoch {epoch:3} Visualizations: {link}')
        logger.critical(f'Visualizations: {link}')
|
def main():
    """Entry point for the ReferIt3D training variant: set up dump
    directories, build model/optimizer/trainer, construct the ReferIt3D
    dataloaders, then run evaluation or the train/validate loop.

    This is the last definition of ``main`` visible in this chunk, so it is
    the one in effect at runtime.
    """
    # --- Dump/checkpoint/visualization directory layout (skipped in debug). ---
    if (not args.debug):
        args.dump_dir = ensure_path(osp.join('dumps', args.series_name, args.desc_name, args.expr, args.run_name))
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.vis_dir = ensure_path(osp.join(args.dump_dir, 'visualizations'))
        args.meta_file = osp.join(args.dump_dir, 'metainfo.json')
        args.log_file = osp.join(args.dump_dir, 'log.log')
        args.meter_file = osp.join(args.dump_dir, 'meter.json')
        if args.use_tb:
            args.tb_dir = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
        else:
            args.tb_dir = None
    if (not args.debug):
        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        import jacinle
        jacinle.set_logger_output_file(args.log_file)
        jacinle.git_guard()
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
    # Tensorboard is force-disabled in debug and evaluation modes.
    if (args.debug and args.use_tb):
        logger.warning('Disabling the tensorboard in the debug mode.')
        args.use_tb = False
    if (args.evaluate and args.use_tb):
        logger.warning('Disabling the tensorboard in the evaluation mode.')
        args.use_tb = False
    # --- Model construction. ---
    logger.critical('Building the model.')
    model = desc.make_model(args.parsed_train_path, args.parsed_test_path, args.idx_to_class)
    if args.use_gpu:
        model.cuda()
        if args.gpu_parallel:
            from jactorch.parallel import JacDataParallel
            model = JacDataParallel(model, device_ids=args.gpus, user_scattered=True).cuda()
        cudnn.benchmark = False
    # --- Optimizer: description-provided, or AdamW over trainable params. ---
    if hasattr(desc, 'make_optimizer'):
        logger.critical('Building customized optimizer.')
        optimizer = desc.make_optimizer(model, args.lr)
    else:
        from jactorch.optim import AdamW
        trainable_parameters = filter((lambda x: x.requires_grad), model.parameters())
        optimizer = AdamW(trainable_parameters, args.lr, weight_decay=configs.train.weight_decay)
    print(('LR ' + str(args.lr)))
    if (args.acc_grad > 1):
        from jactorch.optim import AccumGrad
        optimizer = AccumGrad(optimizer, args.acc_grad)
        logger.warning('Use accumulated grad={:d}, effective iterations per epoch={:d}.'.format(args.acc_grad, int((args.iters_per_epoch / args.acc_grad))))
    trainer = TrainerEnv(model, optimizer)
    # --- Checkpoint resume / pretrained weight loading. ---
    if args.resume:
        extra = trainer.load_checkpoint(args.resume)
        if extra:
            args.start_epoch = extra['epoch']
            logger.critical('Resume from epoch {}.'.format(args.start_epoch))
    elif args.load:
        if trainer.load_weights(args.load):
            logger.critical('Loaded weights from pretrained model: "{}".'.format(args.load))
    # --- Meters: tensorboard-backed or plain in-memory. ---
    if args.use_tb:
        from jactorch.train.tb import TBLogger, TBGroupMeters
        tb_logger = TBLogger(args.tb_dir)
        meters = TBGroupMeters(tb_logger)
        logger.critical('Writing tensorboard logs to: "{}".'.format(args.tb_dir))
    else:
        from jacinle.utils.meter import GroupMeters
        meters = GroupMeters()
    if (not args.debug):
        # NOTE(review): metainfo is written a second time here (also written above).
        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))
        logger.critical('Writing meter logs to file: "{}".'.format(args.meter_file))
        logger.critical('Initializing MLDash.')
        mldash.init(desc_name=((args.series_name + '/') + args.desc_name), expr_name=args.expr, run_name=args.run_name, args=args, highlight_args=parser, configs=configs)
        mldash.update(metainfo_file=args.meta_file, log_file=args.log_file, meter_file=args.meter_file, tb_dir=args.tb_dir)
    if args.embed:
        from IPython import embed
        embed()
    if hasattr(desc, 'customize_trainer'):
        desc.customize_trainer(trainer)
    logger.critical('Building the data loader.')
    'From ReferIt3D'
    # --- ReferIt3D data pipeline: scans, referential data, vocab, loaders. ---
    from left.data.referit3d.arguments import parse_arguments
    from left.data.referit3d.listening_dataset import make_data_loaders
    from left.data.referit3d.referit3d_reader import load_scan_related_data, load_referential_data, trim_scans_per_referit3d_data, compute_auxiliary_data
    referit3d_args = parse_arguments(['-scannet-file', args.scannet_file, '-referit3D-file', args.referit3d_file, '--max-distractors', '9', '--max-test-objects', '88', '--batch-size', '16', '--n-workers', '2'])
    (all_scans_in_dict, scans_split, class_to_idx) = load_scan_related_data(args.scannet_split_pre_fix, referit3d_args.scannet_file)
    referit_data = load_referential_data(referit3d_args, referit3d_args.referit3D_file, scans_split)
    # Keep only the scans actually referenced by the referential data.
    all_scans_in_dict = trim_scans_per_referit3d_data(referit_data, all_scans_in_dict)
    (mean_rgb, vocab) = compute_auxiliary_data(referit_data, all_scans_in_dict, referit3d_args)
    data_loaders = make_data_loaders(referit3d_args, referit_data, vocab, class_to_idx, all_scans_in_dict, mean_rgb)
    train_dataloader = data_loaders['train']
    validation_dataloader = data_loaders['test']
    'End from ReferIt3D'
    if (args.use_gpu and args.gpu_parallel):
        from jactorch.data.dataloader import JacDataLoaderMultiGPUWrapper
        train_dataloader = JacDataLoaderMultiGPUWrapper(train_dataloader, args.gpus)
        validation_dataloader = JacDataLoaderMultiGPUWrapper(validation_dataloader, args.gpus)
    # --- Evaluation-only mode: a single validation pass, then return. ---
    if args.evaluate:
        epoch = 0
        model.eval()
        validate_epoch(epoch, trainer, validation_dataloader, meters, all_scans_in_dict)
        if (not args.debug):
            meters.dump(args.meter_file)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        return
    # --- Main train/validate loop. ---
    for epoch in range((args.start_epoch + 1), (args.epochs + 1)):
        meters.reset()
        model.train()
        train_epoch(epoch, trainer, train_dataloader, meters, all_scans_in_dict)
        if ((args.validation_interval > 0) and ((epoch % args.validation_interval) == 0)):
            model.eval()
            with torch.no_grad():
                validate_epoch(epoch, trainer, validation_dataloader, meters, all_scans_in_dict)
            if (not args.debug):
                meters.dump(args.meter_file)
        if (not args.debug):
            mldash.log_metric('epoch', epoch, desc=False, expr=False)
            # Losses are tracked as running minima, accuracies as running maxima.
            for (key, value) in meters.items():
                if (key.startswith('loss') or key.startswith('validation/loss')):
                    mldash.log_metric_min(key, value.avg)
            for (key, value) in meters.items():
                if (key.startswith('acc') or key.startswith('validation/acc') or key.startswith('train/acc') or key.startswith('validation/percent') or key.startswith('train/percent')):
                    mldash.log_metric_max(key, value.avg)
        logger.critical(meters.format_simple('Epoch = {}'.format(epoch), compressed=False))
        if (not args.debug):
            if ((epoch % args.save_interval) == 0):
                fname = osp.join(args.ckpt_dir, 'epoch_{}.pth'.format(epoch))
                trainer.save_checkpoint(fname, dict(epoch=epoch, meta_file=args.meta_file))
|
def train_epoch(epoch, trainer, train_dataloader, meters, all_scans_in_dict):
    """Run one training epoch (ReferIt3D variant): remap the ReferIt3D batch
    fields to the model's expected feed_dict keys, step the trainer, and
    track grounding accuracy against ``target_pos``.

    This is the last definition of ``train_epoch`` in this chunk, so it is
    the one in effect at runtime. ``all_scans_in_dict`` is accepted for
    signature parity with the caller but is not used in the body.
    """
    nr_iters = args.iters_per_epoch
    # iters_per_epoch == 0 means "one full pass over the dataloader".
    if (nr_iters == 0):
        nr_iters = len(train_dataloader)
    meters.update(epoch=epoch)
    trainer.trigger_event('epoch:before', trainer, epoch)
    train_iter = iter(train_dataloader)
    end = time.time()
    with tqdm_pbar(total=nr_iters) as pbar:
        for i in range(nr_iters):
            feed_dict = next(train_iter)
            # Remap ReferIt3D batch fields to the model's input contract.
            feed_dict['input_str'] = feed_dict['utterance']
            tokenized = []
            for u in feed_dict['utterance']:
                tokenized.append(word_tokenize(u))
            feed_dict['input_str_tokenized'] = tokenized
            feed_dict['input_objects'] = feed_dict['objects']
            feed_dict['input_objects_class'] = feed_dict['class_labels']
            feed_dict['input_objects_length'] = feed_dict['context_size']
            feed_dict['output_target'] = feed_dict['target_pos']
            feed_dict['scene'] = None
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (loss, monitors, output_dict, extra_info) = trainer.step(feed_dict)
            step_time = (time.time() - end)
            end = time.time()
            meters.update(loss=loss)
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            target = feed_dict['output_target']
            executions = output_dict['executions']
            predictions = []
            # NOTE(review): the inner loops below reuse `i`, shadowing the
            # outer iteration variable. Harmless in Python, but confusing.
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            # Overall accuracy over the whole batch.
            guessed_correctly = torch.mean((predictions == target).double()).item()
            meters.update({'train/acc': guessed_correctly})
            scored_accs = []
            # Accuracy restricted to samples flagged as 'scored'.
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    scored_accs.append((predictions[i] == target[i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)).cpu().numpy())
                meters.update({'train/acc_scored': scored_avg_acc}, n=len(scored_accs))
                meters.update({'train/percent_scored': (len(scored_accs) / len(executions))})
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {}'.format(epoch), {k: v for (k, v) in meters.val.items() if ((not k.startswith('validation')) and (k.count('/') <= 1))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
    trainer.trigger_event('epoch:after', trainer, epoch)
|
def decode_stimulus_string(s):
    """Decode a ReferIt3D stimulus identifier string.

    The expected format is
    ``scene_id-instance_label-n_objects-target_id[-d0-d1-...]`` where the
    trailing dash-separated ids (if any) are the distractor object ids.
    Underscores in the instance label are rendered as spaces.

    :param s: the stimulus string
    :return: tuple ``(scene_id, instance_label, n_objects, target_id,
        distractors_ids)`` with ``n_objects``/``target_id`` as ints and
        ``distractors_ids`` as a list of ints.
    """
    pieces = s.split('-', maxsplit=4)
    if (len(pieces) == 4):
        # No distractor suffix: only scene, label, count, and target present.
        (scene_id, instance_label, n_objects, target_id) = pieces
        distractors_ids = ''
    else:
        (scene_id, instance_label, n_objects, target_id, distractors_ids) = pieces
    instance_label = instance_label.replace('_', ' ')
    n_objects = int(n_objects)
    target_id = int(target_id)
    distractors_ids = [int(tok) for tok in distractors_ids.split('-') if (tok != '')]
    # Sanity check: every non-target object must appear as a distractor.
    assert (len(distractors_ids) == (n_objects - 1))
    return (scene_id, instance_label, n_objects, target_id, distractors_ids)
|
def validate_epoch(epoch, trainer, val_dataloader, meters, all_scans_in_dict):
    """Run one validation pass: compute accuracy metrics and (optionally) dump
    an HTML visualization of the first few examples.

    Accuracy is also broken down into view-dependent vs view-independent
    utterances and easy vs hard stimuli (<= 2 objects counts as easy).
    Relies on module-level globals: ``args``, ``mldash``, ``logger``.

    :param epoch: current epoch number (used for logging/visualization paths).
    :param trainer: object whose ``evaluate(feed_dict)`` returns (output_dict, extra_info).
    :param val_dataloader: iterable of validation batches.
    :param meters: metric accumulator with ``update``/``flush``/``format_simple``.
    :param all_scans_in_dict: unused here — kept for interface compatibility.
    """
    if (not args.debug):
        # Set up the HTML table used to visualize a handful of predictions.
        from jaclearn.visualize.html_table import HTMLTableVisualizer, HTMLTableColumnDesc
        vis = HTMLTableVisualizer(osp.join(args.vis_dir, f'episode_{epoch}'), f'Left @ Epoch {epoch}')
        link = '<a href="viewer://{}", target="_blank">{}</a>'.format(vis.visdir, vis.visdir)
        columns = [HTMLTableColumnDesc('id', 'Index', 'text', {'width': '40px'}), HTMLTableColumnDesc('utterance', 'Utterance', 'code', {'width': '1000px'}), HTMLTableColumnDesc('correctness', 'Accurate', 'text', {'width': '40px'}), HTMLTableColumnDesc('attr_cls_acc', 'Attr Cls Accuracy', 'text', {'width': '150px'}), HTMLTableColumnDesc('attr_cls_pred', 'Attr Cls Preds', 'text', {'width': '200px'}), HTMLTableColumnDesc('tree', 'Parsing Tree', 'code', {'width': '500px'})]
    # NOTE(review): max_viz_count is never read below — only max_log_count caps logging.
    (this_count, max_viz_count, max_log_count) = (0, 5, 20)
    end = time.time()
    with tqdm_pbar(total=len(val_dataloader)) as pbar:
        if (not args.debug):
            vis.begin_html()
            vis.begin_table('Left', columns)
        accuracy = []
        # Per-category accuracy buckets (lists of 0/1 tensors).
        (easy_acc, hard_acc, view_dep_acc, view_indep_acc) = ([], [], [], [])
        # An utterance containing any of these tokens is treated as view-dependent.
        view_dependent_words = {'facing', 'looking', 'front', 'behind', 'back', 'right', 'left', 'leftmost', 'rightmost', 'across'}
        for feed_dict in val_dataloader:
            # Re-key the batch into the names the model expects.
            feed_dict['input_str'] = feed_dict['utterance']
            tokenized = []
            for u in feed_dict['utterance']:
                tokenized.append(word_tokenize(u))
            feed_dict['input_str_tokenized'] = tokenized
            feed_dict['input_objects'] = feed_dict['objects']
            feed_dict['input_objects_class'] = feed_dict['class_labels']
            feed_dict['input_objects_length'] = feed_dict['context_size']
            feed_dict['output_target'] = feed_dict['target_pos']
            feed_dict['scene'] = None
            if args.use_gpu:
                if (not args.gpu_parallel):
                    feed_dict = async_copy_to(feed_dict, 0)
            data_time = (time.time() - end)
            end = time.time()
            (output_dict, extra_info) = trainer.evaluate(feed_dict)
            monitors = as_float(output_dict['monitors'])
            step_time = (time.time() - end)
            end = time.time()
            meters.update(monitors)
            meters.update({'time/data': data_time, 'time/step': step_time})
            target = feed_dict['output_target']
            # 'executions' holds one score vector per example; argmax picks
            # the predicted target object.
            executions = output_dict['executions']
            predictions = []
            for i in range(len(executions)):
                predictions.append(torch.argmax(executions[i]))
            predictions = torch.stack(predictions)
            guessed_correctly = torch.mean((predictions == target).double()).item()
            meters.update({'validation/acc': guessed_correctly})
            accuracy.append(guessed_correctly)
            # Bucket each example by view-dependence and hardness.
            for i in range(len(executions)):
                this_tokens = feed_dict['utterance'][i].split(' ')
                this_stimulus_id = feed_dict['stimulus_id'][i]
                # Third field of the stimulus id is the number of objects.
                hardness = decode_stimulus_string(this_stimulus_id)[2]
                this_easy = (hardness <= 2)
                this_view_dependent = (len(set(this_tokens).intersection(view_dependent_words)) > 0)
                this_pred_acc = (predictions[i] == target[i])
                if this_view_dependent:
                    view_dep_acc.append(this_pred_acc)
                else:
                    view_indep_acc.append(this_pred_acc)
                if this_easy:
                    easy_acc.append(this_pred_acc)
                else:
                    hard_acc.append(this_pred_acc)
            # Accuracy restricted to examples the model actually scored
            # (output_dict['scored'] flags them — semantics defined by the model).
            scored_accs = []
            for i in range(len(executions)):
                if (output_dict['scored'][i] == 1):
                    scored_accs.append((predictions[i] == target[i]))
            if (len(scored_accs) != 0):
                scored_avg_acc = float((sum(scored_accs) / len(scored_accs)).cpu().numpy())
                meters.update({'validation/acc_scored': scored_avg_acc}, n=len(scored_accs))
            meters.update({'validation/percent_scored': (len(scored_accs) / len(executions))})
            # Log the first example of each batch until max_log_count rows exist.
            if ((not args.debug) and (this_count < max_log_count)):
                idx = 0
                utterance = feed_dict['input_str'][idx]
                parsing = output_dict['parsing'][idx]
                tree = ''
                if parsing:
                    tree = str(parsing)
                correctness = (predictions[idx].cpu() == target[idx].cpu())
                if ('concepts_to_accs' in output_dict):
                    concepts_to_accs = str(output_dict['concepts_to_accs'][idx])
                    concepts_to_pred_concepts = str(output_dict['concepts_to_pred_concepts'][idx])
                else:
                    (concepts_to_accs, concepts_to_pred_concepts) = ('', '')
                vis.row(id=this_count, utterance=utterance, correctness=correctness.cpu().numpy(), attr_cls_acc=concepts_to_accs, attr_cls_pred=concepts_to_pred_concepts, tree=tree)
                this_count += 1
            if args.use_tb:
                meters.flush()
            pbar.set_description(meters.format_simple('Epoch {} (validation)'.format(epoch), {k: v for (k, v) in meters.val.items() if (k.startswith('validation') and (k.count('/') <= 2))}, compressed=True), refresh=False)
            pbar.update()
            end = time.time()
        # NOTE(review): 'monitors' here is the dict from the LAST batch; the
        # category accuracies below raise ZeroDivisionError if a bucket is
        # empty — presumably the validation set always populates all four.
        monitors['validation/acc/view_dep_acc'] = float((sum(view_dep_acc) / float(len(view_dep_acc))))
        monitors['validation/acc/view_indep_acc'] = float((sum(view_indep_acc) / float(len(view_indep_acc))))
        monitors['validation/acc/easy_acc'] = float((sum(easy_acc) / float(len(easy_acc))))
        monitors['validation/acc/hard_acc'] = float((sum(hard_acc) / float(len(hard_acc))))
        meters.update(monitors)
        if (not args.debug):
            vis.end_table()
            vis.end_html()
    if (not args.debug):
        if args.evaluate:
            mldash.update(run_description=link)
        with mldash.update_extra_info():
            mldash.extra_info_dict.setdefault('visualizations', []).append(f'Epoch {epoch:3} Visualizations: {link}')
        logger.critical(f'Visualizations: {link}')
|
class LiviaSoftmax(LiviaNet3DConvLayer):
    """Final classification layer: a 3D conv layer followed by a
    (temperature-scaled) voxel-wise softmax over the class axis."""
    def __init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType=0, dropoutRate=0.0, softmaxTemperature=1.0):
        """Build the conv layer via the base class, then append a per-class
        bias and a softmax; ``filterShape[0]`` is the number of output classes."""
        LiviaNet3DConvLayer.__init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType, dropoutRate)
        self._numberOfOutputClasses = None
        self._bClassLayer = None
        self._softmaxTemperature = None
        self._numberOfOutputClasses = filterShape[0]
        self._softmaxTemperature = softmaxTemperature
        # Symbolic outputs of the convolutional part (set by the base class).
        outputOfConvTrain = self.outputTrain
        outputOfConvTest = self.outputTest
        outputOfConvShapeTrain = self.outputShapeTrain
        outputOfConvShapeTest = self.outputShapeTest
        # One bias per feature map (self._numberOfFeatureMaps comes from the
        # base class — presumably equals filterShape[0]; confirm there).
        b_values = np.zeros(self._numberOfFeatureMaps, dtype='float32')
        self._bClassLayer = theano.shared(value=b_values, borrow=True)
        inputToSoftmaxTrain = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTrain)
        inputToSoftmaxTest = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTest)
        # The new bias is trainable alongside the inherited parameters.
        self.params = (self.params + [self._bClassLayer])
        (self.p_y_given_x_train, self.y_pred_train) = applySoftMax(inputToSoftmaxTrain, outputOfConvShapeTrain, self._numberOfOutputClasses, softmaxTemperature)
        (self.p_y_given_x_test, self.y_pred_test) = applySoftMax(inputToSoftmaxTest, outputOfConvShapeTest, self._numberOfOutputClasses, softmaxTemperature)
    def negativeLogLikelihoodWeighted(self, y, weightPerClass):
        """Class-weighted negative log-likelihood of ground-truth labels.

        ``y`` holds one integer class label per (batch, x, y, z) voxel;
        ``weightPerClass`` is a 1D per-class weight vector.
        """
        # Smallest positive float32, added where probabilities are ~0 so the
        # log below cannot yield -inf.
        e1 = np.finfo(np.float32).tiny
        addTinyProbMatrix = (T.lt(self.p_y_given_x_train, (4 * e1)) * e1)
        # Broadcast per-class weights over batch and spatial dims.
        weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
        log_p_y_given_x_train = T.log((self.p_y_given_x_train + addTinyProbMatrix))
        weighted_log_probs = (log_p_y_given_x_train * weights)
        wShape = weighted_log_probs.shape
        # Advanced indexing: for every voxel, pick the log-probability of its
        # ground-truth class (y indexes the class axis, position 1).
        idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
        idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
        idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
        idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)
        return (- T.mean(weighted_log_probs[(idx0, y, idx2, idx3, idx4)]))
    def predictionProbabilities(self):
        """Return the test-time class-probability tensor."""
        return self.p_y_given_x_test
|
def computeDice(autoSeg, groundTruth):
    """Compute the per-class Dice coefficient between a segmentation and its
    ground truth.  Class 0 is treated as background and skipped.

    Parameters
    ----------
    autoSeg : ndarray of integer class labels (automatic segmentation)
    groundTruth : ndarray of integer class labels

    Returns
    -------
    DiceArray : list of floats on the range [0, 1], one per foreground class.
        Maximum similarity = 1, no similarity = 0.
    """
    n_classes = int((np.max(groundTruth) + 1))
    DiceArray = []
    # Fix: `xrange` is Python 2 only; the rest of this file is Python 3
    # (f-strings), so use `range`.
    for c_i in range(1, n_classes):
        idx_Auto = np.where((autoSeg.flatten() == c_i))[0]
        idx_GT = np.where((groundTruth.flatten() == c_i))[0]
        # Fix: the `np.bool` alias was removed in NumPy 1.24; the builtin
        # `bool` is the documented replacement.
        autoArray = np.zeros(autoSeg.size, dtype=bool)
        autoArray[idx_Auto] = 1
        gtArray = np.zeros(autoSeg.size, dtype=bool)
        gtArray[idx_GT] = 1
        dsc = dice(autoArray, gtArray)
        DiceArray.append(dsc)
    return DiceArray
|
def dice(im1, im2):
    """Compute the Dice coefficient of two masks.

    Parameters
    ----------
    im1, im2 : array-like; converted to boolean if they are not already.

    Returns
    -------
    float on the range [0, 1]: 1 = perfect overlap, 0 = no overlap.
    Two empty masks are defined to have coefficient 1.0.

    Raises
    ------
    ValueError if the inputs differ in size.
    """
    # Fix: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if (im1.size != im2.size):
        raise ValueError('Size mismatch between input arrays!!!')
    im_sum = (im1.sum() + im2.sum())
    if (im_sum == 0):
        # Both masks empty: treat as perfect agreement.
        return 1.0
    intersection = np.logical_and(im1, im2)
    return ((2.0 * intersection.sum()) / im_sum)
|
def applyActivationFunction_Sigmoid(inputData):
    """Element-wise logistic sigmoid.

    ``inputData`` is a 5D tensor laid out as
    (batchSize, numberOfFeatureMaps, x, y, z); output has the same shape.
    """
    return T.nnet.sigmoid(inputData)
|
def applyActivationFunction_Tanh(inputData):
    """Element-wise hyperbolic tangent.

    ``inputData`` is a 5D tensor laid out as
    (batchSize, numberOfFeatureMaps, x, y, z); output has the same shape.
    """
    return T.tanh(inputData)
|
def applyActivationFunction_ReLU_v1(inputData):
    """Rectified linear unit, max(x, 0), applied element-wise.

    ``inputData`` is a 5D tensor (batchSize, featureMaps, x, y, z).
    """
    rectified = T.maximum(inputData, 0)
    return rectified
|
def applyActivationFunction_ReLU_v2(inputData):
    """ReLU via an element-wise select: negatives become 0, the rest pass through."""
    isNegative = (inputData < 0.0)
    return T.switch(isNegative, 0.0, inputData)
|
def applyActivationFunction_ReLU_v3(inputData):
    """ReLU written arithmetically: (x + |x|) / 2 equals x for x >= 0, else 0."""
    return 0.5 * (inputData + abs(inputData))
|
def applyActivationFunction_ReLU_v4(inputData):
    """ReLU via the sign function: (sgn(x) + 1) / 2 gates x to 0 when x < 0."""
    gate = ((T.sgn(inputData) + 1) * 0.5)
    return gate * inputData
|
def applyActivationFunction_LeakyReLU(inputData, leakiness):
    """Leaky rectifier (Maas et al., 2013).

    leakiness : float, slope for negative input, usually in [0, 1].
        0 gives the standard rectifier, 1 a linear activation, values in
        between a leaky rectifier.

    Implemented as a*x + b*|x| with a = (1 + leakiness)/2 and
    b = (1 - leakiness)/2, which equals x for x >= 0 and leakiness*x for x < 0.
    Input: tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim).
    """
    posSlope = (0.5 * (1 + leakiness))
    negSlope = (0.5 * (1 - leakiness))
    return ((posSlope * inputData) + (negSlope * abs(inputData)))
|
def applyActivationFunction_PReLU(inputData, PreluActivations):
    """Parametric ReLU: f(x) = x for x >= 0, alpha * x for x < 0, where
    alpha is a learned per-feature-map vector.

    Input: tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim).
    """
    # Broadcast the per-feature-map alphas over batch and spatial dims.
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positivePart = T.maximum(0, inputData)
    # (x - |x|) is 2x for negative x and 0 otherwise.
    negativePart = 0.5 * (alpha * (inputData - abs(inputData)))
    return positivePart + negativePart
|
def applyActivationFunction_PReLU_v2(inputData, PreluActivations):
    """Parametric ReLU, arithmetic form.

    ``inputData`` is a 5D tensor (batchSize, featureMaps, x, y, z);
    ``PreluActivations`` is a learned per-feature-map alpha vector.
    """
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    # (x + |x|)/2 keeps the positive part, (x - |x|)/2 the negative part.
    positivePart = ((inputData + abs(inputData)) / 2.0)
    negativePart = (alpha * ((inputData - abs(inputData)) / 2.0))
    return positivePart + negativePart
|
def applyActivationFunction_PReLU_v3(inputData, PreluActivations):
    """Parametric ReLU in slope/abs form: a*x + b*|x| with learned per-map alpha.

    ``inputData`` is a 5D tensor (batchSize, featureMaps, x, y, z).
    """
    alpha = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    posCoef = (0.5 * (1 + alpha))
    negCoef = (0.5 * (1 - alpha))
    return ((posCoef * inputData) + (negCoef * abs(inputData)))
|
def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
    """Apply classic (non-inverted) dropout.

    Task codes: 0 = training, 1 = validation, 2 = testing.
    During training, units are zeroed with probability ``dropoutRate``; at
    validation/test time activations are scaled by the keep probability.
    """
    if dropoutRate <= 0.001:
        # Rate effectively zero: dropout is a no-op.
        return inputData
    keepProb = (1 - dropoutRate)
    # Seed the Theano stream from the supplied numpy RNG (consumed
    # unconditionally, matching the original call order).
    srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
    dropoutMask = srng.binomial(n=1, size=inputShape, p=keepProb, dtype=theano.config.floatX)
    if task == 0:
        # Training: zero out dropped units with a Bernoulli keep-mask.
        return inputData * dropoutMask
    # Validation/testing: rescale instead of masking.
    return inputData * keepProb
|
def convolveWithKernel(W, filter_shape, inputSample, inputSampleShape):
    """Valid-mode 3D convolution of a 5D sample with kernel W.

    Caller-side layout is (batch, featMaps, x, y, z) with filter_shape
    (nKernels, kx, ky, kz, inputChannels); conv3d2d expects the channel
    axis in position 1 of the filter and last axis moved, hence the
    dimshuffles below.

    Returns (output, outputShape) with output back in the caller layout
    and outputShape the spatial dims shrunk by (kernel - 1).
    """
    # Put the input-channel axis (4) right after the kernel axis, as
    # conv3d expects.
    wReshapedForConv = W.dimshuffle(0, 4, 1, 2, 3)
    wReshapedForConvShape = (filter_shape[0], filter_shape[4], filter_shape[1], filter_shape[2], filter_shape[3])
    # Same reordering for the input sample.
    inputSampleReshaped = inputSample.dimshuffle(0, 4, 1, 2, 3)
    inputSampleReshapedShape = (inputSampleShape[0], inputSampleShape[4], inputSampleShape[1], inputSampleShape[2], inputSampleShape[3])
    convolved_Output = T.nnet.conv3d2d.conv3d(inputSampleReshaped, wReshapedForConv, inputSampleReshapedShape, wReshapedForConvShape, border_mode='valid')
    # Restore the caller's axis order.
    output = convolved_Output.dimshuffle(0, 2, 3, 4, 1)
    # 'valid' convolution: each spatial dim shrinks by (kernel - 1).
    outputShape = [inputSampleShape[0], filter_shape[0], ((inputSampleShape[2] - filter_shape[2]) + 1), ((inputSampleShape[3] - filter_shape[3]) + 1), ((inputSampleShape[4] - filter_shape[4]) + 1)]
    return (output, outputShape)
|
def applyBn(numberEpochApplyRolling, inputTrain, inputTest, inputShapeTrain):
    """Batch-normalize train/test outputs over the feature-map axis (axis 1).

    Train-time statistics come from the current mini-batch; test-time
    statistics from rolling buffers holding the last
    ``numberEpochApplyRolling`` batch statistics.

    Returns the normalized train/test tensors plus all shared variables the
    caller needs to learn gamma/beta and to update the rolling buffers:
    (normYi_train, normYi_test, gBn, bBn, muArray, varArray,
    sharedNewMu_B, sharedNewVar_B, mu_B, var_B).
    """
    numberOfChannels = inputShapeTrain[1]
    # Learnable scale (gamma) and shift (beta), one per channel.
    gBn_values = np.ones(numberOfChannels, dtype='float32')
    gBn = theano.shared(value=gBn_values, borrow=True)
    bBn_values = np.zeros(numberOfChannels, dtype='float32')
    bBn = theano.shared(value=bBn_values, borrow=True)
    # Rolling buffers of per-batch means/variances, one row per remembered batch.
    muArray = theano.shared(np.zeros((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
    varArray = theano.shared(np.ones((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
    # Holders for the newest batch statistics — presumably written into the
    # rolling buffers by the training updates elsewhere; confirm at caller.
    sharedNewMu_B = theano.shared(np.zeros(numberOfChannels, dtype='float32'), borrow=True)
    sharedNewVar_B = theano.shared(np.ones(numberOfChannels, dtype='float32'), borrow=True)
    # Smallest positive float32, used to avoid division by zero.
    e1 = np.finfo(np.float32).tiny
    # Current-batch statistics over batch and the three spatial axes.
    mu_B = inputTrain.mean(axis=[0, 2, 3, 4])
    mu_B = T.unbroadcast(mu_B, 0)
    var_B = inputTrain.var(axis=[0, 2, 3, 4])
    var_B = T.unbroadcast(var_B, 0)
    var_B_plusE = (var_B + e1)
    # Test-time statistics: average of the rolling buffers; Bessel-style
    # correction with the number of elements that contributed per channel.
    mu_RollingAverage = muArray.mean(axis=0)
    effectiveSize = (((inputShapeTrain[0] * inputShapeTrain[2]) * inputShapeTrain[3]) * inputShapeTrain[4])
    var_RollingAverage = ((effectiveSize / (effectiveSize - 1)) * varArray.mean(axis=0))
    var_RollingAverage_plusE = (var_RollingAverage + e1)
    # Normalize, then apply the learnable affine transform; the dimshuffles
    # broadcast the per-channel vectors over batch and spatial dims.
    normXi_train = ((inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
    normYi_train = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
    normXi_test = ((inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
    normYi_test = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
    return (normYi_train, normYi_test, gBn, bBn, muArray, varArray, sharedNewMu_B, sharedNewVar_B, mu_B, var_B)
|
def applySoftMax(inputSample, inputSampleShape, numClasses, softmaxTemperature):
    """Apply a temperature-scaled softmax over the class axis of a 5D tensor.

    ``inputSample`` is laid out as (batch, classes, x, y, z).  It is
    flattened to a (voxels, classes) matrix so T.nnet.softmax can be used,
    then reshaped back.

    Returns (p_y_given_x, y_pred): class probabilities in the original
    layout, and the arg-max class per voxel.
    """
    # Move the class axis last: (batch, x, y, z, classes).
    inputSampleReshaped = inputSample.dimshuffle(0, 2, 3, 4, 1)
    inputSampleFlattened = inputSampleReshaped.flatten(1)
    numClassifiedVoxels = ((inputSampleShape[2] * inputSampleShape[3]) * inputSampleShape[4])
    firstDimOfinputSample2d = (inputSampleShape[0] * numClassifiedVoxels)
    # One row per voxel, one column per class.
    inputSample2d = inputSampleFlattened.reshape((firstDimOfinputSample2d, numClasses))
    # Temperature > 1 softens the distribution; < 1 sharpens it.
    p_y_given_x_2d = T.nnet.softmax((inputSample2d / softmaxTemperature))
    p_y_given_x_class = p_y_given_x_2d.reshape((inputSampleShape[0], inputSampleShape[2], inputSampleShape[3], inputSampleShape[4], inputSampleShape[1]))
    # Restore the (batch, classes, x, y, z) layout.
    p_y_given_x = p_y_given_x_class.dimshuffle(0, 4, 1, 2, 3)
    y_pred = T.argmax(p_y_given_x, axis=1)
    return (p_y_given_x, y_pred)
|
def applyBiasToFeatureMaps(bias, featMaps):
    """Add a per-feature-map bias, broadcast over batch and spatial dims.

    ``featMaps`` is a 5D tensor (batch, featureMaps, x, y, z); ``bias`` is a
    1D vector with one entry per feature map.
    """
    broadcastBias = bias.dimshuffle('x', 0, 'x', 'x', 'x')
    return featMaps + broadcastBias
|
class parserConfigIni(object):
    """Reader for the tool's .ini configuration files.

    ``readConfigIniFile`` dispatches on ``task`` (0: create model, 1: train,
    2: test) and populates the matching set of attributes.

    NOTE(review): uses the Python 2 module name ``ConfigParser``; under
    Python 3 this needs a compat import (``import configparser as
    ConfigParser``) at file level — confirm against the file header.
    """
    def __init__(_self):
        # Populated by readModelCreation_params.
        _self.networkName = []
    def readConfigIniFile(_self, fileName, task):
        """Parse ``fileName`` according to the task code (0=create, 1=train, 2=test)."""
        def createModel():
            print(' --- Creating model (Reading parameters...)')
            _self.readModelCreation_params(fileName)
        def trainModel():
            print(' --- Training model (Reading parameters...)')
            _self.readModelTraining_params(fileName)
        def testModel():
            print(' --- Testing model (Reading parameters...)')
            _self.readModelTesting_params(fileName)
        # Dispatch table keyed by the integer task code.
        optionsParser = {0: createModel, 1: trainModel, 2: testModel}
        optionsParser[task]()
    def readModelCreation_params(_self, fileName):
        """Read architecture and training hyper-parameters for model creation."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.networkName = ConfigIni.get('General', 'networkName')
        _self.folderName = ConfigIni.get('General', 'folderName')
        # json.loads turns the ini strings into Python lists/numbers.
        _self.n_classes = json.loads(ConfigIni.get('CNN_Architecture', 'n_classes'))
        _self.layers = json.loads(ConfigIni.get('CNN_Architecture', 'numkernelsperlayer'))
        _self.kernels = json.loads(ConfigIni.get('CNN_Architecture', 'kernelshapes'))
        _self.intermediate_ConnectedLayers = json.loads(ConfigIni.get('CNN_Architecture', 'intermediateConnectedLayers'))
        _self.pooling_scales = json.loads(ConfigIni.get('CNN_Architecture', 'pooling_scales'))
        _self.dropout_Rates = json.loads(ConfigIni.get('CNN_Architecture', 'dropout_Rates'))
        _self.activationType = json.loads(ConfigIni.get('CNN_Architecture', 'activationType'))
        _self.weight_Initialization_CNN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_CNN'))
        _self.weight_Initialization_FCN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_FCN'))
        _self.weightsFolderName = ConfigIni.get('CNN_Architecture', 'weights folderName')
        _self.weightsTrainedIdx = json.loads(ConfigIni.get('CNN_Architecture', 'weights trained indexes'))
        _self.batch_size = json.loads(ConfigIni.get('Training Parameters', 'batch_size'))
        _self.sampleSize_Train = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Train'))
        _self.sampleSize_Test = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Test'))
        _self.costFunction = json.loads(ConfigIni.get('Training Parameters', 'costFunction'))
        _self.L1_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L1 Regularization Constant'))
        _self.L2_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L2 Regularization Constant'))
        # NOTE: 'Leraning Rate' is misspelled but must match the key used in
        # existing .ini files — do not "fix" it here without migrating them.
        _self.learning_rate = json.loads(ConfigIni.get('Training Parameters', 'Leraning Rate'))
        _self.momentumType = json.loads(ConfigIni.get('Training Parameters', 'Momentum Type'))
        _self.momentumValue = json.loads(ConfigIni.get('Training Parameters', 'Momentum Value'))
        _self.momentumNormalized = json.loads(ConfigIni.get('Training Parameters', 'momentumNormalized'))
        _self.optimizerType = json.loads(ConfigIni.get('Training Parameters', 'Optimizer Type'))
        _self.rho_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Rho RMSProp'))
        _self.epsilon_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Epsilon RMSProp'))
        # Stored in the ini as 0/1; converted to a proper bool here.
        applyBatchNorm = json.loads(ConfigIni.get('Training Parameters', 'applyBatchNormalization'))
        if (applyBatchNorm == 1):
            _self.applyBatchNorm = True
        else:
            _self.applyBatchNorm = False
        _self.BatchNormEpochs = json.loads(ConfigIni.get('Training Parameters', 'BatchNormEpochs'))
        _self.tempSoftMax = json.loads(ConfigIni.get('Training Parameters', 'SoftMax temperature'))
    def readModelTraining_params(_self, fileName):
        """Read image folders, subject index splits and schedule for training."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Training Images', 'imagesFolder')
        _self.GroundTruthFolder = ConfigIni.get('Training Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Training Images', 'ROIFolder')
        _self.indexesForTraining = json.loads(ConfigIni.get('Training Images', 'indexesForTraining'))
        _self.indexesForValidation = json.loads(ConfigIni.get('Training Images', 'indexesForValidation'))
        _self.imageTypesTrain = json.loads(ConfigIni.get('Training Images', 'imageTypes'))
        _self.numberOfEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of Epochs'))
        _self.numberOfSubEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of SubEpochs'))
        _self.numberOfSamplesSupEpoch = json.loads(ConfigIni.get('Training Parameters', 'number of samples at each SubEpoch Train'))
        _self.firstEpochChangeLR = json.loads(ConfigIni.get('Training Parameters', 'First Epoch Change LR'))
        _self.frequencyChangeLR = json.loads(ConfigIni.get('Training Parameters', 'Frequency Change LR'))
        _self.applyPadding = json.loads(ConfigIni.get('Training Parameters', 'applyPadding'))
    def readModelTesting_params(_self, fileName):
        """Read image folders and subject indexes for segmentation/testing."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Segmentation Images', 'imagesFolder')
        _self.GroundTruthFolder = ConfigIni.get('Segmentation Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Segmentation Images', 'ROIFolder')
        _self.imageTypes = json.loads(ConfigIni.get('Segmentation Images', 'imageTypes'))
        _self.indexesToSegment = json.loads(ConfigIni.get('Segmentation Images', 'indexesToSegment'))
        _self.applyPadding = json.loads(ConfigIni.get('Segmentation Images', 'applyPadding'))
|
def printUsage(error_type):
    """Print an error message plus usage help for the segmentation CLI.

    ``error_type`` 1 means too few command-line arguments; any other value
    means an existing network was requested without giving its name.
    """
    if error_type == 1:
        message = ' ** ERROR!!: Few parameters used.'
    else:
        message = ' ** ERROR!!: Asked to start with an already created network but its name is not specified.'
    print(message)
    usage_lines = (
        ' ******** USAGE ******** ',
        ' --- argv 1: Name of the configIni file.',
        ' --- argv 2: Network model name',
    )
    for line in usage_lines:
        print(line)
|
def networkSegmentation(argv):
    """CLI entry point for segmentation.

    ``argv`` = [configIniName, networkModelName]; exits with a usage message
    when fewer than two arguments are supplied.
    """
    if len(argv) < 2:
        # Not enough command-line arguments: explain usage and bail out.
        printUsage(1)
        sys.exit()
    configIniName, networkModelName = argv[0], argv[1]
    startTesting(networkModelName, configIniName)
    print(' ***************** SEGMENTATION DONE!!! ***************** ')
|
@dataclass
class DataTrainingArguments():
    """Arguments pertaining to what data we are going to input our model for
    training and eval (token-classification tasks such as NER/POS).

    Each field's ``metadata['help']`` documents its meaning for the
    HfArgumentParser-generated command line.
    """
    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_val_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'})
    max_test_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'})
    label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'})
    return_entity_level_metrics: bool = field(default=False, metadata={'help': 'Whether to return all the entity levels during evaluation or just the overall ones.'})
|
@dataclass
class XFUNDataTrainingArguments(DataTrainingArguments):
    """Data arguments specialized for the XFUN dataset."""
    # Language code of the XFUN split to use (e.g. 'en').
    lang: Optional[str] = field(default='en')
    # Optional '+'-separated extra training languages; 'all' expands to every
    # XFUN language except `lang` (see XFUN._split_generators).
    additional_langs: Optional[str] = field(default=None)
|
@dataclass
class DataCollatorForKeyValueExtraction():
    "\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (:obj:`int`, `optional`, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).\n "
    tokenizer: PreTrainedTokenizerBase
    padding: Union[(bool, str, PaddingStrategy)] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = (- 100)
    def __call__(self, features):
        """Pad a batch of feature dicts — labels, bboxes and images included —
        to a common sequence length."""
        label_name = ('label' if ('label' in features[0].keys()) else 'labels')
        labels = ([feature[label_name] for feature in features] if (label_name in features[0].keys()) else None)
        has_image_input = ('image' in features[0])
        has_bbox_input = ('bbox' in features[0])
        if has_image_input:
            # Batch the page images with ImageList.from_tensors (pads to a
            # common size, multiple of 32 — presumably detectron2's ImageList;
            # confirm the import) and strip them so tokenizer.pad() below
            # only sees token-level fields.
            image = ImageList.from_tensors([torch.tensor(feature['image']) for feature in features], 32)
            for feature in features:
                del feature['image']
        batch = self.tokenizer.pad(features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=('pt' if (labels is None) else None))
        if (labels is None):
            # Inference batch: nothing left to pad manually.
            return batch
        sequence_length = torch.tensor(batch['input_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if (padding_side == 'right'):
            # Right padding: append ignore-index labels / zero boxes.
            batch['labels'] = [(label + ([self.label_pad_token_id] * (sequence_length - len(label)))) for label in labels]
            if has_bbox_input:
                batch['bbox'] = [(bbox + ([[0, 0, 0, 0]] * (sequence_length - len(bbox)))) for bbox in batch['bbox']]
        else:
            # Left padding: prepend them instead.
            batch['labels'] = [(([self.label_pad_token_id] * (sequence_length - len(label))) + label) for label in labels]
            if has_bbox_input:
                batch['bbox'] = [(([[0, 0, 0, 0]] * (sequence_length - len(bbox))) + bbox) for bbox in batch['bbox']]
        # Convert every list-valued entry into an int64 tensor.
        batch = {k: (torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v) for (k, v) in batch.items()}
        if has_image_input:
            batch['image'] = image
        return batch
|
class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for the FUNSD dataset."""

    def __init__(self, **kwargs):
        """Forward all keyword arguments to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
|
class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD form-understanding dataset builder: per-document tokens, one
    bounding box and one BIO NER tag per token, plus the page image."""
    BUILDER_CONFIGS = [FunsdConfig(name='funsd', version=datasets.Version('1.0.0'), description='FUNSD dataset')]
    def _info(self):
        # Feature schema; NER tags use BIO over HEADER/QUESTION/ANSWER and the
        # page image is stored as a 3x224x224 uint8 array.
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'tokens': datasets.Sequence(datasets.Value('string')), 'bboxes': datasets.Sequence(datasets.Sequence(datasets.Value('int64'))), 'ner_tags': datasets.Sequence(datasets.features.ClassLabel(names=['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER'])), 'image': datasets.Array3D(shape=(3, 224, 224), dtype='uint8')}), supervised_keys=None, homepage='https://guillaumejaume.github.io/FUNSD/', citation=_CITATION)
    def _split_generators(self, dl_manager):
        'Returns SplitGenerators.'
        # The official zip contains training_data/ and testing_data/ subdirs.
        downloaded_file = dl_manager.download_and_extract('https://guillaumejaume.github.io/FUNSD/dataset.zip')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': f'{downloaded_file}/dataset/training_data/'}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': f'{downloaded_file}/dataset/testing_data/'})]
    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs built from the per-page annotation JSONs."""
        logger.info('⏳ Generating examples from = %s', filepath)
        ann_dir = os.path.join(filepath, 'annotations')
        img_dir = os.path.join(filepath, 'images')
        # Sorted listing keeps guids deterministic across runs.
        for (guid, file) in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []
            file_path = os.path.join(ann_dir, file)
            with open(file_path, 'r', encoding='utf8') as f:
                data = json.load(f)
            # Each annotation JSON has a same-named .png page image.
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace('json', 'png')
            (image, size) = load_image(image_path)
            for item in data['form']:
                (words, label) = (item['words'], item['label'])
                # Drop whitespace-only words.
                words = [w for w in words if (w['text'].strip() != '')]
                if (len(words) == 0):
                    continue
                if (label == 'other'):
                    # Unlabeled segments: every word is outside ('O').
                    for w in words:
                        tokens.append(w['text'])
                        ner_tags.append('O')
                        bboxes.append(normalize_bbox(item['box'], size))
                else:
                    # BIO tagging: first word gets B-, the rest I-.  All words
                    # of a segment share the segment-level box.
                    tokens.append(words[0]['text'])
                    ner_tags.append(('B-' + label.upper()))
                    bboxes.append(normalize_bbox(item['box'], size))
                    for w in words[1:]:
                        tokens.append(w['text'])
                        ner_tags.append(('I-' + label.upper()))
                        bboxes.append(normalize_bbox(item['box'], size))
            (yield (guid, {'id': str(guid), 'tokens': tokens, 'bboxes': bboxes, 'ner_tags': ner_tags, 'image': image}))
|
class XFUNConfig(datasets.BuilderConfig):
    """BuilderConfig for XFUN.

    Args:
        lang: language code of the input text.
        additional_langs: optional extra languages ('+'-separated string).
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, lang, additional_langs=None, **kwargs):
        super().__init__(**kwargs)
        self.lang = lang
        self.additional_langs = additional_langs
|
class XFUN(datasets.GeneratorBasedBuilder):
'XFUN dataset.'
BUILDER_CONFIGS = [XFUNConfig(name=f'xfun.{lang}', lang=lang) for lang in _LANG]
tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
def _info(self):
return datasets.DatasetInfo(features=datasets.Features({'id': datasets.Value('string'), 'input_ids': datasets.Sequence(datasets.Value('int64')), 'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('int64'))), 'labels': datasets.Sequence(datasets.ClassLabel(names=['O', 'B-QUESTION', 'B-ANSWER', 'B-HEADER', 'I-ANSWER', 'I-QUESTION', 'I-HEADER'])), 'image': datasets.Array3D(shape=(3, 224, 224), dtype='uint8'), 'entities': datasets.Sequence({'start': datasets.Value('int64'), 'end': datasets.Value('int64'), 'label': datasets.ClassLabel(names=['HEADER', 'QUESTION', 'ANSWER'])}), 'relations': datasets.Sequence({'head': datasets.Value('int64'), 'tail': datasets.Value('int64'), 'start_index': datasets.Value('int64'), 'end_index': datasets.Value('int64')})}), supervised_keys=None)
def _split_generators(self, dl_manager):
'Returns SplitGenerators.'
file_dir = 'xfund&funsd/'
train_files_for_many_langs = [[(file_dir + f'{self.config.lang}.train.json'), (file_dir + f'{self.config.lang}')]]
val_files_for_many_langs = [[(file_dir + f'{self.config.lang}.val.json'), (file_dir + f'{self.config.lang}')]]
if self.config.additional_langs:
additional_langs = self.config.additional_langs.split('+')
if ('all' in additional_langs):
additional_langs = [lang for lang in _LANG if (lang != self.config.lang)]
for lang in additional_langs:
train_files_for_many_langs.append([(file_dir + f'{lang}.train.json'), (file_dir + f'{lang}')])
logger.info(f'Training on {self.config.lang} with additional langs({self.config.additional_langs})')
logger.info(f'Evaluating on {self.config.lang}')
logger.info(f'Testing on {self.config.lang}')
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': train_files_for_many_langs}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepaths': val_files_for_many_langs})]
def _generate_examples(self, filepaths):
    """Yield (key, example) pairs, one example per 512-token chunk of a document.

    filepaths: list of [annotation_json_path, image_directory] pairs as built
    by _split_generators.
    """
    for filepath in filepaths:
        logger.info('Generating examples from = %s', filepath)
        with open(filepath[0], 'r') as f:
            data = json.load(f)
        for doc in data['documents']:
            # Resolve the document image relative to the split's image directory.
            doc['img']['fpath'] = os.path.join(filepath[1], doc['img']['fname'])
            (image, size) = load_image(doc['img']['fpath'])
            document = doc['document']
            tokenized_doc = {'input_ids': [], 'bbox': [], 'labels': []}
            entities = []
            relations = []
            id2label = {}  # line id -> raw label string, e.g. 'question'
            entity_id_to_index_map = {}  # line id -> index into `entities`
            empty_entity = set()  # ids of empty lines; links touching them are dropped
            for line in document:
                if (len(line['text']) == 0):
                    empty_entity.add(line['id'])
                    continue
                id2label[line['id']] = line['label']
                # Normalize each link to a sorted id pair so duplicates collapse later.
                relations.extend([tuple(sorted(l)) for l in line['linking']])
                if ('/en' in filepath[0]):
                    # English (FUNSD-style) data: join per-word OCR text; '\uf703'
                    # is a stray private-use glyph stripped from the words.
                    tokenized_inputs = self.tokenizer(' '.join([q['text'].replace(u'\uf703', '') for q in line['words']]), add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                else:
                    tokenized_inputs = self.tokenizer(line['text'], add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                # Walk tokens and OCR words in lockstep, assigning each token the
                # merged box of the OCR word(s) its characters came from.
                text_length = 0  # characters of line text covered by tokens so far
                ocr_length = 0  # characters consumed from the OCR word stream
                bbox = []
                last_box = None
                for (token_id, offset) in zip(tokenized_inputs['input_ids'], tokenized_inputs['offset_mapping']):
                    if (token_id == 6):
                        # NOTE(review): id 6 appears to be a special/whitespace token
                        # in this vocabulary; its box is back-filled below — confirm.
                        bbox.append(None)
                        continue
                    text_length += (offset[1] - offset[0])
                    tmp_box = []
                    while (ocr_length < text_length):
                        # Consumes the per-line word list in place.
                        ocr_word = line['words'].pop(0)
                        ocr_length += len(self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word['text'].strip()))
                        tmp_box.append(simplify_bbox(line['box']))
                    if (len(tmp_box) == 0):
                        # Token introduced no new OCR word: reuse the previous box.
                        tmp_box = last_box
                    bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                    last_box = tmp_box
                # Replace None placeholders with a degenerate point box taken from
                # the following token's top-left corner.
                bbox = [([bbox[(i + 1)][0], bbox[(i + 1)][1], bbox[(i + 1)][0], bbox[(i + 1)][1]] if (b is None) else b) for (i, b) in enumerate(bbox)]
                if (line['label'] == 'other'):
                    label = (['O'] * len(bbox))
                else:
                    # BIO tagging: first token gets B-, the rest I-.
                    label = ([f"I-{line['label'].upper()}"] * len(bbox))
                    label[0] = f"B-{line['label'].upper()}"
                tokenized_inputs.update({'bbox': bbox, 'labels': label})
                if (label[0] != 'O'):
                    entity_id_to_index_map[line['id']] = len(entities)
                    # Entity span recorded in document-level token positions.
                    entities.append({'start': len(tokenized_doc['input_ids']), 'end': (len(tokenized_doc['input_ids']) + len(tokenized_inputs['input_ids'])), 'label': line['label'].upper()})
                for i in tokenized_doc:
                    tokenized_doc[i] = (tokenized_doc[i] + tokenized_inputs[i])
            # Deduplicate links and drop those touching an empty line.
            relations = list(set(relations))
            relations = [rel for rel in relations if ((rel[0] not in empty_entity) and (rel[1] not in empty_entity))]
            kvrelations = []
            # Keep only question<->answer pairs, oriented head=question, tail=answer.
            for rel in relations:
                pair = [id2label[rel[0]], id2label[rel[1]]]
                if (pair == ['question', 'answer']):
                    kvrelations.append({'head': entity_id_to_index_map[rel[0]], 'tail': entity_id_to_index_map[rel[1]]})
                elif (pair == ['answer', 'question']):
                    kvrelations.append({'head': entity_id_to_index_map[rel[1]], 'tail': entity_id_to_index_map[rel[0]]})
                else:
                    continue
            def get_relation_span(rel):
                # Token range covering both endpoints of a relation.
                bound = []
                for entity_index in [rel['head'], rel['tail']]:
                    bound.append(entities[entity_index]['start'])
                    bound.append(entities[entity_index]['end'])
                return (min(bound), max(bound))
            relations = sorted([{'head': rel['head'], 'tail': rel['tail'], 'start_index': get_relation_span(rel)[0], 'end_index': get_relation_span(rel)[1]} for rel in kvrelations], key=(lambda x: x['head']))
            # Emit the document in fixed-size token chunks.
            chunk_size = 512
            for (chunk_id, index) in enumerate(range(0, len(tokenized_doc['input_ids']), chunk_size)):
                item = {}
                for k in tokenized_doc:
                    item[k] = tokenized_doc[k][index:(index + chunk_size)]
                entities_in_this_span = []
                global_to_local_map = {}
                for (entity_id, entity) in enumerate(entities):
                    if ((index <= entity['start'] < (index + chunk_size)) and (index <= entity['end'] < (index + chunk_size))):
                        # NOTE(review): the entity dict is rewritten in place to
                        # chunk-local offsets; this relies on each entity matching
                        # at most one chunk — verify.
                        entity['start'] = (entity['start'] - index)
                        entity['end'] = (entity['end'] - index)
                        global_to_local_map[entity_id] = len(entities_in_this_span)
                        entities_in_this_span.append(entity)
                relations_in_this_span = []
                for relation in relations:
                    if ((index <= relation['start_index'] < (index + chunk_size)) and (index <= relation['end_index'] < (index + chunk_size))):
                        # Both endpoints lie inside this chunk, so the map lookups hold.
                        relations_in_this_span.append({'head': global_to_local_map[relation['head']], 'tail': global_to_local_map[relation['tail']], 'start_index': (relation['start_index'] - index), 'end_index': (relation['end_index'] - index)})
                item.update({'id': f"{doc['id']}_{chunk_id}", 'image': image, 'entities': entities_in_this_span, 'relations': relations_in_this_span})
                (yield (f"{doc['id']}_{chunk_id}", item))
|
def normalize_bbox(bbox, size):
    """Scale an absolute pixel box [x0, y0, x1, y1] into the 0-1000 space.

    `size` is (width, height) of the page; fractions are truncated toward zero.
    """
    width, height = size[0], size[1]
    scaled = (
        1000 * bbox[0] / width,
        1000 * bbox[1] / height,
        1000 * bbox[2] / width,
        1000 * bbox[3] / height,
    )
    return [int(v) for v in scaled]
|
def simplify_bbox(bbox):
    """Collapse a flat coordinate list [x1, y1, x2, y2, ...] to its axis-aligned
    bounding box [x_min, y_min, x_max, y_max].

    Even positions are x coordinates, odd positions are y coordinates.

    Fix: the previous version computed the maxima over bbox[2::2]/bbox[3::2],
    silently skipping the first (x, y) vertex. For a well-ordered rectangle the
    result is identical, but for polygons whose first vertex is extreme the box
    came out too small. All vertices are now scanned for both min and max.
    """
    xs = bbox[0::2]
    ys = bbox[1::2]
    return [min(xs), min(ys), max(xs), max(ys)]
|
def merge_bbox(bbox_list):
    """Union of several [x0, y0, x1, y1] boxes: the smallest box covering all."""
    lefts, tops, rights, bottoms = zip(*bbox_list)
    return [min(lefts), min(tops), max(rights), max(bottoms)]
|
def load_image(image_path):
    """Read an image, resize it to 224x224, and return (CHW tensor, (w, h)).

    The returned size is the ORIGINAL (width, height), used later for bbox
    normalization; the tensor is the resized image in channel-first layout.
    """
    raw = read_image(image_path, format='BGR')
    orig_h, orig_w = raw.shape[0], raw.shape[1]
    resize = TransformList([ResizeTransform(h=orig_h, w=orig_w, new_h=224, new_w=224)])
    resized = resize.apply_image(raw).copy()
    chw = torch.tensor(resized).permute(2, 0, 1)
    return (chw, (orig_w, orig_h))
|
def get_last_checkpoint(folder):
    """Return the highest-numbered checkpoint subdirectory of `folder`, or None.

    Directory names are matched with the module-level `_re_checkpoint` pattern;
    the first captured group must be the checkpoint's integer step.
    """
    candidates = [
        entry
        for entry in os.listdir(folder)
        if _re_checkpoint.search(entry) is not None and os.path.isdir(os.path.join(folder, entry))
    ]
    if not candidates:
        return
    latest = max(candidates, key=lambda name: int(_re_checkpoint.search(name).groups()[0]))
    return os.path.join(folder, latest)
|
def re_score(pred_relations, gt_relations, mode='strict'):
    """Score relation-extraction predictions against gold relations.

    Each argument is a list of sentences, each sentence a list of relation
    dicts with keys 'head', 'tail', 'head_type', 'tail_type', 'type'.
    In 'strict' mode a match requires span AND entity-type agreement; in
    'boundaries' mode only the spans must agree. Returns per-type and 'ALL'
    (micro + macro) tp/fp/fn/p/r/f1 in a nested dict.
    """
    assert (mode in ['strict', 'boundaries'])
    # Only relation type 1 is scored (0 is the negative class).
    relation_types = [v for v in [0, 1] if v != 0]
    scores = {rel: {'tp': 0, 'fp': 0, 'fn': 0} for rel in relation_types + ['ALL']}
    n_sents = len(gt_relations)
    n_rels = sum(len(sent) for sent in gt_relations)
    n_found = sum(len(sent) for sent in pred_relations)
    for pred_sent, gt_sent in zip(pred_relations, gt_relations):
        for rel_type in relation_types:
            if mode == 'strict':
                as_key = lambda rel: (rel['head'], rel['head_type'], rel['tail'], rel['tail_type'])
            else:
                as_key = lambda rel: (rel['head'], rel['tail'])
            pred_rels = {as_key(rel) for rel in pred_sent if rel['type'] == rel_type}
            gt_rels = {as_key(rel) for rel in gt_sent if rel['type'] == rel_type}
            scores[rel_type]['tp'] += len(pred_rels & gt_rels)
            scores[rel_type]['fp'] += len(pred_rels - gt_rels)
            scores[rel_type]['fn'] += len(gt_rels - pred_rels)
    # Per-type precision / recall / F1 (the 'ALL' bucket is overwritten below).
    for rel_type in scores.keys():
        bucket = scores[rel_type]
        if bucket['tp']:
            bucket['p'] = bucket['tp'] / (bucket['fp'] + bucket['tp'])
            bucket['r'] = bucket['tp'] / (bucket['fn'] + bucket['tp'])
        else:
            bucket['p'], bucket['r'] = 0, 0
        if (bucket['p'] + bucket['r']) != 0:
            bucket['f1'] = (2 * bucket['p'] * bucket['r']) / (bucket['p'] + bucket['r'])
        else:
            bucket['f1'] = 0
    # Micro-averaged scores across all relation types.
    tp = sum(scores[rel_type]['tp'] for rel_type in relation_types)
    fp = sum(scores[rel_type]['fp'] for rel_type in relation_types)
    fn = sum(scores[rel_type]['fn'] for rel_type in relation_types)
    if tp:
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = (2 * precision * recall) / (precision + recall)
    else:
        precision, recall, f1 = 0, 0, 0
    scores['ALL']['p'] = precision
    scores['ALL']['r'] = recall
    scores['ALL']['f1'] = f1
    scores['ALL']['tp'] = tp
    scores['ALL']['fp'] = fp
    scores['ALL']['fn'] = fn
    # Macro averages over the per-type scores.
    scores['ALL']['Macro_f1'] = np.mean([scores[t]['f1'] for t in relation_types])
    scores['ALL']['Macro_p'] = np.mean([scores[t]['p'] for t in relation_types])
    scores['ALL']['Macro_r'] = np.mean([scores[t]['r'] for t in relation_types])
    logger.info(f'RE Evaluation in *** {mode.upper()} *** mode')
    logger.info('processed {} sentences with {} relations; found: {} relations; correct: {}.'.format(n_sents, n_rels, n_found, tp))
    logger.info('\tALL\t TP: {};\tFP: {};\tFN: {}'.format(scores['ALL']['tp'], scores['ALL']['fp'], scores['ALL']['fn']))
    logger.info('\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)'.format(precision, recall, f1))
    logger.info('\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n'.format(scores['ALL']['Macro_p'], scores['ALL']['Macro_r'], scores['ALL']['Macro_f1']))
    for rel_type in relation_types:
        logger.info('\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}'.format(rel_type, scores[rel_type]['tp'], scores[rel_type]['fp'], scores[rel_type]['fn'], scores[rel_type]['p'], scores[rel_type]['r'], scores[rel_type]['f1'], (scores[rel_type]['tp'] + scores[rel_type]['fp'])))
    return scores
|
@dataclass
class ModelArguments():
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    # Required: HF hub identifier or local path of the base model.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    # Optional overrides; when None, model_name_or_path is reused.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    # Download cache directory for pretrained weights.
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    # Branch, tag, or commit id to pin the checkpoint to.
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    # Needed to fetch private models from the hub.
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
|
class BiaffineAttention(torch.nn.Module):
'Implements a biaffine attention operator for binary relation classification.\n\n PyTorch implementation of the biaffine attention operator from "End-to-end neural relation\n extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used\n as a classifier for binary relation classification.\n\n Args:\n in_features (int): The size of the feature dimension of the inputs.\n out_features (int): The size of the feature dimension of the output.\n\n Shape:\n - x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of\n additional dimensisons.\n - x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of\n additional dimensions.\n - Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number\n of additional dimensions.\n\n Examples:\n >>> batch_size, in_features, out_features = 32, 100, 4\n >>> biaffine_attention = BiaffineAttention(in_features, out_features)\n >>> x_1 = torch.randn(batch_size, in_features)\n >>> x_2 = torch.randn(batch_size, in_features)\n >>> output = biaffine_attention(x_1, x_2)\n >>> print(output.size())\n torch.Size([32, 4])\n '
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False)
self.linear = torch.nn.Linear((2 * in_features), out_features, bias=True)
self.reset_parameters()
def forward(self, x_1, x_2):
return (self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=(- 1))))
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
|
class REDecoder(nn.Module):
    """Relation-extraction head that scores (question, answer) entity pairs.

    Candidate pairs are built per document, each entity is represented by the
    hidden state of its first token concatenated with a label embedding, heads
    and tails pass through separate FFNNs, and a biaffine layer classifies each
    pair into {0: no relation, 1: relation}.
    """
    def __init__(self, config, input_size):
        super().__init__()
        # 3 embeddable entity-label ids; build_relation uses 1 for heads
        # (questions) and 2 for tails (answers), 0 for dummy filler entities.
        self.entity_emb = nn.Embedding(3, input_size, scale_grad_by_freq=True)
        projection = nn.Sequential(nn.Linear((input_size * 2), config.hidden_size), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob), nn.Linear(config.hidden_size, (config.hidden_size // 2)), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob))
        # Independent (deep-copied) projections for head and tail entities.
        self.ffnn_head = copy.deepcopy(projection)
        self.ffnn_tail = copy.deepcopy(projection)
        self.rel_classifier = BiaffineAttention((config.hidden_size // 2), 2)
        self.loss_fct = CrossEntropyLoss()
    def build_relation(self, relations, entities):
        # Construct positive + negative candidate pairs per batch element:
        # all (label==1, label==2) index pairs are candidates; gold pairs get
        # label 1, the rest 0. NOTE: mutates `entities` for tiny documents.
        batch_size = len(relations)
        new_relations = []
        for b in range(batch_size):
            if (len(entities[b]['start']) <= 2):
                # Too few real entities: substitute a dummy pair so the
                # (0, 1) fallback below indexes safely.
                entities[b] = {'end': [1, 1], 'label': [0, 0], 'start': [0, 0]}
            all_possible_relations = set([(i, j) for i in range(len(entities[b]['label'])) for j in range(len(entities[b]['label'])) if ((entities[b]['label'][i] == 1) and (entities[b]['label'][j] == 2))])
            if (len(all_possible_relations) == 0):
                all_possible_relations = set([(0, 1)])
            positive_relations = set(list(zip(relations[b]['head'], relations[b]['tail'])))
            negative_relations = (all_possible_relations - positive_relations)
            # Drop gold pairs whose entities are not valid candidates.
            positive_relations = set([i for i in positive_relations if (i in all_possible_relations)])
            # Positives first, then negatives, so labels can be assigned by count.
            reordered_relations = (list(positive_relations) + list(negative_relations))
            relation_per_doc = {'head': [], 'tail': [], 'label': []}
            relation_per_doc['head'] = [i[0] for i in reordered_relations]
            relation_per_doc['tail'] = [i[1] for i in reordered_relations]
            relation_per_doc['label'] = (([1] * len(positive_relations)) + ([0] * (len(reordered_relations) - len(positive_relations))))
            assert (len(relation_per_doc['head']) != 0)
            new_relations.append(relation_per_doc)
        return (new_relations, entities)
    def get_predicted_relations(self, logits, relations, entities):
        # Decode candidates whose argmax class is 1 into relation dicts with
        # token spans and entity types, matching the format used by re_score.
        pred_relations = []
        for (i, pred_label) in enumerate(logits.argmax((- 1))):
            if (pred_label != 1):
                continue
            rel = {}
            rel['head_id'] = relations['head'][i]
            rel['head'] = (entities['start'][rel['head_id']], entities['end'][rel['head_id']])
            rel['head_type'] = entities['label'][rel['head_id']]
            rel['tail_id'] = relations['tail'][i]
            rel['tail'] = (entities['start'][rel['tail_id']], entities['end'][rel['tail_id']])
            rel['tail_type'] = entities['label'][rel['tail_id']]
            rel['type'] = 1  # single positive relation type
            pred_relations.append(rel)
        return pred_relations
    def forward(self, hidden_states, entities, relations):
        # Score all candidate pairs; returns (loss, per-document predictions).
        (batch_size, max_n_words, context_dim) = hidden_states.size()
        device = hidden_states.device
        (relations, entities) = self.build_relation(relations, entities)
        loss = 0
        all_pred_relations = []
        all_logits = []
        all_labels = []
        for b in range(batch_size):
            head_entities = torch.tensor(relations[b]['head'], device=device)
            tail_entities = torch.tensor(relations[b]['tail'], device=device)
            relation_labels = torch.tensor(relations[b]['label'], device=device)
            entities_start_index = torch.tensor(entities[b]['start'], device=device)
            entities_labels = torch.tensor(entities[b]['label'], device=device)
            head_index = entities_start_index[head_entities]
            head_label = entities_labels[head_entities]
            head_label_repr = self.entity_emb(head_label)
            tail_index = entities_start_index[tail_entities]
            tail_label = entities_labels[tail_entities]
            tail_label_repr = self.entity_emb(tail_label)
            # Entity repr = hidden state at its first token + label embedding.
            head_repr = torch.cat((hidden_states[b][head_index], head_label_repr), dim=(- 1))
            tail_repr = torch.cat((hidden_states[b][tail_index], tail_label_repr), dim=(- 1))
            heads = self.ffnn_head(head_repr)
            tails = self.ffnn_tail(tail_repr)
            logits = self.rel_classifier(heads, tails)
            pred_relations = self.get_predicted_relations(logits, relations[b], entities[b])
            all_pred_relations.append(pred_relations)
            all_logits.append(logits)
            all_labels.append(relation_labels)
        # One cross-entropy over every candidate pair in the batch.
        all_logits = torch.cat(all_logits, 0)
        all_labels = torch.cat(all_labels, 0)
        loss = self.loss_fct(all_logits, all_labels)
        return (loss, all_pred_relations)
|
class FunsdTrainer(Trainer):
    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """Move tensor-like inputs onto the trainer's device and, when the model
        caches past state, attach it under the ``mems`` key."""
        target_device = self.args.device
        for name, value in inputs.items():
            # Anything exposing both .to and .device (tensors and friends) moves.
            if hasattr(value, 'to') and hasattr(value, 'device'):
                inputs[name] = value.to(target_device)
        if self.args.past_index >= 0 and self._past is not None:
            inputs['mems'] = self._past
        return inputs
|
class XfunSerTrainer(FunsdTrainer):
    # Semantic-entity-recognition trainer; inherits FunsdTrainer unchanged.
    pass
|
class XfunReTrainer(FunsdTrainer):
    """Trainer for the XFUN relation-extraction task.

    Overrides prediction to collect predicted and gold relations (rather than
    logits) and scores them with the ``compute_metrics`` callback; also applies
    a larger learning rate to 'extractor' parameters in ``create_optimizer``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Treat 'relations' as a label so it is passed through to prediction_step.
        self.label_names.append('relations')

    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Run one forward pass without gradients; return (outputs, label tuple)."""
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            if self.use_amp:
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
        labels = tuple(inputs.get(name) for name in self.label_names)
        return (outputs, labels)

    def prediction_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> PredictionOutput:
        """Prediction/evaluation loop for relation extraction.

        Accumulates predicted relations, gold relation labels, and entity spans
        across batches, rebuilds gold relations in prediction format, and scores
        them via ``compute_metrics``. Returns a flat metrics dict.
        """
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError('dataset must implement __len__')
        prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.args.prediction_loss_only)
        if self.args.deepspeed and (not self.args.do_train):
            logger.info('Detected the deepspeed argument but it will not be used for evaluation')
        model = self._wrap_model(self.model, training=False)
        if (not self.is_in_train) and self.args.fp16_full_eval:
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info('***** Running %s *****', description)
        logger.info(' Num examples = %d', num_examples)
        logger.info(' Batch size = %d', batch_size)
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        re_labels = None
        pred_relations = None
        entities = None
        for (step, inputs) in enumerate(dataloader):
            (outputs, labels) = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # labels[1] is the 'relations' entry (appended to label_names in __init__).
            re_labels = (labels[1] if (re_labels is None) else (re_labels + labels[1]))
            pred_relations = (outputs.pred_relations if (pred_relations is None) else (pred_relations + outputs.pred_relations))
            entities = (outputs.entities if (entities is None) else (entities + outputs.entities))
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
        # Rebuild gold relations in the same dict format as the predictions.
        gt_relations = []
        for b in range(len(re_labels)):
            rel_sent = []
            for (head, tail) in zip(re_labels[b]['head'], re_labels[b]['tail']):
                rel = {}
                rel['head_id'] = head
                rel['head'] = (entities[b]['start'][rel['head_id']], entities[b]['end'][rel['head_id']])
                rel['head_type'] = entities[b]['label'][rel['head_id']]
                rel['tail_id'] = tail
                rel['tail'] = (entities[b]['start'][rel['tail_id']], entities[b]['end'][rel['tail_id']])
                rel['tail_type'] = entities[b]['label'][rel['tail_id']]
                rel['type'] = 1
                rel_sent.append(rel)
            gt_relations.append(rel_sent)
        re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations))
        re_metrics = {'precision': re_metrics['ALL']['p'], 'recall': re_metrics['ALL']['r'], 'f1': re_metrics['ALL']['f1']}
        # NOTE(review): `outputs` here is from the LAST batch only, so the
        # reported loss is not averaged over the whole dataset — confirm intent.
        re_metrics[f'{metric_key_prefix}_loss'] = outputs.loss.mean().item()
        metrics = {}
        # Ensure every metric key carries the metric_key_prefix.
        for key in list(re_metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = re_metrics.pop(key)
            else:
                metrics[f'{key}'] = re_metrics.pop(key)
        return metrics

    def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> Dict[str, float]:
        """Run evaluation and return the metrics dict (keys prefixed with
        ``metric_key_prefix``); logs metrics and fires the on_evaluate callback."""
        if (eval_dataset is not None) and (not isinstance(eval_dataset, collections.abc.Sized)):
            raise ValueError('eval_dataset must implement __len__')
        # Temporarily pretend to be single-process so get_eval_dataloader builds
        # an unsharded sampler, then restore the real rank.
        # NOTE(review): assumes torch.distributed is initialized — confirm.
        self.args.local_rank = (- 1)
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        self.args.local_rank = torch.distributed.get_rank()
        start_time = time.time()
        metrics = self.prediction_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (self.compute_metrics is None) else None), ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
        n_samples = len(eval_dataset if (eval_dataset is not None) else self.eval_dataset)
        metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
        self.log(metrics)
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def create_optimizer(self, speedup_r=4.0):
        """Create the optimizer, giving 'extractor' parameters (except the
        relation classifier) a ``speedup_r``-times larger learning rate.

        Fix: removed a dead `optimizer_cls = (Adafactor if ... else AdamW)`
        assignment that was immediately overwritten by the if/else below.
        """
        if self.optimizer is None:
            decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
            decay_parameters = [name for name in decay_parameters if 'bias' not in name]
            # Parameters of the feature extractor get the boosted learning rate.
            speedup_parameters = [name for name in get_parameter_names(self.model, []) if ('extractor' in name) and ('rel_classifier' not in name)]
            # Four groups: {decay, no-decay} x {boosted lr, base lr}.
            optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n in speedup_parameters))], 'weight_decay': 0.0, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': self.args.learning_rate}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': 0.0, 'lr': self.args.learning_rate}]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {'betas': (self.args.adam_beta1, self.args.adam_beta2), 'eps': self.args.adam_epsilon}
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if is_sagemaker_mp_enabled():
            import smdistributed.modelparallel.torch as smp
            self.optimizer = smp.DistributedOptimizer(self.optimizer)
|
@dataclass
class ReOutput(ModelOutput):
    """Model output container for the relation-extraction head."""
    # Relation-classification loss (see REDecoder.forward).
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-document entity spans/labels and gold relations passed through the model.
    entities: Optional[Dict] = None
    relations: Optional[Dict] = None
    # Per-document decoded predictions from REDecoder.get_predicted_relations.
    pred_relations: Optional[Dict] = None
|
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif (last_checkpoint is not None):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f'Training/evaluation parameters {training_args}')
set_seed(training_args.seed)
datasets = load_dataset(os.path.abspath(LiLTfinetune.data.datasets.funsd.__file__))
if training_args.do_train:
column_names = datasets['train'].column_names
features = datasets['train'].features
else:
column_names = datasets['validation'].column_names
features = datasets['validation'].features
text_column_name = ('tokens' if ('tokens' in column_names) else column_names[0])
label_column_name = (f'{data_args.task_name}_tags' if (f'{data_args.task_name}_tags' in column_names) else column_names[1])
remove_columns = column_names
def get_label_list(labels):
    """Collect the distinct labels across all examples, sorted alphabetically."""
    seen = set()
    for example_labels in labels:
        seen.update(example_labels)
    return sorted(seen)
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets['train'][label_column_name])
label_to_id = {l: i for (i, l) in enumerate(label_list)}
num_labels = len(label_list)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), add_prefix_space=True)
model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this requirement')
padding = ('max_length' if data_args.pad_to_max_length else False)
def tokenize_and_align_labels(examples):
    # Tokenize pre-split words and align word-level labels/boxes to subword
    # tokens. Relies on the enclosing scope's `tokenizer`, `padding`,
    # `text_column_name`, `label_column_name`, `label_to_id` and `data_args`.
    tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, truncation=True, return_overflowing_tokens=True, is_split_into_words=True)
    labels = []
    bboxes = []
    images = []
    for batch_index in range(len(tokenized_inputs['input_ids'])):
        word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
        # Overflowing chunks map back to the example they came from.
        org_batch_index = tokenized_inputs['overflow_to_sample_mapping'][batch_index]
        label = examples[label_column_name][org_batch_index]
        bbox = examples['bboxes'][org_batch_index]
        image = examples['image'][org_batch_index]
        previous_word_idx = None
        label_ids = []
        bbox_inputs = []
        for word_idx in word_ids:
            if (word_idx is None):
                # Special tokens: ignored by the loss (-100) with a zero box.
                label_ids.append((- 100))
                bbox_inputs.append([0, 0, 0, 0])
            elif (word_idx != previous_word_idx):
                # First subword of a word carries the word's label and box.
                label_ids.append(label_to_id[label[word_idx]])
                bbox_inputs.append(bbox[word_idx])
            else:
                # Continuation subwords: labeled only if label_all_tokens is set.
                label_ids.append((label_to_id[label[word_idx]] if data_args.label_all_tokens else (- 100)))
                bbox_inputs.append(bbox[word_idx])
            previous_word_idx = word_idx
        labels.append(label_ids)
        bboxes.append(bbox_inputs)
        images.append(image)
    tokenized_inputs['labels'] = labels
    tokenized_inputs['bbox'] = bboxes
    tokenized_inputs['image'] = images
    return tokenized_inputs
if training_args.do_train:
if ('train' not in datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = datasets['train']
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
if training_args.do_eval:
if ('validation' not in datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = datasets['validation']
if (data_args.max_val_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
if training_args.do_predict:
if ('test' not in datasets):
raise ValueError('--do_predict requires a test dataset')
test_dataset = datasets['test']
if (data_args.max_test_samples is not None):
test_dataset = test_dataset.select(range(data_args.max_test_samples))
test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)
metric = load_metric('seqeval')
    def compute_metrics(p):
        # Turn raw logits + label ids from an EvalPrediction into seqeval
        # entity-level metrics. `label_list`, `metric` and `data_args` are
        # captured from the enclosing main() scope.
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        # Drop positions labelled -100 (special tokens / non-first sub-tokens
        # by HF convention) before scoring.
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Flatten the nested per-entity dicts into "<entity>_<stat>" keys.
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            # Otherwise report only the overall aggregate scores.
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
if training_args.do_train:
checkpoint = (last_checkpoint if last_checkpoint else None)
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model()
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate()
max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
(predictions, labels, metrics) = trainer.predict(test_dataset)
predictions = np.argmax(predictions, axis=2)
true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
trainer.log_metrics('test', metrics)
trainer.save_metrics('test', metrics)
output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
if trainer.is_world_process_zero():
with open(output_test_predictions_file, 'w') as writer:
for prediction in true_predictions:
writer.write((' '.join(prediction) + '\n'))
|
def _mp_fn(index):
    # Per-process entry point for a multi-process launcher; `index` is the
    # worker rank and is unused here — presumably required by the spawn API
    # (e.g. torch/XLA xla_spawn) — TODO confirm against the launcher.
    main()
|
def main():
    # Fine-tune a relation-extraction (RE) model on the XFUN dataset:
    # parse arguments, set up logging/seed, load data + model, then train
    # and evaluate with XfunReTrainer.
    parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A single .json argument is treated as a config file instead of CLI flags.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect an existing checkpoint so training can resume instead of clobbering.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # Logging: only the main process logs at INFO to keep multi-GPU output readable.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    # NOTE(review): the two f-strings are concatenated without a separator, so
    # "...n_gpu: N" and "distributed training..." run together in the log line.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # Load the XFUN dataset for the configured language (plus any additional languages).
    datasets = load_dataset(os.path.abspath(LiLTfinetune.data.datasets.xfun.__file__), f'xfun.{data_args.lang}', additional_langs=data_args.additional_langs, keep_in_memory=True)
    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['validation'].column_names
        features = datasets['validation'].features
    text_column_name = 'input_ids'
    label_column_name = 'labels'
    remove_columns = column_names
    def get_label_list(labels):
        # Collect the sorted set of unique labels across all examples.
        unique_labels = set()
        for label in labels:
            unique_labels = (unique_labels | set(label))
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
    if isinstance(features[label_column_name].feature, ClassLabel):
        # Labels are already integer class ids; the id mapping is the identity.
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForRelationExtraction.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this requirement')
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if training_args.do_train:
        if ('train' not in datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if (data_args.max_train_samples is not None):
            # Optionally truncate the split for quicker debugging runs.
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if ('validation' not in datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets['validation']
        if (data_args.max_val_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    if training_args.do_predict:
        # NOTE(review): the test split is prepared here but this RE variant has
        # no predict step below — the dataset is never used.
        if ('test' not in datasets):
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets['test']
        if (data_args.max_test_samples is not None):
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
    # Pad to a multiple of 8 under fp16 for tensor-core efficiency.
    data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)
    def compute_metrics(p):
        # Relation-extraction metrics: boundary-mode precision/recall/F1.
        (pred_relations, gt_relations) = p
        score = re_score(pred_relations, gt_relations, mode='boundaries')
        return score
    trainer = XfunReTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    if training_args.do_train:
        checkpoint = (last_checkpoint if last_checkpoint else None)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        # Report the number of samples actually trained on.
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
|
def _mp_fn(index):
    # Per-process entry point for a multi-process launcher; `index` is the
    # worker rank and is unused here — presumably required by the spawn API
    # (e.g. torch/XLA xla_spawn) — TODO confirm against the launcher.
    main()
|
def main():
    # Fine-tune a semantic-entity-recognition (SER) token-classification model
    # on the XFUN dataset: parse arguments, set up logging/seed, load data and
    # model, then train / evaluate / predict with XfunSerTrainer.
    parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A single .json argument is treated as a config file instead of CLI flags.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect an existing checkpoint so training can resume instead of clobbering.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # Logging: only the main process logs at INFO to keep multi-GPU output readable.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    # NOTE(review): the two f-strings are concatenated without a separator, so
    # "...n_gpu: N" and "distributed training..." run together in the log line.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # Load the XFUN dataset for the configured language (plus any additional languages).
    datasets = load_dataset(os.path.abspath(LiLTfinetune.data.datasets.xfun.__file__), f'xfun.{data_args.lang}', additional_langs=data_args.additional_langs, keep_in_memory=True)
    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['validation'].column_names
        features = datasets['validation'].features
    text_column_name = 'input_ids'
    label_column_name = 'labels'
    remove_columns = column_names
    def get_label_list(labels):
        # Collect the sorted set of unique labels across all examples.
        unique_labels = set()
        for label in labels:
            unique_labels = (unique_labels | set(label))
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
    if isinstance(features[label_column_name].feature, ClassLabel):
        # Labels are already integer class ids; the id mapping is the identity.
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this requirement')
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if training_args.do_train:
        if ('train' not in datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if (data_args.max_train_samples is not None):
            # Optionally truncate the split for quicker debugging runs.
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if ('validation' not in datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets['validation']
        if (data_args.max_val_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    if training_args.do_predict:
        if ('test' not in datasets):
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets['test']
        if (data_args.max_test_samples is not None):
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
    # Pad to a multiple of 8 under fp16 for tensor-core efficiency.
    data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)
    metric = load_metric('seqeval')
    def compute_metrics(p):
        # seqeval entity-level metrics; drop positions labelled -100
        # (special tokens / non-first sub-tokens by HF convention).
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Flatten the nested per-entity dicts into "<entity>_<stat>" keys.
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
    trainer = XfunSerTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    if training_args.do_train:
        checkpoint = (last_checkpoint if last_checkpoint else None)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        # Report the number of samples actually trained on.
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')
        (predictions, labels, metrics) = trainer.predict(test_dataset)
        predictions = np.argmax(predictions, axis=2)
        # Keep only real-token predictions (label != -100) for the output file.
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        trainer.log_metrics('test', metrics)
        trainer.save_metrics('test', metrics)
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        # Only rank 0 writes the predictions file in distributed runs.
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                for prediction in true_predictions:
                    writer.write((' '.join(prediction) + '\n'))
|
def _mp_fn(index):
    # Per-process entry point for a multi-process launcher; `index` is the
    # worker rank and is unused here — presumably required by the spawn API
    # (e.g. torch/XLA xla_spawn) — TODO confirm against the launcher.
    main()
|
def test():
    """Smoke-test the evaluator against the bundled toy JSON dataset."""
    print('Loading toy dataset from JSON...')
    reader = DatasetLoader()
    gt_data = reader.read_json('data/toydata/gt.json')
    print('>> {}'.format(gt_data.phrases))
    gt_boxes = gt_data.boxes
    print('Loading toy predictions from JSON...')
    pred_data = reader.read_json('data/toydata/pred.json')
    pred_boxes = pred_data.boxes
    threshold = 0.5
    # Predictions and ground truth must be aligned one-to-one.
    assert pred_data.size == gt_data.size
    print('Evaluating toy dataset...')
    scorer = Evaluator()
    accuracy, iou_values = scorer.evaluate(pred_boxes, gt_boxes, threshold)
    print('>> Accuracy: {}'.format(accuracy))
    for pred, gt, iou in zip(pred_boxes, gt_boxes, iou_values):
        print('>>>> GT: {}, PRED: {}, IoU: {}'.format(gt, pred, iou))
|
class Dataset(object):
    """A collection of phrase-grounding instances.

    Each instance is a dict holding at least the keys "phrase" (str),
    "image" (str) and "box" ([x, y, w, h], where (x, y) is the top-left
    corner and (w, h) the width/height of the bounding box).
    """

    def __init__(self):
        self._instances = []

    def add_instance(self, propertyDict):
        """Append one instance dict to the dataset.

        Parameters
        ----------
        propertyDict : dict
            Must contain at least "phrase", "image" and "box"; any extra
            keys are stored untouched.
        """
        self._instances.append(propertyDict)

    def get_phraselist(self):
        """Return all "phrase" values, in insertion order."""
        return [entry['phrase'] for entry in self._instances]

    def get_imagelist(self):
        """Return all "image" values, in insertion order."""
        return [entry['image'] for entry in self._instances]

    def get_boxlist(self):
        """Return all "box" values, in insertion order."""
        return [entry['box'] for entry in self._instances]

    def get_instances(self):
        """Return the underlying list of instance dicts (not a copy)."""
        return self._instances

    def get_count(self):
        """Return the number of stored instances."""
        return len(self._instances)

    # Attribute-style read-only accessors (kept for backward compatibility).
    count = property(get_count)
    size = property(get_count)
    instances = property(get_instances)
    phrases = property(get_phraselist)
    images = property(get_imagelist)
    boxes = property(get_boxlist)
|
class DatasetLoader():
    """Utility/factory class to load a Dataset from a preformatted text or JSON file."""

    def __init__(self):
        pass

    def read_text(self, filePath):
        """Load a Dataset from a tab-separated text file.

        Row format: image_id \\t phrase \\t x \\t y \\t w \\t h

        Parameters
        ----------
        filePath : str
            Path to the text file containing the dataset.

        Returns
        -------
        Dataset
            A Dataset instance loaded with the rows from filePath.
        """
        dataset = Dataset()
        # Use a context manager so the handle is closed deterministically
        # (the original iterated an open() result and never closed it).
        with open(filePath) as f:
            for line in f:
                data = line.strip().split('\t')
                dataset.add_instance({'image': data[0], 'phrase': data[1], 'box': list(map(float, data[2:6]))})
        return dataset

    def read_json(self, jsonFilePath):
        """Load a Dataset from a JSON file.

        Minimum format: [{"image": ..., "phrase": ..., "box": [x, y, w, h]}, ...];
        extra keys in each entry are carried along untouched.

        Parameters
        ----------
        jsonFilePath : str
            Path to the JSON file containing the dataset.

        Returns
        -------
        Dataset
            A Dataset instance loaded with the entries from jsonFilePath.
        """
        dataset = Dataset()
        # Close the file as soon as parsing finishes (original leaked the handle).
        with open(jsonFilePath, encoding='utf-8') as f:
            obj = json.load(f)
        for entry in obj:
            dataset.add_instance(entry)
        return dataset
|
class Evaluator(object):
    """Utility class for evaluating phrase localization.

    Boxes are [x, y, w, h]: (x, y) is the top-left corner, (w, h) the
    width/height. IoU uses inclusive pixel coordinates (right = x + w - 1).
    """

    def __init__(self):
        pass

    def compute_iou(self, predictedBoxList, gtBoxList):
        """Compute the IoU of each (predicted, ground-truth) box pair.

        Parameters
        ----------
        predictedBoxList : list of [x, y, w, h]
        gtBoxList : list of [x, y, w, h]
            Must be the same length as predictedBoxList.

        Returns
        -------
        list of float
            IoU per prediction, in input order.
        """
        assert (len(predictedBoxList) == len(gtBoxList)), 'The list of predicted bounding boxes ({}) should be the same size as the list of ground truth bounding boxes ({}).'.format(len(predictedBoxList), len(gtBoxList))
        return [self._iou(gt, pred) for (gt, pred) in zip(gtBoxList, predictedBoxList)]

    def accuracy(self, iouList, iouThreshold=0.5):
        """Fraction of IoU values that reach `iouThreshold`.

        Raises ZeroDivisionError when iouList is empty (unchanged behavior).
        """
        matches = sum(1 for iou in iouList if iou >= iouThreshold)
        return (matches * 1.0) / len(iouList)

    def evaluate(self, predictedBoxList, gtBoxList, iouThreshold=0.5):
        """Return (accuracy, per-instance IoU list) for paired box lists."""
        iouList = self.compute_iou(predictedBoxList, gtBoxList)
        accuracy = self.accuracy(iouList, iouThreshold)
        return (accuracy, iouList)

    def evaluate_perclass(self, predictedBoxList, gtBoxList, boxCategoriesList, iouThreshold=0.5):
        """Overall + per-category accuracy for paired box lists.

        Parameters
        ----------
        boxCategoriesList : list of list
            Categories per instance (an instance may carry several); same
            length as gtBoxList.

        Returns
        -------
        (accuracy, perclassAccuracies, iouList)
            perclassAccuracies maps category -> accuracy over the instances
            tagged with that category.
        """
        iouList = self.compute_iou(predictedBoxList, gtBoxList)
        accuracy = self.accuracy(iouList, iouThreshold)
        perClassAccDict = {}
        for category in self._category_set(boxCategoriesList):
            (subPred, subGt) = self._filter_by_category(predictedBoxList, gtBoxList, boxCategoriesList, category)
            subIouList = self.compute_iou(subPred, subGt)
            perClassAccDict[category] = self.accuracy(subIouList, iouThreshold)
        return (accuracy, perClassAccDict, iouList)

    def evaluate_upperbound_perclass(self, predictedBoxList, gtBoxList, boxCategoriesList, iouThreshold=0.5):
        """Upper-bound variant: each instance has several candidate boxes.

        For every instance the best (max-IoU) candidate is kept, giving the
        accuracy an oracle selector could reach.

        Parameters
        ----------
        predictedBoxList : list of list of [x, y, w, h]
            Candidate boxes per instance.
        gtBoxList, boxCategoriesList, iouThreshold
            As in evaluate_perclass.

        Returns
        -------
        (accuracy, perclassAccuracies, iouList, argmaxList)
            iouList holds the max IoU per instance; argmaxList the index of
            the candidate achieving it.
        """
        iouList = []
        argmaxList = []
        for (candidates, gtBox) in zip(predictedBoxList, gtBoxList):
            (best, bestIdx) = self._best_iou(candidates, gtBox)
            iouList.append(best)
            argmaxList.append(bestIdx)
        accuracy = self.accuracy(iouList, iouThreshold)
        perClassAccDict = {}
        for category in self._category_set(boxCategoriesList):
            (subPred, subGt) = self._filter_by_category(predictedBoxList, gtBoxList, boxCategoriesList, category)
            subIouList = [self._best_iou(cands, gt)[0] for (cands, gt) in zip(subPred, subGt)]
            perClassAccDict[category] = self.accuracy(subIouList, iouThreshold)
        return (accuracy, perClassAccDict, iouList, argmaxList)

    def _category_set(self, boxCategoriesList):
        """All categories appearing in any instance's category list."""
        categorySet = set()
        for categoryList in boxCategoriesList:
            categorySet.update(categoryList)
        return categorySet

    def _filter_by_category(self, predictedBoxList, gtBoxList, boxCategoriesList, category):
        """Select the (pred, gt) pairs whose instance is tagged with `category`."""
        subPred = []
        subGt = []
        for (pred, gt, cats) in zip(predictedBoxList, gtBoxList, boxCategoriesList):
            if category in cats:
                subPred.append(pred)
                subGt.append(gt)
        return (subPred, subGt)

    def _best_iou(self, candidateBoxList, gtBox):
        """Max IoU over candidates against gtBox, plus the winning index.

        Replaces the original copy-pasted replicate-gt-then-compute_iou logic.
        """
        ious = [self._iou(gtBox, cand) for cand in candidateBoxList]
        best = max(ious)
        return (best, ious.index(best))

    def _iou(self, box1, box2):
        """Intersection over union of two [x, y, w, h] boxes.

        Uses inclusive pixel arithmetic: the right edge is x + w - 1 and a
        one-pixel overlap counts as width 1.
        """
        (box1_left_x, box1_top_y, box1_w, box1_h) = box1
        box1_right_x = (box1_left_x + box1_w) - 1
        box1_bottom_y = (box1_top_y + box1_h) - 1
        (box2_left_x, box2_top_y, box2_w, box2_h) = box2
        box2_right_x = (box2_left_x + box2_w) - 1
        box2_bottom_y = (box2_top_y + box2_h) - 1
        intersect_left_x = max(box1_left_x, box2_left_x)
        intersect_top_y = max(box1_top_y, box2_top_y)
        intersect_right_x = min(box1_right_x, box2_right_x)
        intersect_bottom_y = min(box1_bottom_y, box2_bottom_y)
        # Clamp to zero so disjoint boxes yield zero intersection.
        overlap_x = max(0, (intersect_right_x - intersect_left_x) + 1)
        overlap_y = max(0, (intersect_bottom_y - intersect_top_y) + 1)
        intersect = overlap_x * overlap_y
        union = ((box1_w * box1_h) + (box2_w * box2_h)) - intersect
        return (intersect * 1.0) / union
|
def test():
    """Toy example for exercising the evaluation script on inline data."""
    query_list = ['my first phrase', 'my second phrase']
    image_list = ['0001.jpg', '0002.jpg']
    gt_boxes = [[1, 1, 30, 30], [50, 50, 100, 200]]
    threshold = 0.5
    # First prediction misses entirely; second matches exactly.
    pred_boxes = [[31, 31, 30, 30], [50, 50, 100, 200]]
    scorer = Evaluator()
    accuracy, iou_values = scorer.evaluate(pred_boxes, gt_boxes, threshold)
    print(accuracy)
    print(iou_values)
|
def parse_xml(path):
    """Parse one Pascal-VOC style annotation XML file.

    Objects flagged `difficult` are skipped; class names are mapped to ids
    through the module-level `names_dict`.

    Parameters
    ----------
    path : str
        Path to the .xml annotation file (forward-slash separated).

    Returns
    -------
    list[str] | None
        [img_name, width, height, cls, xmin, ymin, xmax, ymax, ...] with one
        (cls, xmin, ymin, xmax, ymax) group per kept object, or None when the
        file contains no usable objects.
    """
    tree = ET.parse(path)
    # Strip the directory part and the 4-character ".xml" suffix.
    img_name = path.split('/')[(- 1)][:(- 4)]
    height = tree.findtext('./size/height')
    width = tree.findtext('./size/width')
    objects = [img_name, width, height]
    for obj in tree.findall('object'):
        difficult = obj.find('difficult').text
        if (difficult == '1'):
            # Skip objects marked as difficult.
            continue
        name = obj.find('name').text
        bbox = obj.find('bndbox')
        xmin = bbox.find('xmin').text
        ymin = bbox.find('ymin').text
        xmax = bbox.find('xmax').text
        ymax = bbox.find('ymax').text
        name = str(names_dict[name])
        objects.extend([name, xmin, ymin, xmax, ymax])
    # BUGFIX: the header always contributes 3 entries, so the original `> 1`
    # test was always true and images with no kept object were never filtered
    # out (the `return None` branch was unreachable).
    if (len(objects) > 3):
        return objects
    else:
        return None
|
def gen_test_txt(txt_path):
    """Write one annotation line per usable test image to `txt_path`.

    Line format: "<index> <image_path> [<cls> <xmin> <ymin> <xmax> <ymax>]...".
    Relies on the module-level lists `test_path`, `anno_path`, `img_path`
    and increments the running global counter `test_cnt`.
    """
    global test_cnt
    # Context managers close both the output file and each image-id list
    # promptly (the original leaked every `open(path, 'r')` handle).
    with open(txt_path, 'w') as out_file:
        for (i, path) in enumerate(test_path):
            with open(path, 'r') as list_file:
                img_names = list_file.readlines()
            for img_name in img_names:
                img_name = img_name.strip()
                xml_path = (((anno_path[i] + '/') + img_name) + '.xml')
                objects = parse_xml(xml_path)
                if objects:
                    # Replace the bare image id with the full image path.
                    objects[0] = (((img_path[i] + '/') + img_name) + '.jpg')
                    if os.path.exists(objects[0]):
                        objects.insert(0, str(test_cnt))
                        test_cnt += 1
                        out_file.write((' '.join(objects) + '\n'))
|
def gen_train_txt(txt_path):
    """Write one annotation line per usable trainval image to `txt_path`.

    Line format: "<index> <image_path> [<cls> <xmin> <ymin> <xmax> <ymax>]...".
    Relies on the module-level lists `trainval_path`, `anno_path`, `img_path`
    and increments the running global counter `train_cnt`.
    """
    global train_cnt
    # Context managers close both the output file and each image-id list
    # promptly (the original leaked every `open(path, 'r')` handle).
    with open(txt_path, 'w') as out_file:
        for (i, path) in enumerate(trainval_path):
            with open(path, 'r') as list_file:
                img_names = list_file.readlines()
            for img_name in img_names:
                img_name = img_name.strip()
                xml_path = (((anno_path[i] + '/') + img_name) + '.xml')
                objects = parse_xml(xml_path)
                if objects:
                    # Replace the bare image id with the full image path.
                    objects[0] = (((img_path[i] + '/') + img_name) + '.jpg')
                    if os.path.exists(objects[0]):
                        objects.insert(0, str(train_cnt))
                        train_cnt += 1
                        out_file.write((' '.join(objects) + '\n'))
|
def conv2d(inputs, filters, kernel_size, strides=1):
    """slim.conv2d wrapper with explicit symmetric padding for strided convs.

    For strides > 1 the input is padded by (kernel_size - 1) pixels split
    front/back on both spatial axes, then convolved with 'VALID' padding;
    for stride 1, plain 'SAME' convolution is used.
    """
    if strides > 1:
        total_pad = kernel_size - 1
        front = total_pad // 2
        back = total_pad - front
        inputs = tf.pad(inputs, [[0, 0], [front, back], [front, back], [0, 0]], mode='CONSTANT')
    padding_mode = 'SAME' if strides == 1 else 'VALID'
    return slim.conv2d(inputs, filters, kernel_size, stride=strides, padding=padding_mode)
|
def darknet53_body(inputs):
    """Darknet-53 backbone.

    Returns three feature maps (route_1, route_2, route_3) taken after the
    8x, 8x and 4x residual stages, at increasing channel depth / decreasing
    spatial resolution.
    """
    def _residual(x, width):
        # 1x1 bottleneck then 3x3 expansion, with an identity skip connection.
        y = conv2d(x, width * 1, 1)
        y = conv2d(y, width * 2, 3)
        return y + x

    net = conv2d(inputs, 32, 3, strides=1)
    net = conv2d(net, 64, 3, strides=2)
    net = _residual(net, 32)
    net = conv2d(net, 128, 3, strides=2)
    for _ in range(2):
        net = _residual(net, 64)
    net = conv2d(net, 256, 3, strides=2)
    for _ in range(8):
        net = _residual(net, 128)
    route_1 = net
    net = conv2d(net, 512, 3, strides=2)
    for _ in range(8):
        net = _residual(net, 256)
    route_2 = net
    net = conv2d(net, 1024, 3, strides=2)
    for _ in range(4):
        net = _residual(net, 512)
    route_3 = net
    return (route_1, route_2, route_3)
|
def yolo_block(inputs, filters):
    """YOLOv3 detection head block: five alternating 1x1/3x3 convs.

    Returns (route, net): `route` is the output after the fifth conv (fed to
    the upsample branch) and `net` adds one more 3x3 conv on top of it.
    """
    net = inputs
    # Alternate 1x1 convs at `filters` channels with 3x3 convs at 2*filters.
    for kernel in (1, 3, 1, 3, 1):
        width = filters if kernel == 1 else filters * 2
        net = conv2d(net, width, kernel)
    route = net
    net = conv2d(net, filters * 2, 3)
    return (route, net)
|
def upsample_layer(inputs, out_shape):
    """Nearest-neighbor upsample of `inputs` to the (H, W) of `out_shape`.

    out_shape is an NHWC shape; indices 1 and 2 give the target size.
    """
    target_hw = (out_shape[1], out_shape[2])
    return tf.image.resize_nearest_neighbor(inputs, target_hw, name='upsampled')
|
class MeshPly():
    """Minimal ASCII PLY mesh reader.

    Parses the header for the vertex/face counts, then reads vertex lines
    (position, normal, optional color) followed by face lines.

    Attributes:
        vertices: list of [x, y, z] floats.
        normals:  list of [nx, ny, nz] floats (assumes three floats follow
                  the position on each vertex line -- TODO confirm against
                  the PLY files actually used).
        colors:   list of [r, g, b] scaled to [0, 1]; falls back to `color`
                  (also divided by 255) when a vertex line has no color
                  columns.
        indices:  first three vertex indices of each face, kept as floats
                  for backward compatibility with downstream users.
    """

    def __init__(self, filename, color=None):
        # Avoid the shared mutable default argument of the original code.
        if color is None:
            color = [0.0, 0.0, 0.0]
        self.vertices = []
        self.colors = []
        self.indices = []
        self.normals = []
        vertex_mode = False
        face_mode = False
        nb_vertices = 0
        nb_faces = 0
        idx = 0
        with open(filename, 'r') as open_file_object:
            for line in open_file_object:
                elements = line.split()
                if not elements:
                    # Skip blank lines; the original raised IndexError on
                    # `elements[0]` for an empty header line.
                    continue
                if vertex_mode:
                    self.vertices.append([float(i) for i in elements[:3]])
                    self.normals.append([float(i) for i in elements[3:6]])
                    if elements[6:9]:
                        self.colors.append([(float(i) / 255.0) for i in elements[6:9]])
                    else:
                        self.colors.append([(float(i) / 255.0) for i in color])
                    idx += 1
                    if (idx == nb_vertices):
                        # All vertices consumed: switch to reading faces.
                        vertex_mode = False
                        face_mode = True
                        idx = 0
                elif face_mode:
                    # elements[0] is the per-face vertex count; keep only the
                    # first three indices (triangles).
                    self.indices.append([float(i) for i in elements[1:4]])
                    idx += 1
                    if (idx == nb_faces):
                        face_mode = False
                elif (elements[0] == 'element'):
                    if (elements[1] == 'vertex'):
                        nb_vertices = int(elements[2])
                    elif (elements[1] == 'face'):
                        nb_faces = int(elements[2])
                elif (elements[0] == 'end_header'):
                    vertex_mode = True
|
def get_color_table(class_num, seed=2):
    """Return {class_index: [r, g, b]} with reproducible random colors.

    Seeds the module-level `random` generator, so repeated calls with the
    same arguments produce the same table.
    """
    random.seed(seed)
    return {idx: [random.randint(0, 255) for _ in range(3)]
            for idx in range(class_num)}
|
def plot_one_box(img, coord, label=None, color=None, line_thickness=None):
    """Draw one bounding box (and optional label) on `img` in place.

    coord: [x_min, y_min, x_max, y_max] coordinates.
    label: optional text drawn on a filled background above the box corner.
    color: BGR list; a random color is picked when falsy.
    line_thickness: rectangle thickness; derived from image size when falsy.
    Returns the (mutated) image.
    """
    if not line_thickness:
        line_thickness = int(round(0.002 * max(img.shape[0:2])))
    tl = line_thickness
    if not color:
        color = [random.randint(0, 255) for _ in range(3)]
    top_left = (int(coord[0]), int(coord[1]))
    bottom_right = (int(coord[2]), int(coord[3]))
    cv2.rectangle(img, top_left, bottom_right, color, thickness=tl)
    if label:
        font_thickness = max(tl - 1, 1)
        text_w, text_h = cv2.getTextSize(label, 0, fontScale=float(tl) / 3,
                                         thickness=font_thickness)[0]
        # Filled rectangle behind the label so the text stays readable.
        bg_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
        cv2.rectangle(img, top_left, bg_corner, color, -1)
        cv2.putText(img, label, (top_left[0], top_left[1] - 2), 0,
                    float(tl) / 3, [0, 0, 0], thickness=font_thickness,
                    lineType=cv2.LINE_AA)
    return img
|
def draw_demo_img(img, projectpts, color=(0, 255, 0)):
    """Draw a projected 3D box from 9 2D points (index 0 plus 8 corners).

    A small yellow dot marks every point; the 12 box edges connect
    points 1..8.  Returns the (mutated) image.
    """
    pts = []
    for i in range(9):
        pt = (int(projectpts[i][0]), int(projectpts[i][1]))
        pts.append(pt)
        cv2.circle(img, pt, 1, (0, 255, 255), -1)
    # Edge list of the cuboid over corner indices 1..8 (same draw order
    # as the original explicit calls).
    edges = ((1, 2), (1, 3), (1, 5), (2, 6), (2, 4), (3, 4),
             (3, 7), (4, 8), (5, 6), (5, 7), (6, 8), (7, 8))
    for a, b in edges:
        cv2.line(img, pts[a], pts[b], color, 2)
    return img
|
def draw_demo_img_corners(img, projectpts, color=(0, 255, 0), nV=9, thickness=2):
    """Draw a projected 3D box whose 8 corners are points 0..7.

    Dots mark the first nV projected points; the 12 cuboid edges connect
    corner indices 0..7.  Returns the (mutated) image.
    """
    pts = []
    for i in range(nV):
        pt = (int(projectpts[i][0]), int(projectpts[i][1]))
        pts.append(pt)
        cv2.circle(img, pt, 2, color, -1)
    # Cuboid edge list over corners 0..7 (same order as the original calls).
    edges = ((0, 1), (0, 2), (0, 4), (1, 5), (1, 3), (2, 3),
             (2, 6), (3, 7), (4, 5), (4, 6), (5, 7), (6, 7))
    for a, b in edges:
        cv2.line(img, pts[a], pts[b], color, thickness=thickness)
    return img
|
def train_one_epoch(model: torch.nn.Module, dl, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, args=None):
    """Run one training epoch over `dl`, stepping the optimizer per batch.

    Computes single- and double-mutation losses via loss_single_double,
    clips gradients, updates the learning rate per iteration, and logs to
    the metric logger (and to wandb unless args.disable_wandb).  Exits the
    process on a non-finite loss.  Returns {meter name: global average}.
    """
    model.train(True)
    optimizer.zero_grad()
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = f'Epoch: [{epoch}]'
    print_freq = 10
    for (batch_idx, batch) in enumerate(metric_logger.log_every(dl, print_freq, header)):
        # Per-iteration schedule: fractional epoch = batch_idx/len(dl) + epoch.
        misc.adjust_learning_rate(optimizer, ((batch_idx / len(dl)) + epoch), args)
        x = mem_inputs_to_device(batch, device, args)
        # Dense ddG targets for single (ddg_dense) and double (ddg_dense2)
        # mutations; moved to device and written back into the batch dict.
        ddg_dense1 = batch['ddg_dense'] = batch['ddg_dense'].to(device, non_blocking=True)
        ddg_dense2 = batch['ddg_dense2'] = batch['ddg_dense2'].to(device, non_blocking=True)
        pred = model(x, batch)
        losses = loss_single_double(pred, ddg_dense1, ddg_dense2, batch, args, True)
        loss = sum(losses.values())
        # Abort instead of continuing to train on NaN/inf loss.
        if (not math.isfinite(loss.item())):
            print('Loss is {}, stopping training'.format(loss.item()))
            sys.exit(1)
        loss.backward()
        # Gradient clipping for training stability.
        nn.utils.clip_grad_norm_(model.parameters(), 5.0)
        optimizer.step()
        optimizer.zero_grad()
        lr = optimizer.param_groups[0]['lr']
        # Detach each loss component for logging under a train_ prefix.
        losses_detach = {f'train_{k}': v.cpu().item() for (k, v) in losses.items()}
        metric_logger.update(lr=lr)
        metric_logger.update(loss=loss.item())
        metric_logger.update(**losses_detach)
        if ((not args.disable_wandb) and misc.is_main_process()):
            wandb.log({'train_loss': loss.item(), 'lr': lr, **losses_detach})
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(model, dl, device, args):
    """Evaluate `model` on `dl`, collecting per-PDB mutation predictions.

    For each batch, builds single/double/list mutation predictions (scores
    are negated ddGs), optionally gathers them across processes, then runs
    eval_ddg and writes per-dataset result/metric CSVs under args.output_dir.
    Returns {f'{ds_name}_{metric}': value} on the main process, {} elsewhere.
    """
    model.eval()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    all_preds = {}
    for batch in metric_logger.log_every(dl, 10, header):
        x = mem_inputs_to_device(batch, device, args)
        # Entries equal to 999 are treated as unknown targets; the masks
        # select positions with known single/double ddG labels.
        batch['known_mask1'] = known_mask1 = (batch['ddg_dense'] != 999)
        batch['known_mask2'] = known_mask2 = (batch['ddg_dense2'] != 999)
        eval_single = known_mask1.any()
        eval_double = known_mask2.any()
        # Explicit mutation lists (arbitrary-order mutations), if provided.
        eval_list = (('mut_info_list' in batch) and (sum([len(x) for x in batch['mut_info_list']]) > 0))
        batch['only_eval_single'] = (eval_single and (not eval_double) and (not eval_list))
        batch['eval_list'] = eval_list
        pred_dict = model(x, batch)
        pr1 = pred_dict['mut1_ddg']
        pr2 = pred_dict.get('mut2_ddg', None)
        for b in range(len(x)):
            pdb_id = batch['pdb_ids'][b]
            seq = batch['seqs'][b]
            (muts, scores) = ([], [])
            if eval_list:
                muts.append(np.array(batch['mut_info_list'][b]))
                # Score = -ddG so that higher means more stabilizing.
                scores.append((- pred_dict['pr_ddgs_list'][b].detach().cpu().numpy()))
            if eval_double:
                (mutations, valid_mask) = get_dense_double_mut_infos(seq)
                pr_ddgs = pr2[b].flatten()
                keep_inds = known_mask2[b].flatten().cpu().numpy()
                # Keep only positions that are both labeled and valid pairs.
                muts.append(mutations[(keep_inds & valid_mask)])
                scores.append((- pr_ddgs[(keep_inds & valid_mask)].detach().cpu().numpy()))
            if eval_single:
                mutations = np.array(get_dense_mut_infos(seq))
                pr_ddgs = pr1[b].flatten()
                keep_inds = known_mask1[b].flatten().cpu().numpy()
                muts.append(mutations[keep_inds])
                scores.append((- pr_ddgs[keep_inds].detach().cpu().numpy()))
            all_preds[pdb_id] = {'mutations': np.concatenate(muts), 'scores': np.concatenate(scores)}
    if args.dist_eval:
        # Gather predictions from all ranks onto the main process.
        print('Start gathering predictions')
        torch.cuda.empty_cache()
        all_preds = misc.gather_dict_keys_on_main(all_preds)
        print(f'Finished gathering predictions')
    if (not misc.is_main_process()):
        return {}
    ds_name = dl.dataset.name
    (metrics, metrics_det, metrics_det_pdb, copypaste, merged_df) = eval_ddg(dl.dataset.mutdf, all_preds)
    # Undo the negation so the CSV carries actual predicted ddG values.
    merged_df['ddg_pred'] = (- merged_df['scores'])
    print(f'Saving results to {args.output_dir}/results_{ds_name}.csv')
    print(f'Saving metrics to {args.output_dir}/metrics_{ds_name}.csv')
    merged_df.to_csv(f'{args.output_dir}/results_{ds_name}.csv', index=False)
    if (metrics_det is not None):
        metrics_det_pdb.to_csv(f'{args.output_dir}/metrics_{ds_name}.csv', index=False)
        print(metrics_det)
    print(ds_name, copypaste)
    metric_logger.update(**metrics)
    ret = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    ret['copypasta'] = copypaste
    # Prefix every metric with the dataset name for logging.
    ret = {f'{ds_name}_{k}': v for (k, v) in ret.items()}
    if ((not args.disable_wandb) and misc.is_main_process()):
        wandb.log(ret)
    return ret
|
def get_args_parser():
    """Build the argparse parser for the mutation-effect training script.

    Covers model architecture, data paths, optimization hyperparameters,
    checkpointing/logging, and distributed-training options.  Returns the
    configured argparse.ArgumentParser (call .parse_args() on it).
    """
    parser = argparse.ArgumentParser('Train Sequence Detector')
    parser.add_argument('--seed', default=0, type=int)
    # Model architecture.
    parser.add_argument('--aa_expand', default='backbone', help='scratch|backbone')
    parser.add_argument('--single_dec', default='naive', help='naive')
    parser.add_argument('--multi_dec', default='epistasis', help='additive|epistasis')
    parser.add_argument('--head_dim', type=int, default=128)
    parser.add_argument('--backbone', default='esm2_t33_650M_UR50D', help='af|esm2_t33_650M_UR50D')
    parser.add_argument('--finetune_backbone', type=str, default='models/finetuning_ptm_2.pt')
    # BUGFIX: type=int was missing, so CLI values arrived as str while the
    # default was the int 0.
    parser.add_argument('--freeze_at', type=int, default=0, help='freeze backbone up to layer X')
    parser.add_argument('--n_msa_seqs', type=int, default=128)
    parser.add_argument('--n_extra_msa_seqs', type=int, default=1024)
    parser.add_argument('--af_extract_feat', type=str, default='both', help='which features to use from AF: both|evo|struct')
    # Data.
    parser.add_argument('--data_path', type=str, default='data/cdna_train.csv')
    parser.add_argument('--eval_data_paths', type=str, default='data/cdna2_test.csv,data/ptmul.csv,data/s669.csv', help='comma separated string of data paths to evaluate')
    parser.add_argument('--max_context_length', type=int, default=2000, help='max length of protein sequence')
    parser.add_argument('--num_workers', default=10, type=int)
    # Optimization.
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0003)
    parser.add_argument('--min_lr', type=float, default=1e-09)
    parser.add_argument('--weight_decay', type=float, default=0.5)
    parser.add_argument('--warmup_epochs', type=int, default=10)
    parser.add_argument('--lambda_single', type=float, default=0.1)
    parser.add_argument('--lambda_double', type=float, default=1.0)
    parser.add_argument('--double_subsample_destabilizing_ratio', type=float, default=8)
    parser.add_argument('--lambda_pos', type=float, default=4)
    # Evaluation / checkpointing.
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--dist_eval', action='store_true')
    parser.add_argument('--test', action='store_true', help='when testing, please use data_path NOT eval_data_paths')
    parser.add_argument('--finetune', default='', type=str)
    parser.add_argument('--resume', default='', type=str)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--output_dir', type=Path, default='logs/mutate_everything')
    parser.add_argument('--eval_period', type=int, default=10)
    parser.add_argument('--save_period', type=int, default=1000)
    parser.add_argument('--disable_wandb', action='store_true')
    # Distributed training.
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.