code stringlengths 101 5.91M |
|---|
def link_attr_list_to_map(l):
    """Normalize a link-attribute list into an attribute dict.

    A dict argument is returned unchanged.  A sequence is interpreted as
    (delay, capacity): each value is unit-converted, coerced to its target
    type (float for delay, int for capacity), and stored as a string.  A
    constant 'weight' of '10' is always added.
    """
    if isinstance(l, dict):
        return l
    casters = {'delay': float, 'capacity': int}
    result = {}
    for attr, raw in zip(('delay', 'capacity'), l):
        result[attr] = str(casters[attr](convert_unit(raw)))
    result['weight'] = '10'
    return result
def dump_torchscript_IR(model, dir):
    """Dump TorchScript code, IR graphs and the module repr of *model* into
    text files under *dir* (created if missing).

    Files written:
      model_ts_code.txt       -- per-submodule TorchScript code
      model_ts_IR.txt         -- the module/function graph
      model_ts_IR_inlined.txt -- the inlined graph
      model.txt               -- str(model), for modules only
    """
    dir = os.path.expanduser(dir)
    PathManager.mkdirs(dir)

    def _get_script_mod(mod):
        # A TracedModule wraps the real ScriptModule; unwrap it.
        if isinstance(mod, torch.jit.TracedModule):
            return mod._actual_script_module
        return mod

    with PathManager.open(os.path.join(dir, 'model_ts_code.txt'), 'w') as f:

        def get_code(mod):
            # Prefer the internal C++ module's code, fall back to the public
            # attribute; None when neither is available.
            try:
                return _get_script_mod(mod)._c.code
            except AttributeError:
                pass
            try:
                return mod.code
            except AttributeError:
                return None

        def dump_code(prefix, mod):
            # Recursively write the code of `mod` and all named children.
            code = get_code(mod)
            name = (prefix or 'root model')
            if (code is None):
                f.write(f'''Could not found code for {name} (type={mod.original_name})
''')
                f.write('\n')
            else:
                f.write(f'''
Code for {name}, type={mod.original_name}:
''')
                f.write(code)
                f.write('\n')
                f.write(('-' * 80))
            for (name, m) in mod.named_children():
                dump_code(((prefix + '.') + name), m)

        if isinstance(model, torch.jit.ScriptFunction):
            # A ScriptFunction has no children; just dump its code.
            f.write(get_code(model))
        else:
            dump_code('', model)

    def _get_graph(model):
        try:
            # _c.dump_to_str gives a more detailed dump when available;
            # fall back to the plain graph string.
            return _get_script_mod(model)._c.dump_to_str(True, False, False)
        except AttributeError:
            return model.graph.str()

    with PathManager.open(os.path.join(dir, 'model_ts_IR.txt'), 'w') as f:
        f.write(_get_graph(model))
    with PathManager.open(os.path.join(dir, 'model_ts_IR_inlined.txt'), 'w') as f:
        f.write(str(model.inlined_graph))
    if (not isinstance(model, torch.jit.ScriptFunction)):
        with PathManager.open(os.path.join(dir, 'model.txt'), 'w') as f:
            f.write(str(model))
def Inference(model, data):
    """Run *model* on *data* with onnxruntime on the CPU.

    *model* is a protobuf model object (anything with SerializeToString);
    *data* is the feed dict passed straight to InferenceSession.run.
    Returns the list of all model outputs.
    """
    session = ort.InferenceSession(model.SerializeToString(),
                                   providers=['CPUExecutionProvider'])
    return session.run(None, data)
def string_of_symbols(maxlen=100, vrblvl=0):
    """Return the list of symbol (variable) names from the symbol table.

    maxlen bounds the number of characters retrieved from the C side;
    vrblvl > 0 enables diagnostic printing.  Returns a list of strings.
    """
    if (vrblvl > 0):
        print('in string_of_symbols, maxlen :', maxlen)
    phc = get_phcfun()
    slen = pointer(c_int32(0))  # receives the number of characters written
    # 4 bytes per character: the C side packs the string as 32-bit ints,
    # decoded below by int4a2str.
    ssym = create_string_buffer(b'', (maxlen * 4))
    ccc = pointer(c_double(0.0))  # double argument, unused by this job
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> string_of_symbols calls phc', end='')
    # NOTE(review): job code 295 presumably selects the symbol-table query
    # of the C gateway -- confirm against the gateway's job dispatch table.
    retval = phc(295, slen, ssym, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
        print('number of characters :', slen[0])
    variables = int4a2str(ssym, (vrblvl > 0))
    # Only the first slen[0] characters are valid; names are space-separated.
    result = variables[0:slen[0]].split(' ')
    return result
class DimPlanner(DistributedGraphMixin):
    """Searches for a (tensor, pipeline, data) parallelism configuration.

    Enumerates factorizations tensor*pipe*data == total_devices (tensor
    size a power of two within one node), estimates costs for each with an
    MIP-based tensor-parallel planner, and keeps the feasible pipeline
    partition with the smallest tensor*pipe product.
    """

    def __init__(self, num_nodes=None, num_devices_per_node=None, tracer_backend: str='meta_fx', prop_mode: str='interpreter', use_fake_mode: bool=False, device_context=None):
        super().__init__(num_nodes=num_nodes, num_devices_per_node=num_devices_per_node, tracer_backend=tracer_backend, prop_mode=prop_mode, use_fake_mode=use_fake_mode, device_context=device_context)
        # MIP planner used to estimate per-node memory/flops/comm costs;
        # solver capped at 120 seconds.
        self.profiler = MIPTensorParallelPlanner(memory_bound=self.memory_bound, fp32_flops=self.fp32_flops, merge_nodes=True, solver='glpk', greedy_init=True, timelimit=120)

    def generate_sharding_plan(self, model_context, config=dict()):
        """Return (tensor_size, pipe_size, data_size, insert_before_nodes)
        for the cheapest feasible configuration found.

        insert_before_nodes lists the graph nodes at which pipeline-stage
        splits should be inserted (None when pipe_size == 1 or when no
        feasible partition was found).
        NOTE(review): mutable default `config=dict()` -- only read here,
        but `config=None` would be the safer idiom.
        """
        gap_value = config.get('gap_value', 0)
        # Deep-copy so the caller's model context is left untouched.
        mc = copy.deepcopy(model_context)
        mc.convert_to_loss_wrapper()
        optimizer = mc.create_optim()
        total_devices = (self.num_nodes * self.num_devices_per_node)
        # Candidate tensor-parallel sizes: powers of two up to one full node.
        tensor_sizes = [(2 ** i) for i in range(1, (int(np.log2(self.num_devices_per_node)) + 1))]
        combinations = [(t, p) for t in tensor_sizes for p in range(1, ((total_devices // t) + 1)) if ((total_devices % (t * p)) == 0)]
        # Try cheap model-parallel products (tensor * pipe) first.
        combinations.sort(key=(lambda x: (x[0] * x[1])))
        optimal_tensor_size = None
        optimal_pipe_size = None
        optimal_data_size = None
        optimal_partitions = None
        optimal_product = float('inf')
        for (tensor_size, pipe_size) in combinations:
            current_product = (tensor_size * pipe_size)
            # Once a feasible plan exists, any larger product cannot win.
            if (optimal_partitions and (current_product > optimal_product)):
                break
            data_size = (total_devices // current_product)
            parallel_config = {'ddp_size': data_size, 'chunks': pipe_size}
            (graph, sharding_specs, tensor_shapes) = self._trace_and_propagate(mc, config, strategy=None, parallel_config=parallel_config)
            # Representative process groups for each parallel dimension.
            all_pg_ranks = _get_pg_ranks([('tensor', tensor_size), ('pipe', pipe_size), ('data', data_size)], list(range(total_devices)), offset=0, total_size=total_devices)
            tp_ranks = all_pg_ranks['tensor'][0]
            pipe_ranks = all_pg_ranks['pipe'][0]
            tensor_topo = self.device_topo.get_physical_topology(tp_ranks)
            pipe_topo = self.device_topo.get_physical_topology(pipe_ranks)
            (estimated_node_memory_reqs, estimated_node_flops_cost, estimated_intra_node_comm_cost, estimated_edge_comm_cost, contracted_graph) = self.profiler.fake_profile(mc.model, graph, sharding_specs, tensor_shapes, tensor_topo, self.fp32_flops, optimizer=optimizer)
            # Rescale edge comm cost to the pipeline group's actual bandwidth.
            edge_cost_scale_factor = (pipe_topo.get_average_bandwidth() / self.device_topo.intra_node_bandwidth)
            estimated_edge_comm_cost *= edge_cost_scale_factor
            # NOTE(review): memory scaled by pipe_size/2 -- presumably an
            # activation-stashing heuristic; confirm against the profiler.
            estimated_node_memory_reqs *= (pipe_size / 2)
            partitions = tera_pipe_partition(len(estimated_node_memory_reqs), estimated_node_memory_reqs, estimated_node_flops_cost, estimated_intra_node_comm_cost, estimated_edge_comm_cost, pipe_size, self.memory_bound, gap_value=gap_value)
            if partitions:
                optimal_tensor_size = tensor_size
                optimal_pipe_size = pipe_size
                optimal_data_size = (total_devices // current_product)
                optimal_partitions = partitions
                optimal_product = current_product
            # Derive the nodes before which pipeline splits are inserted.
            # NOTE(review): if `combinations` is empty, insert_before_nodes
            # is unbound at the return below.
            if ((pipe_size != 1) and partitions):
                nodes = list(contracted_graph.nodes())
                insert_before_nodes = [nodes[(stage_range[(- 1)] + 1)] for stage_range in optimal_partitions[:(- 1)]]
            else:
                insert_before_nodes = None
        return (optimal_tensor_size, optimal_pipe_size, optimal_data_size, insert_before_nodes)
class install(_install):
    """Install command variant that redirects all installed files
    (including pure-Python modules) to the platform-specific directory."""

    def finalize_options(self):
        # Let the base command resolve its defaults first, then point both
        # install targets at the platlib location.
        _install.finalize_options(self)
        self.install_libbase = self.install_lib = self.install_platlib
def reduce_process(opts, output_queue, spool_length, out_file=None, file_size=0, file_compress=True):
    """Pull (page_num, text) pairs off *output_queue* and write them out in
    page order.

    Articles may arrive out of order, so they are buffered in *spool* until
    the next sequential page number shows up.  A falsy item on the queue
    terminates the loop.

    :param opts: options object, stored into the module-global `options`.
    :param output_queue: queue of (page_num, text) pairs from extractors.
    :param spool_length: shared value updated with the current spool size.
    :param out_file: output file name; falsy means write to stdout.
    :param file_size: max size per output file (passed to OutputSplitter).
    :param file_compress: whether to compress output files.
    """
    global options
    options = opts
    createLogger(options.quiet, options.debug, options.log_file)
    if out_file:
        nextFile = NextFile(out_file)
        output = OutputSplitter(nextFile, file_size, file_compress)
    else:
        output = sys.stdout.buffer
        if file_compress:
            # logging.warn is deprecated; logging.warning is the real API.
            logging.warning('writing to stdout, so no output compression (use an external tool)')
    interval_start = default_timer()
    spool = {}  # out-of-order pages waiting to be written: page_num -> text
    next_page = 0  # next sequential page number to emit
    while True:
        if (next_page in spool):
            output.write(spool.pop(next_page).encode('utf-8'))
            next_page += 1
            spool_length.value = len(spool)
            # Progress report every report_period articles.
            if ((next_page % report_period) == 0):
                interval_rate = (report_period / (default_timer() - interval_start))
                logging.info('Extracted %d articles (%.1f art/s)', next_page, interval_rate)
                interval_start = default_timer()
        else:
            pair = output_queue.get()
            if (not pair):
                break
            (page_num, text) = pair
            spool[page_num] = text
            spool_length.value = len(spool)
            if (len(spool) > 200):
                logging.debug('Collected %d, waiting: %d, %d', len(spool), next_page, (next_page == page_num))
    # Bug fix: the old guard `output != sys.stdout` was always true in the
    # stdout case too (output is sys.stdout.buffer, not sys.stdout), so it
    # closed the process's stdout buffer.  Only close files we opened.
    if out_file:
        output.close()
def softmax_layer(name, bottom, label='label', loss_weight=1):
    """Render the softmax-layer prototxt template.

    Reads templates/softmax_layer.txt and substitutes the _NAME_, _BOTTOM_,
    _LABEL_ and _LOSS_WEIGHT_ placeholders.

    :param name: layer name substituted for _NAME_.
    :param bottom: bottom blob name substituted for _BOTTOM_.
    :param label: label blob name substituted for _LABEL_.
    :param loss_weight: loss weight substituted for _LOSS_WEIGHT_.
    :return: the filled-in template text.
    """
    # Bug fix: use a context manager so the template file handle is closed
    # deterministically (the original left it to the garbage collector).
    with open('templates/softmax_layer.txt', 'r') as f:
        txt = f.read()
    txt = txt.replace('_NAME_', name)
    txt = txt.replace('_BOTTOM_', bottom)
    txt = txt.replace('_LABEL_', label)
    txt = txt.replace('_LOSS_WEIGHT_', str(loss_weight))
    return txt
def powerset(arr):
    """Return the power set of *arr* as a list of lists.

    Bug fix: the original returned only the subsets containing the first
    element (e.g. powerset([1]) == [[1]], missing []).  Each subset of the
    rest must appear both without and with the first element prepended.
    """
    if not arr:
        return [[]]
    first, *rest = arr
    rest_subsets = powerset(rest)
    # subsets without `first`, then subsets with `first` prepended
    return rest_subsets + [[first] + subset for subset in rest_subsets]
class ExecutionTraceGetter(object):
    """Thin holder that exposes a previously captured execution trace."""

    def __init__(self, trace_obj):
        # The trace is captured elsewhere and merely stored here.
        self.trace_obj = trace_obj

    def get(self) -> List[Tuple[(E.Expression, TensorValue)]]:
        """Return the stored trace object unchanged."""
        return self.trace_obj
class Scorer():
    """Score system summaries with LLM-likelihood metrics (OPT/GPT-2,
    FLAN-T5, or GPT-3), writing results into self.data[doc_id]['scores'].

    The data maps doc_id -> record with 'ref_summs', 'sys_summ' and a
    'scores' dict.  To keep GPT-3 costs down, __init__ subsamples the data
    to a fixed N = 2 documents with a fixed random seed.
    """

    def __init__(self, args=None):
        self.args = args
        self.device = self.args.device
        self.eval_asp = self.args.aspect  # aspect used by the gpt3_score branch
        self.data = read_pickle(self.args.file_path)
        # demos: per-aspect in-context examples; asp_dfs: aspect definitions.
        (self.demos, self.asp_dfs) = read_demos(self.args.demo_path)
        print('Since GPT3-based models are expensive, we can test them on a small number of samples first.')
        print('The default number of test samples is 2.')
        import random
        random.seed(2)  # fixed seed so the subsample is reproducible
        N = 2
        # NOTE(review): range(0, len-1) can never pick the last index --
        # confirm whether that exclusion is intended.
        idxs = random.sample(range(0, (len(self.data) - 1)), N)
        new_data = {idx: self.data[idx] for idx in idxs}
        self.data = new_data
        print('the num of evaluation samples: ', len(self.data))

    def save_data(self, path):
        """Pickle the (scored) data to *path*."""
        save_pickle(self.data, path)

    def demo_convert(self, demos, template):
        """Instantiate *template* with each demo's reference/system summary.

        Returns (refhyp_demos, hypref_demos): the template filled with
        XXXXX=reference, YYYYY=hypothesis, and the swapped direction.
        """
        refhyp_demos = []
        hypref_demos = []
        for demo in demos:
            src_line = demo['src'].strip()  # NOTE(review): unused
            ref_line = demo['ref_summ'].strip()
            hyp_line = demo['sys_summ'].strip()
            polar = demo['polarity'].strip()  # NOTE(review): unused
            refhyp_demo = template.replace('XXXXX', ref_line).replace('YYYYY', hyp_line)
            refhyp_demos.append(refhyp_demo)
            hypref_demo = template.replace('XXXXX', hyp_line).replace('YYYYY', ref_line)
            hypref_demos.append(hypref_demo)
        return (refhyp_demos, hypref_demos)

    def score(self, metrics):
        """Compute each metric in *metrics* for every document in self.data.

        Stores ref_hypo / hypo_ref / avg_f / harm_f scores (max over
        references) under per-metric (and, with use_ist, per-aspect) keys.
        Raises NotImplementedError for unknown metric names.
        """
        for metric_name in metrics:
            if (metric_name in ['opt125m_score', 'opt350m_score', 'opt1_3B_score', 'opt2_7B_score', 'opt6_7B_score', 'opt13B_score', 'opt30B_score', 'opt66B_score', 'gpt2_medium_score', 'gpt2_large_score', 'gpt2_xl_score', 'gptJ6B_score']):
                from opt_score import OPTScorer
                eval_asps = ['informativeness', 'naturalness', 'quality']
                # NOTE(review): 'opt30B_score' is accepted above but has no
                # checkpoint entry here -- would raise KeyError if requested.
                metric2checkpoint = {'opt125m_score': 'facebook/opt-125m', 'opt350m_score': 'facebook/opt-350m', 'opt1_3B_score': 'facebook/opt-1.3b', 'opt2_7B_score': 'facebook/opt-2.7b', 'opt6_7B_score': 'facebook/opt-6.7b', 'opt13B_score': 'facebook/opt-13b', 'opt66B_score': 'facebook/opt-66b', 'gpt2_medium_score': 'gpt2-medium', 'gpt2_large_score': 'gpt2-large', 'gpt2_xl_score': 'gpt2-xl', 'gptJ6B_score': 'EleutherAI/gpt-j-6B'}
                print('metric_name: ', metric_name)
                checkpoint = metric2checkpoint[metric_name]
                opt_scorer = OPTScorer(device=self.device, checkpoint=checkpoint)
                print(f'OPTScore setup finished. Begin calculating OPTScore.')
                start = time.time()
                for e_asp in eval_asps:
                    print('num of examples: ', len(self.data))
                    demo = self.demos[e_asp]
                    asp_df = self.asp_dfs[e_asp]
                    asp_df = asp_df.strip().replace(':', '. ')
                    print('demo: ', demo)
                    print('asp_df: ', asp_df)
                    # [0] is the demo template, [1] the scoring prompt text.
                    refhyp_templates = ['XXXXX In other words , YYYYY', ' In other words , ']
                    template = refhyp_templates[0]
                    (refhyp_demos, hypref_demos) = self.demo_convert(demo, template)
                    for doc_id in self.data:
                        print('doc_id: ', doc_id)
                        ref_summs = self.data[doc_id]['ref_summs']
                        ref_summs = [add_dot(detokenize(line)) for line in ref_summs]
                        sys_summ = add_dot(detokenize(self.data[doc_id]['sys_summ']))
                        # NOTE(review): no branch covers (not use_ist and
                        # use_demo); `prefix` would then be stale/undefined.
                        if (self.args.use_ist and self.args.use_demo):
                            refhyp_demos_str = '\n'.join(refhyp_demos)
                            prefix = (((asp_df + '\n') + refhyp_demos_str) + '\n')
                        elif (self.args.use_ist and (not self.args.use_demo)):
                            prefix = (asp_df + '\n')
                        elif ((not self.args.use_ist) and (not self.args.use_demo)):
                            prefix = ''
                        ref_summs1 = [(prefix + line) for line in ref_summs]
                        ref_hypo_scores = np.array(opt_scorer.score(ref_summs1, ([sys_summ] * len(ref_summs)), prompt_text=refhyp_templates[1], batch_size=1))
                        if (self.args.use_ist and self.args.use_demo):
                            hypref_demos_str = '\n'.join(hypref_demos)
                            # NOTE(review): uses refhyp_demos_str although
                            # hypref_demos_str was just built -- looks like a
                            # copy-paste bug; behavior preserved as-is.
                            prefix = (((asp_df + '\n') + refhyp_demos_str) + '\n')
                        elif (self.args.use_ist and (not self.args.use_demo)):
                            prefix = (asp_df + '\n')
                        elif ((not self.args.use_ist) and (not self.args.use_demo)):
                            prefix = ''
                        sys_summ1 = ([sys_summ] * len(ref_summs))
                        sys_summ1 = [(prefix + line) for line in sys_summ1]
                        hypo_ref_scores = np.array(opt_scorer.score(sys_summ1, ref_summs, prompt_text=refhyp_templates[1], batch_size=1))
                        # Aggregate over references: best single-direction and
                        # best combined (arithmetic / harmonic mean) scores.
                        ref_hypo = ref_hypo_scores.max()
                        hypo_ref = hypo_ref_scores.max()
                        avg_f = (0.5 * (ref_hypo_scores + hypo_ref_scores)).max()
                        harm_f = ((ref_hypo_scores * hypo_ref_scores) / (ref_hypo_scores + hypo_ref_scores)).max()
                        print('ref_hypo: ', ref_hypo)
                        print('hypo_ref: ', hypo_ref)
                        print('avg_f: ', avg_f)
                        print('harm_f: ', harm_f)
                        if self.args.use_ist:
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_ref_hypo'] = ref_hypo
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_hypo_ref'] = hypo_ref
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_avg_f'] = avg_f
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_harm_f'] = harm_f
                        else:
                            self.data[doc_id]['scores'][f'{metric_name}_ref_hypo'] = ref_hypo
                            self.data[doc_id]['scores'][f'{metric_name}_hypo_ref'] = hypo_ref
                            self.data[doc_id]['scores'][f'{metric_name}_avg_f'] = avg_f
                            self.data[doc_id]['scores'][f'{metric_name}_harm_f'] = harm_f
                print(f'Finished calculating OPTScore, time passed {(time.time() - start)}s.')
                opt_scorer = None  # release the model
            elif (metric_name in ['flan_small_score', 'flan_base_score', 'flan_large_score', 'flan_xl_score', 'flan_xxl_score']):
                from flan_score import FLANScorer
                eval_asps = ['informativeness', 'naturalness', 'quality']
                metric2checkpoint = {'flan_small_score': 'google/flan-t5-small', 'flan_base_score': 'google/flan-t5-base', 'flan_large_score': 'google/flan-t5-large', 'flan_xl_score': 'google/flan-t5-xl', 'flan_xxl_score': 'google/flan-t5-xxl'}
                print('metric_name: ', metric_name)
                checkpoint = metric2checkpoint[metric_name]
                flan_scorer = FLANScorer(device=self.device, checkpoint=checkpoint)
                print(f'FLANScorer setup finished. Begin calculating FLANScorer.')
                start = time.time()
                for e_asp in eval_asps:
                    print('num of examples: ', len(self.data))
                    demo = self.demos[e_asp]
                    asp_df = self.asp_dfs[e_asp]
                    asp_df = asp_df.strip().replace(':', '. ')
                    print('demo: ', demo)
                    print('asp_df: ', asp_df)
                    refhyp_templates = ['XXXXX In other words , YYYYY']
                    template = refhyp_templates[0]
                    (refhyp_demos, hypref_demos) = self.demo_convert(demo, template)
                    for doc_id in self.data:
                        print('doc_id: ', doc_id)
                        ref_summs = self.data[doc_id]['ref_summs']
                        ref_summs = [add_dot(detokenize(line)) for line in ref_summs]
                        sys_summ = add_dot(detokenize(self.data[doc_id]['sys_summ']))
                        if (self.args.use_ist and self.args.use_demo):
                            refhyp_demos_str = '\n'.join(refhyp_demos)
                            prefix = (((asp_df + '\n') + refhyp_demos_str) + '\n')
                        elif (self.args.use_ist and (not self.args.use_demo)):
                            prefix = (asp_df + '\n')
                        elif ((not self.args.use_ist) and (not self.args.use_demo)):
                            prefix = ''
                        ref_summs1 = [(prefix + template.replace('XXXXX', line).replace('YYYYY', '')) for line in ref_summs]
                        ref_hypo_scores = np.array(flan_scorer.score(ref_summs1, ([sys_summ] * len(ref_summs)), batch_size=1))
                        if (self.args.use_ist and self.args.use_demo):
                            hypref_demos_str = '\n'.join(hypref_demos)
                            # NOTE(review): same apparent copy-paste as the OPT
                            # branch -- refhyp_demos_str used, not hypref_demos_str.
                            prefix = (((asp_df + '\n') + refhyp_demos_str) + '\n')
                        elif (self.args.use_ist and (not self.args.use_demo)):
                            prefix = (asp_df + '\n')
                        elif ((not self.args.use_ist) and (not self.args.use_demo)):
                            prefix = ''
                        sys_summ1 = ([sys_summ] * len(ref_summs))
                        sys_summ1 = [(prefix + template.replace('XXXXX', line).replace('YYYYY', '')) for line in sys_summ1]
                        hypo_ref_scores = np.array(flan_scorer.score(sys_summ1, ref_summs, batch_size=1))
                        ref_hypo = ref_hypo_scores.max()
                        hypo_ref = hypo_ref_scores.max()
                        avg_f = (0.5 * (ref_hypo_scores + hypo_ref_scores)).max()
                        harm_f = ((ref_hypo_scores * hypo_ref_scores) / (ref_hypo_scores + hypo_ref_scores)).max()
                        print('ref_hypo: ', ref_hypo)
                        print('hypo_ref: ', hypo_ref)
                        print('avg_f: ', avg_f)
                        print('harm_f: ', harm_f)
                        if self.args.use_ist:
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_ref_hypo'] = ref_hypo
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_hypo_ref'] = hypo_ref
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_avg_f'] = avg_f
                            self.data[doc_id]['scores'][f'{metric_name}_{e_asp}_harm_f'] = harm_f
                        else:
                            self.data[doc_id]['scores'][f'{metric_name}_ref_hypo'] = ref_hypo
                            self.data[doc_id]['scores'][f'{metric_name}_hypo_ref'] = hypo_ref
                            self.data[doc_id]['scores'][f'{metric_name}_avg_f'] = avg_f
                            self.data[doc_id]['scores'][f'{metric_name}_harm_f'] = harm_f
                print(f'Finished calculating FLANScorer, time passed {(time.time() - start)}s.')
                flan_scorer = None  # release the model
            elif (metric_name == 'gpt3_score'):
                # GPT-3 branch scores only the single aspect from __init__.
                print(f'Perform the gpt3_score...')
                start = time.time()
                print('num of examples: ', len(self.data))
                demo = self.demos[self.eval_asp]
                asp_df = self.asp_dfs[self.eval_asp]
                print('demo: ', demo)
                print('asp_df: ', asp_df)
                refhyp_templates = ['XXXXX In other words , \nYYYYY']
                template = refhyp_templates[0]
                (refhyp_demos, hypref_demos) = self.demo_convert(demo, template)
                for (samp_id, doc_id) in enumerate(self.data):
                    print('samp_id: ', samp_id)
                    ref_summs = self.data[doc_id]['ref_summs']
                    ref_summs = [detokenize(line) for line in ref_summs]
                    sys_summ = detokenize(self.data[doc_id]['sys_summ'])
                    ref_hypo_scores = []
                    hypo_ref_scores = []
                    # Cache scores for duplicate references to save API calls.
                    keep_seen_refsumm_score = {}
                    for (k, ref_summ) in enumerate(ref_summs):
                        print()
                        print(('aspect: %s; samp_id: %d; ref_summ_id/total_ref_summ: %d/%d' % (self.eval_asp, samp_id, k, len(ref_summs))))
                        ref_summ = add_dot(ref_summ)
                        sys_summ = add_dot(sys_summ)
                        if (ref_summ in keep_seen_refsumm_score):
                            ref_hypo_score = keep_seen_refsumm_score[ref_summ][0]
                            hypo_ref_score = keep_seen_refsumm_score[ref_summ][1]
                            ref_hypo_scores.append(ref_hypo_score)
                            hypo_ref_scores.append(hypo_ref_score)
                        else:
                            if (self.args.use_ist and self.args.use_demo):
                                refhyp_demos_str = '\n\n'.join(refhyp_demos)
                                prefix = (((asp_df + '\n\n') + refhyp_demos_str) + '\n\n')
                            elif (self.args.use_ist and (not self.args.use_demo)):
                                prefix = (asp_df + '\n')
                            elif ((not self.args.use_ist) and (not self.args.use_demo)):
                                prefix = ''
                            input1 = template.replace('XXXXX', ref_summ).replace('YYYYY', '')
                            input1 = (prefix + input1)
                            output1 = lower_check(sys_summ)
                            ref_hypo_score = gpt3score(input1, output1, self.args.gpt3model, self.args.api_key)
                            ref_hypo_scores.append(ref_hypo_score)
                            if (self.args.use_ist and self.args.use_demo):
                                hypref_demos_str = '\n\n'.join(hypref_demos)
                                # Here the hypref string IS used (unlike the
                                # OPT/FLAN branches above).
                                prefix = (((asp_df + '\n\n') + hypref_demos_str) + '\n\n')
                            elif (self.args.use_ist and (not self.args.use_demo)):
                                prefix = (asp_df + '\n')
                            elif ((not self.args.use_ist) and (not self.args.use_demo)):
                                prefix = ''
                            input2 = template.replace('XXXXX', sys_summ).replace('YYYYY', '')
                            input2 = (prefix + input2)
                            output2 = lower_check(ref_summ)
                            hypo_ref_score = gpt3score(input2, output2, self.args.gpt3model, self.args.api_key)
                            hypo_ref_scores.append(hypo_ref_score)
                            keep_seen_refsumm_score[ref_summ] = [ref_hypo_score, hypo_ref_score]
                            print('keep_seen_refsumm_score: ', keep_seen_refsumm_score)
                    print('len(ref_hypo_scores): ', len(ref_hypo_scores))
                    print('len(hypo_ref_scores): ', len(hypo_ref_scores))
                    ref_hypo_scores = np.array(ref_hypo_scores)
                    hypo_ref_scores = np.array(hypo_ref_scores)
                    ref_hypo = ref_hypo_scores.max()
                    hypo_ref = hypo_ref_scores.max()
                    avg_f = (0.5 * (ref_hypo_scores + hypo_ref_scores)).max()
                    harm_f = ((ref_hypo_scores * hypo_ref_scores) / (ref_hypo_scores + hypo_ref_scores)).max()
                    print('ref_hypo: ', ref_hypo)
                    print('hypo_ref: ', hypo_ref)
                    print('avg_f: ', avg_f)
                    print('harm_f: ', harm_f)
                    if self.args.use_ist:
                        self.data[doc_id]['scores'][f'{metric_name}_{self.eval_asp}_ref_hypo'] = ref_hypo
                        self.data[doc_id]['scores'][f'{metric_name}_{self.eval_asp}_hypo_ref'] = hypo_ref
                        self.data[doc_id]['scores'][f'{metric_name}_{self.eval_asp}_avg_f'] = avg_f
                        self.data[doc_id]['scores'][f'{metric_name}_{self.eval_asp}_harm_f'] = harm_f
                    else:
                        self.data[doc_id]['scores'][f'{metric_name}_ref_hypo'] = ref_hypo
                        self.data[doc_id]['scores'][f'{metric_name}_hypo_ref'] = hypo_ref
                        self.data[doc_id]['scores'][f'{metric_name}_avg_f'] = avg_f
                        self.data[doc_id]['scores'][f'{metric_name}_harm_f'] = harm_f
                print(f'Finished calculating gpt3_score, time passed {(time.time() - start)}s.')
            else:
                raise NotImplementedError
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2):
    """Focal loss on raw logits.

    :param inputs: logits of shape (N, num_classes-like).
    :param targets: same-shape tensor of 0/1 targets.
    :param num_boxes: normalizer for the final sum.
    :param alpha: class-balancing weight for positives (skipped if < 0).
    :param gamma: focusing exponent that down-weights easy examples.
    :return: scalar loss = mean over dim 1, summed over rows, / num_boxes.
    """
    p = inputs.sigmoid()
    bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
    # probability the model assigns to the true class of each entry
    pt = p * targets + (1 - p) * (1 - targets)
    focal = bce * (1 - pt) ** gamma
    if alpha >= 0:
        # alpha for positives, (1 - alpha) for negatives
        focal = (alpha * targets + (1 - alpha) * (1 - targets)) * focal
    return focal.mean(1).sum() / num_boxes
def parse_args():
    """Build and parse the command-line arguments for SSD training/eval.

    Returns the parsed argparse.Namespace.
    """
    p = ArgumentParser(description='Train Single Shot MultiBox Detector on COCO')
    # --- data & checkpoints ---
    p.add_argument('--data', '-d', type=str, default='/coco', help='path to test and training data files')
    p.add_argument('--pretrained-backbone', type=str, default=None, help='path to pretrained backbone weights file, default is to get it from online torchvision repository')
    p.add_argument('--checkpoint', type=str, default=None, help='path to model checkpoint file')
    p.add_argument('--no-save', action='store_true', help='save model checkpoints')
    # --- training schedule ---
    p.add_argument('--epochs', '-e', type=int, default=800, help='number of epochs for training')
    p.add_argument('--batch-size', '-b', type=int, default=32, help='number of examples for each training iteration')
    p.add_argument('--val-batch-size', type=int, default=32, help='number of examples for each validation iteration (defaults to --batch-size)')
    p.add_argument('--no-cuda', action='store_true', help='use available GPUs')
    p.add_argument('--seed', '-s', type=int, default=random.SystemRandom().randint(0, ((2 ** 32) - 1)), help='manually set random seed for torch')
    p.add_argument('--threshold', '-t', type=float, default=0.23, help='stop training early at threshold')
    p.add_argument('--iteration', type=int, default=0, help='iteration to start from')
    p.add_argument('--val-interval', type=int, default=5, help='epoch interval for validation in addition to --val-epochs.')
    p.add_argument('--val-epochs', nargs='*', type=int, default=[], help='epochs at which to evaluate in addition to --val-interval')
    p.add_argument('--batch-splits', type=int, default=1, help='Split batch to N steps (gradient accumulation)')
    # --- optimizer ---
    p.add_argument('--lr-decay-schedule', nargs='*', type=int, default=[40, 50], help='epochs at which to decay the learning rate')
    p.add_argument('--warmup', type=float, default=None, help='how long the learning rate will be warmed up in fraction of epochs')
    p.add_argument('--warmup-factor', type=int, default=0, help='mlperf rule parameter for controlling warmup curve')
    p.add_argument('--lr', type=float, default=_BASE_LR, help='base learning rate')
    p.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay factor')
    # --- augmentation / eval details ---
    p.add_argument('--num-cropping-iterations', type=int, default=1, help='cropping retries in augmentation pipeline, default 1, other legal value is 50')
    p.add_argument('--nms-valid-thresh', type=float, default=0.05, help='in eval, filter input boxes to those with score greater than nms_valid_thresh.')
    p.add_argument('--log-interval', type=int, default=100, help='Logging mini-batch interval.')
    p.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), type=int, help="Used for multi-process training. Can either be manually set or automatically set by using 'python -m multiproc'.")
    # --- quantization / benchmarking (Neural Compressor) ---
    p.add_argument('--tune', action='store_true', help='tune int8 model')
    p.add_argument('--benchmark', action='store_true', help='benchmark')
    p.add_argument('--int8', action='store_true', help='int8')
    p.add_argument('--accuracy', action='store_true', help='enable accuracy pass')
    p.add_argument('--tuned_checkpoint', default='./saved_results', type=str, metavar='PATH', help='path to checkpoint tuned by Neural Compressor (default: ./)')
    p.add_argument('--warmup-inference', type=int, default=10, help='warmup for latency')
    p.add_argument('--inference-iters', type=int, default=100, help='number of iterations for inference')
    return p.parse_args()
class VQModel(pl.LightningModule):
    """Autoencoder with a vector-quantized latent space.

    Pipeline: Encoder -> 1x1 conv to embed_dim -> VectorQuantizer -> 1x1
    conv back to z_channels -> Decoder.  Trained adversarially: optimizer 0
    updates the autoencoder, optimizer 1 the discriminator inside self.loss.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, remap=None, sane_index_shape=False):
        # NOTE(review): mutable default `ignore_keys=[]` -- only read here,
        # but `None` would be the safer idiom.
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        self.ckpt_path = ckpt_path
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        # NOTE(review): duplicate assignment -- image_key was set above.
        self.image_key = image_key
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            # Random projection used by to_rgb() for segmentation inputs.
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a checkpoint's state_dict, dropping any key
        that starts with one of `ignore_keys` (non-strict load)."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f'Restored from {path}')

    def encode(self, x):
        """Encode images to quantized latents: (quant, emb_loss, info)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        (quant, emb_loss, info) = self.quantize(h)
        return (quant, emb_loss, info)

    def decode(self, quant):
        """Decode quantized latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input):
        """Full autoencode pass: (reconstruction, quantization loss)."""
        (quant, diff, _) = self.encode(input)
        dec = self.decode(quant)
        return (dec, diff)

    def get_input(self, batch, k):
        """Fetch batch[k] and convert channels-last to float NCHW.

        A 3-D tensor gets a trailing channel axis added first.
        """
        x = batch[k]
        if (len(x.shape) == 3):
            x = x[(..., None)]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()

    def training_step(self, batch, batch_idx, optimizer_idx):
        """GAN-style alternating step: idx 0 trains the autoencoder,
        idx 1 trains the discriminator."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        if (optimizer_idx == 0):
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if (optimizer_idx == 1):
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log('train/discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Compute and log validation reconstruction/AE losses.

        NOTE(review): discloss/log_dict_disc are computed but never logged,
        and the return value is the bound `log_dict` method itself rather
        than a dict -- looks unintended, preserved as-is.
        """
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
        rec_loss = log_dict_ae['val/rec_loss']
        self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        return self.log_dict

    def test_step(self, batch, batch_idx):
        """Save reconstructions as PNGs and log LPIPS (and OCR loss if the
        loss module provides it)."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (_, log_dict) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='test')
        # Map reconstructions from [-1, 1] to uint8 [0, 255] HWC images.
        im_rec = xrec.detach().cpu()
        im_rec = torch.clamp(im_rec, (- 1.0), 1.0)
        im_rec = ((im_rec + 1.0) / 2.0)
        im_rec = im_rec.transpose(1, 2).transpose(2, 3)
        im_rec = im_rec.numpy()
        im_rec = (im_rec * 255).astype(np.uint8)
        for k in range(im_rec.shape[0]):
            filename = f'reconstruction_batch_{batch_idx}_id_{k}.png'
            path = os.path.join(self.trainer.logdir, 'evaluation', filename)
            im = im_rec[k]
            Image.fromarray(im).save(path)
        LPIPS = log_dict['test/p_loss']
        try:
            OCR_loss = log_dict['test/p_ocr_loss']
        except:
            # OCR loss is optional; default to 0 when the key is absent.
            OCR_loss = 0.0
        output = dict({'LPIPS': LPIPS, 'OCR_loss': OCR_loss})
        self.log_dict(output)
        return output

    def configure_optimizers(self):
        """Two Adam optimizers: one for all AE parts, one for the
        discriminator inside self.loss."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quantize.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        """Weight of the decoder's output conv (used for adaptive GAN weight)."""
        return self.decoder.conv_out.weight

    def log_images(self, batch, **kwargs):
        """Return {'inputs', 'reconstructions'}, RGB-projected if needed."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        (xrec, _) = self(x)
        if (x.shape[1] > 3):
            # Segmentation-style inputs: project to 3 channels for display.
            assert (xrec.shape[1] > 3)
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log['inputs'] = x
        log['reconstructions'] = xrec
        return log

    def to_rgb(self, x):
        """Project a segmentation tensor to RGB via a fixed random conv,
        then rescale to [-1, 1]."""
        assert (self.image_key == 'segmentation')
        if (not hasattr(self, 'colorize')):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
        return x
def main(dst):
    """Copy the repository's API data splits into *dst* and remove the file
    named by FILE from the copied tree.

    Existing files in *dst* are overwritten (dirs_exist_ok=True).
    """
    print(f'-> Copying splits to "{dst}"...')
    splits_dir = REPO_ROOT / 'api/data/splits'
    shutil.copytree(splits_dir, dst, dirs_exist_ok=True)
    # The copied tree includes FILE, which must not ship in the destination.
    (dst / FILE.name).unlink()
class FairseqBMUF(FairseqOptimizer):
    """Block-wise Model Update Filtering (BMUF) wrapper around an optimizer.

    Each worker runs `global_sync_iter` local optimizer steps, then all
    workers average their block updates and apply them with block momentum
    (optionally Nesterov-style, `use_nbm`).  For the first
    `warmup_iterations` updates the parameters are simply broadcast from
    rank 0.

    Bug fix: the source had bare `_grad()` statements before
    `_reset_local_data`, `_calc_grad` and `_update_global_model` -- mangled
    `@torch.no_grad()` decorators that made the class raise NameError at
    definition time.  They are restored below.
    """

    def __init__(self, cfg: FairseqBMUFConfig, optimizer):
        super().__init__(cfg)
        self._optimizer = optimizer
        self._num_updates = 0
        self.sync_iter = cfg.global_sync_iter
        self.block_momentum = cfg.block_momentum
        self.block_lr = cfg.block_lr
        self._reset_local_data()
        self.warmup_iteration = cfg.warmup_iterations
        self.use_nbm = cfg.use_nbm
        # Snapshot so non-averaging warmup syncs can restore optimizer state.
        self.initial_state = self._optimizer.state_dict()
        self.average_sync = self.cfg.average_sync
        self.world_size = self.cfg.distributed_world_size

    @staticmethod
    def add_args(parser):
        """Add BMUF-specific arguments to *parser*.

        Declared static: it takes only `parser`, so as a plain method the
        instance would have been passed as the parser.
        """
        gen_parser_from_dataclass(parser, FairseqBMUFConfig())

    @property
    def optimizer(self):
        """The wrapped optimizer's underlying torch optimizer."""
        return self._optimizer.optimizer

    @property
    def optimizer_config(self):
        """The wrapped optimizer's configuration."""
        return self._optimizer.optimizer_config

    def get_lr(self):
        """Return the current learning rate of the wrapped optimizer."""
        return self._optimizer.get_lr()

    def set_lr(self, lr):
        """Set the learning rate on the wrapped optimizer."""
        self._optimizer.set_lr(lr)

    def state_dict(self):
        return self._optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        self._optimizer.load_state_dict(state_dict, optimizer_overrides)
        # Re-snapshot so warmup resets restore the freshly loaded state.
        self.initial_state = self._optimizer.state_dict()

    def multiply_grads(self, c):
        """Multiply all gradients by the constant *c*."""
        self._optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clip gradient norm; delegates to the wrapped optimizer."""
        return self._optimizer.clip_grad_norm(max_norm, aggregate_norm_fn)

    def average_params(self):
        self._optimizer.average_params()

    def _block_sync(self):
        # Single-GPU training: nothing to synchronize.
        if (self.world_size <= 1):
            return
        if (self.block_momentum != 0):
            self._calc_grad()
        self._avg_grad_from_all_gpus()
        if (self.block_momentum != 0):
            self._update_global_model()
        if self.average_sync:
            self.average_params()

    def _is_warmup_end(self):
        # True exactly once, on the last warmup update.
        if (self.get_num_updates() == self.warmup_iteration):
            return True
        return False

    def _is_bmuf_iter(self):
        # True every sync_iter updates after warmup has finished.
        if ((self.get_num_updates() > self.warmup_iteration) and ((self.get_num_updates() % self.sync_iter) == 0)):
            return True
        return False

    def _warmup_sync(self, root_rank=0):
        if (self.world_size <= 1):
            return
        # Broadcast rank 0's parameters to everyone.
        for param in self.params:
            dist.broadcast(param.data, src=root_rank)
        if self.average_sync:
            self._optimizer.average_params()
        else:
            # Reset the optimizer and the BMUF state to the initial snapshot.
            self._optimizer.load_state_dict(self.initial_state)
            self._reset_local_data()

    def step(self, closure=None):
        """Perform one optimizer step, then sync if the schedule says so."""
        self._optimizer.step(closure)
        self.set_num_updates((self.get_num_updates() + 1))
        if self._is_warmup_end():
            self._warmup_sync()
        elif self._is_bmuf_iter():
            self._block_sync()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def get_num_updates(self):
        """Return the number of parameter updates performed so far."""
        return self._num_updates

    def set_num_updates(self, num_updates):
        self._num_updates = num_updates

    @torch.no_grad()
    def _reset_local_data(self):
        # Per-parameter BMUF state: global copy, momentum buffer, block grad.
        self.global_params = [torch.zeros_like(p.data) for p in self.params]
        self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        for (param, global_param) in zip(self.params, self.global_params):
            global_param.copy_(param.data)

    @torch.no_grad()
    def _calc_grad(self):
        # Block "gradient" = drift of the local model from the global copy.
        for (index, (param, global_param)) in enumerate(zip(self.params, self.global_params)):
            self.grads[index] = (global_param - param.data)

    def _avg_grad_from_all_gpus(self):
        for (index, param) in enumerate(self.params):
            sync_para = (param.data if (self.block_momentum == 0) else self.grads[index])
            # Divide before the SUM all-reduce == averaging across workers.
            sync_para /= float(dist.get_world_size())
            dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)

    @torch.no_grad()
    def _update_global_model(self):
        for (index, (param, global_param, smoothed_grad, grad)) in enumerate(zip(self.params, self.global_params, self.smoothed_grads, self.grads)):
            # smoothed_grad = momentum * previous + block_lr * averaged drift
            smoothed_grad = ((self.block_momentum * smoothed_grad) + (self.block_lr * grad))
            param.data.copy_((global_param - smoothed_grad))
            if self.use_nbm:
                # Nesterov-style block momentum lookahead.
                param.data.copy_((param.data - (self.block_momentum * smoothed_grad)))
            self.smoothed_grads[index] = smoothed_grad
            global_param.copy_(param.data)
def sMDAPE(y_true: 'ndarray', y_pred: 'ndarray', multioutput: str='raw_values') -> Union[(float64, 'ndarray')]:
    """Symmetric median absolute percentage error (in percent).

    With multioutput='raw_values' returns per-output errors reshaped to the
    original output shape; otherwise returns their mean. EPSILON guards the
    denominator against division by zero.
    """
    (y_true, y_pred, original_shape) = _standardize_input(y_true, y_pred, multioutput)
    abs_err = np.abs(y_true - y_pred)
    scale = np.abs(y_true) + np.abs(y_pred) + EPSILON
    output_errors = np.median(100 * abs_err / scale, axis=0)
    if multioutput == 'raw_values':
        return output_errors.reshape(original_shape)
    return np.mean(output_errors)
class CollectVars(TraverseAction):
    """Traverse 'content'/'next' edges and fold node declarations into `hn`.

    Accumulates pointer/var/const declarations from every visited Node and
    marks the header as math-requiring when any node requires math.
    """
    hn: CHeaderNode

    def __init__(self, hn: CHeaderNode):
        super().__init__()
        self.hn = hn
        self.traverse_edges = ['content', 'next']

    def _pre_action(self, edge) -> bool:
        target = edge.target
        if issubclass(type(target), Node):
            if target.math_required:
                self.hn.math_required = True
            # Fold every declaration list of the node into the header node.
            for attr in ('pointer_decls', 'var_decls', 'const_decls'):
                getattr(self.hn, attr).extend(getattr(target, attr))
        # Always continue the traversal.
        return True
class ResNet(nn.Module):
    """CIFAR-style 3-stage ResNet with feature hooks for distillation.

    depth must be 6n+2 (BasicBlock) or 9n+2 (Bottleneck); num_filters gives
    the widths [stem, stage1, stage2, stage3]. forward() can return
    intermediate features (is_feat) or classify externally supplied features
    (feat_s) — used by knowledge-distillation pipelines.
    """

    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        super(ResNet, self).__init__()
        # Derive the per-stage block count n from the depth formula.
        if (block_name.lower() == 'basicblock'):
            assert (((depth - 2) % 6) == 0), 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = ((depth - 2) // 6)
            block = BasicBlock
        elif (block_name.lower() == 'bottleneck'):
            assert (((depth - 2) % 9) == 0), 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = ((depth - 2) // 9)
            block = Bottleneck
        else:
            # NOTE(review): "shoule" typo preserved from the original message.
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        # Stem: 3x3 conv keeping spatial size (CIFAR-sized inputs).
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        # 8x8 pooling assumes 32x32 input downsampled twice — TODO confirm.
        self.avgpool = nn.AvgPool2d(8)
        self.feat_dim = (num_filters[3] * block.expansion)
        self.fc = nn.Linear((num_filters[3] * block.expansion), num_classes)
        # Standard He init for convs, unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks, downsampling if needed."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == (blocks - 1))))
        return nn.Sequential(*layers)

    def get_feat_modules(self):
        """Return the feature-producing modules, in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last BN of each stage (the one preceding the final ReLU)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[(- 1)].bn3
            bn2 = self.layer2[(- 1)].bn3
            bn3 = self.layer3[(- 1)].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[(- 1)].bn2
            bn2 = self.layer2[(- 1)].bn2
            bn3 = self.layer3[(- 1)].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]

    def forward(self, x, is_feat=False, preact=False, feat_s=None, feat_t=None):
        """Classify x, or classify precomputed features when feat_s is given.

        NOTE(review): feat_t is accepted but never used. Each stage is
        unpacked as (out, pre-activation) — assumes the custom
        BasicBlock/Bottleneck return tuples; confirm against their code.
        """
        if (not (feat_s is None)):
            # Bypass the backbone: pool + classify external features.
            new_feat = feat_s
            x = F.avg_pool2d(new_feat, new_feat.size(2))
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
            return x
        else:
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            f0 = x
            (x, f1_pre) = self.layer1(x)
            f1 = x
            (x, f2_pre) = self.layer2(x)
            f2 = x
            (x, f3_pre) = self.layer3(x)
            f3 = x
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            f4 = x
            x = self.fc(x)
            if is_feat:
                # preact selects pre-activation features for stages 1-3.
                if preact:
                    return ([f0, f1_pre, f2_pre, f3_pre, f4], x)
                else:
                    return ([f0, f1, f2, f3, f4], x)
            else:
                return x

    def forward4(self, feat_s=None):
        """Pool and classify precomputed stage-3 features."""
        x = F.avg_pool2d(feat_s, feat_s.size(2))
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
class catbAbI(data.Dataset):
    """Concatenated-bAbI dataset yielding (tokens, targets, task-id, length).

    Loads the 10k (large=True) or 1k split file for `partition`, plus the
    shared pickled vocabulary. In ra_mode the targets are padded out
    everywhere except at '?' positions, so loss applies only where an answer
    is expected.
    """

    def __init__(self, partition, whitelist, ra_mode, large=True, folder=DATA_PATH):
        self.partition = partition
        self.whitelist = whitelist
        self.ra_mode = ra_mode
        template = catbAbI10k_TEMPLATE if large else catbAbI1k_TEMPLATE
        self.fp = os.path.join(folder, template.format(partition))
        with open(os.path.join(folder, 'vocab.pkl'), 'rb') as fh:
            self.word2idx, self.idx2word = pickle.load(fh)
        self.samples = read_samples(self.fp, self.word2idx, self.whitelist)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        tokens, targets, task = self.samples[index]
        tokens = torch.tensor(tokens).long()
        targets = torch.tensor(targets).long()
        task = torch.tensor(task).long()
        if self.ra_mode:
            # Mask every target that is not at a question-mark position.
            non_question = tokens != self.word2idx['?']
            targets[non_question] = self.word2idx[PAD_STR]
        return (tokens, targets, task, len(tokens))
def translate_texts(dataset: DatasetDict, texts: Dict[(str, Dict[(str, List[str])])], translate_args: Dict[(str, Any)], dataset_args: Dict[(str, Any)]) -> None:
    """Translate the configured fields of each dataset config and save them.

    For every config: dump the split to a dict, translate each listed field
    from texts[config][field], post-process with extract_translations, then
    persist via save_file. NOTE: mutates translate_args['source_lang'] per
    config.
    """
    translations = {}
    for config in dataset_args['dataset_configs']:
        translations[config] = dataset[config].to_dict()
        # The translator needs the source language code of this config.
        translate_args['source_lang'] = dataset_args['lang_codes'][config]
        print(f'Translating from {config}')
        for field in dataset_args['dataset_fields']:
            translations[config][field] = translate_few_shot.main(sentences_list=texts[config][field], return_output=True, **translate_args)
            translations[config][field] = extract_translations(translations[config][field], texts[config][field], translate_args)
        save_file(translations[config], config, translate_args, dataset_args)
def draw_disparity(disparity_map):
    """Colorize a disparity map with OpenCV's JET colormap.

    Normalizes the map to [0, 255] uint8, then applies COLORMAP_JET.

    Fixes: the original passed `1` positionally to cv2.convertScaleAbs,
    which binds to the `dst` argument rather than `alpha`; and a constant
    disparity map caused a division by zero during normalization.
    """
    min_val = float(np.min(disparity_map))
    max_val = float(np.max(disparity_map))
    span = (max_val - min_val) or 1.0  # guard: constant map → avoid 0/0
    norm_disparity_map = (255 * ((disparity_map - min_val) / span)).astype(np.uint8)
    return cv2.applyColorMap(cv2.convertScaleAbs(norm_disparity_map, alpha=1), cv2.COLORMAP_JET)
class CNNParams():
    """Static VGG-16-like layer shape table for a CNN (weights/biases per layer).

    Shapes are keyed by TF-style variable names ('convX_Y/W', 'convX_Y/b',
    ...). Filter sizes and label count come from HyperParams.
    """

    def __init__(self, verbose):
        # 2x2 max-pool window/stride in NHWC layout.
        self.pool_window = [1, 2, 2, 1]
        self.pool_stride = [1, 2, 2, 1]
        self.last_features = 1024
        # Conv widths follow the VGG-16 progression (64..512).
        self.conv_filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]
        self.depth_filters = [32]
        self.layer_shapes = self.get_layer_shapes()
        if verbose:
            pprint(self.__dict__)

    def get_layer_shapes(self):
        """Return a dict mapping variable names to their tensor shapes."""
        shapes = {}
        hyper = HyperParams(verbose=False)
        l = self.last_features
        f = self.conv_filters
        d = self.depth_filters[(- 1)]
        shapes['conv1_1/W'] = (hyper.filter_h, hyper.filter_w, hyper.image_c, f[0])
        shapes['conv1_1/b'] = (f[0],)
        shapes['conv1_2/W'] = (hyper.filter_h, hyper.filter_w, f[0], f[1])
        shapes['conv1_2/b'] = (f[1],)
        shapes['conv2_1/W'] = (hyper.filter_h, hyper.filter_w, f[1], f[2])
        shapes['conv2_1/b'] = (f[2],)
        shapes['conv2_2/W'] = (hyper.filter_h, hyper.filter_w, f[2], f[3])
        shapes['conv2_2/b'] = (f[3],)
        shapes['conv3_1/W'] = (hyper.filter_h, hyper.filter_w, f[3], f[4])
        shapes['conv3_1/b'] = (f[4],)
        shapes['conv3_2/W'] = (hyper.filter_h, hyper.filter_w, f[4], f[5])
        shapes['conv3_2/b'] = (f[5],)
        shapes['conv3_3/W'] = (hyper.filter_h, hyper.filter_w, f[5], f[6])
        shapes['conv3_3/b'] = (f[6],)
        shapes['conv4_1/W'] = (hyper.filter_h, hyper.filter_w, f[6], f[7])
        shapes['conv4_1/b'] = (f[7],)
        shapes['conv4_2/W'] = (hyper.filter_h, hyper.filter_w, f[7], f[8])
        shapes['conv4_2/b'] = (f[8],)
        shapes['conv4_3/W'] = (hyper.filter_h, hyper.filter_w, f[8], f[9])
        shapes['conv4_3/b'] = (f[9],)
        shapes['conv5_1/W'] = (hyper.filter_h, hyper.filter_w, f[9], f[10])
        shapes['conv5_1/b'] = (f[10],)
        shapes['conv5_2/W'] = (hyper.filter_h, hyper.filter_w, f[10], f[11])
        shapes['conv5_2/b'] = (f[11],)
        shapes['conv5_3/W'] = (hyper.filter_h, hyper.filter_w, f[11], f[12])
        shapes['conv5_3/b'] = (f[12],)
        shapes['conv6_1/W'] = (hyper.filter_h, hyper.filter_w, f[12], d)
        shapes['conv6_1/b'] = (d,)
        # NOTE(review): 'depth/W' maps d->d while 'depth/b' has length l
        # (= last_features) — verify these shapes are intentional.
        shapes['depth/W'] = (hyper.filter_h, hyper.filter_w, d, d)
        shapes['depth/b'] = (l,)
        shapes['conv6/W'] = (hyper.filter_h, hyper.filter_w, l, l)
        shapes['conv6/b'] = (l,)
        # Global-average-pooling classifier head.
        shapes['GAP/W'] = (l, hyper.n_labels)
        return shapes
class Convolution2D(KerasLayer):
    """Keras-1-style 2D convolution wrapper.

    Normalizes input_shape to a list (or None) and forwards every argument
    positionally to the KerasLayer base constructor.
    """

    def __init__(self, nb_filter, nb_row, nb_col, init='glorot_uniform', activation=None, border_mode='valid', subsample=(1, 1), dim_ordering='th', W_regularizer=None, b_regularizer=None, bias=True, input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(Convolution2D, self).__init__(None, nb_filter, nb_row, nb_col, init, activation, border_mode, subsample, dim_ordering, W_regularizer, b_regularizer, bias, shape, **kwargs)
def element_featurize(sampletype, default_features, filepaths, directory):
    """Featurize `filepaths` by shelling out to the sampletype featurizer.

    Copies samples (plus their .json sidecars) into a throwaway train_dir
    subfolder, runs the external featurize.py on it, then gathers the
    requested feature arrays and labels from the produced JSON files.

    Returns (features, labels) lists aligned with `filepaths`.

    NOTE(review): depends on a module-level `basedir`; mutates the process
    CWD (restored at the end); `os.system` with an interpolated path is
    shell-injectable if sampletype/folder names are untrusted.
    """
    # Unique working folder so concurrent runs do not collide.
    folder = (('%s-features-' % sampletype) + str(uuid.uuid1()))
    old_dir = directory
    train_dir = (basedir + '/train_dir')
    directory = ((basedir + '/train_dir/') + folder)
    os.mkdir(((basedir + '/train_dir/') + folder))
    for i in range(len(filepaths)):
        try:
            shutil.copy(filepaths[i], ((directory + '/') + filepaths[i].split('/')[(- 1)]))
        except:
            # NOTE(review): bare except silently skips files that fail to copy.
            pass
        try:
            # Copy the matching .json sidecar (same basename, .json extension).
            shutil.copy((filepaths[i][0:(- 4)] + '.json'), (((directory + '/') + filepaths[i].split('/')[(- 1)][0:(- 4)]) + '.json'))
        except:
            pass
    os.chdir((basedir + ('/features/%s_features/' % sampletype)))
    os.system(('python3 featurize.py %s' % ((basedir + '/train_dir/') + folder)))
    features = list()
    labels = list()
    for i in range(len(filepaths)):
        try:
            jsonfile = (filepaths[i].split('/')[(- 1)][0:(- 4)] + '.json')
            g = json.load(open(((directory + '/') + jsonfile)))
            feature = []
            label = []
            # Concatenate every requested feature block for this sample.
            for j in range(len(default_features)):
                array_ = g['features'][sampletype][default_features[j]]
                feature = (feature + array_['features'])
                label = (label + array_['labels'])
            features.append(feature)
            labels.append(label)
        except:
            # Fallback: zero vector sized like the first success, random label.
            # NOTE(review): raises IndexError if the very first sample fails.
            features.append(np.zeros(len(features[0])))
            labels.append(random.choice(labels))
    os.chdir(train_dir)
    shutil.rmtree(folder)
    directory = old_dir
    os.chdir(directory)
    return (features, labels)
def _FracInt(x, y, z, a, b, c, tau, n):
denom = numpy.sqrt((((a + tau) * (b + tau)) * (c + tau)))
return (((((1.0 - ((x ** 2) / (a + tau))) - ((y ** 2) / (b + tau))) - ((z ** 2) / (c + tau))) ** n) / denom) |
# Fix: the original line was a bare `_registry(...)` call — a mangled class
# decorator, so the class was never registered. Restored as the registry
# decorator used by this framework's dataset plug-in pattern (assumed name
# `dataset_registry` — confirm against the registry module).
@dataset_registry(dataset_type='ImageFolder', framework='mxnet', dataset_format='')
class MXNetImageFolder(ImageFolder):
    """ImageFolder dataset returning MXNet NDArray images."""

    def __getitem__(self, index):
        # image_list entries are (path, label) pairs built by the base class.
        sample = self.image_list[index]
        label = sample[1]
        image = mx.image.imread(sample[0])
        if (self.transform is not None):
            (image, label) = self.transform((image, label))
        return (image, label)
def WideResNet40x10(num_class=10, block=None, attention_module=None):
    """Build a WRN-40-10 (depth 40, widen factor 10, dropout 0.3).

    NOTE(review): `block` is accepted but never forwarded to the wrapper —
    confirm whether it should be passed through or removed.
    """
    return WideResNetWrapper(depth=40, widen_factor=10, dropRate=0.3, num_class=num_class, attention_module=attention_module)
def main():
    """Score every darknet .weights file in --weights_folder on COCO.

    For each weights file: run `darknet detector valid` to produce detection
    JSON (skipped if results already exist), evaluate with COCOeval under IoU
    and GIoU matching on both minival2014 and val2017, write per-weight
    result files, and keep map.txt sorted by weight id.

    Fixes vs original: raising a plain string (TypeError on Python 3),
    subscripting a filter() iterator, and passing a `cmp` function to
    sorted() (removed in Python 3). Ground-truth annotation loading is
    hoisted out of the per-weights loop; unused locals were removed.
    """
    args = parse_args()
    if not args.weights_folder:
        # Fix: the original raised a bare string, a TypeError under Python 3.
        raise ValueError('you must pass a --weights_folder')
    # Fix: filter() returns a non-subscriptable iterator on Python 3.
    weights_folder_name = [part for part in args.weights_folder.split('/') if part][(- 1)]
    output_path = 'results/coco_results_{}'.format(weights_folder_name)
    print("Output to: '{}'".format(output_path))
    try:
        os.mkdir(output_path)
    except OSError as ose:
        print('warning: {}'.format(ose))
    all_weights_files = glob.glob(os.path.join(args.weights_folder, '*.weights'))
    # Fix: sorted() lost its `cmp` argument in Python 3; the old comparator
    # sorted by descending weight id, reproduced here with a reversed key sort.
    all_weights_files = sorted(all_weights_files, key=(lambda p: int_or_max(file_to_weight_id(p))), reverse=True)
    print('Processing {}'.format('\n'.join(all_weights_files)))
    visited = set()
    map_results_path = os.path.join(args.weights_folder, 'map.txt')
    if os.path.isfile(map_results_path):
        with open(map_results_path, 'r') as f:
            for line in f.readlines():
                if (len(line.strip()) < 1):
                    continue
                rows = line.split(',')
                visited.add((rows[0], int_or_max(rows[1])))
        print('Skipping already visited {}'.format(visited))
    # Ground truth is loop-invariant: parse both annotation files once,
    # instead of once per weights file as the original did.
    dataDir = 'datasets/coco/coco'
    annFile = ('%s/annotations/instances_minival2014.json' % dataDir)
    print('loading {}'.format(annFile))
    annFileVal2017 = ('%s/annotations/instances_val2017.json' % dataDir)
    print('loading {}'.format(annFileVal2017))
    cocoGts = {'': COCO(annFile), 'val2017': COCO(annFileVal2017)}
    druns = 0
    for weights_file in all_weights_files:
        weight_id = int_or_max(file_to_weight_id(weights_file))
        if (weight_id < args.min_weight_id):
            continue
        # Skip weights whose four (metric, id) rows are all recorded already.
        if ((('iou', weight_id) in visited) and (('giou', weight_id) in visited)):
            if ((('val2017-iou', weight_id) in visited) and (('val2017-giou', weight_id) in visited)):
                continue
        weights_path = os.path.dirname(weights_file)
        weights_output_paths = dict()
        for year in ['', 'val2017']:
            weights_output_paths[year] = os.path.join(output_path, str(weight_id), year)
            resFile = os.path.join(weights_output_paths[year], 'coco_results.json')
            print("weights output to: '{}'".format(resFile))
            try:
                os.mkdir(weights_output_paths[year])
            except OSError as ose:
                print('warning: {}'.format(ose))
            if ((druns > MAX_DARKNET_RUNS_PER_RUN) and (year == '')):
                # NOTE(review): breaking here leaves 'val2017' out of
                # weights_output_paths; the metric loop below would then
                # KeyError for val2017 metrics (pre-existing behavior).
                print('completed {} runs, no more darknet this time around!'.format(druns))
                break
            if (os.path.isfile(resFile) and (os.path.getsize(resFile) > 0)):
                print("skipping generation of populated results file '{}'".format(resFile))
            else:
                druns += 1
                ldlib = ('LD_LIBRARY_PATH={}'.format(args.lib_folder) if args.lib_folder else '')
                gpu = ('-i {}'.format(args.gpu_id) if args.gpu_id else '')
                data_file_with_year = '{}{}.data'.format(args.data_file.split('.data')[0], ('.{}'.format(year) if len(year) else year))
                # NOTE(review): command built from CLI args and run with
                # shell=True — acceptable only as a trusted local tool.
                cmd = '{} ./darknet detector valid {} {} {} {} -prefix {}'.format(ldlib, data_file_with_year, args.cfg_file, weights_file, gpu, weights_output_paths[year])
                print("running '{}'".format(cmd))
                retval = 0
                callerr = False
                try:
                    retval = subprocess.call(cmd, shell=True)
                except OSError as ose:
                    print("OSError: '{}'".format(ose))
                    callerr = True
                print('{} finished with val {}'.format(cmd, retval))
                sys.stdout.flush()
                if ((retval != 0) or callerr):
                    raise Exception("'{}' failed".format(cmd))
                print("darknet run {}, '{}' complete".format(druns, cmd))
        if (len(weights_output_paths.items()) == 0):
            print('no weights_output_paths, breaking')
            break
        for metric in ['iou', 'giou', 'val2017-iou', 'val2017-giou']:
            if ((metric, weight_id) in visited):
                continue
            year = ''
            if ('val2017' in metric):
                year = 'val2017'
            print('Evaluating detections with {}'.format(metric))
            mAP_analysis = [','.join([metric, 'mAP'])]
            try:
                to_load = '{}/coco_results.json'.format(weights_output_paths[year])
                print('Results json: {}'.format(to_load))
                cocoDt = cocoGts[year].loadRes(to_load)
            except ValueError as ve:
                print('WARNING: {}'.format(ve))
                continue
            imgIds = sorted(cocoDt.getImgIds())
            cocoEval = COCOeval(cocoGts[year], cocoDt, annType)
            cocoEval.params.imgIds = imgIds
            # Matching criterion ('iou' or 'giou') comes from the metric name.
            cocoEval.evaluate(metric.split('-')[(- 1)])
            cocoEval.accumulate()
            cocoEval.summarize()
            mAP = cocoEval.stats[0]
            mAP_5 = cocoEval.stats[1]
            mAP_75 = cocoEval.stats[2]
            one = [metric, weight_id, mAP, mAP_5, 0, 0, 0, 0, mAP_75, 0, 0, 0, 0]
            mAP_analysis.append(','.join([str(o) for o in one]))
            results_path = os.path.join(weights_path, '{}-{}.txt'.format(weight_id, metric))
            print("Writing: '{}' and '{}'".format(results_path, map_results_path))
            with open(results_path, 'w') as f:
                f.write('\n'.join(mAP_analysis))
            # Insert this row into map.txt, keeping it sorted by weight id.
            reslines = []
            inserted = False
            linetoinsert = ','.join([str(o) for o in one])
            print('inserting: {}'.format(linetoinsert))
            if os.path.isfile(map_results_path):
                with open(map_results_path, 'r') as f:
                    for line in f.readlines():
                        if (len(line.strip()) < 1):
                            continue
                        cols = line.split(',')
                        if ((not inserted) and (int(cols[1]) > weight_id)):
                            reslines.append(linetoinsert)
                            inserted = True
                        reslines.append(line)
            else:
                reslines.append(linetoinsert)
                inserted = True
            if (not inserted):
                reslines.append(linetoinsert)
            with open(map_results_path, 'w') as f:
                f.write('\n'.join([l.strip() for l in reslines]))
class CenterBlock(nn.Sequential):
    """Two stacked 3x3 Conv2dReLU blocks used at the bottleneck of the decoder."""

    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        blocks = [
            md.Conv2dReLU(in_channels, out_channels, kernel_size=3, padding=1, use_batchnorm=use_batchnorm),
            md.Conv2dReLU(out_channels, out_channels, kernel_size=3, padding=1, use_batchnorm=use_batchnorm),
        ]
        super().__init__(*blocks)
# Fix: the original first line read `(loss_fn='L1')` — a syntax error left by
# a stripped class decorator. Restored as the sampling-domain registration
# decorator of this framework (assumed `sc.datanode` — confirm against the
# `sc` module's API).
@sc.datanode(loss_fn='L1')
class Interior(sc.SampleDomain):
    """Interior sampling domain enforcing a zero integral-dx constraint."""

    def sampling(self, *args, **kwargs):
        # 10000 interior collocation points from the module-level geometry.
        points = geo.sample_interior(10000)
        constraints = {'integral_dx': 0}
        return (points, constraints)
class BatchScorerInterface(ScorerInterface):
    """Batch extension of ScorerInterface with a fallback loop implementation."""

    def batch_init_state(self, x: torch.Tensor) -> Any:
        """Initialize a batch state by delegating to the single-sequence init."""
        return self.init_state(x)

    def batch_score(self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor) -> Tuple[(torch.Tensor, List[Any])]:
        """Score a batch by looping score() over each (y, state, x) triple.

        Returns the stacked scores of shape (batch, vocab) and the list of
        per-sequence output states.
        """
        warnings.warn('{} batch score is implemented through for loop not parallelized'.format(self.__class__.__name__))
        score_list = []
        state_list = []
        for y, state, x in zip(ys, states, xs):
            single_score, single_state = self.score(y, state, x)
            state_list.append(single_state)
            score_list.append(single_score)
        batch_scores = torch.cat(score_list, 0).view(ys.shape[0], (- 1))
        return (batch_scores, state_list)
class Attr():
    """Attribute bag rendered as a quoted 'name k=v ...' string.

    Built either from keyword arguments or by parsing `string` via ParseArg.
    """

    def __init__(self, string=None, **args):
        if (not string):
            self.attr = args
        else:
            self.attr = ParseArg(string)

    def __str__(self):
        """Render as '"<name> k=v ..."'; spaces inside values are stripped."""
        string = ('"' + self.attr['name'])
        # Fix: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3 — replaced with items().
        for (k, v) in self.attr.items():
            if (k == 'name'):
                continue
            string = ((((string + ' ') + k) + '=') + str(v).replace(' ', ''))
        string = (string + '"')
        return string
class GRA(nn.Module):
    """Group-guided reverse attention block.

    Splits x into `group` channel chunks, interleaves each chunk with the
    single-channel guidance map y, refines x through a 3x3 conv over the
    interleaved tensor, and updates y with a residual score map.

    Improvement: the original enumerated hard-coded branches for group sizes
    1/2/4/8/16/32 and fell back to chunking by 64 for everything else. The
    loop below produces identical output for all of those sizes and is also
    correct for any other group count.
    """

    def __init__(self, channel, subchannel):
        super(GRA, self).__init__()
        self.group = (channel // subchannel)
        self.conv = nn.Sequential(nn.Conv2d((channel + self.group), channel, 3, padding=1), nn.ReLU(True))
        self.score = nn.Conv2d(channel, 1, 3, padding=1)

    def forward(self, x, y):
        # Interleave every channel chunk of x with the guidance map y:
        # [c0, y, c1, y, ..., c_{g-1}, y] along the channel dimension.
        chunks = torch.chunk(x, self.group, dim=1)
        interleaved = []
        for chunk in chunks:
            interleaved.extend((chunk, y))
        x_cat = torch.cat(interleaved, 1)
        x = (x + self.conv(x_cat))
        y = (y + self.score(x))
        return (x, y)
# Fix: the original line was a bare `_model` expression — the tail of a
# mangled `@register_model` decorator (timm's model-registry pattern), so the
# constructor was never registered. Restored below.
@register_model
def skresnet50d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a Selective-Kernel ResNet-50-D (deep stem, avg-pool downsample).

    Loads pretrained weights from the default config when `pretrained` is set.
    """
    sk_kwargs = dict(split_input=True)
    default_cfg = default_cfgs['skresnet50d']
    model = ResNet(SelectiveKernelBottleneck, [3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, num_classes=num_classes, in_chans=in_chans, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def poly_utilities(n, theta):
    """Evaluate sum_i theta[i] * k**i at k = 0..n-1.

    Returns a float array of length n (all zeros when theta is empty).
    """
    ks = np.arange(n)
    total = np.zeros(n)
    for power, coef in enumerate(theta):
        total = total + (ks ** power) * coef
    return total
def train_predict_model(env_pool, predict_env, flag=False):
    """Train the environment-prediction model on env_pool, then evaluate once.

    Runs 3 training iterations when `flag` is True, else 1. Each iteration
    samples the whole pool, keeps transitions with |reward| < 2, trains the
    model, and logs losses/accuracies and images to the module-level
    TensorBoard `writer`. Uses and mutates globals model_step/eval_step.
    """
    print('> Model Train < ')
    global model_step
    global eval_step
    if (flag == True):
        model_train_num = 3
    else:
        model_train_num = 1
    for i in range(model_train_num):
        t1 = time.time()
        (state, next_state, action, reward, done) = env_pool.sample(len(env_pool))
        # Keep only transitions whose reward magnitude is below 2
        # (drops terminal/outlier rewards from model training).
        done_state = ((reward < 2) & (reward > (- 2)))
        state = state[done_state]
        next_state = next_state[done_state]
        action = action[done_state]
        reward = reward[done_state]
        done = done[done_state]
        (loss_G, content_with_prediction, out, all_label, label_with_prediction, acc_vel, acc_ang, acc_goal_direction, acc_goal_distance, acc_reward) = predict_env.model.train(state, action, next_state, reward, batch_size=BATCH_SIZE_MODEL)
        t2 = time.time()
        print('TRAIN Time : {} ms'.format(round((1000 * (t2 - t1)), 2)))
        print('> Model Train Done < ', acc_reward)
        print('> env_pool_size < ', len(env_pool))
        writer.add_scalar('Model/loss_G', loss_G, model_step)
        writer.add_image('cost_map content_with_prediction', content_with_prediction, model_step)
        writer.add_image('cost_map', out, model_step)
        writer.add_image('cost_map label_with_prediction', label_with_prediction, model_step)
        writer.add_image('cost_map all label', all_label, model_step)
        writer.add_scalar('Train/acc_vel', acc_vel, model_step)
        writer.add_scalar('Train/acc_ang', acc_ang, model_step)
        writer.add_scalar('Train/acc_goal_direction', acc_goal_direction, model_step)
        writer.add_scalar('Train/acc_goal_distance', acc_goal_distance, model_step)
        writer.add_scalar('Train/acc_reward', acc_reward, model_step)
        model_step += 1
    # Final evaluation pass over a fresh filtered sample of the pool.
    (state, next_state, action, reward, done) = env_pool.sample(len(env_pool))
    done_state = ((reward < 2) & (reward > (- 2)))
    state = state[done_state]
    next_state = next_state[done_state]
    action = action[done_state]
    reward = reward[done_state]
    (acc_vel, acc_ang, acc_goal_direction, acc_goal_distance, acc_reward) = predict_env.model.evaluate(state, action, next_state, reward, batch_size=BATCH_SIZE_MODEL)
    eval_step += 1
    writer.add_scalar('Evaluate/acc_vel', acc_vel, eval_step)
    writer.add_scalar('Evaluate/acc_ang', acc_ang, eval_step)
    writer.add_scalar('Evaluate/acc_goal_direction', acc_goal_direction, eval_step)
    writer.add_scalar('Evaluate/acc_goal_distance', acc_goal_distance, eval_step)
    writer.add_scalar('Evaluate/acc_reward', acc_reward, eval_step)
class SourceNotFoundError(DatasetError):
    """Raised when a named source is missing from a dataset config."""

    def __init__(self, source, config):
        self.source = source
        self.config = config
        message = 'Unable to find source {} in config {}'.format(source, config)
        super().__init__(message)

    def __reduce__(self):
        # Keep the exception picklable: rebuild from the constructor args.
        return (SourceNotFoundError, (self.source, self.config))
def fmt_n(x, n=4):
    """Stack every n consecutive rows of x into one row and return float32.

    Moves the tensor to CUDA when the module-level USE_CUDA flag is set.
    """
    rows = int(len(x) / n)
    reshaped = x.reshape((rows, n * x.shape[1]))
    result = torch.tensor(reshaped, dtype=torch.float32)
    return result.cuda() if USE_CUDA else result
def digit_norm(s):
    """Normalize runs of ASCII digits in `s` to Chinese numerals.

    Each maximal digit run is converted as one number via cn2an.an2cn; when
    that fails, falls back to a per-character lookup in `digit_dict`.

    Fixes vs original: the bare `except:` is narrowed to Exception, and a
    trailing digit run now gets the same fallback path instead of raising
    when cn2an cannot convert it.
    """
    def _convert(run):
        # Best-effort conversion of one digit run.
        try:
            return cn2an.an2cn(run)
        except Exception:
            print(f'cannot convert digit {run}')
            return ''.join(digit_dict.get(ch, '') for ch in run)

    out = ''
    buf = ''
    for c in s:
        if c.isdigit():
            buf += c
        else:
            if buf:
                out += _convert(buf)
                buf = ''
            out += c
    if buf:
        out += _convert(buf)
    return out
def tsv_to_examples():
    """Convert the FLAGS.in_file token stream into TFRecord examples + maps.

    Seeds label/token/shape/char maps with special symbols (start/end or pad,
    plus OOV), optionally preloads maps from FLAGS.vocab/labels/shapes/chars,
    streams sentences (or DOC_MARKER-delimited documents) through
    make_example() into FLAGS.out_dir/examples.proto, then writes the maps
    and corpus size counts.

    NOTE(review): mutates module-level *_int_str_map dicts; if FLAGS.out_dir
    is missing it only prints a message and still attempts to write there;
    the coverage print divides by num_tokens (ZeroDivisionError on an empty
    corpus).
    """
    label_map = {}
    token_map = {}
    shape_map = {}
    char_map = {}
    update_vocab = True
    update_chars = True
    # Special symbols get the low, stable indices first.
    if FLAGS.start_end:
        token_map[SENT_START] = len(token_map)
        token_int_str_map[token_map[SENT_START]] = SENT_START
        shape_map[SENT_START] = len(shape_map)
        char_map[SENT_START] = len(char_map)
        char_int_str_map[char_map[SENT_START]] = SENT_START
        if FLAGS.predict_pad:
            label_map[SENT_START] = len(label_map)
            label_int_str_map[label_map[SENT_START]] = SENT_START
        token_map[SENT_END] = len(token_map)
        token_int_str_map[token_map[SENT_END]] = SENT_END
        shape_map[SENT_END] = len(shape_map)
        char_map[SENT_END] = len(char_map)
        char_int_str_map[char_map[SENT_END]] = SENT_END
        if FLAGS.predict_pad:
            label_map[SENT_END] = len(label_map)
            label_int_str_map[label_map[SENT_END]] = SENT_END
    else:
        # Without explicit sentence boundaries, use a PAD symbol instead.
        token_map[PAD_STR] = len(token_map)
        token_int_str_map[token_map[PAD_STR]] = PAD_STR
        char_map[PAD_STR] = len(char_map)
        char_int_str_map[char_map[PAD_STR]] = PAD_STR
        shape_map[PAD_STR] = len(shape_map)
        if FLAGS.predict_pad:
            label_map[PAD_STR] = len(label_map)
            label_int_str_map[label_map[PAD_STR]] = PAD_STR
    token_map[OOV_STR] = len(token_map)
    token_int_str_map[token_map[OOV_STR]] = OOV_STR
    char_map[OOV_STR] = len(char_map)
    char_int_str_map[char_map[OOV_STR]] = OOV_STR
    # A preloaded vocabulary freezes the token map (update_vocab = False).
    if (FLAGS.vocab != ''):
        update_vocab = False
        with open(FLAGS.vocab, 'r') as f:
            for line in f.readlines():
                word = line.strip().split(' ')[0]
                if (word not in token_map):
                    token_map[word] = len(token_map)
                    token_int_str_map[token_map[word]] = word
    if (FLAGS.update_vocab != ''):
        with open(FLAGS.update_vocab, 'r') as f:
            for line in f.readlines():
                word = line.strip().split(' ')[0]
                if (word not in token_map):
                    token_map[word] = len(token_map)
                    token_int_str_map[token_map[word]] = word
    # Optional tab-separated "<symbol>\t<index>" map files.
    if (FLAGS.labels != ''):
        with open(FLAGS.labels, 'r') as f:
            for line in f.readlines():
                (label, idx) = line.strip().split('\t')
                label_map[label] = int(idx)
                label_int_str_map[label_map[label]] = label
    if (FLAGS.shapes != ''):
        with open(FLAGS.shapes, 'r') as f:
            for line in f.readlines():
                (shape, idx) = line.strip().split('\t')
                shape_map[shape] = int(idx)
    if (FLAGS.chars != ''):
        update_chars = FLAGS.update_maps
        with open(FLAGS.chars, 'r') as f:
            for line in f.readlines():
                (char, idx) = line.strip().split('\t')
                char_map[char] = int(idx)
                char_int_str_map[char_map[char]] = char
    num_tokens = 0
    num_sentences = 0
    num_oov = 0
    num_docs = 0
    if (not os.path.exists(FLAGS.out_dir)):
        print(('Output directory not found: %s' % FLAGS.out_dir))
    writer = tf.python_io.TFRecordWriter((FLAGS.out_dir + '/examples.proto'))
    with open(FLAGS.in_file) as f:
        # Accumulate lines until a sentence/document boundary, then emit.
        line_buf = []
        line = f.readline()
        line_idx = 1
        while line:
            line = line.strip()
            if FLAGS.documents:
                # Document mode: DOC_MARKER lines delimit examples.
                if (line.split(' ')[0] == DOC_MARKER):
                    if line_buf:
                        (toks, oov, sent) = make_example(writer, line_buf, label_map, token_map, shape_map, char_map, update_vocab, update_chars)
                        num_tokens += toks
                        num_oov += oov
                        num_sentences += sent
                        num_docs += 1
                        line_buf = []
                else:
                    line_buf.append(line)
                    line_idx += 1
            elif line:
                # Sentence mode: blank lines delimit examples.
                line_buf.append(line)
                line_idx += 1
            elif line_buf:
                (toks, oov, sent) = make_example(writer, line_buf, label_map, token_map, shape_map, char_map, update_vocab, update_chars)
                num_tokens += toks
                num_oov += oov
                num_sentences += sent
                line_buf = []
            line = f.readline()
        # Flush any trailing example without a closing boundary.
        if line_buf:
            make_example(writer, line_buf, label_map, token_map, shape_map, char_map, update_vocab, update_chars)
    writer.close()
    print(('Embeddings coverage: %2.2f%%' % ((1 - (num_oov / num_tokens)) * 100)))
    # Persist the four maps as "<symbol>\t<index>" files.
    for (f_str, id_map) in [('label', label_map), ('token', token_map), ('shape', shape_map), ('char', char_map)]:
        with open((((FLAGS.out_dir + '/') + f_str) + '.txt'), 'w') as f:
            [f.write((((s + '\t') + str(i)) + '\n')) for (s, i) in id_map.items()]
    with open((FLAGS.out_dir + '/sizes.txt'), 'w') as f:
        print(num_sentences, file=f)
        print(num_tokens, file=f)
        print(num_docs, file=f)
def err_cc_img(list_gt, list_pred):
    """Per-batch-item connected-component L1 error as a numpy array.

    Applies error_l1_cc to each (gt, pred) mask pair along the batch axis.
    """
    batch = list_gt.shape[0]
    return np.array([error_l1_cc(list_gt[(b, ...)], list_pred[(b, ...)]) for b in range(batch)])
class ItrexOpt(object):
    """End-to-end driver for ITREX optimizations (distillation / QAT / SAT).

    Parses model/data/training/optimization arguments from a YAML or JSON
    config file (or argv) via HfArgumentParser.
    """

    def __init__(self, config_file, no_cuda):
        # Multi-process CPU run (LOCAL_RANK set): bring up distributed first.
        if ((int(os.environ.get('LOCAL_RANK', (- 1))) != (- 1)) and no_cuda):
            from intel_extension_for_transformers.transformers.utils.utility import distributed_init
            distributed_init()
        parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
        # Config source by extension: .yaml, .json, or plain command line.
        if config_file.endswith('.yaml'):
            (model_args, data_args, training_args, optim_args) = parser.parse_yaml_file(yaml_file=os.path.abspath(config_file))
        elif config_file.endswith('.json'):
            (model_args, data_args, training_args, optim_args) = parser.parse_json_file(json_file=os.path.abspath(config_file))
        else:
            (model_args, data_args, training_args, optim_args) = parser.parse_args_into_dataclasses()
        self.model_args = model_args
        self.data_args = data_args
        self.training_args = training_args
        self.optim_args = optim_args
    def e2e(self):
        """Run the whole pipeline: env, data, model, preprocessing, then every
        optimization enabled in optim_args (distillation, QAT, SAT)."""
        self._prepare_env()
        self._load_data()
        self._load_model()
        self._preprocess()
        if self.optim_args.distillation:
            self._do_distillation()
        if self.optim_args.quantization:
            self._do_quantization_aware_training()
        if self.optim_args.sat:
            self._do_sparsity_aware_training()
    def _prepare_env(self):
        """Configure logging verbosity, detect resumable checkpoints, seed RNGs."""
        logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
        log_level = self.training_args.get_process_log_level()
        logger.setLevel(log_level)
        datasets.utils.logging.set_verbosity(log_level)
        transformers.utils.logging.set_verbosity(log_level)
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
        # NOTE(review): the two f-strings concatenate without a separator, so
        # "...n_gpu: Ndistributed training..." is printed — missing space/comma.
        logger.warning((f'Process rank: {self.training_args.local_rank}, device: {self.training_args.device}, n_gpu: {self.training_args.n_gpu}' + f'distributed training: {bool((self.training_args.local_rank != (- 1)))}, 16-bits training: {self.training_args.fp16}'))
        logger.info(f'Training/evaluation parameters {self.training_args}')
        # Look for a resumable checkpoint unless overwriting the output dir.
        last_checkpoint = None
        if (os.path.isdir(self.training_args.output_dir) and self.training_args.do_train and (not self.training_args.overwrite_output_dir)):
            last_checkpoint = get_last_checkpoint(self.training_args.output_dir)
            if ((last_checkpoint is None) and (len(os.listdir(self.training_args.output_dir)) > 0)):
                raise ValueError(f'Output directory ({self.training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
            elif ((last_checkpoint is not None) and (self.training_args.resume_from_checkpoint is None)):
                logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
        set_seed(self.training_args.seed)
def _load_data(self):
if (self.data_args.task_name is not None):
if (self.data_args.task_name == 'emotion'):
raw_datasets = load_dataset(f'SetFit/{self.data_args.task_name}')
else:
raw_datasets = load_dataset('glue', self.data_args.task_name, cache_dir=self.model_args.cache_dir)
elif (self.data_args.dataset_name is not None):
raw_datasets = load_dataset(self.data_args.dataset_name, self.data_args.dataset_config_name, cache_dir=self.model_args.cache_dir)
else:
data_files = {'train': self.data_args.train_file, 'validation': self.data_args.validation_file}
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}')
if self.data_args.train_file.endswith('.csv'):
raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=self.model_args.cache_dir)
else:
raw_datasets = load_dataset('json', data_files=data_files, cache_dir=self.model_args.cache_dir)
print('Step 1: Load the dataset')
print('')
if (self.data_args.task_name is not None):
is_regression = (self.data_args.task_name == 'stsb')
if (not is_regression):
if (self.data_args.task_name == 'emotion'):
label_list = list(set(raw_datasets['train']['label_text']))
else:
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
self.raw_datasets = raw_datasets
self.is_regression = is_regression
self.num_labels = num_labels
self.label_list = label_list
    def _load_model(self):
        """Instantiate config, tokenizer and model (plus teacher / int8 variants).

        Sets ``self.config``, ``self.tokenizer``, ``self.model`` and, when
        distillation/SAT is enabled, ``self.teacher_tokenizer`` and
        ``self.teacher_model``.
        """
        config = AutoConfig.from_pretrained((self.model_args.config_name if self.model_args.config_name else self.model_args.model_name_or_path), num_labels=self.num_labels, finetuning_task=self.data_args.task_name, cache_dir=self.model_args.cache_dir, revision=self.model_args.model_revision, use_auth_token=(True if self.model_args.use_auth_token else None))
        self.config = config
        tokenizer = AutoTokenizer.from_pretrained((self.model_args.tokenizer_name if self.model_args.tokenizer_name else self.model_args.model_name_or_path), cache_dir=self.model_args.cache_dir, use_fast=self.model_args.use_fast_tokenizer, revision=self.model_args.model_revision, use_auth_token=(True if self.model_args.use_auth_token else None))
        self.tokenizer = tokenizer
        model = AutoModelForSequenceClassification.from_pretrained(self.model_args.model_name_or_path, from_tf=bool(('.ckpt' in self.model_args.model_name_or_path)), config=config, cache_dir=self.model_args.cache_dir, revision=self.model_args.model_revision, use_auth_token=(True if self.model_args.use_auth_token else None))
        if (self.optim_args.distillation or self.optim_args.sat):
            teacher_config = AutoConfig.from_pretrained(self.optim_args.teacher_model_name_or_path, num_labels=self.num_labels, finetuning_task=self.data_args.task_name)
            teacher_tokenizer = AutoTokenizer.from_pretrained(self.optim_args.teacher_model_name_or_path, use_fast=self.model_args.use_fast_tokenizer)
            # Teacher must tokenize identically, otherwise its logits don't align
            # with the student's inputs downstream.
            assert (teacher_tokenizer.vocab == self.tokenizer.vocab), 'teacher model and student model should have same tokenizer.'
            teacher_model = AutoModelForSequenceClassification.from_pretrained(self.optim_args.teacher_model_name_or_path, from_tf=bool(('.ckpt' in self.optim_args.teacher_model_name_or_path)), config=teacher_config)
            teacher_model.to(self.training_args.device)
            self.teacher_tokenizer = teacher_tokenizer
            self.teacher_model = teacher_model
        if self.optim_args.int8:
            # Reload the checkpoint as an INT8 OptimizedModel when benchmarking
            # an already-quantized model.
            model = OptimizedModel.from_pretrained(self.model_args.model_name_or_path, from_tf=bool(('.ckpt' in self.model_args.model_name_or_path)), config=config, cache_dir=self.model_args.cache_dir, revision=self.model_args.model_revision, use_auth_token=(True if self.model_args.use_auth_token else None))
        self.model = model
    def _preprocess(self):
        """Tokenize the raw datasets and build all trainer inputs.

        Resolves the sentence column keys, label mapping and padding strategy,
        maps the tokenizer over the datasets (student and, when distilling,
        teacher), selects metric and data collator, and creates ``self.trainer``.
        """
        if (self.data_args.task_name is not None):
            (sentence1_key, sentence2_key) = task_to_keys[self.data_args.task_name]
        else:
            # Guess the text columns: prefer the canonical names, else the
            # first two non-label columns, else a single-sentence task.
            non_label_column_names = [name for name in self.raw_datasets['train'].column_names if (name != 'label')]
            if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
                (sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
            elif (len(non_label_column_names) >= 2):
                (sentence1_key, sentence2_key) = non_label_column_names[:2]
            else:
                (sentence1_key, sentence2_key) = (non_label_column_names[0], None)
        if self.data_args.pad_to_max_length:
            padding = 'max_length'
        else:
            # Defer padding to the data collator (dynamic per-batch padding).
            padding = False
        label_to_id = None
        if ((self.model.config.label2id != PretrainedConfig(num_labels=self.num_labels).label2id) and (self.data_args.task_name is not None) and (not self.is_regression)):
            # The checkpoint ships its own label mapping; reuse it when it
            # matches the dataset's label set.
            label_name_to_id = {k.lower(): v for (k, v) in self.model.config.label2id.items()}
            if (list(sorted(label_name_to_id.keys())) == list(sorted(self.label_list))):
                label_to_id = {i: int(label_name_to_id[self.label_list[i]]) for i in range(self.num_labels)}
            else:
                logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(self.label_list))}.
Ignoring the model labels as a result.''')
        elif ((self.data_args.task_name is None) and (not self.is_regression)):
            label_to_id = {v: i for (i, v) in enumerate(self.label_list)}
        if (label_to_id is not None):
            self.model.config.label2id = label_to_id
            self.model.config.id2label = {id: label for (label, id) in self.config.label2id.items()}
        if (self.data_args.max_seq_length > self.tokenizer.model_max_length):
            logger.warning(f'The max_seq_length passed ({self.data_args.max_seq_length}) is larger than the maximum length for themodel ({self.tokenizer.model_max_length}). Using max_seq_length={self.tokenizer.model_max_length}.')
        max_seq_length = min(self.data_args.max_seq_length, self.tokenizer.model_max_length)
        def preprocess_function(examples, tokenizer=self.tokenizer):
            # Tokenize one or two text columns; remap labels when needed.
            # `tokenizer` is a default arg so the same closure can be reused
            # for the teacher tokenizer via functools.partial below.
            args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
            result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
            if ((label_to_id is not None) and ('label' in examples)):
                # -1 is the conventional "no label" marker and is left untouched.
                result['label'] = [(label_to_id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
            return result
        with self.training_args.main_process_first(desc='dataset map pre-processing'):
            raw_datasets = self.raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=(not self.data_args.overwrite_cache))
        if self.training_args.do_train:
            if ('train' not in raw_datasets):
                raise ValueError('--do_train requires a train dataset')
            train_dataset = raw_datasets['train']
            if (self.data_args.max_train_samples is not None):
                train_dataset = train_dataset.select(range(self.data_args.max_train_samples))
        if self.training_args.do_eval:
            if (('validation' not in raw_datasets) and ('validation_matched' not in raw_datasets)):
                raise ValueError('--do_eval requires a validation dataset')
            eval_dataset = raw_datasets[('validation_matched' if (self.data_args.task_name == 'mnli') else 'validation')]
            if (self.data_args.max_eval_samples is not None):
                eval_dataset = eval_dataset.select(range(self.data_args.max_eval_samples))
        if self.training_args.do_train:
            # Log a few random samples as a sanity check.
            for index in random.sample(range(len(train_dataset)), 3):
                logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
        if ((self.data_args.task_name is not None) and (self.data_args.task_name != 'emotion')):
            metric = load_metric('glue', self.data_args.task_name)
        else:
            metric = load_metric('accuracy')
        self.metric = metric
        if self.data_args.pad_to_max_length:
            data_collator = default_data_collator
        elif self.training_args.fp16:
            # Pad to a multiple of 8 for tensor-core efficiency under fp16.
            data_collator = DataCollatorWithPadding(self.tokenizer, pad_to_multiple_of=8)
        else:
            data_collator = None
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.data_collator = data_collator
        if (self.optim_args.distillation or self.optim_args.sat):
            # Re-tokenize with the teacher tokenizer so teacher logits align
            # row-for-row with the student datasets.
            teacher_processed_datasets = self.raw_datasets.map(functools.partial(preprocess_function, tokenizer=self.teacher_tokenizer), batched=True, remove_columns=self.raw_datasets['train'].column_names)
            teacher_train_dataset = teacher_processed_datasets['train']
            if (self.data_args.max_train_samples is not None):
                teacher_train_dataset = teacher_train_dataset.select(range(self.data_args.max_train_samples))
            teacher_eval_dataset = teacher_processed_datasets[('validation_matched' if (self.data_args.task_name == 'mnli') else 'validation')]
            if (self.data_args.max_eval_samples is not None):
                teacher_eval_dataset = teacher_eval_dataset.select(range(self.data_args.max_eval_samples))
            assert ((self.train_dataset.num_rows == teacher_train_dataset.num_rows) and (self.eval_dataset.num_rows == teacher_eval_dataset.num_rows)), 'Length of train or evaluation dataset of teacher doesnot match that of student.'
            self.teacher_train_dataset = teacher_train_dataset
            self.teacher_eval_dataset = teacher_eval_dataset
        def compute_metrics(p: EvalPrediction):
            # Convert logits to predictions and score with the task metric.
            preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
            preds = (np.squeeze(preds) if self.is_regression else np.argmax(preds, axis=1))
            if (self.data_args.task_name is not None):
                result = self.metric.compute(predictions=preds, references=p.label_ids)
                if (len(result) > 1):
                    result['combined_score'] = np.mean(list(result.values())).item()
                return result
            elif self.is_regression:
                return {'mse': ((preds - p.label_ids) ** 2).mean().item()}
            else:
                return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
        from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace
        # Symbolically trace the model so neural-compressor can insert
        # (fake-)quantization ops; the flag selects QAT-style tracing.
        self.model = symbolic_trace(self.model, (self.optim_args.quantization_approach == 'QuantizationAwareTraining'))
        self.trainer = NLPTrainer(model=self.model, args=self.training_args, train_dataset=(self.train_dataset if self.training_args.do_train else None), eval_dataset=(self.eval_dataset if self.training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=self.tokenizer, data_collator=self.data_collator)
    def _do_distillation(self):
        """Distill the teacher into the student model, then benchmark it."""
        class BertModelforLogitsOutputOnly(torch.nn.Module):
            # Thin wrapper so the teacher returns a bare logits tensor instead
            # of a model-output object.
            def __init__(self, model):
                super(BertModelforLogitsOutputOnly, self).__init__()
                self.model = model
            def forward(self, *args, **kwargs):
                output = self.model(*args, **kwargs)
                return output['logits']
        print('Step 4: Inference teacher model: get logits for usage in distilling child model. (bert-mini)')
        print('')
        def dict_tensor_to_model_device(batch, model):
            # Move every tensor in the batch onto the model's device, in place.
            device = next(model.parameters()).device
            for k in batch:
                batch[k] = batch[k].to(device)
        def get_logits(teacher_model, train_dataset, teacher_train_dataset):
            # Run the teacher over the teacher-tokenized train split and attach
            # its logits as a new 'teacher_logits' column. Results are cached in
            # a .npy file next to this script and reused on later runs.
            logger.info('***** Inferencing teacher model to Get logits of teacher *****')
            logger.info(f' Num examples = {len(train_dataset)}')
            teacher_model.eval()
            npy_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '{}.{}.npy'.format(self.data_args.task_name, self.optim_args.teacher_model_name_or_path.replace('/', '.')))
            if os.path.exists(npy_file):
                teacher_logits = [x for x in np.load(npy_file)]
            else:
                sampler = None
                if (self.training_args.world_size > 1):
                    # Shard the dataset across ranks and wrap the teacher in DDP.
                    from transformers.trainer_pt_utils import ShardSampler
                    sampler = ShardSampler(teacher_train_dataset, batch_size=self.training_args.per_device_eval_batch_size, num_processes=self.training_args.world_size, process_index=self.training_args.process_index)
                    teacher_model = torch.nn.parallel.DistributedDataParallel(teacher_model, device_ids=([self.training_args.local_rank] if (self.training_args._n_gpu != 0) else None), output_device=(self.training_args.local_rank if (self.training_args._n_gpu != 0) else None))
                train_dataloader = DataLoader(teacher_train_dataset, collate_fn=self.data_collator, sampler=sampler, batch_size=self.training_args.per_device_eval_batch_size)
                train_dataloader = tqdm(train_dataloader, desc='Evaluating')
                teacher_logits = []
                for (step, batch) in enumerate(train_dataloader):
                    dict_tensor_to_model_device(batch, teacher_model)
                    outputs = teacher_model(**batch)
                    if (self.training_args.world_size > 1):
                        # Gather each rank's shard before collecting.
                        outputs_list = [None for i in range(self.training_args.world_size)]
                        torch.distributed.all_gather_object(outputs_list, outputs)
                        outputs = torch.concat(outputs_list, dim=0)
                    teacher_logits += [x for x in outputs.cpu().numpy()]
                if (self.training_args.world_size > 1):
                    # ShardSampler pads the last batch; trim back to dataset length.
                    teacher_logits = teacher_logits[:len(teacher_train_dataset)]
                if (self.training_args.local_rank in [(- 1), 0]):
                    np.save(npy_file, np.array(teacher_logits))
            return train_dataset.add_column('teacher_logits', teacher_logits)
        with torch.no_grad():
            self.train_dataset = get_logits(BertModelforLogitsOutputOnly(self.teacher_model), self.train_dataset, self.teacher_train_dataset)
        para_counter = (lambda model: sum((p.numel() for p in model.parameters())))
        logger.info('***** Number of teacher model parameters: {:.2f}M *****'.format((para_counter(self.teacher_model) / (10 ** 6))))
        logger.info('***** Number of student model parameters: {:.2f}M *****'.format((para_counter(self.model) / (10 ** 6))))
        print('Step 6: Distill teacher model to student Model ')
        print('')
        # Default metric per task: pearson (stsb), matthews_correlation (cola),
        # otherwise accuracy.
        metric_name = (self.optim_args.metric_name if (self.optim_args.metric_name is not None) else ('eval_' + ('pearson' if (self.data_args.task_name == 'stsb') else ('matthews_correlation' if (self.data_args.task_name == 'cola') else 'accuracy'))))
        print('Step 7: Do the actual Distillation using itrex to get the distilled student model (Bert Mini)')
        print('# ')
        if self.optim_args.distillation:
            if (not self.training_args.do_eval):
                raise ValueError('do_eval must be set to True for distillation.')
            tune_metric = metrics.Metric(name=metric_name)
            distillation_conf = DistillationConfig(metrics=tune_metric)
            model = self.trainer.distill(distillation_config=distillation_conf, teacher_model=self.teacher_model)
            self.trainer.save_model(self.training_args.output_dir)
        print('Step 8: run inference on distilled student Model for accuracy (Bert Mini)')
        print('')
        if (self.optim_args.benchmark or self.optim_args.accuracy_only or self.optim_args.distillation):
            # Reload the saved (distilled) model and evaluate it.
            model = OptimizedModel.from_pretrained(self.training_args.output_dir)
            model.eval()
            self.trainer.model = model
            results = self.trainer.evaluate()
            logger.info('metrics keys: {}'.format(results.keys()))
            bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', 'eval_pearson', 'eval_mcc', 'eval_spearmanr']
            ret = False
            for key in bert_task_acc_keys:
                if (key in results.keys()):
                    ret = True
                    throughput = results.get('eval_samples_per_second')
                    print('Batch size = ', self.training_args.per_device_eval_batch_size)
                    print('Final Eval {} Accuracy: {}'.format(key, results[key]))
                    print('Latency: {:.5f} ms'.format((1000 / throughput)))
                    print('Throughput: {:.5f} samples/sec'.format(throughput))
            assert ret, 'No metric returned, Please check inference metric!'
    def _do_quantization_aware_training(self):
        """Quantize ``self.model`` via the NLPTrainer and report eval accuracy."""
        # Default metric per task: pearson (stsb), matthews_correlation (cola),
        # otherwise accuracy.
        metric_name = (self.optim_args.metric_name if (self.optim_args.metric_name is not None) else ('eval_' + ('pearson' if (self.data_args.task_name == 'stsb') else ('matthews_correlation' if (self.data_args.task_name == 'cola') else 'accuracy'))))
        if (not self.training_args.do_eval):
            raise ValueError('do_eval must be set to True for quantization.')
        self.trainer.save_model(self.training_args.output_dir)
        if (self.optim_args.quantization_approach != 'PostTrainingDynamic'):
            # Static and aware-training quantization both need a training loop
            # (for calibration / fine-tuning).
            if (not self.training_args.do_train):
                raise ValueError('do_train must be set to True for static and aware training quantization.')
            elif (self.optim_args.quantization_approach == 'QuantizationAwareTraining'):
                # NOTE(review): these early-stopping values are assigned but never
                # used below -- presumably an EarlyStoppingCallback registration
                # was dropped; confirm against the original example script.
                early_stopping_patience = 6
                early_stopping_threshold = 0.001
        tune_metric = metrics.Metric(name=metric_name, is_relative=self.optim_args.is_relative, criterion=self.optim_args.perf_tol)
        objective = objectives.performance
        quantization_config = QuantizationConfig(approach=self.optim_args.quantization_approach, max_trials=600, metrics=[tune_metric], objectives=[objective], sampling_size=(len(self.train_dataset) // 20))
        model = self.trainer.quantize(quant_config=quantization_config)
        if (self.optim_args.benchmark or self.optim_args.accuracy_only):
            results = self.trainer.evaluate()
            logger.info('metrics keys: {}'.format(results.keys()))
            bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', 'eval_pearson', 'eval_mcc', 'eval_spearmanr']
            ret = False
            for key in bert_task_acc_keys:
                if (key in results.keys()):
                    ret = True
                    throughput = results.get('eval_samples_per_second')
                    print('Batch size = {}'.format(self.training_args.per_device_eval_batch_size))
                    print('Finally Eval {} Accuracy: {:.5f}'.format(key, results[key]))
                    print('Latency: {:.5f} ms'.format((1000 / throughput)))
                    print('Throughput: {:.5f} samples/sec'.format(throughput))
                    break
            assert ret, 'No metric returned, Please check inference metric!'
def _do_sparsity_aware_training(self):
class BertModelforLogitsOutputOnly(torch.nn.Module):
def __init__(self, model):
super(BertModelforLogitsOutputOnly, self).__init__()
self.model = model
def forward(self, *args, **kwargs):
output = self.model(*args, **kwargs)
return output['logits']
print('Step 4: Inference teacher model: get logits for usage in pruning child model.')
print('')
def dict_tensor_to_model_device(batch, model):
device = next(model.parameters()).device
for k in batch:
batch[k] = batch[k].to(device)
def get_logits(teacher_model, train_dataset, teacher_train_dataset):
logger.info('***** Inferencing teacher model to Get logits of teacher *****')
logger.info(f' Num examples = {len(train_dataset)}')
teacher_model.eval()
npy_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '{}.{}.npy'.format(self.data_args.task_name, self.optim_args.teacher_model_name_or_path.replace('/', '.')))
if os.path.exists(npy_file):
teacher_logits = [x for x in np.load(npy_file)]
else:
sampler = None
if (self.training_args.world_size > 1):
from transformers.trainer_pt_utils import ShardSampler
sampler = ShardSampler(teacher_train_dataset, batch_size=self.training_args.per_device_eval_batch_size, num_processes=self.training_args.world_size, process_index=self.training_args.process_index)
teacher_model = torch.nn.parallel.DistributedDataParallel(teacher_model, device_ids=([self.training_args.local_rank] if (self.training_args._n_gpu != 0) else None), output_device=(self.training_args.local_rank if (self.training_args._n_gpu != 0) else None))
train_dataloader = DataLoader(teacher_train_dataset, collate_fn=self.data_collator, sampler=sampler, batch_size=self.training_args.per_device_eval_batch_size)
train_dataloader = tqdm(train_dataloader, desc='Evaluating')
teacher_logits = []
for (step, batch) in enumerate(train_dataloader):
dict_tensor_to_model_device(batch, teacher_model)
outputs = teacher_model(**batch)
if (self.training_args.world_size > 1):
outputs_list = [None for i in range(self.training_args.world_size)]
torch.distributed.all_gather_object(outputs_list, outputs)
outputs = torch.concat(outputs_list, dim=0)
teacher_logits += [x for x in outputs.cpu().numpy()]
if (self.training_args.world_size > 1):
teacher_logits = teacher_logits[:len(teacher_train_dataset)]
if (self.training_args.local_rank in [(- 1), 0]):
np.save(npy_file, np.array(teacher_logits))
return train_dataset.add_column('teacher_logits', teacher_logits)
with torch.no_grad():
self.train_dataset = get_logits(BertModelforLogitsOutputOnly(self.teacher_model), self.train_dataset, self.teacher_train_dataset)
para_counter = (lambda model: sum((p.numel() for p in model.parameters())))
logger.info('***** Number of teacher model parameters: {:.2f}M *****'.format((para_counter(self.teacher_model) / (10 ** 6))))
logger.info('***** Number of student model parameters: {:.2f}M *****'.format((para_counter(self.model) / (10 ** 6))))
print('Step 6: Prune teacher model to student Model')
print('')
metric_name = (self.optim_args.metric_name if (self.optim_args.metric_name is not None) else ('eval_' + ('pearson' if (self.data_args.task_name == 'stsb') else ('matthews_correlation' if (self.data_args.task_name == 'cola') else 'accuracy'))))
print('Step 7: Do the actual Pruning using itrex to get the pruned student model')
print('# ')
if (self.optim_args.sat and self.optim_args.orchestrate_optimizations):
if (not self.training_args.do_train):
raise ValueError('do_train must be set to True for pruning.')
tune_metric = metrics.Metric(name=metric_name, is_relative=self.optim_args.is_relative, criterion=self.optim_args.perf_tol)
prune_type = ('PatternLock' if self.optim_args.pruning_approach else self.optim_args.pruning_approach)
target_sparsity_ratio = (self.optim_args.target_sparsity_ratio if self.optim_args.target_sparsity_ratio else None)
pruner_config = PrunerConfig(prune_type=prune_type, target_sparsity_ratio=target_sparsity_ratio)
pruning_conf = PruningConfig(framework='pytorch_fx', pruner_config=[pruner_config], metrics=tune_metric)
distillation_conf = DistillationConfig(framework='pytorch_fx', metrics=tune_metric)
objective = objectives.performance
quantization_conf = QuantizationConfig(approach=self.optim_args.quantization_approach, max_trials=600, metrics=[tune_metric], objectives=[objective])
conf_list = [pruning_conf, distillation_conf, quantization_conf]
model = self.trainer.orchestrate_optimizations(config_list=conf_list, teacher_model=self.teacher_model)
print('Step 8: run inference on pruned student Model for accuracy')
print('')
if (self.optim_args.benchmark or self.optim_args.accuracy_only or self.optim_args.sat):
model = OptimizedModel.from_pretrained(self.training_args.output_dir)
model.eval()
self.trainer.model = model
results = self.trainer.evaluate()
logger.info('metrics keys: {}'.format(results.keys()))
bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', 'eval_pearson', 'eval_mcc', 'eval_spearmanr']
ret = False
for key in bert_task_acc_keys:
if (key in results.keys()):
ret = True
throughput = results.get('eval_samples_per_second')
print('Batch size = ', self.training_args.per_device_eval_batch_size)
print('Final Eval {} Accuracy: {}'.format(key, results[key]))
print('Latency: {:.5f} ms'.format((1000 / throughput)))
print('Throughput: {:.5f} samples/sec'.format(throughput))
assert ret, 'No metric returned, Please check inference metric!' |
class ActorCriticValueRewardPolicy(ModuleContainer):
    """Policy container bundling reward, encoder, actor, critic and value modules."""
    # Names of the sub-module slots this container is composed of.
    CONTAINERS = ['reward', 'encoder', 'actor', 'critic', 'value']
def conv3x3x3(in_planes, out_planes, stride=1, bias=False):
    """Return a 3-D convolution with a 3x3x3 kernel and unit padding.

    At stride 1 the padding keeps the spatial dimensions unchanged.
    """
    conv_kwargs = {
        'kernel_size': 3,
        'stride': stride,
        'padding': 1,
        'bias': bias,
    }
    return nn.Conv3d(in_planes, out_planes, **conv_kwargs)
def _make_dummy_env_func(config, dataset, id):
    """Factory returning a DummyRLEnv bound to one environment index (*id*)."""
    return DummyRLEnv(config=config, dataset=dataset, env_ind=id)
def findMatchingBraces(text, ldelim=0):
    """Yield (start, end) spans of balanced multi-brace constructs in *text*.

    With ``ldelim`` > 0, match runs of at least ``ldelim`` opening braces
    against runs of closing braces only. With ``ldelim`` == 0, also match
    ``[[``/``]]`` runs. Appears to implement MediaWiki-style template/link
    matching (wikiextractor-style) -- the stack holds run lengths, with
    bracket runs stored as negative values.
    """
    if ldelim:
        reOpen = re.compile(('[{]{%d,}' % ldelim))
        reNext = re.compile('[{]{2,}|}{2,}')
    else:
        reOpen = re.compile('{{2,}|\\[{2,}')
        reNext = re.compile('{{2,}|}{2,}|\\[{2,}|]{2,}')
    cur = 0
    while True:
        # Find the next opening run at or after `cur`.
        m1 = reOpen.search(text, cur)
        if (not m1):
            return
        lmatch = (m1.end() - m1.start())
        if (m1.group()[0] == '{'):
            stack = [lmatch]
        else:
            # Bracket runs are pushed negated to distinguish them from braces.
            stack = [(- lmatch)]
        end = m1.end()
        while True:
            # Walk subsequent delimiter runs until the stack empties.
            m2 = reNext.search(text, end)
            if (not m2):
                return
            end = m2.end()
            brac = m2.group()[0]
            lmatch = (m2.end() - m2.start())
            if (brac == '{'):
                stack.append(lmatch)
            elif (brac == '}'):
                # Consume closing braces against the most recent open runs.
                while stack:
                    openCount = stack.pop()
                    if (openCount == 0):
                        continue
                    if (lmatch >= openCount):
                        lmatch -= openCount
                        if (lmatch <= 1):
                            break
                    else:
                        # Partially closed run: push back the remainder.
                        stack.append((openCount - lmatch))
                        break
                if (not stack):
                    # Fully balanced: yield the span (excluding leftover braces).
                    (yield (m1.start(), (end - lmatch)))
                    cur = end
                    break
                elif ((len(stack) == 1) and (0 < stack[0] < ldelim)):
                    # Remaining open run is too short to matter; restart after it.
                    cur = end
                    break
            elif (brac == '['):
                stack.append((- lmatch))
            else:
                # Closing brackets: only consume against bracket (negative) runs.
                while (stack and (stack[(- 1)] < 0)):
                    openCount = (- stack.pop())
                    if (lmatch >= openCount):
                        lmatch -= openCount
                        if (lmatch <= 1):
                            break
                    else:
                        stack.append((lmatch - openCount))
                        break
                if (not stack):
                    (yield (m1.start(), (end - lmatch)))
                    cur = end
                    break
        # NOTE(review): unreachable -- the inner `while True` exits only via
        # `return` or `break` (which already set `cur`); kept as-is.
        cur = end
def build_scores_break(matrix, selected, epsilon=0.0001):
    """Compute conservation-weighted letter scores per selected site.

    Args:
        matrix: array of per-site letter frequencies, indexed as
            ``matrix[site, color]``.
        selected: sorted array of site indices to render; a gap > 1 between
            consecutive sites inserts a BREAK marker entry.
        epsilon: pseudo-count added inside the log to avoid log(0).

    Returns:
        ``(all_scores, maxi_size)``: per-site lists of ``(letter, score)``
        pairs ordered by increasing frequency, plus the maximum conservation
        value encountered.
    """
    # Mark sites that are not adjacent to their predecessor (first site never breaks).
    has_breaks = ((selected[1:] - selected[:(- 1)]) > 1)
    has_breaks = np.concatenate((np.zeros(1), has_breaks), axis=0)
    # BUGFIX: epsilon was unconditionally reset to 0.0001 here, silently
    # ignoring the caller-supplied value; the parameter is now honored.
    # (Also dropped the unused n_sites/n_colors locals.)
    all_scores = []
    maxi_size = 0
    for (site, has_break) in zip(selected, has_breaks):
        if has_break:
            all_scores.append([('BREAK', 'BREAK', 'BREAK')])
        # Information content of the site relative to a uniform 21-letter
        # alphabet (20 amino acids + gap), in bits.
        conservation = (np.log2(21) + (np.log2((matrix[site] + epsilon)) * matrix[site]).sum())
        liste = []
        order_colors = np.argsort(matrix[site])
        for c in order_colors:
            liste.append((list_aa[c], (matrix[(site, c)] * conservation)))
        maxi_size = max(maxi_size, conservation)
        all_scores.append(liste)
    return (all_scores, maxi_size)
class MetadataCaptureHook(TrainingHook):
    """Training hook that captures a full TensorFlow runtime trace once.

    When the global step reaches ``params['step']`` it enables FULL_TRACE run
    options for the next session run, then writes the RunMetadata protobuf, a
    Chrome timeline JSON and a tfprof op log into the model directory. Only
    the chief worker captures, and only once.
    """
    def __init__(self, params, model_dir, run_config):
        super(MetadataCaptureHook, self).__init__(params, model_dir, run_config)
        self._active = False  # True while the next run should be fully traced
        self._done = False  # set once the single capture has completed
        self._global_step = None
        self._output_dir = os.path.abspath(self.model_dir)
    def default_params():
        # NOTE(review): defined without `self` and without @staticmethod --
        # calling it through an instance would fail; confirm how the framework
        # invokes default_params.
        return {'step': 10}
    def begin(self):
        self._global_step = tf.train.get_global_step()
    def before_run(self, _run_context):
        # Only the chief traces, and only until the capture is done.
        if ((not self.is_chief) or self._done):
            return
        if (not self._active):
            # Not armed yet: just fetch the global step.
            return tf.train.SessionRunArgs(self._global_step)
        else:
            tf.logging.info('Performing full trace on next step.')
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            return tf.train.SessionRunArgs(self._global_step, options=run_options)
    def after_run(self, _run_context, run_values):
        if ((not self.is_chief) or self._done):
            return
        step_done = run_values.results
        if self._active:
            tf.logging.info('Captured full trace at step %s', step_done)
            gfile.MakeDirs(self._output_dir)
            # Raw RunMetadata protobuf.
            trace_path = os.path.join(self._output_dir, 'run_meta')
            with gfile.GFile(trace_path, 'wb') as trace_file:
                trace_file.write(run_values.run_metadata.SerializeToString())
                tf.logging.info('Saved run_metadata to %s', trace_path)
            # Chrome-trace timeline (viewable in chrome://tracing).
            timeline_path = os.path.join(self._output_dir, 'timeline.json')
            with gfile.GFile(timeline_path, 'w') as timeline_file:
                tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
                tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
                timeline_file.write(tl_chrome)
                tf.logging.info('Saved timeline to %s', timeline_path)
            # tfprof op log for profiling tooling.
            tf.contrib.tfprof.tfprof_logger.write_op_log(graph=tf.get_default_graph(), log_dir=self._output_dir, run_meta=run_values.run_metadata)
            tf.logging.info('Saved op log to %s', self._output_dir)
            self._active = False
            self._done = True
        # Arm the trace once the configured step has been reached.
        self._active = (step_done >= self.params['step'])
class UniDiffuserTextDecoder(metaclass=DummyObject):
    """Placeholder raising a helpful error when torch/transformers are missing."""
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    # BUGFIX: these alternate constructors take `cls` but were missing the
    # @classmethod decorator, so calling them on the class passed the first
    # positional argument as `cls` (matching the standard dummy-object pattern).
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def test_geotext_extract_with_count_span_info_true(geotext):
    """Check extract() reports count, span positions and surface forms for Berlin.

    Relies on the module-level ``text`` sample and the ``geotext`` fixture
    defined elsewhere in this test module.
    """
    output = geotext.extract(input_text=text)
    assert (output['cities']['Berlin']['count'] == 2)
    assert (output['cities']['Berlin']['span_info'] == [(0, 6), (43, 49)])
    assert (output['cities']['Berlin']['found_as'] == ['Berlin', 'Berlin'])
def seq_start(tag='', anchor='', anchor_id=0, style='_'):
    """Build the emit/handle event lists for the start of a sequence.

    Returns a dict with 'emit' (events to produce) and 'handle' (expected
    handler invocations); optional tag and anchor prepend their own events.
    """
    emit_events = []
    handle_events = []
    if tag:
        emit_events.append('VerbatimTag("%s")' % encode(tag))
    if anchor:
        emit_events.append('Anchor("%s")' % encode(anchor))
        handle_events.append('OnAnchor(_, "%s")' % encode(anchor))
    # An untagged sequence is reported with the '?' placeholder tag.
    out_tag = encode(tag) if tag else '?'
    emit_events.append('BeginSeq')
    handle_events.append('OnSequenceStart(_, "%s", %s, %s)' % (out_tag, anchor_id, style))
    return {'emit': emit_events, 'handle': handle_events}
def get_tokenizer(config):
    """Resolve the tokenizer and its maximum token length from *config*.

    Only the 'Char' tokenizer is recognized; any other name yields the
    empty-string/zero fallback pair.
    """
    if config.tokenizer_name == 'Char':
        return (Char(), config.max_num_char)
    return ('', 0)
class PegasusTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for PEGASUS.

    Ids ``0 .. offset-1`` are reserved for special tokens (pad, eos, the two
    mask tokens and the ``<unk_i>`` fillers); SentencePiece piece ids are
    shifted up by ``offset``.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    # Number of reserved special-token ids prepended before the SentencePiece vocab.
    offset = 103
    # BUGFIX: removed a duplicate `vocab_files_names = VOCAB_FILES_NAMES`
    # assignment that immediately re-assigned the same value.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['attention_mask']
    def __init__(self, vocab_file, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, **kwargs):
        if (additional_special_tokens is not None):
            assert isinstance(additional_special_tokens, list), f'additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}'
            # Ensure mask_token_sent is present, then fill the remaining
            # reserved ids with <unk_i> placeholders.
            additional_special_tokens_extended = (([mask_token_sent] + additional_special_tokens) if (mask_token_sent not in additional_special_tokens) else additional_special_tokens)
            additional_special_tokens_extended += [f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), (self.offset - 1))]
            if (len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended)):
                raise ValueError(f'Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent]
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        super().__init__(eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
        self.mask_token_sent = mask_token_sent
        # Fixed mapping for the reserved leading ids.
        self.encoder: Dict[(int, str)] = {0: self.pad_token, 1: self.eos_token, 2: self.mask_token_sent, 3: self.mask_token}
        self.encoder.update({(i + 3): additional_special_tokens[i] for i in range(1, (self.offset - 1))})
        self.decoder: Dict[(str, int)] = {v: k for (k, v) in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        # BUGFIX: restored @property -- get_vocab() below iterates
        # range(self.vocab_size), which fails if this is a bound method.
        return (len(self.sp_model) + self.offset)
    def get_vocab(self) -> Dict[(str, int)]:
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        """Split *text* into SentencePiece pieces (optionally with sampling)."""
        if (not sample):
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        return pieces
    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id (reserved ids first, then sp ids + offset)."""
        if (token in self.decoder):
            return self.decoder[token]
        elif (token in self.added_tokens_decoder):
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return (sp_id + self.offset)
    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token string (inverse of _convert_token_to_id)."""
        if (index in self.encoder):
            return self.encoder[index]
        elif (index in self.added_tokens_encoder):
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece((index - self.offset))
            return token
    def convert_tokens_to_string(self, tokens):
        """Join SentencePiece pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def num_special_tokens_to_add(self, pair=False):
        # Only the trailing EOS is added by build_inputs_with_special_tokens.
        return 1
    def _special_token_mask(self, seq):
        # 1 for special-token ids (except unk), 0 for regular tokens.
        all_special_ids = set(self.all_special_ids)
        all_special_ids.remove(self.unk_token_id)
        assert (all_special_ids == set(range((len(self.additional_special_tokens) + 3)))), f'There should be 3 special tokens: mask_token, pad_token, and eos_token + {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}'
        return [(1 if (x in all_special_ids) else 0) for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask marking special tokens (the trailing 1 is the EOS)."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif (token_ids_1 is None):
            return (self._special_token_mask(token_ids_0) + [1])
        else:
            return (self._special_token_mask((token_ids_0 + token_ids_1)) + [1])
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS to a single sequence, or to the concatenation of a pair."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.eos_token_id])
        return ((token_ids_0 + token_ids_1) + [self.eos_token_id])
    # BUGFIX: this was a bare statement whose returned decorator was discarded;
    # applied as a decorator so the shared docstring is attached to the method.
    @_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, return_tensors: str=None, truncation=True, padding='longest', **unused) -> BatchEncoding:
        if ('' in src_texts):
            raise ValueError(f'found empty string in src_texts: {src_texts}')
        tokenizer_kwargs = dict(add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding)
        model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is not None):
            tokenizer_kwargs['max_length'] = max_target_length
        labels: BatchEncoding = self(tgt_texts, **tokenizer_kwargs)['input_ids']
        model_inputs['labels'] = labels
        return model_inputs
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the SentencePiece model file into *save_directory*."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
# NOTE(review): `_grad()` looks like a mangled decorator — almost certainly
# `@torch.no_grad()` in the original source; confirm before changing.
_grad()
def pose_evaluate(model, matcher, pose_evaluator, data_loader, image_set, bbox_mode, rotation_mode, device, output_dir, epoch=None):
    """Evaluate relative-pose predictions over a validation set.

    Matches model outputs to ground truth with `matcher`, accumulates
    per-class [R|t] poses into `pose_evaluator`, then runs its ADD / ADD-S /
    ADD(-S) and average translation/rotation error computations, writing
    results under `<output_dir>/eval_<image_set>_<bbox_mode>[_<epoch>]/`.
    """
    model.eval()
    matcher.eval()
    pose_evaluator.reset()
    # Build the evaluation output directory (epoch-suffixed when given).
    if (epoch is not None):
        output_eval_dir = (((((((output_dir + '/eval_') + image_set) + '_') + bbox_mode) + '_') + str(epoch)) + '/')
    else:
        output_eval_dir = (((((output_dir + '/eval_') + image_set) + '_') + bbox_mode) + '/')
    Path(output_eval_dir).mkdir(parents=True, exist_ok=True)
    print('Process validation dataset:')
    n_images = len(data_loader.dataset.ids)
    bs = data_loader.batch_size
    start_time = time.time()
    processed_images = 0
    for (samples, targets) in data_loader:
        batch_start_time = time.time()
        samples = samples.to(device)
        targets = [{k: v.to(device) for (k, v) in t.items()} for t in targets]
        (outputs, n_boxes_per_sample) = model(samples, targets)
        # Keep only the final head's outputs for matching.
        outputs_without_aux = {k: v for (k, v) in outputs.items() if ((k != 'aux_outputs') and (k != 'enc_outputs'))}
        indices = matcher(outputs_without_aux, targets, n_boxes_per_sample)
        idx = get_src_permutation_idx(indices)
        pred_translations = outputs_without_aux['pred_translation'][idx].detach().cpu().numpy()
        pred_rotations = outputs_without_aux['pred_rotation'][idx].detach().cpu().numpy()
        # Quaternion outputs are converted to rotation matrices for evaluation.
        if (rotation_mode in ['quat', 'silho_quat']):
            pred_rotations = quat2rot(pred_rotations)
        # Gather the matched ground-truth poses/metadata in matcher order.
        tgt_translations = torch.cat([t['relative_position'][i] for (t, (_, i)) in zip(targets, indices)], dim=0).detach().cpu().numpy()
        tgt_rotations = torch.cat([t['relative_rotation'][i] for (t, (_, i)) in zip(targets, indices)], dim=0).detach().cpu().numpy()
        obj_classes_idx = torch.cat([t['labels'][i] for (t, (_, i)) in zip(targets, indices)], dim=0).detach().cpu().numpy()
        intrinsics = torch.cat([t['intrinsics'][i] for (t, (_, i)) in zip(targets, indices)], dim=0).detach().cpu().numpy()
        img_files = [data_loader.dataset.coco.loadImgs(t['image_id'].item())[0]['file_name'] for (t, (_, i)) in zip(targets, indices) for _ in range(0, len(i))]
        # Accumulate per-class 3x4 [R|t] matrices into the evaluator.
        for (cls_idx, img_file, intrinsic, pred_translation, pred_rotation, tgt_translation, tgt_rotation) in zip(obj_classes_idx, img_files, intrinsics, pred_translations, pred_rotations, tgt_translations, tgt_rotations):
            # Labels appear to be 1-based (note the -1) — confirm with the dataset.
            cls = pose_evaluator.classes[(cls_idx - 1)]
            pose_evaluator.poses_pred[cls].append(np.concatenate((pred_rotation, pred_translation.reshape(3, 1)), axis=1))
            pose_evaluator.poses_gt[cls].append(np.concatenate((tgt_rotation, tgt_translation.reshape(3, 1)), axis=1))
            pose_evaluator.poses_img[cls].append(img_file)
            pose_evaluator.num[cls] += 1
            pose_evaluator.camera_intrinsics[cls].append(intrinsic)
        # Progress / ETA bookkeeping.
        batch_total_time = (time.time() - batch_start_time)
        batch_total_time_str = str(datetime.timedelta(seconds=int(batch_total_time)))
        processed_images = (processed_images + len(targets))
        remaining_images = (n_images - processed_images)
        remaining_batches = (remaining_images / bs)
        eta = (batch_total_time * remaining_batches)
        eta_str = str(datetime.timedelta(seconds=int(eta)))
        print('Processed {}/{} \t Batch Time: {} \t ETA: {}'.format(processed_images, n_images, batch_total_time_str, eta_str))
    total_time = (time.time() - start_time)
    time_per_img = (total_time / n_images)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    time_per_img_str = str(datetime.timedelta(seconds=int(time_per_img)))
    print('Network Processing Time\nTotal Time: {}\t\tImages: {}\t\ts/img: {}'.format(total_time_str, n_images, time_per_img_str))
    print('Start results evaluation')
    start_time = time.time()
    print('Start Calculating ADD')
    pose_evaluator.evaluate_pose_add(output_eval_dir)
    print('Start Calculating ADD-S')
    pose_evaluator.evaluate_pose_adi(output_eval_dir)
    print('Start Calculating ADD(-S)')
    pose_evaluator.evaluate_pose_adds(output_eval_dir)
    print('Start Calculating Average Translation Error')
    pose_evaluator.calculate_class_avg_translation_error(output_eval_dir)
    print('Start Calculating Average Rotation Error')
    pose_evaluator.calculate_class_avg_rotation_error(output_eval_dir)
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Evaluation time: {}'.format(total_time_str))
    return
def build_dataset(data_path, config, is_train, vocab=None, load_vocab=None):
    """Build a SummDataset from `data_path`.

    Training mode builds (or loads) the vocabulary and returns
    ``(dataset, vocab)``; evaluation mode requires a vocab and returns only
    the dataset.
    """
    data_cfg = config.data
    if not is_train:
        assert vocab is not None
        src_txt, tgt_txt = load_dataset(data_path)
        src_ds = TextDataset(src_txt, data_cfg.src_max_test)
        tgt_ds = TextDataset(tgt_txt, data_cfg.tgt_max_test)
        return SummDataset(src=src_ds, tgt=tgt_ds, vocab=vocab)
    src_txt, tgt_txt = load_dataset(data_path)
    src_ds = TextDataset(src_txt, data_cfg.src_max_train)
    tgt_ds = TextDataset(tgt_txt, data_cfg.tgt_max_train)
    # Load a pre-built vocab when provided, otherwise build one from the sources.
    if load_vocab is not None:
        vocab = Vocab.from_json(load_vocab)
    else:
        vocab = src_ds.build_vocab(vocab_size=data_cfg.vocab_size, min_freq=data_cfg.vocab_min_freq, specials=[PAD_TOKEN, UNK_TOKEN, START_DECODING, STOP_DECODING])
    return (SummDataset(src=src_ds, tgt=tgt_ds, vocab=vocab), vocab)
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Accumulate per-head attention entropy and head-importance scores.

    Importance is the summed |d(loss)/d(head_mask)| over the eval set,
    optionally normalized per layer and globally; both tensors are saved as
    .npy files under `args.output_dir`.  Returns
    ``(attn_entropy, head_importance, preds, labels)``.
    """
    (n_layers, n_heads) = (model.config.num_hidden_layers, model.config.num_attention_heads)
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if (head_mask is None):
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    # Gradients w.r.t. the mask provide the importance signal.
    head_mask.requires_grad_(requires_grad=True)
    # A model whose heads were actually pruned no longer accepts a full mask.
    if actually_pruned:
        head_mask = None
    preds = None
    labels = None
    tot_tokens = 0.0
    for (step, inputs) in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))):
        for (k, v) in inputs.items():
            inputs[k] = v.to(args.device)
        outputs = model(**inputs, head_mask=head_mask)
        (loss, logits, all_attentions) = (outputs[0], outputs[1], outputs[(- 1)])
        # Backprop populates head_mask.grad for the importance accumulation.
        loss.backward()
        if compute_entropy:
            for (layer, attn) in enumerate(all_attentions):
                # Mask out padding positions before accumulating entropy.
                masked_entropy = (entropy(attn.detach()) * inputs['attention_mask'].float().unsqueeze(1))
                attn_entropy[layer] += masked_entropy.sum((- 1)).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        if (preds is None):
            preds = logits.detach().cpu().numpy()
            labels = inputs['labels'].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            labels = np.append(labels, inputs['labels'].detach().cpu().numpy(), axis=0)
        tot_tokens += inputs['attention_mask'].float().detach().sum().data
    # Normalize accumulators by the number of real (unpadded) tokens.
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    if (not args.dont_normalize_importance_by_layer):
        # L2-normalize importance within each layer.
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum((- 1)), (1 / exponent))
        head_importance /= (norm_by_layer.unsqueeze((- 1)) + 1e-20)
    if (not args.dont_normalize_global_importance):
        # Min-max scale importance to [0, 1] across all heads.
        head_importance = ((head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()))
    np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy())
    np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy())
    logger.info('Attention entropies')
    print_2d_tensor(attn_entropy)
    logger.info('Head importance scores')
    print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view((- 1)).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return (attn_entropy, head_importance, preds, labels)
class PerceptualLossVgg16ExDark(nn.Module):
    """VGG16-based perceptual (feature-matching) L1 loss.

    Compares features of `x` and `y` extracted at `indices` of the VGG
    network, each term weighted by `weights`.  Ground-truth features are
    detached so gradients only flow through `x`.
    """

    def __init__(self, vgg=None, load_model=None, gpu_ids=None, weights=None, indices=None, normalize=True):
        super(PerceptualLossVgg16ExDark, self).__init__()
        # Fix: `gpu_ids=[0]` was a mutable default argument; the effective
        # default behavior ([0]) is preserved.
        if gpu_ids is None:
            gpu_ids = [0]
        if (vgg is None):
            self.vgg = Vgg16ExDark(load_model)
        else:
            self.vgg = vgg
        self.vgg = nn.DataParallel(self.vgg, device_ids=gpu_ids).cuda()
        self.criterion = nn.L1Loss()
        self.weights = (weights or [1.0, 1.0, 1.0, 1.0])
        self.indices = (indices or [3, 8, 15, 22])
        # Optional ImageNet mean/std normalization applied to both inputs.
        if normalize:
            self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda()
        else:
            self.normalize = None

    def forward(self, x, y):
        """Return the weighted sum of L1 distances between VGG features."""
        if (self.normalize is not None):
            x = self.normalize(x)
            y = self.normalize(y)
        (x_vgg, y_vgg) = (self.vgg(x, self.indices), self.vgg(y, self.indices))
        loss = 0
        for i in range(len(x_vgg)):
            # Detach the target features: no gradient through `y`.
            loss += (self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()))
        return loss
def identity_inference(images, keep_probability, phase_train=True, bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    """Run Inception-ResNet-v1 on `images` under a shared slim arg scope.

    All conv/fc layers get Xavier init, L2 regularization of `weight_decay`,
    and batch norm with the params below.  Returns whatever
    `inception_resnet_v1` produces.
    """
    # Batch-norm configuration shared by every layer in the scope.
    batch_norm_params = {'decay': 0.995, 'epsilon': 0.001, 'updates_collections': None, 'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}
    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_initializer=slim.initializers.xavier_initializer(), weights_regularizer=slim.l2_regularizer(weight_decay), normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train, dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
class BoxOnClWireTop(MultiBox, BoxOnClWire):
    """Top segment of a multi-row box drawn over a classical wire."""

    def __init__(self, label='', top_connect=None, wire_label=''):
        super().__init__(label)
        self.wire_label = wire_label
        self.mid_content = ''
        self.bot_format = ' %s '
        self.top_connect = top_connect or ''
        # The bottom edge is open (continued by the rows below).
        self.bot_pad = ' '
        self.bot_connect = ' '
class STrack(BaseTrack):
    """Single tracked object with a Kalman-filtered (x, y, aspect, h) state.

    NOTE(review): the `@property`/`@staticmethod` decorators restored below
    were clearly stripped from this block — `self.tlwh.copy()` in `tlbr` and
    the two-argument call `self.tlwh_to_xyah(self._tlwh)` only work with
    them in place (matching upstream ByteTrack).
    """
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score):
        # `np.float` was removed in NumPy 1.24; builtin `float` is equivalent.
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        (self.mean, self.covariance) = (None, None)
        self.is_activated = False
        self.score = score
        self.tracklet_len = 0

    def predict(self):
        """Advance this track one step with its own Kalman filter."""
        mean_state = self.mean.copy()
        if (self.state != TrackState.Tracked):
            # Zero the height velocity for non-tracked states.
            mean_state[7] = 0
        (self.mean, self.covariance) = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Vectorized Kalman predict over a list of tracks (in place)."""
        if (len(stracks) > 0):
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for (i, st) in enumerate(stracks):
                if (st.state != TrackState.Tracked):
                    multi_mean[i][7] = 0
            (multi_mean, multi_covariance) = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for (i, (mean, cov)) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet from the stored detection box."""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        (self.mean, self.covariance) = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # Only first-frame tracks are immediately considered activated.
        if (frame_id == 1):
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        """Revive a lost track with a fresh detection."""
        (self.mean, self.covariance) = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh))
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score

    def update(self, new_track, frame_id):
        """Update a matched track with the new detection."""
        self.frame_id = frame_id
        self.tracklet_len += 1
        new_tlwh = new_track.tlwh
        (self.mean, self.covariance) = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True
        self.score = new_track.score

    @property
    def tlwh(self):
        """Current box as (top-left x, top-left y, width, height)."""
        if (self.mean is None):
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= (ret[2:] / 2)
        return ret

    @property
    def tlbr(self):
        """Current box as (min x, min y, max x, max y)."""
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    def tlwh_to_xyah(tlwh):
        """(x, y, w, h) -> (center x, center y, aspect ratio, height)."""
        ret = np.asarray(tlwh).copy()
        ret[:2] += (ret[2:] / 2)
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    def tlbr_to_tlwh(tlbr):
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    def tlwh_to_tlbr(tlwh):
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
def read_image1():
    """Load the bundled Grace Hopper test JPEG as a 1x3x224x224 tensor."""
    assets_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg')
    image = Image.open(os.path.join(assets_dir, 'grace_hopper_517x606.jpg'))
    image = image.resize((224, 224))
    tensor = F.to_tensor(image)
    return tensor.view(1, 3, 224, 224)
def get_info(I):
    """Build the size, tensor, sampling grid, and homography warper for image I."""
    w, h = I.size
    # Normalized [-1, 1] sampling coordinates, broadcast to (1, h, w, 1).
    ys = torch.linspace(-1, 1, steps=h).view(1, -1, 1, 1).expand(1, h, w, 1)
    xs = torch.linspace(-1, 1, steps=w).view(1, 1, -1, 1).expand(1, h, w, 1)
    grid = torch.cat((xs, ys), dim=3).cuda()
    tensor = transforms.ToTensor()(I).unsqueeze(0).cuda()
    warper = tgm.HomographyWarper(h, w)
    return (w, h, tensor, grid, warper)
def sepreresnet1202_cifar10(num_classes=10, **kwargs):
    """SE-PreResNet-1202 model for CIFAR-10 (non-bottleneck blocks)."""
    return get_sepreresnet_cifar(
        num_classes=num_classes,
        blocks=1202,
        bottleneck=False,
        model_name='sepreresnet1202_cifar10',
        **kwargs)
# NOTE(review): `_model` below looks like a mangled decorator (probably
# `@register_model` from the model registry); confirm against upstream.
_model
def seresnet18(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build an SE-ResNet-18 (SENet with [2, 2, 2, 2] basic blocks)."""
    default_cfg = default_cfgs['seresnet18']
    model = SENet(SEResNetBlock, [2, 2, 2, 2], groups=1, reduction=16, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    # Optionally load pretrained weights matching the default config.
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
class MockStub(object):
    """Test double for the optimizer service stub."""

    def optimize(self, request):
        """Return a canned OptimizeResponse: 5 workers and 2 PS pods."""
        res = brain_pb2.OptimizeResponse()
        res.job_optimize_plans.add()
        plan = res.job_optimize_plans[0]
        group_resources = plan.resource.task_group_resources
        # Both groups share memory/cpu settings; only the count differs.
        counts = {NodeType.WORKER: 5, NodeType.PS: 2}
        for node_type, count in counts.items():
            group = group_resources[node_type]
            group.count = count
            group.resource.memory = (_MEMORY * MemoryUnit.MB)
            group.resource.cpu = 16
        plan.resource.pod_resources['ps-0'].memory = (_MEMORY * MemoryUnit.MB)
        return res
def set_disable_prefix(disable_prefix):
    """Set the module-level `_disable_prefix` flag."""
    global _disable_prefix
    _disable_prefix = disable_prefix
def test_dfa_models(model_architectures):
    """Smoke-test every listed DFA architecture with its input size."""
    for arch, input_size in model_architectures:
        model_cls = models.dfa.__dict__[arch]
        check_model(model_cls, input_size)
class Accuracy(base.Metric):
    """Thresholded accuracy metric with optional output activation."""

    def __init__(self, threshold=0.5, activation=None, ignore_channels=None, **kwargs):
        super().__init__(**kwargs)
        self.threshold = threshold
        self.activation = Activation(activation)
        self.ignore_channels = ignore_channels

    def forward(self, y_pr, y_gt):
        # Metrics never need gradients.
        with torch.no_grad():
            activated = self.activation(y_pr)
            return F.accuracy(activated, y_gt, threshold=self.threshold, ignore_channels=self.ignore_channels)
class Net(torch.nn.Module):
    """Graph-GRU encoder-decoder for multi-step sequence forecasting.

    Encodes `seq_len` input graphs, then decodes `horizon` steps; during
    training, scheduled sampling (inverse-sigmoid curriculum) optionally
    feeds ground truth back into the decoder.
    """

    def __init__(self, cfg):
        super(Net, self).__init__()
        self.num_nodes = cfg['model']['num_nodes']
        self.num_output_dim = cfg['model']['output_dim']
        self.num_units = cfg['model']['rnn_units']
        self.num_input_dim = cfg['model']['input_dim']
        self.num_rnn_layers = cfg['model']['num_rnn_layers']
        self.cfg = cfg
        self.seq_len = cfg['model']['seq_len']
        self.horizon = cfg['model']['horizon']
        self.use_curriculum_learning = self.cfg['model']['use_curriculum_learning']
        self.cl_decay_steps = torch.FloatTensor(data=[self.cfg['model']['cl_decay_steps']])
        self.use_go = self.cfg['model'].get('use_go', True)
        self.fusion = self.cfg['model'].get('fusion', 'concat')
        self.dropout_type = cfg['model'].get('dropout_type', None)
        self.dropout_prob = cfg['model'].get('dropout_prob', 0.0)
        self.ar_alpha = cfg['model'].get('ar_alpha', 0)
        self.tar_beta = cfg['model'].get('tar_beta', 0)
        self.use_input = cfg['model'].get('use_input', True)
        self.num_relations = cfg['model'].get('num_relations', 3)
        self.K = cfg['model'].get('K', 3)
        self.num_bases = cfg['model'].get('num_bases', 3)
        act = cfg['model'].get('activation', 'relu')
        act_dict = {'relu': F.relu, 'selu': F.selu, 'relu6': F.relu6, 'elu': F.elu, 'celu': F.celu, 'leaky_relu': F.leaky_relu}
        self.mediate_activation = act_dict[act]
        self.global_fusion = cfg['model'].get('global_fusion', False)
        # First cell maps input_dim -> units; the rest map units -> units.
        self.encoder_cells = nn.ModuleList(([GGRUCell(self.num_input_dim, self.num_units, self.dropout_type, self.dropout_prob, self.num_relations, num_bases=self.num_bases, K=self.K, num_nodes=self.num_nodes, global_fusion=self.global_fusion)] + [GGRUCell(self.num_units, self.num_units, self.dropout_type, self.dropout_prob, self.num_relations, num_bases=self.num_bases, K=self.K, num_nodes=self.num_nodes, global_fusion=self.global_fusion) for _ in range((self.num_rnn_layers - 1))]))
        # NOTE(review): the deeper decoder cells pass `self.K` positionally
        # where the encoder passes `num_bases=...`/`K=...` keywords — this may
        # pass K in the num_bases slot; confirm against GGRUCell's signature.
        self.decoder_cells = nn.ModuleList(([GGRUCell(self.num_input_dim, self.num_units, self.dropout_type, self.dropout_prob, self.num_relations, num_bases=self.num_bases, K=self.K, num_nodes=self.num_nodes, global_fusion=self.global_fusion)] + [GGRUCell(self.num_units, self.num_units, self.dropout_type, self.dropout_prob, self.num_relations, self.K, num_nodes=self.num_nodes, global_fusion=self.global_fusion) for _ in range((self.num_rnn_layers - 1))]))
        self.output_type = cfg['model'].get('output_type', 'fc')
        if (not (self.fusion == 'concat')):
            raise NotImplementedError(self.fusion)
        if (self.output_type == 'fc'):
            self.output_layer = nn.Linear(self.num_units, self.num_output_dim)
        self.global_step = 0

    @staticmethod
    def _compute_sampling_threshold(step, k):
        # Fix: @staticmethod restored — both schedulers are invoked as
        # `self.<name>(step, k)` with exactly two arguments, which fails for
        # a bound (non-static) two-parameter method.
        return (k / (k + math.exp((step / k))))

    @staticmethod
    def inverse_sigmoid_scheduler_sampling(step, k):
        """Inverse-sigmoid decay of the teacher-forcing probability."""
        return (k / (k + math.exp((step / k))))

    def encode(self, sequences, edge_index, edge_attr=None):
        """Run the stacked encoder over the input sequence of graph batches."""
        hidden_states = ([None] * len(self.encoder_cells))
        outputs = []
        for (t, batch) in enumerate(sequences):
            cur_input = batch.x
            for (i, rnn_cell) in enumerate(self.encoder_cells):
                cur_h = hidden_states[i]
                (cur_out, cur_h) = rnn_cell(inputs=cur_input, edge_index=edge_index, edge_attr=edge_attr, hidden=cur_h)
                hidden_states[i] = cur_h
                # Activated output of each layer feeds the next layer.
                cur_input = self.mediate_activation(cur_out)
            outputs.append(cur_out)
        return (outputs, hidden_states)

    def forward(self, sequences):
        """Encode the inputs and decode `horizon` steps of predictions."""
        edge_index = sequences[0].edge_index.detach()
        edge_attr = sequences[0].edge_attr.detach()
        (outputs, encoder_hiddens) = self.encode(sequences, edge_index=edge_index, edge_attr=edge_attr)
        predictions = []
        decoder_hiddens = encoder_hiddens
        # Decoding starts from a zero "GO" token.
        GO = torch.zeros(decoder_hiddens[0].size()[0], self.num_output_dim, dtype=encoder_hiddens[0].dtype, device=encoder_hiddens[0].device)
        decoder_input = GO
        for t in range(self.horizon):
            for (i, rnn_cell) in enumerate(self.decoder_cells):
                cur_h = decoder_hiddens[i]
                (cur_out, cur_h) = rnn_cell(inputs=decoder_input, edge_index=edge_index, edge_attr=edge_attr, hidden=cur_h)
                decoder_hiddens[i] = cur_h
                decoder_input = self.mediate_activation(cur_out)
            out = cur_out.reshape((- 1), self.num_units)
            out = self.output_layer(out).view((- 1), self.num_nodes, self.num_output_dim)
            predictions.append(out)
            # Scheduled sampling: sometimes feed ground truth during training.
            if (self.training and self.use_curriculum_learning):
                c = random.uniform(0, 1)
                T = self.inverse_sigmoid_scheduler_sampling(self.global_step, self.cl_decay_steps)
                use_truth_sequence = (True if (c < T) else False)
            else:
                use_truth_sequence = False
            if use_truth_sequence:
                decoder_input = sequences[t].y
            else:
                # Detach to avoid backprop through the fed-back prediction.
                decoder_input = out.detach().view((- 1), self.num_output_dim)
            if (not self.use_input):
                decoder_input = GO.detach()
        if self.training:
            self.global_step += 1
        # (batch, horizon, nodes, output_dim)
        return torch.stack(predictions).transpose(0, 1)
def get_model_config(model_name, model_version=None):
    """Load the JSON config for a model, merging inherited train settings.

    Returns an edict, or None when the config file does not exist.
    """
    if model_version is not None:
        config_fname = f'config_{model_name}_{model_version}.json'
    else:
        config_fname = f'config_{model_name}.json'
    config_file = os.path.join(ROOT, 'models', model_name, config_fname)
    if not os.path.exists(config_file):
        return None
    with open(config_file, 'r') as f:
        config = edict(json.load(f))
    # Pull missing train keys from the parent config when `inherit` is set.
    if 'inherit' in config.train and config.train.inherit is not None:
        parent_train = get_model_config(config.train['inherit']).train
        for key, value in parent_train.items():
            if key not in config.train:
                config.train[key] = value
    return edict(config)
def set_seed(seed, cuda=True):
    """Seed Python, NumPy, and PyTorch RNGs for reproducibility.

    Fix: the `cuda` flag was previously accepted but ignored; it now guards
    the CUDA seeding and the cuDNN determinism settings (default True keeps
    the old behavior).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
        torch.cuda.manual_seed(seed)
        # Trade cuDNN autotuning speed for deterministic kernels.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
class IterationBasedBatchSampler(BatchSampler):
    """Re-iterates a wrapped BatchSampler until `num_iterations` batches.

    `start_iter` lets training resume mid-epoch: the count starts there
    instead of at zero.
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.sampler = self.batch_sampler.sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        step = self.start_iter
        # Loop over the underlying sampler as many times as needed.
        while step <= self.num_iterations:
            for batch in self.batch_sampler:
                step += 1
                if step > self.num_iterations:
                    return
                yield batch

    def __len__(self):
        return self.num_iterations
def lidar2camera(point_cloud, rotationMat=rotationMat, translationMat=translationMat, file_name='merge', data_index=1):
    """Project a lidar point cloud into the image plane and save a dot image."""
    img = np.zeros((720, 1280, 3), np.uint8)
    # Rigid transform into the camera frame.
    trans_pc = np.dot(rotationMat, point_cloud) + np.tile(translationMat, (point_cloud.shape[1], 1)).T
    # Pinhole projection with intrinsics (fx, fy, x0, y0).
    image_uv = np.array([trans_pc[0] * fx / trans_pc[2] + x0, trans_pc[1] * fy / trans_pc[2] + y0])
    for col in range(image_uv.shape[1]):
        u, v = int(image_uv[0][col]), int(image_uv[1][col])
        # Draw only points that land inside the image bounds.
        if 0 <= u <= width and 0 <= v <= height:
            cv2.circle(img, (u, v), 2, (255, 255, 255), 8)
    cv2.imwrite('/media/wang/DATASET/label' + str(data_index) + '/' + file_name + '.png', img)
class subData(object):
    """One sub-dataset used when mixing several training sources.

    Loads the annotation json for `data_name`, drops empty tracks/videos,
    and builds a shuffled list of `num_use` global sample indices (`pick`)
    offset by `start` within the merged dataset.
    """

    def __init__(self, cfg, data_name, start):
        self.data_name = data_name
        # Global index offset of this sub-dataset inside the merged dataset.
        self.start = start
        info = cfg.OCEAN.DATASET[data_name]
        self.frame_range = info.RANGE
        self.num_use = info.USE
        self.root = info.PATH
        with open(info.ANNOTATION) as fin:
            self.labels = json.load(fin)
            self._clean()
            self.num = len(self.labels)
        self._shuffle()

    def _clean(self):
        """Drop tracks with no frames and videos with no tracks."""
        to_del = []
        for video in self.labels:
            for track in self.labels[video]:
                frames = self.labels[video][track]
                frames = list(map(int, frames.keys()))
                frames.sort()
                # Cache sorted integer frame ids on the track entry.
                self.labels[video][track]['frames'] = frames
                if (len(frames) <= 0):
                    print('warning {}/{} has no frames.'.format(video, track))
                    to_del.append((video, track))
        for (video, track) in to_del:
            try:
                del self.labels[video][track]
            except:
                pass
        to_del = []
        # Known-bad video, always removed for this dataset.
        if (self.data_name == 'YTB'):
            to_del.append('train/1/YyE0clBPamU')
        print(self.data_name)
        for video in self.labels:
            if (len(self.labels[video]) <= 0):
                print('warning {} has no tracks'.format(video))
                to_del.append(video)
        for video in to_del:
            try:
                del self.labels[video]
            except:
                pass
        self.videos = list(self.labels.keys())
        print('{} loaded.'.format(self.data_name))

    def _shuffle(self):
        """Build `pick`: a shuffled list of `num_use` global sample indices."""
        lists = list(range(self.start, (self.start + self.num)))
        m = 0
        pick = []
        # Repeat-and-shuffle until at least `num_use` indices are gathered.
        while (m < self.num_use):
            sample_random.shuffle(lists)
            pick += lists
            m += self.num
        self.pick = pick[:self.num_use]
        return self.pick

    def _get_image_anno(self, video, track, frame):
        """Return (crop image path, annotation) for one frame of a track."""
        frame = '{:06d}'.format(frame)
        image_path = join(self.root, video, '{}.{}.x.jpg'.format(frame, track))
        image_anno = self.labels[video][track][frame]
        return (image_path, image_anno)

    def _get_pairs(self, index, data_name):
        """Sample a (template, search) frame pair from video `index`."""
        video_name = self.videos[index]
        video = self.labels[video_name]
        track = random.choice(list(video.keys()))
        track_info = video[track]
        try:
            frames = track_info['frames']
        except:
            frames = list(track_info.keys())
        template_frame = random.randint(0, (len(frames) - 1))
        # The search frame is drawn within +/- frame_range of the template.
        left = max((template_frame - self.frame_range), 0)
        right = (min((template_frame + self.frame_range), (len(frames) - 1)) + 1)
        search_range = frames[left:right]
        template_frame = int(frames[template_frame])
        search_frame = int(random.choice(search_range))
        return (self._get_image_anno(video_name, track, template_frame), self._get_image_anno(video_name, track, search_frame))

    def _get_negative_target(self, index=(- 1)):
        """Sample a random frame from a (random) video, for negative pairs."""
        if (index == (- 1)):
            index = random.randint(0, (self.num - 1))
        video_name = self.videos[index]
        video = self.labels[video_name]
        track = random.choice(list(video.keys()))
        track_info = video[track]
        frames = track_info['frames']
        frame = random.choice(frames)
        return self._get_image_anno(video_name, track, frame)
class Motors():
    """First-order motor model: pwm -> throttle -> rpm -> thrust and torque.

    Forces and torques are applied to the vehicle's motor links via the
    PyBullet client each physics step.
    """

    def __init__(self, p: bullet_client.BulletClient, physics_period: float, np_random: np.random.RandomState, uav_id: (np.ndarray | int), motor_ids: (np.ndarray | list[int]), tau: np.ndarray, max_rpm: np.ndarray, thrust_coef: np.ndarray, torque_coef: np.ndarray, thrust_unit: np.ndarray, noise_ratio: np.ndarray):
        self.p = p
        self.physics_period = physics_period
        self.np_random = np_random
        self.uav_id = uav_id
        self.motor_ids = motor_ids
        self.num_motors = len(motor_ids)
        assert (tau.shape == (self.num_motors,))
        assert (max_rpm.shape == (self.num_motors,))
        assert (thrust_coef.shape == (self.num_motors,))
        assert (torque_coef.shape == (self.num_motors,))
        assert (thrust_unit.shape == (self.num_motors, 3))
        assert (noise_ratio.shape == (self.num_motors,))
        assert all((tau >= (0.0 / physics_period))), f'Setting `tau = 1 / physics_period` is equivalent to 0, 0 is not a valid option, got {tau}.'
        self.tau = tau
        self.max_rpm = max_rpm
        # Coefficients are expanded so they broadcast against (num_motors, 3, 1).
        self.thrust_coef = np.expand_dims(thrust_coef, axis=(- 1))
        self.torque_coef = np.expand_dims(torque_coef, axis=(- 1))
        self.thrust_unit = np.expand_dims(thrust_unit, axis=(- 1))
        self.noise_ratio = noise_ratio

    def reset(self):
        """Zero all motor throttles."""
        self.throttle = np.zeros((self.num_motors,))

    def get_states(self) -> np.ndarray:
        """Return the flattened throttle state."""
        return self.throttle.flatten()

    def state_update(self):
        warnings.warn('`state_update` does not need to be called for motors.')

    def physics_update(self, pwm: np.ndarray, rotation: (None | np.ndarray)=None):
        """Step the first-order throttle model and apply thrust/torque."""
        assert (np.all((pwm >= (- 1.0))) and np.all((pwm <= 1.0))), f'`pwm={pwm!r} has values out of bounds of -1.0 and 1.0.`'
        if (rotation is not None):
            assert (rotation.shape == (self.num_motors, 3, 3)), f'`rotation` should be of shape (num_motors, 3, 3), got {rotation.shape}'
        # First-order lag toward the commanded pwm, plus multiplicative noise.
        self.throttle += ((self.physics_period / self.tau) * (pwm - self.throttle))
        self.throttle += ((self.np_random.randn(*self.throttle.shape) * self.throttle) * self.noise_ratio)
        (thrust, torque) = self._jitted_compute_thrust_torque(rotation, self.throttle, self.max_rpm, self.thrust_unit, self.thrust_coef, self.torque_coef)
        for (idx, thr, tor) in zip(self.motor_ids, thrust, torque):
            self.p.applyExternalForce(self.uav_id, idx, thr, [0.0, 0.0, 0.0], self.p.LINK_FRAME)
            self.p.applyExternalTorque(self.uav_id, idx, tor, self.p.LINK_FRAME)

    @staticmethod
    def _jitted_compute_thrust_torque(rotation: (None | np.ndarray), throttle: np.ndarray, max_rpm: np.ndarray, thrust_unit: np.ndarray, thrust_coef: np.ndarray, torque_coef: np.ndarray) -> tuple[(np.ndarray, np.ndarray)]:
        """Thrust/torque from throttle: both scale with rpm**2 along thrust_unit.

        Fixes: `(rotation thrust_unit)` was a broken matrix-multiply
        expression (restored to `rotation @ thrust_unit`), and @staticmethod
        was restored — the call site above passes all six arguments
        positionally, which only matches a static method.
        """
        rpm = (throttle * max_rpm)
        rpm = np.expand_dims(rpm, axis=(- 1))
        if (rotation is not None):
            # Rotate each motor's thrust axis by its rotation matrix.
            thrust_unit = (rotation @ thrust_unit)[(..., 0)]
        else:
            thrust_unit = thrust_unit[(..., 0)]
        thrust = (((rpm ** 2) * thrust_coef) * thrust_unit)
        torque = (((rpm ** 2) * torque_coef) * thrust_unit)
        return (thrust, torque)
class CCSBUDataset(BaseDataset):
    """Webdataset-backed image-caption dataset streamed from tar shards.

    Samples are (image, caption) pairs; images pass through `vis_processor`
    and captions through `text_processor`.
    """

    def __init__(self, vis_processor, text_processor, location):
        super().__init__(vis_processor=vis_processor, text_processor=text_processor)
        # Pipeline (order matters): resample shards -> unpack tars -> shuffle
        # -> decode images as PIL RGB -> select (jpg, json) pair -> run the
        # vision processor on the image -> map into the training dict.
        self.inner_dataset = wds.DataPipeline(wds.ResampledShards(location), wds.tarfile_to_samples(handler=wds.warn_and_continue), wds.shuffle(1000, handler=wds.warn_and_continue), wds.decode('pilrgb', handler=wds.warn_and_continue), wds.to_tuple('jpg', 'json', handler=wds.warn_and_continue), wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), wds.map(self.to_dict, handler=wds.warn_and_continue))

    def to_dict(self, sample):
        """Convert a decoded (image, meta) tuple into the model input dict."""
        return {'image': sample[0], 'text_input': self.text_processor(sample[1]['caption'])}
def load_tests(city):
    """Load the test counters parquet for *city* and add a summed 'vol' column."""
    parquet_path = BASEDIR / 'test' / city / 'input' / 'counters_test.parquet'
    test_input = pandas.read_parquet(parquet_path)
    # Collapse each row's hourly volume vector into a single total.
    test_input['vol'] = np.array(test_input['volumes_1h'].to_numpy().tolist()).sum(axis=1)
    return test_input
def get_latest_epoch(loadpath, prior=''):
    """Return the highest epoch among `<prior>state_<epoch>.pt` files.

    Returns -1 when no matching checkpoint exists in `loadpath`.
    """
    pattern = prior + 'state_'
    epochs = [
        int(name.replace(pattern, '').replace('.pt', ''))
        for name in glob.glob1(loadpath, pattern + '*')
    ]
    return max(epochs, default=-1)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    """Evaluate `module` data-parallel across `device_ids`, gathering on `output_device`."""
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]
    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # Single-device fast path: run directly, no replication needed.
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
# Generated protocol-buffer message class; `__metaclass__` is the Python-2
# style hook that wires fields in from the `_SCALEPARAMETER` descriptor.
# Do not edit by hand — regenerate from the .proto instead.
class ScaleParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SCALEPARAMETER
# NOTE(review): `_model` below looks like a mangled decorator (probably
# `@register_model` from the model registry); confirm against upstream.
_model
def resnest50d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a ResNeSt-50d (deep stem, avg-down, radix-2 split attention)."""
    default_cfg = default_cfgs['resnest50d']
    model = ResNet(ResNestBottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False), **kwargs)
    model.default_cfg = default_cfg
    # Optionally load pretrained weights matching the default config.
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def _export_inference_graph(input_type, detection_model, use_moving_averages, trained_checkpoint_prefix, output_directory, optimize_graph=False, output_collection_name='inference_op'):
    """Build, freeze, and export a detection model's inference graph (TF1).

    Writes `frozen_inference_graph.pb`, a SavedModel directory, and a
    `model.ckpt` copy under `output_directory`.  `input_type` selects the
    input placeholder builder; `use_moving_averages` restores EMA-averaged
    variables into the exported graph.  Raises ValueError for an unknown
    `input_type`.
    """
    tf.gfile.MakeDirs(output_directory)
    frozen_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb')
    saved_model_path = os.path.join(output_directory, 'saved_model')
    model_path = os.path.join(output_directory, 'model.ckpt')
    if (input_type not in input_placeholder_fn_map):
        raise ValueError('Unknown input type: {}'.format(input_type))
    (placeholder_tensor, input_tensors) = input_placeholder_fn_map[input_type]()
    inputs = tf.to_float(input_tensors)
    # Standard detection pipeline: preprocess -> predict -> postprocess.
    preprocessed_inputs = detection_model.preprocess(inputs)
    output_tensors = detection_model.predict(preprocessed_inputs)
    postprocessed_tensors = detection_model.postprocess(output_tensors)
    outputs = _add_output_tensor_nodes(postprocessed_tensors, output_collection_name)
    saver = None
    if use_moving_averages:
        # Bake exponential-moving-average weights into the exported graph.
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
    else:
        saver = tf.train.Saver()
    input_saver_def = saver.as_saver_def()
    _write_graph_and_checkpoint(inference_graph_def=tf.get_default_graph().as_graph_def(), model_path=model_path, input_saver_def=input_saver_def, trained_checkpoint_prefix=trained_checkpoint_prefix)
    frozen_graph_def = freeze_graph_with_def_protos(input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=trained_checkpoint_prefix, output_node_names=','.join(outputs.keys()), restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', clear_devices=True, optimize_graph=optimize_graph, initializer_nodes='')
    _write_frozen_graph(frozen_graph_path, frozen_graph_def)
    _write_saved_model(saved_model_path, frozen_graph_def, placeholder_tensor, outputs)
class AnnotationClip(Segmentation):
    """Segmentation annotation clip anchored at a given starting frame."""

    def __init__(self, split, name, starting_frame, single_object, regex='*.png', lmdb_env=None):
        anno_dir = osp.join(get_anno_path(split), name)
        super(AnnotationClip, self).__init__(split, anno_dir, single_object, regex, lmdb_env=lmdb_env)
        self.starting_frame = starting_frame
def loss_fn(y_pred, y_true):
    """Binary cross-entropy with logits; targets reshaped to (N, 1)."""
    target = y_true.view(-1, 1)
    return F.binary_cross_entropy_with_logits(y_pred, target)
def jaccard_similarity(list1, list2):
    """Return the Jaccard index |A∩B| / |A∪B| of the two collections (as sets).

    Fix: two empty inputs previously raised ZeroDivisionError; identical
    (both-empty) sets now return 1.0.
    """
    s1, s2 = set(list1), set(list2)
    union = s1 | s2
    if not union:
        return 1.0
    return len(s1 & s2) / len(union)
def number_of_symbols(pols):
    """Count the distinct symbols appearing in the polynomials `pols`."""
    from phcpy.phcpy2c3 import py2c_scan_for_symbols
    joined = ''.join(pols)
    return py2c_scan_for_symbols(len(joined), joined)
def best_probing_seed(task, ref_depth, list_ref_seeds):
    """Return the seed from `list_ref_seeds` with the highest mean score.

    Scores are read from the pickle at the module-level `scores_path`,
    indexed as ``data[task][seed][ref_depth + 1][0][0]``.  Ties keep the
    earliest seed in the list.

    Fix: the file is now opened with a context manager instead of a leaked
    ``open(...)`` handle.  NOTE: unpickling trusts `scores_path` — do not
    point it at untrusted data.
    """
    with open(scores_path, 'rb') as fh:
        data_dict = pkl.load(fh)
    scores = [np.mean(data_dict[task][seed][(ref_depth + 1)][0][0]) for seed in list_ref_seeds]
    (idx, _) = max(enumerate(scores), key=(lambda x: x[1]))
    return list_ref_seeds[idx]
class Lambda(nn.Module):
    """Identity module: forwards its input unchanged.

    Handy as a no-op placeholder inside Sequential containers.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
def display_model(fname, renderView):
    """Load a legacy-VTK model, tag its points with ids, and display it in
    *renderView* as a rainbow-colored wireframe (ParaView pipeline).

    NOTE(review): relies on ParaView's implicit pipeline state; the call
    order below (reader -> GenerateIds -> Show -> display properties)
    matters and should not be rearranged.
    """
    # Read the legacy .vtk file and attach generated point/cell ids.
    model_1vtk = LegacyVTKReader(FileNames=[fname])
    generateIds1 = GenerateIds(Input=model_1vtk)
    # Color transfer function keyed on the generated 'Ids' point array.
    idsLUT = GetColorTransferFunction('Ids')
    generateIds1Display = Show(generateIds1, renderView)
    generateIds1Display.AmbientColor = [0.0, 0.0, 0.0]
    # Color the points by the 'Ids' array through the LUT above.
    generateIds1Display.ColorArrayName = ['POINTS', 'Ids']
    generateIds1Display.DiffuseColor = [0., 0., 1.0]
    generateIds1Display.LookupTable = idsLUT
    generateIds1Display.BackfaceDiffuseColor = [0., 0., 1.0]
    generateIds1Display.OSPRayScaleArray = 'Ids'
    generateIds1Display.OSPRayScaleFunction = 'PiecewiseFunction'
    generateIds1Display.SelectOrientationVectors = 'Ids'
    generateIds1Display.ScaleFactor = 0.
    generateIds1Display.SelectScaleArray = 'Ids'
    generateIds1Display.GlyphType = 'Arrow'
    generateIds1Display.GaussianRadius = 0.
    generateIds1Display.SetScaleArray = ['POINTS', 'Ids']
    generateIds1Display.ScaleTransferFunction = 'PiecewiseFunction'
    generateIds1Display.OpacityArray = ['POINTS', 'Ids']
    generateIds1Display.OpacityTransferFunction = 'PiecewiseFunction'
    # Hide the scalar bar for the Ids coloring.
    generateIds1Display.SetScalarBarVisibility(renderView, False)
    idsPWF = GetOpacityTransferFunction('Ids')
    # Render as a thick wireframe with a desaturated-rainbow preset.
    generateIds1Display.SetRepresentationType('Wireframe')
    generateIds1Display.LineWidth = 8.0
    idsLUT.ApplyPreset('Rainbow Desaturated', True)
def get_memory_settings(path, args):
    """Build the command-prefix list that selects a memory allocator.

    Depending on ``args.memory_allocator`` ('jemalloc', 'default' or
    'auto'), one or two LD_PRELOAD prefixes are produced, then extended
    with weight-sharing, memory-planning and instance-number flags.
    """
    jemalloc_prefix = 'LD_PRELOAD={}/intel_extension_for_transformers/llm/runtime/deprecated/third_party/jemalloc/lib/libjemalloc.so:$LD_PRELOAD '.format(path)
    allocator = args.memory_allocator
    memory_prefix_list = []
    if (allocator == 'jemalloc'):
        memory_prefix_list.append(jemalloc_prefix)
    elif (allocator == 'default'):
        memory_prefix_list.append('')
    elif (allocator == 'auto'):
        # Try both variants: jemalloc first, then the default allocator.
        memory_prefix_list.extend([jemalloc_prefix, ''])
    else:
        print('please enter correct setting')
    tmp_list = add_weight_sharing_flag(memory_prefix_list, args.weight_sharing)
    tmp_list = add_memory_planning_flag(tmp_list, args.memory_planning)
    tmp_list = add_instance_num_flag(tmp_list)
    return tmp_list
class EarlyStopping():
    """Stop training when the validation loss stops improving.

    Tracks the best score (negated validation loss) seen so far; after
    ``patience`` consecutive calls without an improvement larger than
    ``delta``, ``early_stop`` is set and returned.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        # patience: number of non-improving checks tolerated before stopping.
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.inf replaces np.Inf: the capitalized alias was removed in NumPy 2.0.
        self.val_loss_min = np.inf
        # Minimum improvement (in score) required to reset the counter.
        self.delta = delta

    def __call__(self, val_loss):
        """Record one validation loss; return True once patience is exhausted."""
        # Higher score == better (score is the negated loss).
        score = (- val_loss)
        if (self.best_score is None):
            self.best_score = score
        elif (score < (self.best_score + self.delta)):
            # No sufficient improvement this round.
            self.counter += 1
            logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if (self.counter >= self.patience):
                self.early_stop = True
        elif (score > self.best_score):
            # New best: record it and reset the patience counter.
            self.best_score = score
            self.counter = 0
            # NOTE(review): '{}'-style positional placeholders assume a
            # loguru-style logger; stdlib logging would not interpolate
            # these args — confirm which logger this module uses.
            logger.debug('EarlyStopping counter: {}/{} | cur: {:.2f}, best: {:.2f}', self.counter, self.patience, score, self.best_score)
        return self.early_stop
def generate_mask(x, x_len):
    """Build a (batch, seq_len) byte mask with ones over each valid length.

    For every length ``l`` in *x_len*, the corresponding row has
    ``mask[i, :l] = 1`` and zeros elsewhere, where seq_len is ``x.size(1)``.

    The original guarded a "no mask needed" fast path behind
    ``if False and ...`` — dead code that never executed — so this
    version always builds the mask, exactly as the original did at
    runtime.

    NOTE(review): rows are allocated on the GPU via ``.cuda()`` — a CUDA
    device is required.
    """
    rows = []
    for l in x_len:
        row = torch.zeros([x.size(1)]).byte().cuda()
        row[:l] = 1
        rows.append(row)
    return torch.stack(rows, 0)
class Render():
    """Offscreen OpenGL renderer for triangle meshes.

    Compiles a shader program from ``program_files``, allocates a
    framebuffer with ``color_size`` float color attachments (multisampled
    with a separate resolve FBO when ``ms_rate > 1``), and renders the
    mesh uploaded via ``set_mesh`` under the matrices set via
    ``set_viewpoint``.  With ``egl=False`` a GLUT display callback is
    also registered so the result can be shown in a window.

    NOTE(review): GL calls below are order-dependent (bind/unbind state
    machine) and must not be rearranged.
    """
    def __init__(self, width=1600, height=1200, name='GL Renderer', program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False):
        # NOTE(review): ``program_files`` is a mutable default argument —
        # shared across calls; harmless here since it is only iterated.
        self.width = width
        self.height = height
        self.name = name
        # When True, draw_init uses GL_GREATER with clear depth 0.
        self.use_inverse_depth = False
        self.egl = egl
        glEnable(GL_DEPTH_TEST)
        # Disable color clamping so float render targets keep HDR values.
        glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE)
        glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE)
        glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE)
        # Compile one shader per file, dispatching on the extension.
        shader_list = []
        for program_file in program_files:
            (_, ext) = os.path.splitext(program_file)
            if (ext == '.vs'):
                shader_list.append(loadShader(GL_VERTEX_SHADER, program_file))
            elif (ext == '.fs'):
                shader_list.append(loadShader(GL_FRAGMENT_SHADER, program_file))
            elif (ext == '.gs'):
                shader_list.append(loadShader(GL_GEOMETRY_SHADER, program_file))
        self.program = createProgram(shader_list)
        # Shader objects can be deleted once linked into the program.
        for shader in shader_list:
            glDeleteShader(shader)
        self.model_mat_unif = glGetUniformLocation(self.program, 'ModelMat')
        self.persp_mat_unif = glGetUniformLocation(self.program, 'PerspMat')
        self.vertex_buffer = glGenBuffers(1)
        # Fullscreen-quad program used by display() to blit to the window.
        (self.quad_program, self.quad_buffer) = self.init_quad_program()
        self.frame_buffer = glGenFramebuffers(1)
        glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
        self.intermediate_fbo = None
        if (ms_rate > 1):
            # --- Multisampled path: MSAA color textures + resolve FBO. ---
            self.color_buffer = []
            for i in range(color_size):
                color_buffer = glGenTextures(1)
                multi_sample_rate = ms_rate
                glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_buffer)
                # NOTE(review): these glTexParameteri calls target
                # GL_TEXTURE_2D while a multisample texture is bound —
                # likely no-ops on the MSAA texture; confirm.
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
                glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, multi_sample_rate, GL_RGBA32F, self.width, self.height, GL_TRUE)
                glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
                glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D_MULTISAMPLE, color_buffer, 0)
                self.color_buffer.append(color_buffer)
            # Multisampled depth+stencil storage for the MSAA framebuffer.
            self.render_buffer = glGenRenderbuffers(1)
            glBindRenderbuffer(GL_RENDERBUFFER, self.render_buffer)
            glRenderbufferStorageMultisample(GL_RENDERBUFFER, multi_sample_rate, GL_DEPTH24_STENCIL8, self.width, self.height)
            glBindRenderbuffer(GL_RENDERBUFFER, 0)
            glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, self.render_buffer)
            attachments = []
            for i in range(color_size):
                attachments.append((GL_COLOR_ATTACHMENT0 + i))
            glDrawBuffers(color_size, attachments)
            glBindFramebuffer(GL_FRAMEBUFFER, 0)
            # Single-sample FBO used as the blit target for pixel readback.
            self.intermediate_fbo = glGenFramebuffers(1)
            glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo)
            self.screen_texture = []
            for i in range(color_size):
                screen_texture = glGenTextures(1)
                glBindTexture(GL_TEXTURE_2D, screen_texture)
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
                glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D, screen_texture, 0)
                self.screen_texture.append(screen_texture)
            glDrawBuffers(color_size, attachments)
            glBindFramebuffer(GL_FRAMEBUFFER, 0)
        else:
            # --- Single-sample path: plain float color textures. ---
            self.color_buffer = []
            for i in range(color_size):
                color_buffer = glGenTextures(1)
                glBindTexture(GL_TEXTURE_2D, color_buffer)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None)
                glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D, color_buffer, 0)
                self.color_buffer.append(color_buffer)
            # Depth texture attached as the depth buffer (read via get_z_value).
            self.depth_buffer = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self.depth_buffer)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width, self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.depth_buffer, 0)
            attachments = []
            for i in range(color_size):
                attachments.append((GL_COLOR_ATTACHMENT0 + i))
            glDrawBuffers(color_size, attachments)
            # No resolve step needed: read directly from the color textures.
            self.screen_texture = self.color_buffer
            glBindFramebuffer(GL_FRAMEBUFFER, 0)
        # Per-draw state, filled by set_mesh / set_viewpoint.
        self.render_texture = None
        self.render_texture_v2 = {}
        self.vertex_data = None
        self.vertex_dim = None
        self.n_vertices = None
        self.model_view_matrix = None
        self.projection_matrix = None
        if (not egl):
            # Windowed mode: hook the GLUT display callback.
            global GLUT
            import OpenGL.GLUT as GLUT
            GLUT.glutDisplayFunc(self.display)
    def init_quad_program(self):
        """Compile the fullscreen-quad program and upload its vertex data.

        Returns ``(program, vertex_buffer)``; vertices are interleaved
        as (x, y, u, v) doubles covering NDC with two triangles.
        """
        shader_list = []
        shader_list.append(loadShader(GL_VERTEX_SHADER, 'quad.vs'))
        shader_list.append(loadShader(GL_FRAGMENT_SHADER, 'quad.fs'))
        the_program = createProgram(shader_list)
        for shader in shader_list:
            glDeleteShader(shader)
        # Two triangles spanning [-1, 1]^2, each vertex = position + UV.
        quad_vertices = np.array([(- 1.0), 1.0, 0.0, 1.0, (- 1.0), (- 1.0), 0.0, 0.0, 1.0, (- 1.0), 1.0, 0.0, (- 1.0), 1.0, 0.0, 1.0, 1.0, (- 1.0), 1.0, 0.0, 1.0, 1.0, 1.0, 1.0])
        quad_buffer = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, quad_buffer)
        glBufferData(GL_ARRAY_BUFFER, quad_vertices, GL_STATIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
        return (the_program, quad_buffer)
    def set_mesh(self, vertices, faces):
        """Upload a triangle soup: *vertices* gathered by the flattened
        *faces* index array, so glDrawArrays needs no index buffer."""
        self.vertex_data = vertices[faces.reshape([(- 1)])]
        self.vertex_dim = self.vertex_data.shape[1]
        self.n_vertices = self.vertex_data.shape[0]
        glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
        glBufferData(GL_ARRAY_BUFFER, self.vertex_data, GL_STATIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
    def set_viewpoint(self, projection, model_view):
        """Store the projection and model-view matrices for the next draw."""
        self.projection_matrix = projection
        self.model_view_matrix = model_view
    def draw_init(self):
        """Bind the offscreen FBO and clear color/depth for a new frame."""
        glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 0.0)
        if self.use_inverse_depth:
            # Inverted depth convention: clear to 0, greater value wins.
            glDepthFunc(GL_GREATER)
            glClearDepth(0.0)
        else:
            glDepthFunc(GL_LESS)
            glClearDepth(1.0)
        glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
    def draw_end(self):
        """Resolve MSAA attachments (if any) and restore default depth state."""
        if (self.intermediate_fbo is not None):
            # Blit each multisampled attachment into the single-sample FBO.
            for i in range(len(self.color_buffer)):
                glBindFramebuffer(GL_READ_FRAMEBUFFER, self.frame_buffer)
                glReadBuffer((GL_COLOR_ATTACHMENT0 + i))
                glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.intermediate_fbo)
                glDrawBuffer((GL_COLOR_ATTACHMENT0 + i))
                glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width, self.height, GL_COLOR_BUFFER_BIT, GL_NEAREST)
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        glDepthFunc(GL_LESS)
        glClearDepth(1.0)
    def draw(self):
        """Render the current mesh with the stored matrices into the FBO."""
        self.draw_init()
        glUseProgram(self.program)
        # transpose(): GL expects column-major, numpy stores row-major.
        glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose())
        glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose())
        glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
        glEnableVertexAttribArray(0)
        glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)
        glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)
        glDisableVertexAttribArray(0)
        glBindBuffer(GL_ARRAY_BUFFER, 0)
        glUseProgram(0)
        self.draw_end()
    def get_color(self, color_id=0):
        """Read back color attachment *color_id* as a (H, W, C) float array,
        flipped vertically so row 0 is the top of the image."""
        glBindFramebuffer(GL_FRAMEBUFFER, (self.intermediate_fbo if (self.intermediate_fbo is not None) else self.frame_buffer))
        glReadBuffer((GL_COLOR_ATTACHMENT0 + color_id))
        data = glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_FLOAT, outputType=None)
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        rgb = data.reshape(self.height, self.width, (- 1))
        rgb = np.flip(rgb, 0)
        return rgb
    def get_z_value(self):
        """Read back the depth buffer as a (H, W) float array (flipped)."""
        glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
        data = glReadPixels(0, 0, self.width, self.height, GL_DEPTH_COMPONENT, GL_FLOAT, outputType=None)
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        z = data.reshape(self.height, self.width)
        z = np.flip(z, 0)
        return z
    def display(self):
        """GLUT display callback: render offscreen, then (windowed mode only)
        blit the first screen texture to the window via the quad program."""
        self.draw()
        if (not self.egl):
            glBindFramebuffer(GL_FRAMEBUFFER, 0)
            glClearColor(0.0, 0.0, 0.0, 0.0)
            glClear(GL_COLOR_BUFFER_BIT)
            glUseProgram(self.quad_program)
            glBindBuffer(GL_ARRAY_BUFFER, self.quad_buffer)
            size_of_double = 8
            # Attribute 0: position (x, y); attribute 1: UV — interleaved
            # as 4 doubles per vertex, UV starting 2 doubles in.
            glEnableVertexAttribArray(0)
            glVertexAttribPointer(0, 2, GL_DOUBLE, GL_FALSE, (4 * size_of_double), None)
            glEnableVertexAttribArray(1)
            glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE, (4 * size_of_double), c_void_p((2 * size_of_double)))
            glDisable(GL_DEPTH_TEST)
            glActiveTexture(GL_TEXTURE0)
            glBindTexture(GL_TEXTURE_2D, self.screen_texture[0])
            glUniform1i(glGetUniformLocation(self.quad_program, 'screenTexture'), 0)
            glDrawArrays(GL_TRIANGLES, 0, 6)
            glDisableVertexAttribArray(1)
            glDisableVertexAttribArray(0)
            glEnable(GL_DEPTH_TEST)
            glBindBuffer(GL_ARRAY_BUFFER, 0)
            glUseProgram(0)
            GLUT.glutSwapBuffers()
            GLUT.glutPostRedisplay()
    def show(self):
        """Enter the GLUT main loop (blocks; windowed mode only)."""
        if (not self.egl):
            GLUT.glutMainLoop()
class MaskRCNNLossComputation(object):
    """Mask-head loss for Mask R-CNN.

    Matches proposals to ground-truth boxes, projects GT masks onto the
    positive proposals at ``discretization_size`` resolution, and
    evaluates per-pixel binary cross-entropy against the predicted mask
    logits.
    """
    def __init__(self, proposal_matcher, discretization_size):
        # Callable mapping an IoU matrix to matched GT indices
        # (negatives encoded as Matcher.BELOW_LOW_THRESHOLD).
        self.proposal_matcher = proposal_matcher
        # Side length of the square mask target (e.g. 28 in Mask R-CNN).
        self.discretization_size = discretization_size
    def match_targets_to_proposals(self, proposal, target):
        """Return, for every proposal, the matched GT box carrying its
        'labels'/'masks' fields plus the raw match index ('matched_idxs')."""
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Only keep the fields the mask head needs.
        target = target.copy_with_fields(['labels', 'masks'])
        # clamp(min=0): unmatched proposals still index a (dummy) GT row;
        # they are filtered out later via the background label.
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field('matched_idxs', matched_idxs)
        return matched_targets
    def prepare_targets(self, proposals, targets):
        """Build per-image label vectors and rasterized mask targets."""
        labels = []
        masks = []
        for (proposals_per_image, targets_per_image) in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(proposals_per_image, targets_per_image)
            matched_idxs = matched_targets.get_field('matched_idxs')
            labels_per_image = matched_targets.get_field('labels')
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # Proposals below the low IoU threshold become background (0).
            neg_inds = (matched_idxs == Matcher.BELOW_LOW_THRESHOLD)
            labels_per_image[neg_inds] = 0
            # The mask loss is computed on positive proposals only.
            positive_inds = torch.nonzero((labels_per_image > 0)).squeeze(1)
            segmentation_masks = matched_targets.get_field('masks')
            segmentation_masks = segmentation_masks[positive_inds]
            positive_proposals = proposals_per_image[positive_inds]
            # Rasterize GT masks inside each positive box at the target size.
            masks_per_image = project_masks_on_boxes(segmentation_masks, positive_proposals, self.discretization_size)
            labels.append(labels_per_image)
            masks.append(masks_per_image)
        return (labels, masks)
    def __call__(self, proposals, mask_logits, targets):
        """Return the scalar mask loss for a batch.

        proposals/targets: per-image box lists (project BoxList type —
        per boxlist_iou usage); mask_logits: per-class mask logits.
        """
        (labels, mask_targets) = self.prepare_targets(proposals, targets)
        labels = cat(labels, dim=0)
        mask_targets = cat(mask_targets, dim=0)
        positive_inds = torch.nonzero((labels > 0)).squeeze(1)
        labels_pos = labels[positive_inds]
        # No positives in the batch: return a zero that keeps the graph
        # connected (sum() * 0) instead of letting BCE fail on empties.
        if (mask_targets.numel() == 0):
            return (mask_logits.sum() * 0)
        # Index each positive proposal's logits at its GT class channel.
        mask_loss = F.binary_cross_entropy_with_logits(mask_logits[(positive_inds, labels_pos)], mask_targets)
        return mask_loss
def _tokenize_str(str_):
str_ = re.sub("[^A-Za-z0-9(),.!?\\'`]", ' ', str_)
str_ = re.sub('\\s{2,}', ' ', str_)
str_ = re.sub('\\(', ' ( ', str_)
str_ = re.sub('\\)', ' ) ', str_)
str_ = re.sub(',', ' , ', str_)
str_ = re.sub('\\.', ' . ', str_)
str_ = re.sub('!', ' ! ', str_)
str_ = re.sub('\\?', ' ? ', str_)
str_ = re.sub("\\'s", " 's", str_)
str_ = re.sub("\\'ve", " 've", str_)
str_ = re.sub("n\\'t", " n't", str_)
str_ = re.sub("\\'re", " 're", str_)
str_ = re.sub("\\'d", " 'd", str_)
str_ = re.sub("\\'ll", " 'll", str_)
return str_.strip().lower().split() |
def l1_loss(pred, target):
    """Element-wise L1 (absolute) loss between *pred* and *target*.

    Requires matching shapes and a non-empty target; returns the
    unreduced |pred - target| tensor.

    NOTE(review): a stray bare ``_loss`` token preceded this function in
    the source — it looks like the remnant of a truncated decorator
    (e.g. ``@weighted_loss``).  As written it was a NameError at import
    time, so it is dropped here; confirm against the original upstream.
    """
    assert ((pred.size() == target.size()) and (target.numel() > 0))
    loss = torch.abs((pred - target))
    return loss
class InceptionV3Test(tf.test.TestCase):
    """Unit tests for the TF-Slim Inception V3 model builder.

    Fix: ``testModelHasExpectedNumberOfParameters`` had a syntax error —
    ``assertAlmostEqual(, total_params)`` was missing its expected value;
    restored to 21802784 (the Inception V3 base parameter count).
    """
    def testBuildClassificationNetwork(self):
        """Logits and Predictions endpoints have the classifier shape."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (logits, end_points) = inception.inception_v3(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV3/Logits/SpatialSqueeze'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertTrue(('Predictions' in end_points))
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes])
    def testBuildPreLogitsNetwork(self):
        """num_classes=None yields pooled features, no logits/predictions."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = None
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (net, end_points) = inception.inception_v3(inputs, num_classes)
        self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool'))
        self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048])
        self.assertFalse(('Logits' in end_points))
        self.assertFalse(('Predictions' in end_points))
    def testBuildBaseNetwork(self):
        """Base network ends at Mixed_7c with the expected endpoints."""
        batch_size = 5
        (height, width) = (299, 299)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (final_endpoint, end_points) = inception.inception_v3_base(inputs)
        self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
        self.assertListEqual(final_endpoint.get_shape().as_list(), [batch_size, 8, 8, 2048])
        expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
        self.assertItemsEqual(end_points.keys(), expected_endpoints)
    def testBuildOnlyUptoFinalEndpoint(self):
        """final_endpoint truncates the base network at every layer."""
        batch_size = 5
        (height, width) = (299, 299)
        endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']
        for (index, endpoint) in enumerate(endpoints):
            with tf.Graph().as_default():
                inputs = tf.random_uniform((batch_size, height, width, 3))
                (out_tensor, end_points) = inception.inception_v3_base(inputs, final_endpoint=endpoint)
                self.assertTrue(out_tensor.op.name.startswith(('InceptionV3/' + endpoint)))
                self.assertItemsEqual(endpoints[:(index + 1)], end_points)
    def testBuildAndCheckAllEndPointsUptoMixed7c(self):
        """Every endpoint up to Mixed_7c has the documented output shape."""
        batch_size = 5
        (height, width) = (299, 299)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (_, end_points) = inception.inception_v3_base(inputs, final_endpoint='Mixed_7c')
        endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32], 'Conv2d_2a_3x3': [batch_size, 147, 147, 32], 'Conv2d_2b_3x3': [batch_size, 147, 147, 64], 'MaxPool_3a_3x3': [batch_size, 73, 73, 64], 'Conv2d_3b_1x1': [batch_size, 73, 73, 80], 'Conv2d_4a_3x3': [batch_size, 71, 71, 192], 'MaxPool_5a_3x3': [batch_size, 35, 35, 192], 'Mixed_5b': [batch_size, 35, 35, 256], 'Mixed_5c': [batch_size, 35, 35, 288], 'Mixed_5d': [batch_size, 35, 35, 288], 'Mixed_6a': [batch_size, 17, 17, 768], 'Mixed_6b': [batch_size, 17, 17, 768], 'Mixed_6c': [batch_size, 17, 17, 768], 'Mixed_6d': [batch_size, 17, 17, 768], 'Mixed_6e': [batch_size, 17, 17, 768], 'Mixed_7a': [batch_size, 8, 8, 1280], 'Mixed_7b': [batch_size, 8, 8, 2048], 'Mixed_7c': [batch_size, 8, 8, 2048]}
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertTrue((endpoint_name in end_points))
            self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)
    def testModelHasExpectedNumberOfParameters(self):
        """The base network has the documented variable count."""
        batch_size = 5
        (height, width) = (299, 299)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            inception.inception_v3_base(inputs)
        (total_params, _) = slim.model_analyzer.analyze_vars(slim.get_model_variables())
        # Fixed: the expected count was missing (syntax error).  21802784
        # is the Inception V3 base parameter count used upstream.
        self.assertAlmostEqual(21802784, total_params)
    def testBuildEndPoints(self):
        """Logits, AuxLogits, Mixed_7c and PreLogits are all exposed."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (_, end_points) = inception.inception_v3(inputs, num_classes)
        self.assertTrue(('Logits' in end_points))
        logits = end_points['Logits']
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertTrue(('AuxLogits' in end_points))
        aux_logits = end_points['AuxLogits']
        self.assertListEqual(aux_logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertTrue(('Mixed_7c' in end_points))
        pre_pool = end_points['Mixed_7c']
        self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 8, 8, 2048])
        self.assertTrue(('PreLogits' in end_points))
        pre_logits = end_points['PreLogits']
        self.assertListEqual(pre_logits.get_shape().as_list(), [batch_size, 1, 1, 2048])
    def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
        """depth_multiplier=0.5 halves every conv endpoint's depth."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (_, end_points) = inception.inception_v3(inputs, num_classes)
        endpoint_keys = [key for key in end_points.keys() if (key.startswith('Mixed') or key.startswith('Conv'))]
        (_, end_points_with_multiplier) = inception.inception_v3(inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
        for key in endpoint_keys:
            original_depth = end_points[key].get_shape().as_list()[3]
            new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
            self.assertEqual((0.5 * original_depth), new_depth)
    def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
        """depth_multiplier=2.0 doubles every conv endpoint's depth."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (_, end_points) = inception.inception_v3(inputs, num_classes)
        endpoint_keys = [key for key in end_points.keys() if (key.startswith('Mixed') or key.startswith('Conv'))]
        (_, end_points_with_multiplier) = inception.inception_v3(inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
        for key in endpoint_keys:
            original_depth = end_points[key].get_shape().as_list()[3]
            new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
            self.assertEqual((2.0 * original_depth), new_depth)
    def testRaiseValueErrorWithInvalidDepthMultiplier(self):
        """Non-positive depth multipliers are rejected."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        with self.assertRaises(ValueError):
            _ = inception.inception_v3(inputs, num_classes, depth_multiplier=(- 0.1))
        with self.assertRaises(ValueError):
            _ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
    def testHalfSizeImages(self):
        """150x150 inputs still build; Mixed_7c shrinks to 3x3."""
        batch_size = 5
        (height, width) = (150, 150)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (logits, end_points) = inception.inception_v3(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_7c']
        self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 3, 3, 2048])
    def testUnknownImageShape(self):
        """Unknown spatial dims at graph time resolve correctly at run time."""
        tf.reset_default_graph()
        batch_size = 2
        (height, width) = (299, 299)
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            (logits, end_points) = inception.inception_v3(inputs, num_classes)
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            pre_pool = end_points['Mixed_7c']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
    def testGlobalPoolUnknownImageShape(self):
        """global_pool=True handles arbitrary (here 400x600) input sizes."""
        tf.reset_default_graph()
        batch_size = 2
        (height, width) = (400, 600)
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            (logits, end_points) = inception.inception_v3(inputs, num_classes, global_pool=True)
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            pre_pool = end_points['Mixed_7c']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 11, 17, 2048])
    def testUnknowBatchSize(self):
        """A None batch dimension builds and evaluates correctly."""
        batch_size = 1
        (height, width) = (299, 299)
        num_classes = 1000
        inputs = tf.placeholder(tf.float32, (None, height, width, 3))
        (logits, _) = inception.inception_v3(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = tf.random_uniform((batch_size, height, width, 3))
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEquals(output.shape, (batch_size, num_classes))
    def testEvaluation(self):
        """is_training=False builds an evaluable classification graph."""
        batch_size = 2
        (height, width) = (299, 299)
        num_classes = 1000
        eval_inputs = tf.random_uniform((batch_size, height, width, 3))
        (logits, _) = inception.inception_v3(eval_inputs, num_classes, is_training=False)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (batch_size,))
    def testTrainEvalWithReuse(self):
        """An eval graph can reuse variables from a training graph."""
        train_batch_size = 5
        eval_batch_size = 2
        (height, width) = (150, 150)
        num_classes = 1000
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_v3(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        (logits, _) = inception.inception_v3(eval_inputs, num_classes, is_training=False, reuse=True)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (eval_batch_size,))
    def testLogitsNotSqueezed(self):
        """spatial_squeeze=False keeps the [1, 1, 1, C] logits shape."""
        num_classes = 25
        images = tf.random_uniform([1, 299, 299, 3])
        (logits, _) = inception.inception_v3(images, num_classes=num_classes, spatial_squeeze=False)
        with self.test_session() as sess:
            tf.global_variables_initializer().run()
            logits_out = sess.run(logits)
            self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def dla60x(**kwargs):
    """Build DLA-60X: DLA-60 with ResNeXt-style bottleneck blocks.

    Extra keyword arguments are forwarded to ``get_dla``.
    """
    cfg = dict(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        model_name='dla60x',
    )
    return get_dla(**cfg, **kwargs)
def _compute_log_a(q: float, sigma: float, alpha: float) -> float:
    """Compute log(A_alpha), dispatching on whether *alpha* is integral.

    Integer orders use the exact integer-alpha formula; fractional
    orders fall back to the fractional-alpha computation.
    """
    if not float(alpha).is_integer():
        return _compute_log_a_for_frac_alpha(q, sigma, alpha)
    return _compute_log_a_for_int_alpha(q, sigma, int(alpha))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.