code stringlengths 101 5.91M |
|---|
def get_human_goals(all_products, product_prices):
    """Build the list of human-instruction goals from annotated products.

    :param all_products: iterable of product dicts; only items with an
        'instructions' key contribute goals.
    :param product_prices: mapping asin -> price, or None to disable the
        price constraint entirely.
    :return: list of goal dicts (each gets a uniform 'weight' of 1).

    Bug fix: previously, when ``product_prices is None`` only ``price_upper``
    was initialized, so building ``instruction_text`` raised NameError on
    ``price_text``. Both defaults are now set up front.
    """
    goals = []
    cnt_atts = defaultdict(int)
    cnt = 0  # number of instructions skipped for having no attributes
    for item in all_products:
        asin = item['asin']
        if 'instructions' not in item:
            continue
        for product in item['instructions']:
            attributes = product['instruction_attributes']
            if len(attributes) == 0:
                cnt += 1
                continue
            # Defaults: no price constraint.
            price_upper = 1000000
            price_text = ''
            if product_prices is not None:
                price = product_prices[asin]
                # Up to 4 price-range bounds strictly above the real price.
                price_range = [p for p in PRICE_RANGE if p > price][:4]
                if len(price_range) >= 2:
                    # Random upper bound (the lower sample is discarded).
                    _, price_upper = sorted(random.sample(price_range, 2))
                    price_text = f', and price lower than {price_upper:.2f} dollars'
            goals.append({
                'asin': asin,
                'category': item['category'],
                'query': item['query'],
                'name': item['name'],
                'product_category': item['product_category'],
                'instruction_text': (product['instruction'].strip('.') + price_text),
                'attributes': attributes,
                'price_upper': price_upper,
                'goal_options': product['instruction_options'],
            })
            for att in attributes:
                cnt_atts[att] += 1
    for goal in goals:
        goal['weight'] = 1
    print(cnt, 'skipped')
    return goals
def analyze(model, dataset, sampled_classes=50, examples_per_class=50, kappa=0, n_t=300, n_reps=1, max_class=None, projection=True, projection_dimension=5000, layer_nums=None, layer_types=None, verbose=True, cuda=True, seed=0):
    """Run manifold-capacity analysis on activations of selected model layers.

    :param model: torch model to probe.
    :param dataset: dataset passed to ``make_manifold_data``.
    :param layer_nums: if given, only analyze layers whose name encodes one of
        these indices (``name.split('_')[1]``).
    :param layer_types: if given, only analyze layers whose name ends in one of
        these type strings (``name.split('_')[-1]``).
    :param projection: randomly project features down to
        ``projection_dimension`` when the feature count exceeds it.
    :return: OrderedDict mapping layer name -> dict with keys 'capacity',
        'radius', 'dimension', 'correlation', 'K', 'feature dimension'.
    """
    device = torch.device('cuda' if (torch.cuda.is_available() and cuda) else 'cpu')
    manifold_data = make_manifold_data(dataset, sampled_classes, examples_per_class, seed=seed)
    model = model.to(device)
    manifold_data = [d.to(device) for d in manifold_data]
    activations = extractor(model, manifold_data, layer_nums=layer_nums, layer_types=layer_types)
    np.random.seed(seed)
    for layer, data in activations.items():
        # Flatten each class manifold to (features, examples).
        X = [d.reshape(d.shape[0], -1).T for d in data]
        N = X[0].shape[0]
        if projection and N > projection_dimension:
            # Random Gaussian projection with unit-norm rows.
            M = np.random.randn(projection_dimension, N)
            M /= np.sqrt(np.sum(np.square(M), axis=1, keepdims=True))
            X = [np.matmul(M, d) for d in X]
        activations[layer] = X
    results = OrderedDict()
    for k, X in activations.items():
        # Renamed from `analyze` to stop shadowing this function's own name.
        should_analyze = False
        if layer_nums is not None and int(k.split('_')[1]) in layer_nums:
            should_analyze = True
        elif layer_types is not None and k.split('_')[-1] in layer_types:
            should_analyze = True
        elif layer_nums is None and layer_types is None:
            should_analyze = True
        if should_analyze:
            if verbose:
                print('Analyzing {}'.format(k))
            a, r, d, r0, K = manifold_analysis_corr(X, kappa, n_t, n_reps=n_reps)
            results[k] = {}
            results[k]['capacity'] = a
            results[k]['radius'] = r
            results[k]['dimension'] = d
            results[k]['correlation'] = r0
            results[k]['K'] = K
            results[k]['feature dimension'] = X[0].shape[0]
    return results
def gen_data_from_full_jsons(game, input_dir, min_frames_per_video):
    """Build video metadata + auto-text annotations from per-agent JSON dumps.

    Walks ``input_dir/<agent>/json_metadata/*.json``, loads each level into
    ``game``, skips levels shorter than ``min_frames_per_video`` frames, and
    returns a list of {'video': ..., 'annotations': ...} records.
    """
    collected = []
    for agent_name in tqdm(os.listdir(input_dir)):
        metadata_dir = os.path.join(input_dir, agent_name, 'json_metadata')
        for level_json in tqdm(os.listdir(metadata_dir)):
            json_file = os.path.join(metadata_dir, level_json)
            game.load_json(json_file)
            n_frames = len(game.frames)
            if n_frames < min_frames_per_video:
                continue
            gt_characters, gt_game_events = find_gt_characters_and_game_events(
                game, start_idx=0, end_idx=n_frames, get_ranges=True)
            video_data = {
                'id': agent_name + '_' + os.path.splitext(level_json)[0],
                # Path relative to input_dir (strip the leading separator).
                'json_file': json_file.split(input_dir)[-1][1:],
                'audio_map_file': os.path.join(agent_name, 'audio_semantic_map', 'audio_map.txt'),
                'world_theme_n': game.world_theme_n,
                'character_ranges': gt_characters,
                'game_event_timestamps': gt_game_events,
                'num_frames': n_frames,
            }
            auto_text = convert_game_to_text_desc(game, start_idx=0, end_idx=n_frames)
            chars_mentioned, acts_mentioned = find_characters_and_actions_mentioned(auto_text)
            annotations = [{
                'text': auto_text,
                'characters': chars_mentioned,
                'actions': acts_mentioned,
                'type': 'auto',
            }]
            collected.append({'video': video_data, 'annotations': annotations})
    return collected
def test_smplify():
    """Smoke-test the SMPLify registrant on synthetic 3D keypoints.

    Bug fix: the SMPL body model was built with ``batch_size=batch_size``
    while ``batch_size`` was never defined (NameError at runtime); it is now
    defined once and used for both the config and the body model.
    """
    batch_size = 1
    smplify_config = dict(mmcv.Config.fromfile('configs/smplify/smplify.py'))
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    smplify_config['body_model'] = dict(type='SMPL', gender='neutral', num_betas=10, keypoint_src='smpl_45', keypoint_dst='smpl_45', model_path='data/body_models/smpl', batch_size=batch_size)
    smplify_config['num_epochs'] = 1
    smplify_config['use_one_betas_per_video'] = True
    smplify = build_registrant(smplify_config)
    smpl = build_body_model(dict(type='SMPL', gender='neutral', num_betas=10, keypoint_src='smpl_45', keypoint_dst='smpl_45', model_path='data/body_models/smpl', batch_size=batch_size))
    # Use the model's own rest-pose joints as the fitting target.
    keypoints3d = smpl()['joints'].detach().to(device=device)
    keypoints3d_conf = torch.ones(*keypoints3d.shape[:2], device=device)
    # Run once without initialization...
    smplify_output = smplify(keypoints3d=keypoints3d, keypoints3d_conf=keypoints3d_conf)
    for k, v in smplify_output.items():
        if isinstance(v, torch.Tensor):
            assert (not np.any(np.isnan(v.detach().cpu().numpy()))), f'{k} fails.'
    # ...and once with random initial parameters.
    smplify_output = smplify(keypoints3d=keypoints3d, keypoints3d_conf=keypoints3d_conf, init_global_orient=torch.rand([1, 3]).to(device), init_body_pose=torch.rand([1, 69]).to(device), init_betas=torch.rand([1, 10]).to(device), init_transl=torch.rand([1, 3]).to(device))
    for k, v in smplify_output.items():
        if isinstance(v, torch.Tensor):
            assert (not np.any(np.isnan(v.detach().cpu().numpy()))), f'{k} fails.'
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
    """Asynchronously compute ``x + y + z`` on the remote worker ``to``.

    Returns a Future (per the ``@rpc.functions.async_execution`` contract):
    the remote add's future, chained with a callback that adds ``z`` locally.

    Fix: the decorator line was garbled to a bare ``.async_execution``;
    restored to the standard torch RPC decorator.
    """
    return rpc.rpc_async(to, torch.add, args=(x, y)).then(lambda fut: fut.wait() + z)
def main(args):
    """CLI entry point: print a problem's question/solutions/starter/test run.

    :param args: namespace with ``source`` (problem dir), ``number`` (index)
        and ``data`` (which artifact to show).
    """
    print(args)
    problem_list = sorted(get_valid_problems(args.source))
    print(f'number of problems = {len(problem_list)}')
    prob_index = args.number
    # Bug fix: validate the index BEFORE indexing into problem_list — the
    # original asserted only after the lookup, so an out-of-range index
    # raised IndexError and the assert could never fire.
    assert prob_index < len(problem_list)
    print(f'problem is {problem_list[prob_index]}')
    if (args.data == 'q') or (args.data == 'question'):
        tmp = get_question(problem_list, prob_index)
        print('q', tmp)
    elif args.data in ['solutions', 'sol', 's']:
        tmp = get_solutions(problem_list, prob_index)
        print('sol', tmp)
    elif args.data == 'starter':
        tmp = get_starter(problem_list, prob_index)
        print('starter', tmp)
    elif args.data in ['test', 't']:
        # Run the first reference solution through the problem's tests.
        sols = get_solutions(problem_list, prob_index)
        tmp = run_test(problem_list, prob_index, test=sols[0])
        print('results = ', tmp)
        print('-2 = compile error, -1 is runtime error, False failed test, True passed test')
def test_semgrex(corenlp_client):
    """Semgrex over TEXT should bind subject/object of 'wrote' with spans."""
    pattern = '{word:wrote} >nsubj {}=subject >dobj {}=object'
    matches = corenlp_client.semgrex(TEXT, pattern, to_words=True)
    expected = [{
        'text': 'wrote',
        'begin': 1,
        'end': 2,
        '$subject': {'text': 'Chris', 'begin': 0, 'end': 1},
        '$object': {'text': 'sentence', 'begin': 4, 'end': 5},
        'sentence': 0,
    }]
    assert matches == expected
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register the Python-binding methods of the packet/address CallbackImpl.

    Generated-style binding registration: default and copy constructors,
    the static/virtual TypeId accessors, and operator() exposed as __call__.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [],
                   is_static=True)
    cls.add_method('GetTypeid', 'std::string', [],
                   is_const=True, is_virtual=True)
    # Pure-virtual call operator, exposed to Python as __call__.
    cls.add_method('operator()', 'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'arg0'),
                    param('ns3::Address const &', 'arg1')],
                   is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class MNIST_L5_DRP05(nn.Module):
    """Small MNIST convnet (3 conv layers + 2 FC) with dropout throughout.

    Dropout is applied to the raw input, the flattened conv features, and
    the first FC activation. Note the default rate is 0.3 despite the
    'DRP05' in the class name.
    """

    def __init__(self, dropout=0.3):
        super(MNIST_L5_DRP05, self).__init__()
        # Conv trunk: 28x28x1 -> 13x13x32 -> 6x6x64 -> 5x5x128.
        self.block = nn.Sequential(
            nn.Conv2d(1, 32, 2),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(32, 64, 2),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(64, 128, 2),
            nn.ReLU(),
        )
        self.fc1 = nn.Linear(128 * 5 ** 2, 200)
        self.fc2 = nn.Linear(200, 10)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        features = self.block(self.dropout(x))
        flat = self.dropout(features.view(-1, 128 * 5 ** 2))
        hidden = self.dropout(self.relu(self.fc1(flat)))
        return F.log_softmax(self.fc2(hidden), 1)
def default_steering_mind(boid):
    """Return the total steering acceleration from all of the boid's behaviors."""
    total = vec3()
    for behavior in boid.behaviors:
        # Each behavior contributes its own acceleration vector.
        total += behavior.calculate(boid)
    return total
class AST_translator():
    """Translates an internal Fortran AST into a DaCe SDFG."""

    def __init__(self, ast: ast_components.InternalFortranAst, source: str):
        """Initialize translator state from a parsed internal Fortran AST.

        :param ast: the internal Fortran AST providing tables and the list
            of known functions/subroutines.
        :param source: name of the Fortran source file (recorded for
            tasklet debug information).
        """
        self.tables = ast.tables
        self.top_level = None
        self.globalsdfg = None  # root SDFG; set by ast2sdfg
        self.functions_and_subroutines = ast.functions_and_subroutines
        self.name_mapping = ast_utils.NameMap()  # per-SDFG: Fortran name -> container name
        self.contexts = {}  # per-SDFG-name constant/container context
        self.views = 0  # counter used to generate unique view names
        self.libstates = []  # library state scalars threaded through nested SDFGs
        self.file_name = source
        self.unallocated_arrays = []  # declared-but-not-yet-allocated arrays
        self.all_array_names = []
        self.last_sdfg_states = {}  # per-SDFG: last control-flow state appended
        self.last_loop_continues = {}
        self.last_loop_breaks = {}
        self.last_returns = {}
        self.module_vars = []  # (variable name, module name) pairs
        self.libraries = {}
        self.last_call_expression = {}  # per-SDFG: argument list of the pending call
        # Dispatch table used by translate(): AST node class -> handler method.
        self.ast_elements = {ast_internal_classes.If_Stmt_Node: self.ifstmt2sdfg, ast_internal_classes.For_Stmt_Node: self.forstmt2sdfg, ast_internal_classes.Map_Stmt_Node: self.forstmt2sdfg, ast_internal_classes.Execution_Part_Node: self.basicblock2sdfg, ast_internal_classes.Subroutine_Subprogram_Node: self.subroutine2sdfg, ast_internal_classes.BinOp_Node: self.binop2sdfg, ast_internal_classes.Decl_Stmt_Node: self.declstmt2sdfg, ast_internal_classes.Var_Decl_Node: self.vardecl2sdfg, ast_internal_classes.Symbol_Decl_Node: self.symbol2sdfg, ast_internal_classes.Symbol_Array_Decl_Node: self.symbolarray2sdfg, ast_internal_classes.Call_Expr_Node: self.call2sdfg, ast_internal_classes.Program_Node: self.ast2sdfg, ast_internal_classes.Write_Stmt_Node: self.write2sdfg, ast_internal_classes.Allocate_Stmt_Node: self.allocate2sdfg, ast_internal_classes.Break_Node: self.break2sdfg}
def get_dace_type(self, type):
    """Map a Fortran type name to the corresponding DaCe dtype.

    NOTE(review): implicitly returns None when `type` is not a string —
    callers appear to always pass strings; confirm before tightening.
    """
    if isinstance(type, str):
        return ast_utils.fortrantypes2dacetypes[type]
def get_name_mapping_in_context(self, sdfg: SDFG):
    """Return the global name mapping overlaid with the SDFG-local mapping.

    Local entries shadow global ones; the stored mappings are not mutated.
    """
    merged = self.name_mapping[self.globalsdfg].copy()
    if sdfg is not self.globalsdfg:
        merged.update(self.name_mapping[sdfg])
    return merged
def get_arrays_in_context(self, sdfg: SDFG):
    """Return the global SDFG's arrays overlaid with the SDFG-local arrays.

    Local entries shadow global ones; the stored dicts are not mutated.
    """
    merged = self.globalsdfg.arrays.copy()
    if sdfg is not self.globalsdfg:
        merged.update(sdfg.arrays)
    return merged
def get_memlet_range(self, sdfg: SDFG, variables: List[ast_internal_classes.FNode], var_name: str, var_name_tasklet: str) -> str:
    """Return the memlet range string for a variable used in a tasklet.

    '' for zero-dimensional containers, '0' for unit-shaped ones, otherwise
    the range generated from the matching AST node in `variables`.

    NOTE(review): if `var_name` is unknown, `var` is None and the shape
    access raises AttributeError; if no node in `variables` matches
    `var_name_tasklet`, the function implicitly returns None — confirm
    callers tolerate both.
    """
    var = self.get_arrays_in_context(sdfg).get(var_name)
    if (len(var.shape) == 0):
        return ''
    if ((len(var.shape) == 1) and (var.shape[0] == 1)):
        return '0'
    for o_v in variables:
        if (o_v.name == var_name_tasklet):
            return ast_utils.generate_memlet(o_v, sdfg, self)
def translate(self, node: ast_internal_classes.FNode, sdfg: SDFG):
    """Dispatch an AST node (or list of nodes) to its registered handler.

    Unhandled node classes produce a warning instead of failing.
    """
    if isinstance(node, list):
        for child in node:
            self.translate(child, sdfg)
        return
    handler = self.ast_elements.get(node.__class__)
    if handler is not None:
        handler(node, sdfg)
    else:
        warnings.warn(f'WARNING: {node.__class__.__name__}')
def ast2sdfg(self, node: ast_internal_classes.Program_Node, sdfg: SDFG):
    """Entry point: translate a whole program node into `sdfg`.

    Module declarations are translated first (each declared variable is also
    recorded as a (name, module) pair in self.module_vars), then the main
    program's declarations, symbols, specifications and execution part.
    """
    self.globalsdfg = sdfg
    for i in node.modules:
        # Module type declarations.
        for j in i.specification_part.typedecls:
            self.translate(j, sdfg)
            for k in j.vardecl:
                self.module_vars.append((k.name, i.name))
        # Module symbols.
        for j in i.specification_part.symbols:
            self.translate(j, sdfg)
            for k in j.vardecl:
                self.module_vars.append((k.name, i.name))
        # Module specifications.
        for j in i.specification_part.specifications:
            self.translate(j, sdfg)
            for k in j.vardecl:
                self.module_vars.append((k.name, i.name))
    # Main program declarations, then its body.
    for i in node.main_program.specification_part.typedecls:
        self.translate(i, sdfg)
    for i in node.main_program.specification_part.symbols:
        self.translate(i, sdfg)
    for i in node.main_program.specification_part.specifications:
        self.translate(i, sdfg)
    self.translate(node.main_program.execution_part.execution, sdfg)
def basicblock2sdfg(self, node: ast_internal_classes.Execution_Part_Node, sdfg: SDFG):
    """Translate each statement of an execution part, in order."""
    for stmt in node.execution:
        self.translate(stmt, sdfg)
def allocate2sdfg(self, node: ast_internal_classes.Allocate_Stmt_Node, sdfg: SDFG):
    """Materialize a Fortran ALLOCATE statement as SDFG arrays.

    For each allocation, looks up the matching entry recorded earlier in
    self.unallocated_arrays to recover its dtype/transient flag, computes
    symbolic sizes from the shape expressions, and registers the array
    (with Fortran-style offset -1 and row-major-product strides).
    """
    for i in node.allocation_list:
        # NOTE(review): this removes from self.unallocated_arrays while
        # iterating over it, which can skip entries; also `datatype` and
        # `transient` stay unbound if no entry matches — confirm that a
        # matching declaration always exists.
        for j in self.unallocated_arrays:
            if ((j[0] == i.name.name) and (sdfg == j[2])):
                datatype = j[1]
                transient = j[3]
                self.unallocated_arrays.remove(j)
        offset_value = (- 1)  # Fortran arrays are 1-based
        sizes = []
        offset = []
        # Convert each shape expression to a symbolic size.
        for j in i.shape.shape_list:
            tw = ast_utils.TaskletWriter([], [], sdfg, self.name_mapping)
            text = tw.write_code(j)
            sizes.append(sym.pystr_to_symbolic(text))
            offset.append(offset_value)
        # Row-major-product strides (the comprehension's `i` is local to it).
        strides = [dat._prod(sizes[:i]) for i in range(len(sizes))]
        self.name_mapping[sdfg][i.name.name] = sdfg._find_new_name(i.name.name)
        self.all_array_names.append(self.name_mapping[sdfg][i.name.name])
        if (self.contexts.get(sdfg.name) is None):
            self.contexts[sdfg.name] = ast_utils.Context(name=sdfg.name)
        if (i.name.name not in self.contexts[sdfg.name].containers):
            self.contexts[sdfg.name].containers.append(i.name.name)
        sdfg.add_array(self.name_mapping[sdfg][i.name.name], shape=sizes, dtype=datatype, offset=offset, strides=strides, transient=transient)
def write2sdfg(self, node: ast_internal_classes.Write_Stmt_Node, sdfg: SDFG):
    """Fortran WRITE statements are unsupported; always raises."""
    raise NotImplementedError('Fortran write statements are not implemented yet')
def ifstmt2sdfg(self, node: ast_internal_classes.If_Stmt_Node, sdfg: SDFG):
    """Translate an IF statement into guard/body/merge states.

    Layout: begin -> guard -(cond)-> if-body -> merge, and either
    guard -(not cond)-> else-body -> merge or guard -(not cond)-> merge.
    The body-end edge to merge is suppressed when the body ended in a
    break/continue/return (tracked via self.last_* dicts).
    """
    name = f'If_l_{str(node.line_number[0])}_c_{str(node.line_number[1])}'
    begin_state = ast_utils.add_simple_state_to_sdfg(self, sdfg, f'Begin{name}')
    guard_substate = sdfg.add_state(f'Guard{name}')
    sdfg.add_edge(begin_state, guard_substate, InterstateEdge())
    condition = ast_utils.ProcessedWriter(sdfg, self.name_mapping).write_code(node.cond)
    body_ifstart_state = sdfg.add_state(f'BodyIfStart{name}')
    self.last_sdfg_states[sdfg] = body_ifstart_state
    self.translate(node.body, sdfg)
    final_substate = sdfg.add_state(f'MergeState{name}')
    sdfg.add_edge(guard_substate, body_ifstart_state, InterstateEdge(condition))
    # Only connect body end to merge if the body did not terminate the flow.
    if (self.last_sdfg_states[sdfg] not in [self.last_loop_breaks.get(sdfg), self.last_loop_continues.get(sdfg), self.last_returns.get(sdfg)]):
        body_ifend_state = ast_utils.add_simple_state_to_sdfg(self, sdfg, f'BodyIfEnd{name}')
        sdfg.add_edge(body_ifend_state, final_substate, InterstateEdge())
    if (len(node.body_else.execution) > 0):
        # Non-empty ELSE branch: translate it between guard and merge.
        name_else = f'Else_l_{str(node.line_number[0])}_c_{str(node.line_number[1])}'
        body_elsestart_state = sdfg.add_state(('BodyElseStart' + name_else))
        self.last_sdfg_states[sdfg] = body_elsestart_state
        self.translate(node.body_else, sdfg)
        body_elseend_state = ast_utils.add_simple_state_to_sdfg(self, sdfg, f'BodyElseEnd{name_else}')
        sdfg.add_edge(guard_substate, body_elsestart_state, InterstateEdge((('not (' + condition) + ')')))
        sdfg.add_edge(body_elseend_state, final_substate, InterstateEdge())
    else:
        sdfg.add_edge(guard_substate, final_substate, InterstateEdge((('not (' + condition) + ')')))
    self.last_sdfg_states[sdfg] = final_substate
def forstmt2sdfg(self, node: ast_internal_classes.For_Stmt_Node, sdfg: SDFG):
    """Translate a DO loop into a guarded state-machine loop.

    Layout: begin -(init assignment)-> guard -(cond)-> loop body -> end
    -(increment)-> guard, and guard -(not cond)-> merge.
    """
    declloop = False
    name = ((('FOR_l_' + str(node.line_number[0])) + '_c_') + str(node.line_number[1]))
    begin_state = ast_utils.add_simple_state_to_sdfg(self, sdfg, ('Begin' + name))
    guard_substate = sdfg.add_state(('Guard' + name))
    final_substate = sdfg.add_state(('Merge' + name))
    self.last_sdfg_states[sdfg] = final_substate
    decl_node = node.init
    entry = {}
    # Resolve the loop variable: either an SDFG symbol or a mapped container.
    # NOTE(review): if node.init is not a BinOp_Node, `iter_name` stays
    # unbound and the `entry = {iter_name: increment}` below raises — confirm
    # the frontend guarantees a BinOp init here.
    if isinstance(decl_node, ast_internal_classes.BinOp_Node):
        if (sdfg.symbols.get(decl_node.lval.name) is not None):
            iter_name = decl_node.lval.name
        elif (self.name_mapping[sdfg].get(decl_node.lval.name) is not None):
            iter_name = self.name_mapping[sdfg][decl_node.lval.name]
        else:
            raise ValueError(('Unknown variable ' + decl_node.lval.name))
        entry[iter_name] = ast_utils.ProcessedWriter(sdfg, self.name_mapping).write_code(decl_node.rval)
    sdfg.add_edge(begin_state, guard_substate, InterstateEdge(assignments=entry))
    condition = ast_utils.ProcessedWriter(sdfg, self.name_mapping).write_code(node.cond)
    # NOTE(review): fallback increment hardcodes the name 'i' regardless of
    # the actual loop variable — confirm node.iter is always a BinOp_Node.
    increment = 'i+0+1'
    if isinstance(node.iter, ast_internal_classes.BinOp_Node):
        increment = ast_utils.ProcessedWriter(sdfg, self.name_mapping).write_code(node.iter.rval)
    entry = {iter_name: increment}
    begin_loop_state = sdfg.add_state(('BeginLoop' + name))
    end_loop_state = sdfg.add_state(('EndLoop' + name))
    self.last_sdfg_states[sdfg] = begin_loop_state
    # NOTE(review): last_loop_continues is set here but never restored to its
    # previous value after the loop — nested loops may clobber it; confirm.
    self.last_loop_continues[sdfg] = final_substate
    self.translate(node.body, sdfg)
    sdfg.add_edge(self.last_sdfg_states[sdfg], end_loop_state, InterstateEdge())
    sdfg.add_edge(guard_substate, begin_loop_state, InterstateEdge(condition))
    sdfg.add_edge(end_loop_state, guard_substate, InterstateEdge(assignments=entry))
    sdfg.add_edge(guard_substate, final_substate, InterstateEdge(f'not ({condition})'))
    self.last_sdfg_states[sdfg] = final_substate
def symbol2sdfg(self, node: ast_internal_classes.Symbol_Decl_Node, sdfg: SDFG):
    """Translate a symbol (parameter/constant) declaration.

    Records literal or aliased constant values in the per-SDFG context,
    registers the name as an SDFG symbol, and — when an initializer is
    present — emits an interstate-edge assignment setting the symbol.
    """
    if (self.contexts.get(sdfg.name) is None):
        self.contexts[sdfg.name] = ast_utils.Context(name=sdfg.name)
    if (self.contexts[sdfg.name].constants.get(node.name) is None):
        # Literal initializers are stored directly; name initializers are
        # resolved through previously recorded constants.
        if (isinstance(node.init, ast_internal_classes.Int_Literal_Node) or isinstance(node.init, ast_internal_classes.Real_Literal_Node)):
            self.contexts[sdfg.name].constants[node.name] = node.init.value
        if isinstance(node.init, ast_internal_classes.Name_Node):
            self.contexts[sdfg.name].constants[node.name] = self.contexts[sdfg.name].constants[node.init.name]
    datatype = self.get_dace_type(node.type)
    if (node.name not in sdfg.symbols):
        sdfg.add_symbol(node.name, datatype)
    # Ensure the SDFG has a start state to attach the assignment edge to.
    if (self.last_sdfg_states.get(sdfg) is None):
        bstate = sdfg.add_state('SDFGbegin', is_start_state=True)
        self.last_sdfg_states[sdfg] = bstate
    if (node.init is not None):
        substate = sdfg.add_state(f'Dummystate_{node.name}')
        increment = ast_utils.TaskletWriter([], [], sdfg, self.name_mapping).write_code(node.init)
        entry = {node.name: increment}
        sdfg.add_edge(self.last_sdfg_states[sdfg], substate, InterstateEdge(assignments=entry))
        self.last_sdfg_states[sdfg] = substate
def symbolarray2sdfg(self, node: ast_internal_classes.Symbol_Array_Decl_Node, sdfg: SDFG):
    """Constant-array symbol declarations are unsupported; always raises.

    Bug fix: the original ``return``ed the NotImplementedError instance
    instead of raising it, so callers silently received an exception object
    and the unsupported construct went unreported.
    """
    raise NotImplementedError('Symbol_Decl_Node not implemented. This should be done via a transformation that itemizes the constant array.')
def subroutine2sdfg(self, node: ast_internal_classes.Subroutine_Subprogram_Node, sdfg: SDFG):
    """Translate a subroutine call into a nested SDFG wired up with memlets.

    Builds a new SDFG for the subroutine body, matches the call-site
    arguments (taken from self.last_call_expression) against the subroutine
    parameters — handling literals, symbol arguments, array slices (via
    views) and module/global variables — connects the nested SDFG's inputs
    and outputs with memlets, then translates the subroutine's declarations
    and execution part into the new SDFG.
    """
    # Nothing to do for subroutines without a body.
    if (node.execution_part is None):
        return
    # Collect the names read and written inside the subroutine body.
    inputnodefinder = ast_transforms.FindInputs()
    inputnodefinder.visit(node)
    input_vars = inputnodefinder.nodes
    outputnodefinder = ast_transforms.FindOutputs()
    outputnodefinder.visit(node)
    output_vars = outputnodefinder.nodes
    write_names = list(dict.fromkeys([i.name for i in output_vars]))
    read_names = list(dict.fromkeys([i.name for i in input_vars]))
    parameters = node.args.copy()
    new_sdfg = SDFG(node.name.name)
    substate = ast_utils.add_simple_state_to_sdfg(self, sdfg, ('state' + node.name.name))
    variables_in_call = []
    if (self.last_call_expression.get(sdfg) is not None):
        variables_in_call = self.last_call_expression[sdfg]
    # Arity check: one extra call argument is allowed for non-void functions
    # (the result variable).
    if (not ((len(variables_in_call) == len(parameters)) or ((len(variables_in_call) == (len(parameters) + 1)) and (not isinstance(node.result_type, ast_internal_classes.Void))))):
        for i in variables_in_call:
            print('VAR CALL: ', i.name)
        for j in parameters:
            print('LOCAL TO UPDATE: ', j.name)
        raise ValueError('number of parameters does not match the function signature')
    ins_in_new_sdfg = []
    outs_in_new_sdfg = []
    views = []  # [array name, write-view node, read-view node, arg position]
    ind_count = 0
    var2 = []
    literals = []
    literal_values = []
    par2 = []
    symbol_arguments = []
    # Split the call arguments into literals, symbol arguments, and ordinary
    # variables (kept in var2 with their parameters in par2).
    for (arg_i, variable) in enumerate(variables_in_call):
        if isinstance(variable, ast_internal_classes.Name_Node):
            varname = variable.name
        elif isinstance(variable, ast_internal_classes.Array_Subscript_Node):
            varname = variable.name.name
        # NOTE(review): `varname` is unbound if `variable` is neither a
        # Name_Node, an Array_Subscript_Node, nor a Literal — confirm no
        # other node kinds reach this point.
        if (isinstance(variable, ast_internal_classes.Literal) or (varname == 'LITERAL')):
            literals.append(parameters[arg_i])
            literal_values.append(variable)
            continue
        elif (varname in sdfg.symbols):
            symbol_arguments.append((parameters[arg_i], variable))
            continue
        par2.append(parameters[arg_i])
        var2.append(variable)
    variables_in_call = var2
    parameters = par2
    # Literal and renamed-symbol arguments become explicit assignments that
    # are translated into the new SDFG before its body.
    assigns = []
    for (lit, litval) in zip(literals, literal_values):
        local_name = lit
        assigns.append(ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=local_name.name), rval=litval, op='=', line_number=node.line_number))
    for (parameter, symbol) in symbol_arguments:
        if (parameter.name != symbol.name):
            assigns.append(ast_internal_classes.BinOp_Node(lval=ast_internal_classes.Name_Node(name=parameter.name), rval=ast_internal_classes.Name_Node(name=symbol.name), op='=', line_number=node.line_number))
    # Map every remaining call variable onto a data container of the new
    # SDFG, creating views for array slices.
    for variable_in_call in variables_in_call:
        all_arrays = self.get_arrays_in_context(sdfg)
        sdfg_name = self.name_mapping.get(sdfg).get(ast_utils.get_name(variable_in_call))
        globalsdfg_name = self.name_mapping.get(self.globalsdfg).get(ast_utils.get_name(variable_in_call))
        matched = False
        for (array_name, array) in all_arrays.items():
            if (array_name in [sdfg_name]):
                matched = True
                local_name = parameters[variables_in_call.index(variable_in_call)]
                self.name_mapping[new_sdfg][local_name.name] = new_sdfg._find_new_name(local_name.name)
                self.all_array_names.append(self.name_mapping[new_sdfg][local_name.name])
                if (local_name.name in read_names):
                    ins_in_new_sdfg.append(self.name_mapping[new_sdfg][local_name.name])
                if (local_name.name in write_names):
                    outs_in_new_sdfg.append(self.name_mapping[new_sdfg][local_name.name])
                indices = 0
                index_list = []
                shape = []
                tmp_node = variable_in_call
                strides = list(array.strides)
                offsets = list(array.offset)
                mysize = 1
                if isinstance(variable_in_call, ast_internal_classes.Array_Subscript_Node):
                    # Determine the sub-shape selected by the subscript:
                    # ALL ranges keep their dimension, scalar indices drop
                    # it (with the matching stride/offset entries).
                    changed_indices = 0
                    for i in variable_in_call.indices:
                        if isinstance(i, ast_internal_classes.ParDecl_Node):
                            if (i.type == 'ALL'):
                                shape.append(array.shape[indices])
                                mysize = (mysize * array.shape[indices])
                                index_list.append(None)
                            else:
                                raise NotImplementedError('Index in ParDecl should be ALL')
                        else:
                            text = ast_utils.ProcessedWriter(sdfg, self.name_mapping).write_code(i)
                            index_list.append(sym.pystr_to_symbolic(text))
                            strides.pop((indices - changed_indices))
                            offsets.pop((indices - changed_indices))
                            changed_indices += 1
                        indices = (indices + 1)
                if isinstance(variable_in_call, ast_internal_classes.Name_Node):
                    shape = list(array.shape)
                # Scalars (empty or unit shape) become scalar containers.
                if ((shape == ()) or (shape == (1,)) or (shape == []) or (shape == [1])):
                    new_sdfg.add_scalar(self.name_mapping[new_sdfg][local_name.name], array.dtype, array.storage)
                else:
                    if (not isinstance(variable_in_call, ast_internal_classes.Name_Node)):
                        # Array slice: create a view on the parent array and
                        # connect it with read/write memlets as needed.
                        offsets_zero = []
                        for index in offsets:
                            offsets_zero.append(0)
                        (viewname, view) = sdfg.add_view(((array_name + '_view_') + str(self.views)), shape, array.dtype, storage=array.storage, strides=strides, offset=offsets_zero)
                        from dace import subsets
                        all_indices = (([None] * (len(array.shape) - len(index_list))) + index_list)
                        subset = subsets.Range([((i, i, 1) if (i is not None) else (1, s, 1)) for (i, s) in zip(all_indices, array.shape)])
                        smallsubset = subsets.Range([(0, (s - 1), 1) for s in shape])
                        memlet = Memlet(f'{array_name}[{subset}]->{smallsubset}')
                        memlet2 = Memlet(f'{viewname}[{smallsubset}]->{subset}')
                        wv = None
                        rv = None
                        if (local_name.name in read_names):
                            r = substate.add_read(array_name)
                            wv = substate.add_write(viewname)
                            substate.add_edge(r, None, wv, 'views', dpcp(memlet))
                        if (local_name.name in write_names):
                            rv = substate.add_read(viewname)
                            w = substate.add_write(array_name)
                            substate.add_edge(rv, 'views2', w, None, dpcp(memlet2))
                        self.views = (self.views + 1)
                        views.append([array_name, wv, rv, variables_in_call.index(variable_in_call)])
                    new_sdfg.add_array(self.name_mapping[new_sdfg][local_name.name], shape, array.dtype, array.storage, strides=strides, offset=offsets)
        if (not matched):
            # Fall back to arrays known only through the global SDFG mapping.
            for (array_name, array) in all_arrays.items():
                if (array_name in [globalsdfg_name]):
                    local_name = parameters[variables_in_call.index(variable_in_call)]
                    self.name_mapping[new_sdfg][local_name.name] = new_sdfg._find_new_name(local_name.name)
                    self.all_array_names.append(self.name_mapping[new_sdfg][local_name.name])
                    if (local_name.name in read_names):
                        ins_in_new_sdfg.append(self.name_mapping[new_sdfg][local_name.name])
                    if (local_name.name in write_names):
                        outs_in_new_sdfg.append(self.name_mapping[new_sdfg][local_name.name])
                    indices = 0
                    if isinstance(variable_in_call, ast_internal_classes.Array_Subscript_Node):
                        indices = len(variable_in_call.indices)
                    shape = array.shape[indices:]
                    if ((shape == ()) or (shape == (1,))):
                        new_sdfg.add_scalar(self.name_mapping[new_sdfg][local_name.name], array.dtype, array.storage)
                    else:
                        new_sdfg.add_array(self.name_mapping[new_sdfg][local_name.name], shape, array.dtype, array.storage, strides=array.strides, offset=array.offset)
    # Forward all parent-SDFG symbols by name.
    sym_dict = {}
    for i in sdfg.symbols:
        sym_dict[i] = i
    # Names used by the body that did not come in via the argument list.
    not_found_write_names = []
    not_found_read_names = []
    for i in write_names:
        if (self.name_mapping[new_sdfg].get(i) is None):
            not_found_write_names.append(i)
    for i in read_names:
        if (self.name_mapping[new_sdfg].get(i) is None):
            not_found_read_names.append(i)
    # Library state scalars are always threaded through the nested SDFG.
    for i in self.libstates:
        self.name_mapping[new_sdfg][i] = new_sdfg._find_new_name(i)
        self.all_array_names.append(self.name_mapping[new_sdfg][i])
        if (i in read_names):
            ins_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
        if (i in write_names):
            outs_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
        new_sdfg.add_scalar(self.name_mapping[new_sdfg][i], dtypes.int32, transient=False)
    addedmemlets = []
    globalmemlets = []
    # Module/global variables read by the body.
    for i in not_found_read_names:
        if (i in [a[0] for a in self.module_vars]):
            if (self.name_mapping[sdfg].get(i) is not None):
                self.name_mapping[new_sdfg][i] = new_sdfg._find_new_name(i)
                addedmemlets.append(i)
                self.all_array_names.append(self.name_mapping[new_sdfg][i])
                if (i in read_names):
                    ins_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                if (i in write_names):
                    outs_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                array_in_global = sdfg.arrays[self.name_mapping[sdfg][i]]
                if isinstance(array_in_global, Scalar):
                    new_sdfg.add_scalar(self.name_mapping[new_sdfg][i], array_in_global.dtype, transient=False)
                elif (array_in_global.type == 'Array'):
                    new_sdfg.add_array(self.name_mapping[new_sdfg][i], array_in_global.shape, array_in_global.dtype, array_in_global.storage, transient=False, strides=array_in_global.strides, offset=array_in_global.offset)
            elif (self.name_mapping[self.globalsdfg].get(i) is not None):
                self.name_mapping[new_sdfg][i] = new_sdfg._find_new_name(i)
                globalmemlets.append(i)
                self.all_array_names.append(self.name_mapping[new_sdfg][i])
                if (i in read_names):
                    ins_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                if (i in write_names):
                    outs_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                array_in_global = self.globalsdfg.arrays[self.name_mapping[self.globalsdfg][i]]
                if isinstance(array_in_global, Scalar):
                    new_sdfg.add_scalar(self.name_mapping[new_sdfg][i], array_in_global.dtype, transient=False)
                elif (array_in_global.type == 'Array'):
                    new_sdfg.add_array(self.name_mapping[new_sdfg][i], array_in_global.shape, array_in_global.dtype, array_in_global.storage, transient=False, strides=array_in_global.strides, offset=array_in_global.offset)
    # Module/global variables written (but not also read) by the body.
    for i in not_found_write_names:
        if (i in not_found_read_names):
            continue
        if (i in [a[0] for a in self.module_vars]):
            if (self.name_mapping[sdfg].get(i) is not None):
                self.name_mapping[new_sdfg][i] = new_sdfg._find_new_name(i)
                addedmemlets.append(i)
                self.all_array_names.append(self.name_mapping[new_sdfg][i])
                if (i in read_names):
                    ins_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                if (i in write_names):
                    outs_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                array = sdfg.arrays[self.name_mapping[sdfg][i]]
                # NOTE(review): the branches below test `array_in_global`,
                # which still holds a value from the read loop above, while
                # the fresh lookup is bound to `array` — looks like a
                # copy/paste bug; confirm.
                if isinstance(array_in_global, Scalar):
                    new_sdfg.add_scalar(self.name_mapping[new_sdfg][i], array_in_global.dtype, transient=False)
                elif (array_in_global.type == 'Array'):
                    new_sdfg.add_array(self.name_mapping[new_sdfg][i], array_in_global.shape, array_in_global.dtype, array_in_global.storage, transient=False, strides=array_in_global.strides, offset=array_in_global.offset)
            elif (self.name_mapping[self.globalsdfg].get(i) is not None):
                self.name_mapping[new_sdfg][i] = new_sdfg._find_new_name(i)
                globalmemlets.append(i)
                self.all_array_names.append(self.name_mapping[new_sdfg][i])
                if (i in read_names):
                    ins_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                if (i in write_names):
                    outs_in_new_sdfg.append(self.name_mapping[new_sdfg][i])
                array = self.globalsdfg.arrays[self.name_mapping[self.globalsdfg][i]]
                # NOTE(review): same stale `array_in_global` issue as above.
                if isinstance(array_in_global, Scalar):
                    new_sdfg.add_scalar(self.name_mapping[new_sdfg][i], array_in_global.dtype, transient=False)
                elif (array_in_global.type == 'Array'):
                    new_sdfg.add_array(self.name_mapping[new_sdfg][i], array_in_global.shape, array_in_global.dtype, array_in_global.storage, transient=False, strides=array_in_global.strides, offset=array_in_global.offset)
    internal_sdfg = substate.add_nested_sdfg(new_sdfg, sdfg, ins_in_new_sdfg, outs_in_new_sdfg, symbol_mapping=sym_dict)
    # Wire library state scalars to the nested SDFG.
    for i in self.libstates:
        memlet = '0'
        if (i in write_names):
            ast_utils.add_memlet_write(substate, self.name_mapping[sdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
        if (i in read_names):
            ast_utils.add_memlet_read(substate, self.name_mapping[sdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
    # Wire the call arguments (through views where one was created).
    for i in variables_in_call:
        local_name = parameters[variables_in_call.index(i)]
        if (self.name_mapping.get(sdfg).get(ast_utils.get_name(i)) is not None):
            var = sdfg.arrays.get(self.name_mapping[sdfg][ast_utils.get_name(i)])
            mapped_name = self.name_mapping[sdfg][ast_utils.get_name(i)]
        elif (ast_utils.get_name(i) in sdfg.symbols):
            var = ast_utils.get_name(i)
            mapped_name = ast_utils.get_name(i)
        elif (self.name_mapping.get(self.globalsdfg).get(ast_utils.get_name(i)) is not None):
            var = self.globalsdfg.arrays.get(self.name_mapping[self.globalsdfg][ast_utils.get_name(i)])
            mapped_name = self.name_mapping[self.globalsdfg][ast_utils.get_name(i)]
        else:
            raise NameError(('Variable name not found: ' + ast_utils.get_name(i)))
        if ((not hasattr(var, 'shape')) or (len(var.shape) == 0)):
            memlet = ''
        elif ((len(var.shape) == 1) and (var.shape[0] == 1)):
            memlet = '0'
        else:
            memlet = ast_utils.generate_memlet(i, sdfg, self)
        found = False
        for elem in views:
            if ((mapped_name == elem[0]) and (elem[3] == variables_in_call.index(i))):
                found = True
                # NOTE(review): `subsets` is only imported inside the view
                # branch earlier in this method; it is in scope here only
                # because a view entry exists for this argument.
                if (local_name.name in write_names):
                    memlet = subsets.Range([(0, (s - 1), 1) for s in sdfg.arrays[elem[2].label].shape])
                    substate.add_memlet_path(internal_sdfg, elem[2], src_conn=self.name_mapping[new_sdfg][local_name.name], memlet=Memlet(expr=elem[2].label, subset=memlet))
                if (local_name.name in read_names):
                    memlet = subsets.Range([(0, (s - 1), 1) for s in sdfg.arrays[elem[1].label].shape])
                    substate.add_memlet_path(elem[1], internal_sdfg, dst_conn=self.name_mapping[new_sdfg][local_name.name], memlet=Memlet(expr=elem[1].label, subset=memlet))
        if (not found):
            if (local_name.name in write_names):
                ast_utils.add_memlet_write(substate, mapped_name, internal_sdfg, self.name_mapping[new_sdfg][local_name.name], memlet)
            if (local_name.name in read_names):
                ast_utils.add_memlet_read(substate, mapped_name, internal_sdfg, self.name_mapping[new_sdfg][local_name.name], memlet)
    # Wire module variables.
    # NOTE(review): these two loops test `local_name.name` — stale from the
    # argument loop above — rather than `i`; confirm this is intended.
    for i in addedmemlets:
        memlet = ast_utils.generate_memlet(ast_internal_classes.Name_Node(name=i), sdfg, self)
        if (local_name.name in write_names):
            ast_utils.add_memlet_write(substate, self.name_mapping[sdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
        if (local_name.name in read_names):
            ast_utils.add_memlet_read(substate, self.name_mapping[sdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
    for i in globalmemlets:
        memlet = ast_utils.generate_memlet(ast_internal_classes.Name_Node(name=i), sdfg, self)
        if (local_name.name in write_names):
            ast_utils.add_memlet_write(substate, self.name_mapping[self.globalsdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
        if (local_name.name in read_names):
            ast_utils.add_memlet_read(substate, self.name_mapping[self.globalsdfg][i], internal_sdfg, self.name_mapping[new_sdfg][i], memlet)
    # Finally translate the subroutine body into the nested SDFG.
    if (node.execution_part is not None):
        # Import constants that come in through USE statements.
        for j in node.specification_part.uses:
            for k in j.list:
                if (self.contexts.get(new_sdfg.name) is None):
                    self.contexts[new_sdfg.name] = ast_utils.Context(name=new_sdfg.name)
                if ((self.contexts[new_sdfg.name].constants.get(ast_utils.get_name(k)) is None) and (self.contexts[self.globalsdfg.name].constants.get(ast_utils.get_name(k)) is not None)):
                    self.contexts[new_sdfg.name].constants[ast_utils.get_name(k)] = self.contexts[self.globalsdfg.name].constants[ast_utils.get_name(k)]
                pass
        for j in node.specification_part.specifications:
            self.declstmt2sdfg(j, new_sdfg)
        for i in assigns:
            self.translate(i, new_sdfg)
        self.translate(node.execution_part, new_sdfg)
def binop2sdfg(self, node: ast_internal_classes.BinOp_Node, sdfg: SDFG):
    """Translate an assignment (BinOp) AST node into an SDFG tasklet state.

    If the right-hand side is a single call to a non-intrinsic function, the
    assignment is instead rewritten as a call whose return value is the
    assignment target and dispatched to call2sdfg.  Otherwise a tasklet is
    created with read memlets for every input array and write memlets for
    every output array referenced by the statement.
    """
    calls = ast_transforms.FindFunctionCalls()
    calls.visit(node)
    if (len(calls.nodes) == 1):
        augmented_call = calls.nodes[0]
        # The intrinsics listed below are emitted inline by the tasklet
        # writer; any other call becomes an explicit subroutine-style call
        # with the assignment target appended as the return argument.
        if (augmented_call.name.name not in ['sqrt', 'exp', 'pow', 'max', 'min', 'abs', 'tanh', '__dace_epsilon']):
            augmented_call.args.append(node.lval)
            augmented_call.hasret = True
            self.call2sdfg(augmented_call, sdfg)
            return
    # Collect the SDFG arrays written by this statement.
    outputnodefinder = ast_transforms.FindOutputs()
    outputnodefinder.visit(node)
    output_vars = outputnodefinder.nodes
    output_names = []
    output_names_tasklet = []
    for i in output_vars:
        mapped_name = self.get_name_mapping_in_context(sdfg).get(i.name)
        arrays = self.get_arrays_in_context(sdfg)
        if ((mapped_name in arrays) and (mapped_name not in output_names)):
            output_names.append(mapped_name)
            output_names_tasklet.append(i.name)
    # Collect the SDFG arrays read by this statement.  SDFG symbols are
    # directly visible inside tasklets and need no memlet.
    inputnodefinder = ast_transforms.FindInputs()
    inputnodefinder.visit(node)
    input_vars = inputnodefinder.nodes
    input_names = []
    input_names_tasklet = []
    for i in input_vars:
        mapped_name = self.get_name_mapping_in_context(sdfg).get(i.name)
        arrays = self.get_arrays_in_context(sdfg)
        if (i.name in sdfg.symbols):
            continue
        if (mapped_name in arrays):
            # The same array may be read several times; suffix a running
            # count so each tasklet connector name stays unique.
            count = input_names.count(mapped_name)
            input_names.append(mapped_name)
            input_names_tasklet.append((((i.name + '_') + str(count)) + '_in'))
    # One state per source statement, named after its line/column position.
    substate = ast_utils.add_simple_state_to_sdfg(self, sdfg, ((('_state_l' + str(node.line_number[0])) + '_c') + str(node.line_number[1])))
    output_names_changed = [(o_t + '_out') for o_t in output_names]
    tasklet = ast_utils.add_tasklet(substate, ((('_l' + str(node.line_number[0])) + '_c') + str(node.line_number[1])), input_names_tasklet, output_names_changed, 'text', node.line_number, self.file_name)
    for (i, j) in zip(input_names, input_names_tasklet):
        memlet_range = self.get_memlet_range(sdfg, input_vars, i, j)
        ast_utils.add_memlet_read(substate, i, tasklet, j, memlet_range)
    for (i, j, k) in zip(output_names, output_names_tasklet, output_names_changed):
        memlet_range = self.get_memlet_range(sdfg, output_vars, i, j)
        ast_utils.add_memlet_write(substate, i, tasklet, k, memlet_range)
    # Render the statement as Python tasklet code, renaming inputs/outputs
    # to their connector names, and install it on the tasklet.
    tw = ast_utils.TaskletWriter(output_names, output_names_changed, sdfg, self.name_mapping, input_names, input_names_tasklet)
    text = tw.write_code(node)
    tasklet.code = CodeBlock(text, lang.Python)
def call2sdfg(self, node: ast_internal_classes.Call_Expr_Node, sdfg: SDFG):
    """Translate a call expression into the SDFG.

    Calls to known functions/subroutines are handled by recursively
    translating their definitions.  Any other name is treated as a library
    call and emitted as a single tasklet, with memlets for every SDFG array
    the call touches and optional threading of a library-state container.
    """
    self.last_call_expression[sdfg] = node.args
    match_found = False  # NOTE(review): assigned but never updated/read below.
    # NOTE(review): rettype is never reassigned, so every
    # `not isinstance(rettype, Void)` guard below is always True.
    rettype = 'INTEGER'
    hasret = False
    if (node.name in self.functions_and_subroutines):
        # Known definition: translate it in place and stop.
        for i in self.top_level.function_definitions:
            if (i.name == node.name):
                self.function2sdfg(i, sdfg)
                return
        for i in self.top_level.subroutine_definitions:
            if (i.name == node.name):
                self.subroutine2sdfg(i, sdfg)
                return
        for j in self.top_level.modules:
            for i in j.function_definitions:
                if (i.name == node.name):
                    self.function2sdfg(i, sdfg)
                    return
            for i in j.subroutine_definitions:
                if (i.name == node.name):
                    self.subroutine2sdfg(i, sdfg)
                    return
    else:
        # Unknown name: look up an external library-state container.
        # NOTE(review): libstate is only bound on this branch — if a name is
        # in functions_and_subroutines but no definition matches above, the
        # code below would raise UnboundLocalError; confirm that cannot occur.
        libstate = self.libraries.get(node.name.name)
    if ((not isinstance(rettype, ast_internal_classes.Void)) and hasattr(node, 'hasret')):
        if node.hasret:
            # binop2sdfg appended the assignment target as the last argument;
            # pop it off so it can be wired as the return-value connector.
            hasret = True
            retval = node.args.pop((len(node.args) - 1))
    if (node.name == 'free'):
        return
    input_names_tasklet = {}
    output_names_tasklet = []
    input_names = []
    output_names = []
    special_list_in = {}
    special_list_out = []
    if (libstate is not None):
        # Thread the library-state container through the tasklet so
        # successive library calls are ordered by a data dependency.
        special_list_in[(self.name_mapping[sdfg][libstate] + '_task')] = dtypes.pointer(sdfg.arrays.get(self.name_mapping[sdfg][libstate]).dtype)
        special_list_out.append((self.name_mapping[sdfg][libstate] + '_task_out'))
    used_vars = [node for node in ast_transforms.mywalk(node) if isinstance(node, ast_internal_classes.Name_Node)]
    for i in used_vars:
        for j in sdfg.arrays:
            if ((self.name_mapping.get(sdfg).get(i.name) == j) and (j not in input_names)):
                elem = sdfg.arrays.get(j)
                # Rank-0 or shape-(1,) containers are treated as scalars.
                scalar = False
                if (len(elem.shape) == 0):
                    scalar = True
                elif ((len(elem.shape) == 1) and (elem.shape[0] == 1)):
                    scalar = True
                # Non-scalar arguments may be written by the callee, except
                # for the print-style calls which only read their arguments.
                if ((not scalar) and (not (node.name.name in ['fprintf', 'printf']))):
                    output_names.append(j)
                    output_names_tasklet.append(i.name)
                input_names_tasklet[i.name] = dtypes.pointer(elem.dtype)
                input_names.append(j)
    output_names_changed = []
    for (o, o_t) in zip(output_names, output_names_tasklet):
        output_names_changed.append((o_t + '_out'))
    tw = ast_utils.TaskletWriter(output_names_tasklet.copy(), output_names_changed.copy(), sdfg, self.name_mapping)
    if ((not isinstance(rettype, ast_internal_classes.Void)) and hasret):
        # Emit `retval = call(...)` and wire retval through the tasklet.
        special_list_in[retval.name] = pointer(self.get_dace_type(rettype))
        special_list_out.append((retval.name + '_out'))
        text = tw.write_code(ast_internal_classes.BinOp_Node(lval=retval, op='=', rval=node, line_number=node.line_number))
    else:
        text = tw.write_code(node)
    substate = ast_utils.add_simple_state_to_sdfg(self, sdfg, ('_state' + str(node.line_number[0])))
    tasklet = ast_utils.add_tasklet(substate, str(node.line_number[0]), {**input_names_tasklet, **special_list_in}, (output_names_changed + special_list_out), 'text', node.line_number, self.file_name)
    if (libstate is not None):
        # Library state is a single element: read/write subset '0'.
        ast_utils.add_memlet_read(substate, self.name_mapping[sdfg][libstate], tasklet, (self.name_mapping[sdfg][libstate] + '_task'), '0')
        ast_utils.add_memlet_write(substate, self.name_mapping[sdfg][libstate], tasklet, (self.name_mapping[sdfg][libstate] + '_task_out'), '0')
    if ((not isinstance(rettype, ast_internal_classes.Void)) and hasret):
        ast_utils.add_memlet_read(substate, self.name_mapping[sdfg][retval.name], tasklet, retval.name, '0')
        ast_utils.add_memlet_write(substate, self.name_mapping[sdfg][retval.name], tasklet, (retval.name + '_out'), '0')
    for (i, j) in zip(input_names, input_names_tasklet):
        memlet_range = self.get_memlet_range(sdfg, used_vars, i, j)
        ast_utils.add_memlet_read(substate, i, tasklet, j, memlet_range)
    for (i, j, k) in zip(output_names, output_names_tasklet, output_names_changed):
        memlet_range = self.get_memlet_range(sdfg, used_vars, i, j)
        ast_utils.add_memlet_write(substate, i, tasklet, k, memlet_range)
    setattr(tasklet, 'code', CodeBlock(text, lang.Python))
def declstmt2sdfg(self, node: ast_internal_classes.Decl_Stmt_Node, sdfg: SDFG):
    """Translate each variable declaration contained in a declaration statement."""
    for vardecl in node.vardecl:
        self.translate(vardecl, sdfg)
def vardecl2sdfg(self, node: ast_internal_classes.Var_Decl_Node, sdfg: SDFG):
    """Translate a variable declaration into an SDFG data container.

    Allocatable variables are deferred (recorded in unallocated_arrays),
    unsized variables become SDFG scalars, and sized variables become
    transient arrays with per-dimension offsets and computed strides.
    """
    transient = True  # locally declared data is transient to the SDFG
    datatype = self.get_dace_type(node.type)
    if hasattr(node, 'alloc'):
        if node.alloc:
            # Allocatable: container creation happens at the allocation site.
            self.unallocated_arrays.append([node.name, datatype, sdfg, transient])
            return
    if (node.sizes is not None):
        sizes = []
        offset = []
        # Offset of -1 on every dimension — consistent with a 1-based source
        # language being mapped to 0-based indexing (TODO confirm).
        offset_value = (- 1)
        for i in node.sizes:
            # Dimension sizes may be expressions; render them to symbolics.
            tw = ast_utils.TaskletWriter([], [], sdfg, self.name_mapping)
            text = tw.write_code(i)
            sizes.append(sym.pystr_to_symbolic(text))
            offset.append(offset_value)
    else:
        sizes = None
    # Skip names that already have a container or are SDFG symbols.
    if (self.name_mapping[sdfg].get(node.name) is not None):
        return
    if (node.name in sdfg.symbols):
        return
    self.name_mapping[sdfg][node.name] = sdfg._find_new_name(node.name)
    if (sizes is None):
        sdfg.add_scalar(self.name_mapping[sdfg][node.name], dtype=datatype, transient=transient)
    else:
        # Stride of dimension i is the product of all preceding sizes.
        strides = [dat._prod(sizes[:i]) for i in range(len(sizes))]
        sdfg.add_array(self.name_mapping[sdfg][node.name], shape=sizes, dtype=datatype, offset=offset, strides=strides, transient=transient)
    self.all_array_names.append(self.name_mapping[sdfg][node.name])
    # Record the declaration in this SDFG's context for later lookups.
    if (self.contexts.get(sdfg.name) is None):
        self.contexts[sdfg.name] = ast_utils.Context(name=sdfg.name)
    if (node.name not in self.contexts[sdfg.name].containers):
        self.contexts[sdfg.name].containers.append(node.name)
def break2sdfg(self, node: ast_internal_classes.Break_Node, sdfg: SDFG):
    """Translate a loop break: remember the breaking state and connect it to
    the loop's recorded continue/exit state with an unconditional edge."""
    self.last_loop_breaks[sdfg] = self.last_sdfg_states[sdfg]
    # NOTE(review): the edge targets last_loop_continues — presumably the
    # state control flow reaches after the loop; confirm against loop2sdfg.
    sdfg.add_edge(self.last_sdfg_states[sdfg], self.last_loop_continues.get(sdfg), InterstateEdge())
class ResNet(Backbone):
    """ResNet backbone built from a stem module and a list of residual stages.

    Any subset of {'stem', 'res2'..'res5', 'linear'} can be exposed as named
    outputs; when `num_classes` is given, a global-average-pool + linear
    classification head named 'linear' is appended.
    """
    def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
        """
        Args:
            stem: module exposing `stride` and `out_channels` attributes.
            stages: list of stages, each a non-empty list of CNNBlockBase
                blocks (each exposing `stride` and `out_channels`).
            num_classes: if not None, append the classification head.
            out_features: names of the outputs forward() should return;
                trailing stages no requested output depends on are dropped.
                Defaults to the last name added ('linear' if a head exists,
                otherwise the last stage).
            freeze_at: 1 freezes the stem; n >= 2 additionally freezes
                stages res2..res{n}.
        """
        super().__init__()
        self.stem = stem
        self.num_classes = num_classes
        current_stride = self.stem.stride
        self._out_feature_strides = {'stem': current_stride}
        self._out_feature_channels = {'stem': self.stem.out_channels}
        (self.stage_names, self.stages) = ([], [])
        if (out_features is not None):
            # Drop stages that no requested output depends on.
            num_stages = max([{'res2': 1, 'res3': 2, 'res4': 3, 'res5': 4}.get(f, 0) for f in out_features])
            stages = stages[:num_stages]
        for (i, blocks) in enumerate(stages):
            assert (len(blocks) > 0), len(blocks)
            for block in blocks:
                assert isinstance(block, CNNBlockBase), block
            name = ('res' + str((i + 2)))  # stage numbering starts at res2
            stage = nn.Sequential(*blocks)
            self.add_module(name, stage)
            self.stage_names.append(name)
            self.stages.append(stage)
            # Accumulate total stride and record this stage's output width.
            self._out_feature_strides[name] = current_stride = int((current_stride * np.prod([k.stride for k in blocks])))
            self._out_feature_channels[name] = curr_channels = blocks[(- 1)].out_channels
        # Freeze the name list now that all stages are registered.
        self.stage_names = tuple(self.stage_names)
        if (num_classes is not None):
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            # NOTE(review): curr_channels is only bound when `stages` is
            # non-empty — confirm a head-only ResNet is never constructed.
            self.linear = nn.Linear(curr_channels, num_classes)
            nn.init.normal_(self.linear.weight, std=0.01)
            name = 'linear'
        if (out_features is None):
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert (out_feature in children), 'Available children: {}'.format(', '.join(children))
        self.freeze(freeze_at)
    def forward(self, x):
        """Run the backbone on an (N, C, H, W) tensor and return a dict
        mapping each requested output name to its tensor."""
        assert (x.dim() == 4), f'ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!'
        outputs = {}
        x = self.stem(x)
        if ('stem' in self._out_features):
            outputs['stem'] = x
        for (name, stage) in zip(self.stage_names, self.stages):
            x = stage(x)
            if (name in self._out_features):
                outputs[name] = x
        if (self.num_classes is not None):
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if ('linear' in self._out_features):
                outputs['linear'] = x
        return outputs
    def output_shape(self):
        """Map each requested output name to its ShapeSpec (channels, stride)."""
        return {name: ShapeSpec(channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]) for name in self._out_features}
    def freeze(self, freeze_at=0):
        """Freeze the stem (freeze_at >= 1) and stages res2..res{freeze_at};
        returns self so calls can be chained."""
        if (freeze_at >= 1):
            self.stem.freeze()
        for (idx, stage) in enumerate(self.stages, start=2):
            if (freeze_at >= idx):
                for block in stage.children():
                    block.freeze()
        return self
def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
    """Create a list of ``num_blocks`` blocks forming one ResNet stage.

    Keyword arguments whose names end in ``_per_block`` must be sequences of
    length ``num_blocks``; the i-th entry is forwarded (suffix stripped) to
    the i-th block.  All other keyword arguments are forwarded unchanged.
    The first block receives ``in_channels``; later blocks chain from
    ``out_channels``.
    """
    suffix = '_per_block'
    blocks = []
    for idx in range(num_blocks):
        block_kwargs = {}
        for key, value in kwargs.items():
            if not key.endswith(suffix):
                block_kwargs[key] = value
                continue
            assert (len(value) == num_blocks), f"Argument '{key}' of make_stage should have the same length as num_blocks={num_blocks}."
            stripped = key[:-len(suffix)]
            assert (stripped not in kwargs), f'Cannot call make_stage with both {key} and {stripped}!'
            block_kwargs[stripped] = value[idx]
        blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **block_kwargs))
        # Subsequent blocks consume the previous block's output width.
        in_channels = out_channels
    return blocks
def make_default_stages(depth, block_class=None, **kwargs):
    """Build the four standard ResNet stages for depth 18/34/50/101/152.

    Depths below 50 use BasicBlock with narrow channel plans; deeper nets
    use BottleneckBlock and set bottleneck_channels to out_channels // 4.
    Extra kwargs are forwarded to every block.
    """
    blocks_per_stage = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
    shallow = depth < 50
    if block_class is None:
        block_class = BasicBlock if shallow else BottleneckBlock
    if shallow:
        in_plan = [64, 64, 128, 256]
        out_plan = [64, 128, 256, 512]
    else:
        in_plan = [64, 256, 512, 1024]
        out_plan = [256, 512, 1024, 2048]
    stages = []
    for n_blocks, first_stride, c_in, c_out in zip(blocks_per_stage, [1, 2, 2, 2], in_plan, out_plan):
        if not shallow:
            kwargs['bottleneck_channels'] = c_out // 4
        # Only the first block of a stage strides; the rest keep stride 1.
        stages.append(ResNet.make_stage(block_class=block_class, num_blocks=n_blocks, stride_per_block=[first_stride] + [1] * (n_blocks - 1), in_channels=c_in, out_channels=c_out, **kwargs))
    return stages
class LabeledDummyDataProvider(DummyDataProvider):
    """Dummy provider serving a single cached batch of random data with
    random integer labels — useful for smoke-testing a training pipeline."""
    def __init__(self, data_dim, num_classes=10, num_cases=7):
        # Single pseudo-batch; metadata mirrors the real provider format.
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim, 'label_names': [str(x) for x in range(num_classes)], 'data_in_rows': True}
        self.num_cases = num_cases
        self.num_classes = num_classes
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx = 0
        self.data = None  # generated lazily on first get_next_batch call
    def get_num_classes(self):
        """Return the number of label classes."""
        return self.num_classes
    def get_next_batch(self):
        """Return (epoch, batchnum, [data.T, labels.T]).  Data is generated
        once and cached, so every batch is identical."""
        # NOTE(review): epoch/batchnum are captured here, but the return
        # statement reads the attributes after advance_batch() — confirm the
        # post-advance values are the intended ones.
        (epoch, batchnum) = (self.curr_epoch, self.curr_batchnum)
        self.advance_batch()
        if (self.data is None):
            # `n` is the module's numpy alias; single-precision features.
            data = rand(self.num_cases, self.get_data_dims()).astype(n.single)
            # NOTE(review): random_integers is deprecated in modern numpy —
            # confirm the pinned numpy version still provides it.
            labels = n.require(n.c_[random_integers(0, (self.num_classes - 1), self.num_cases)], requirements='C', dtype=n.single)
            (self.data, self.labels) = (data, labels)
        else:
            (data, labels) = (self.data, self.labels)
        return (self.curr_epoch, self.curr_batchnum, [data.T, labels.T])
def register_Ns3VhtCapabilities_methods(root_module, cls):
    """Register Python binding declarations for ns3::VhtCapabilities.

    Appears to be auto-generated PyBindGen registration code: declares the
    stream operator, constructors, the WifiInformationElement virtual
    interface, and getters/setters for every VHT capability field.  Keep
    edits mechanical — this style of file is normally regenerated.
    """
    cls.add_output_stream_operator()
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::VhtCapabilities const &', 'arg0')])
    cls.add_constructor([])
    # WifiInformationElement (de)serialization interface.
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    # Const getters for the capability fields.
    cls.add_method('GetMaxAmpduLengthExponent', 'uint8_t', [], is_const=True)
    cls.add_method('GetMaxMpduLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetRxHighestSupportedLgiDataRate', 'uint16_t', [], is_const=True)
    cls.add_method('GetRxLdpc', 'uint8_t', [], is_const=True)
    cls.add_method('GetRxMcsMap', 'uint16_t', [], is_const=True)
    cls.add_method('GetRxStbc', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetShortGuardIntervalFor160Mhz', 'uint8_t', [], is_const=True)
    cls.add_method('GetShortGuardIntervalFor80Mhz', 'uint8_t', [], is_const=True)
    cls.add_method('GetSupportedChannelWidthSet', 'uint8_t', [], is_const=True)
    cls.add_method('GetSupportedMcsAndNssSet', 'uint64_t', [], is_const=True)
    cls.add_method('GetTxHighestSupportedLgiDataRate', 'uint16_t', [], is_const=True)
    cls.add_method('GetTxMcsMap', 'uint16_t', [], is_const=True)
    cls.add_method('GetTxStbc', 'uint8_t', [], is_const=True)
    cls.add_method('GetVhtCapabilitiesInfo', 'uint32_t', [], is_const=True)
    cls.add_method('IsSupportedMcs', 'bool', [param('uint8_t', 'mcs'), param('uint8_t', 'Nss')], is_const=True)
    cls.add_method('IsSupportedRxMcs', 'bool', [param('uint8_t', 'mcs')], is_const=True)
    cls.add_method('IsSupportedTxMcs', 'bool', [param('uint8_t', 'mcs')], is_const=True)
    cls.add_method('Serialize', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Setters; SetRxMcsMap/SetTxMcsMap are overloaded: full 16-bit map, or per-(mcs, nss).
    cls.add_method('SetMaxAmpduLengthExponent', 'void', [param('uint8_t', 'exponent')])
    cls.add_method('SetMaxMpduLength', 'void', [param('uint8_t', 'length')])
    cls.add_method('SetRxHighestSupportedLgiDataRate', 'void', [param('uint16_t', 'supporteddatarate')])
    cls.add_method('SetRxLdpc', 'void', [param('uint8_t', 'rxldpc')])
    cls.add_method('SetRxMcsMap', 'void', [param('uint16_t', 'map')])
    cls.add_method('SetRxMcsMap', 'void', [param('uint8_t', 'mcs'), param('uint8_t', 'nss')])
    cls.add_method('SetRxStbc', 'void', [param('uint8_t', 'rxstbc')])
    cls.add_method('SetShortGuardIntervalFor160Mhz', 'void', [param('uint8_t', 'shortguardinterval')])
    cls.add_method('SetShortGuardIntervalFor80Mhz', 'void', [param('uint8_t', 'shortguardinterval')])
    cls.add_method('SetSupportedChannelWidthSet', 'void', [param('uint8_t', 'channelwidthset')])
    cls.add_method('SetSupportedMcsAndNssSet', 'void', [param('uint64_t', 'ctrl')])
    cls.add_method('SetTxHighestSupportedLgiDataRate', 'void', [param('uint16_t', 'supporteddatarate')])
    cls.add_method('SetTxMcsMap', 'void', [param('uint16_t', 'map')])
    cls.add_method('SetTxMcsMap', 'void', [param('uint8_t', 'mcs'), param('uint8_t', 'nss')])
    cls.add_method('SetTxStbc', 'void', [param('uint8_t', 'txstbc')])
    cls.add_method('SetVhtCapabilitiesInfo', 'void', [param('uint32_t', 'ctrl')])
    cls.add_method('SetVhtSupported', 'void', [param('uint8_t', 'vhtsupported')])
    return
def concatenate_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axis=None):
    """Backward pass of Concatenate: route the incoming gradient through a
    ConcatenateDataGrad function that splits it back per input shape.

    ``axis`` defaults to the last axis of the incoming gradient.  The
    ``inputs``/``outputs``/``output_shapes`` parameters are unused but kept
    for the common backward-function signature.
    """
    incoming_grad = grad_inputs[0]
    if axis is None:
        axis = len(incoming_grad.shape) - 1
    ctx = nn.get_current_context()
    grad_fn = ConcatenateDataGrad(ctx, axis=axis)
    # The grad function needs the original per-input shapes to split.
    grad_fn.xshapes = input_shapes
    return grad_fn(incoming_grad)
def downsample_avg(in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
    """AvgPool shortcut: optional 2x2 average pool, then 1x1 conv + norm.

    ``kernel_size`` and ``first_dilation`` are accepted only for signature
    compatibility with the conv-based downsample helper and are unused.
    """
    norm_layer = norm_layer or nn.BatchNorm2d
    avg_stride = stride if dilation == 1 else 1
    if stride == 1 and dilation == 1:
        pool = nn.Identity()
    else:
        # AvgPool2dSame provides 'same'-padded pooling when the pool itself
        # is unstrided because dilation absorbs the stride.
        pool_cls = AvgPool2dSame if (avg_stride == 1 and dilation > 1) else nn.AvgPool2d
        pool = pool_cls(2, avg_stride, ceil_mode=True, count_include_pad=False)
    layers = [
        pool,
        nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),
        norm_layer(out_channels),
    ]
    return nn.Sequential(*layers)
class CLEVADialogueGenerationScenario(CLEVAScenario):
    """CLEVA dialogue-generation scenario.

    Each dialogue is unrolled into one instance per system ('sys') turn:
    the conversation history up to that turn is the input context and the
    system utterance is the reference label.
    """
    description = 'Dialogue generation task in CLEVA benchmark'
    tags = ['dialogue_generation']
    def task(self) -> str:
        # NOTE(review): likely a @property in the parent API; decorators may
        # have been stripped in this extraction.
        return 'dialogue_generation'
    def get_instances(self, output_path: str) -> List[Instance]:
        """Load the dataset and expand every dialogue into per-turn instances."""
        dataset = self.load_dataset(output_path)
        instances: List[Instance] = []
        for split in self.splits:
            for row in dataset[split]:
                instances.extend(self.process_dialogue_instance(row, self.splits[split]))
        return instances
    def process_dialogue_instance(self, row: Dict[(str, Any)], split: str) -> List[Instance]:
        """Turn one dialogue row into a list of instances, one per system turn.

        The history is deep-copied per instance so later turns do not mutate
        earlier instances' contexts.
        """
        instances: List[Instance] = []
        dialog = row['dialogue']
        history: List[Dict[(str, str)]] = []
        for item in dialog:
            role = item['role']
            utterance = item['content']
            if (item['role'] == 'sys'):
                # System turn: history so far is input, this utterance is the label.
                instances.append(self.process_instance({'history': copy.deepcopy(history), 'role': role, 'label': [utterance]}, split=split))
            # Every turn (user and system) is appended to the running history.
            history.append({'utterance': utterance, 'role': role})
        return instances
class DerivativeOperator():
    """Derivative operator factory: ``D[params]`` returns a callable that
    wraps a function in an FDerivativeOperator for those parameters."""
    class DerivativeOperatorWithParameters():
        """Derivative operator specialised to a fixed parameter set."""
        def __init__(self, parameter_set):
            self._parameter_set = parameter_set
        def __call__(self, function):
            # Delegate the actual formal derivative to FDerivativeOperator.
            return FDerivativeOperator(function, self._parameter_set)
        def __repr__(self):
            params = ', '.join(map(repr, self._parameter_set))
            return 'D[%s]' % params
    def __getitem__(self, args):
        # Normalise D[x] and D[x, y] so both yield a tuple parameter set.
        if isinstance(args, tuple):
            params = args
        else:
            params = (args,)
        return self.DerivativeOperatorWithParameters(params)
class OutputInMiddleTest(BaseKerasFeatureNetworkTest):
    """Checks that a tensor taken from the middle of the network (the BN
    output) can also be exposed as a model output after quantization."""
    def __init__(self, unit_test):
        super().__init__(unit_test, experimental_exporter=True)
    def create_networks(self):
        """Build a Conv -> BN -> ReLU model exposing both the final ReLU
        output and the intermediate BN tensor as model outputs."""
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = layers.Conv2D(3, 4)(inputs)
        x = layers.BatchNormalization()(x)
        outputs = layers.Activation('relu')(x)
        model = keras.Model(inputs=inputs, outputs=[outputs, x])
        return model
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        """Verify the quantized model keeps two outputs and that the conv
        layer's output tensor is one of them (presumably because BN was
        folded into the conv — confirm against the quantizer's folding)."""
        self.unit_test.assertTrue((len(quantized_model.outputs) == 2))
        conv_layer = get_layers_from_model_by_type(quantized_model, layers.Conv2D)[0]
        # Tensors are compared via .ref(), the hashable wrapper Keras provides.
        self.unit_test.assertTrue((conv_layer.output.ref() in [t.ref() for t in quantized_model.outputs]))
class TestSequenceBatch(object):
    """Tests for SequenceBatch: construction from token sequences, mask
    validation, and the masked reduction/embedding operations.

    NOTE(review): `sequences`, `vocab` and `some_seq_batch` are consumed as
    pytest fixtures by the test methods — the @pytest.fixture decorators
    appear to have been stripped in this extraction.
    """
    def sequences(self):
        # Sequences of decreasing length, including an empty one.
        return [['a', 'b', 'b', 'c'], ['c'], []]
    def vocab(self):
        return SimpleVocab(['<unk>', 'a', 'b', 'c', '<start>', '<stop>'])
    def test_from_sequences(self, sequences, vocab):
        # Values are vocab indices right-padded with 0; mask marks real tokens.
        seq_batch = SequenceBatch.from_sequences(sequences, vocab)
        assert_tensor_equal(seq_batch.values, np.array([[1, 2, 2, 3], [3, 0, 0, 0], [0, 0, 0, 0]], dtype=np.int32))
        assert_tensor_equal(seq_batch.mask, np.array([[1, 1, 1, 1], [1, 0, 0, 0], [0, 0, 0, 0]], dtype=np.float32))
    def test_min_seq_length(self, vocab):
        # All-empty sequences still produce min_seq_length columns of padding.
        seq_batch = SequenceBatch.from_sequences([[], [], []], vocab, min_seq_length=2)
        assert_tensor_equal(seq_batch.values, np.zeros((3, 2)))
        assert_tensor_equal(seq_batch.mask, np.zeros((3, 2)))
    def test_mask_validation(self):
        mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]))
        values = mask
        SequenceBatch(values, mask)  # valid left-justified binary mask passes
        # Masks must be binary...
        non_binary_mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 0], [1, 1.2, 0, 0], [1, 1, 1, 0]]))
        with pytest.raises(ValueError):
            SequenceBatch(mask, non_binary_mask)
        # ...and left-justified (no 1 may follow a 0 in a row).
        non_left_justified_mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
        with pytest.raises(ValueError):
            SequenceBatch(mask, non_left_justified_mask)
    def test_split(self):
        # Splitting yields one (values, mask) element per time step.
        input_embeds = GPUVariable(torch.LongTensor([[[1, 2], [2, 3], [5, 6]], [[4, 8], [3, 5], [0, 0]]]))
        input_mask = GPUVariable(torch.FloatTensor([[1, 1, 1], [1, 1, 0]]))
        sb = SequenceBatch(input_embeds, input_mask)
        elements = sb.split()
        input_list = [e.values for e in elements]
        mask_list = [e.mask for e in elements]
        assert (len(input_list) == 3)
        assert_tensor_equal(input_list[0], [[1, 2], [4, 8]])
        assert_tensor_equal(input_list[1], [[2, 3], [3, 5]])
        assert_tensor_equal(input_list[2], [[5, 6], [0, 0]])
        assert (len(mask_list) == 3)
        assert_tensor_equal(mask_list[0], [[1], [1]])
        assert_tensor_equal(mask_list[1], [[1], [1]])
        assert_tensor_equal(mask_list[2], [[1], [0]])
    def test_cat(self):
        # cat is the inverse of split: per-step elements stack along time.
        x1 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[1, 2], [3, 4]], [[8, 2], [9, 0]]])), GPUVariable(torch.FloatTensor([[1], [1]])))
        x2 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[(- 1), 20], [3, 40]], [[(- 8), 2], [9, 10]]])), GPUVariable(torch.FloatTensor([[1], [0]])))
        x3 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[(- 1), 20], [3, 40]], [[(- 8), 2], [9, 10]]])), GPUVariable(torch.FloatTensor([[0], [0]])))
        result = SequenceBatch.cat([x1, x2, x3])
        assert_tensor_equal(result.values, [[[[1, 2], [3, 4]], [[(- 1), 20], [3, 40]], [[(- 1), 20], [3, 40]]], [[[8, 2], [9, 0]], [[(- 8), 2], [9, 10]], [[(- 8), 2], [9, 10]]]])
        assert_tensor_equal(result.mask, [[1, 1, 0], [1, 0, 0]])
    def some_seq_batch(self):
        # Batch with 2, 1 and 0 valid steps respectively.
        values = GPUVariable(torch.FloatTensor([[[1, 2], [4, 5], [4, 4]], [[0, 4], [43, 5], [(- 1), 20]], [[(- 1), 20], [43, 5], [0, 0]]]))
        mask = GPUVariable(torch.FloatTensor([[1, 1, 0], [1, 0, 0], [0, 0, 0]]))
        return SequenceBatch(values, mask)
    def test_weighted_sum(self, some_seq_batch):
        # Masked-out positions contribute nothing regardless of their weight.
        weights = GPUVariable(torch.FloatTensor([[0.5, 0.3, 0], [0.8, 0.2, 0], [0, 0, 0]]))
        result = SequenceBatch.weighted_sum(some_seq_batch, weights)
        assert_tensor_equal(result, [[1.7, 2.5], [0, 3.2], [0, 0]])
    def test_reduce_sum(self, some_seq_batch):
        result = SequenceBatch.reduce_sum(some_seq_batch)
        assert_tensor_equal(result, [[5, 7], [0, 4], [0, 0]])
    def test_reduce_mean(self, some_seq_batch):
        # With allow_empty, a fully-masked row averages to zeros; without it,
        # an empty row is an error.
        result = SequenceBatch.reduce_mean(some_seq_batch, allow_empty=True)
        assert_tensor_equal(result, [[2.5, 3.5], [0, 4], [0, 0]])
        with pytest.raises(ValueError):
            SequenceBatch.reduce_mean(some_seq_batch, allow_empty=False)
    def test_reduce_prod(self, some_seq_batch):
        # Fully-masked rows reduce to the multiplicative identity (1, 1).
        result = SequenceBatch.reduce_prod(some_seq_batch)
        assert_tensor_equal(result, [[4, 10], [0, 4], [1, 1]])
    def test_reduce_max(self, some_seq_batch):
        # reduce_max rejects batches containing a fully-masked row.
        with pytest.raises(ValueError):
            SequenceBatch.reduce_max(some_seq_batch)
        values = GPUVariable(torch.FloatTensor([[[1, 2], [4, 5], [4, 4]], [[0, (- 4)], [43, (- 5)], [(- 1), (- 20)]]]))
        mask = GPUVariable(torch.FloatTensor([[1, 0, 0], [1, 1, 0]]))
        seq_batch = SequenceBatch(values, mask)
        result = SequenceBatch.reduce_max(seq_batch)
        assert_tensor_equal(result, [[1, 2], [43, (- 4)]])
    def test_log_sum_exp(self):
        # Right-justified mask variant (left_justify=False).
        values = GPUVariable(torch.FloatTensor([[0, 1, (- 2), (- 3)], [(- 2), (- 5), 1, 0]]))
        mask = GPUVariable(torch.FloatTensor([[1, 1, 1, 0], [1, 1, 0, 0]]))
        seq_batch = SequenceBatch(values, mask, left_justify=False)
        result = SequenceBatch.log_sum_exp(seq_batch)
        correct = [1., (- 1.)]
        assert_tensor_equal(result, correct)
    def test_embed(self):
        # Indices select rows of the embedding matrix; padding maps to row 0.
        sequences = [[], [1, 2, 3], [3, 3], [2]]
        vocab = SimpleVocab([0, 1, 2, 3, 4])
        indices = SequenceBatch.from_sequences(sequences, vocab)
        embeds = GPUVariable(torch.FloatTensor([[0, 0], [2, 2], [3, 4], [(- 10), 1], [11, (- 1)]]))
        embedded = SequenceBatch.embed(indices, embeds)
        correct = np.array([[[0, 0], [0, 0], [0, 0]], [[2, 2], [3, 4], [(- 10), 1]], [[(- 10), 1], [(- 10), 1], [0, 0]], [[3, 4], [0, 0], [0, 0]]], dtype=np.float32)
        assert_tensor_equal(embedded.values, correct)
def test_multiple_inheritance_python_many_bases():
    """Python classes deriving from many pybind11 bases expose all base methods.

    Each base BaseN<i> is constructed with value i, and the assertions expect
    its accessor f<i>() to return 2 * i.

    Fix: the verification loops used exclusive upper bounds that stopped one
    base short (e.g. range(1, 4) never checked f4 on a class with bases
    N1..N4); each loop now covers all inherited bases.
    """
    class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
        def __init__(self):
            m.BaseN1.__init__(self, 1)
            m.BaseN2.__init__(self, 2)
            m.BaseN3.__init__(self, 3)
            m.BaseN4.__init__(self, 4)

    class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
        def __init__(self):
            m.BaseN5.__init__(self, 5)
            m.BaseN6.__init__(self, 6)
            m.BaseN7.__init__(self, 7)
            m.BaseN8.__init__(self, 8)

    class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15, m.BaseN16):
        def __init__(self):
            m.BaseN9.__init__(self, 9)
            m.BaseN10.__init__(self, 10)
            m.BaseN11.__init__(self, 11)
            m.BaseN12.__init__(self, 12)
            m.BaseN13.__init__(self, 13)
            m.BaseN14.__init__(self, 14)
            m.BaseN15.__init__(self, 15)
            m.BaseN16.__init__(self, 16)

    class MIMany19(MIMany14, MIMany58, m.BaseN9):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            m.BaseN9.__init__(self, 9)

    class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            MIMany916.__init__(self)
            m.BaseN17.__init__(self, 17)

    a = MIMany14()
    for i in range(1, 5):  # bases N1..N4 (range upper bound is exclusive)
        assert (getattr(a, ('f' + str(i)))() == (2 * i))
    b = MIMany916()
    for i in range(9, 17):  # bases N9..N16
        assert (getattr(b, ('f' + str(i)))() == (2 * i))
    c = MIMany19()
    for i in range(1, 10):  # N1..N8 via MIMany14/MIMany58, plus N9
        assert (getattr(c, ('f' + str(i)))() == (2 * i))
    d = MIMany117()
    for i in range(1, 18):  # N1..N16 plus N17
        assert (getattr(d, ('f' + str(i)))() == (2 * i))
class OptimizerNames(ExplicitEnum):
    """String identifiers for the selectable optimizer implementations."""
    ADAMW_HF = 'adamw_hf'  # the library's own AdamW implementation
    ADAMW_TORCH = 'adamw_torch'  # torch.optim.AdamW
    ADAMW_APEX_FUSED = 'adamw_apex_fused'  # NVIDIA Apex fused AdamW
    ADAFACTOR = 'adafactor'
class WideResnetBackbone(nn.Module):
    """Wide-ResNet (WRN-n-k) feature extractor using pre-activation blocks.

    Depth n must satisfy (n - 4) % 6 == 0; width factor k scales the channel
    counts of the three residual groups.  Output is the post-BN/LeakyReLU
    feature map (no pooling/classifier).
    """
    def __init__(self, k=1, n=28, drop_rate=0):
        super(WideResnetBackbone, self).__init__()
        (self.k, self.n) = (k, n)
        # Each of the 3 groups gets (n - 4) / 6 blocks of 2 convs each.
        assert (((self.n - 4) % 6) == 0)
        n_blocks = ((self.n - 4) // 6)
        # Channel plan: 16, then 16k, 32k, 64k.
        n_layers = ([16] + [((self.k * 16) * (2 ** i)) for i in range(3)])
        self.conv1 = nn.Conv2d(3, n_layers[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self.create_layer(n_layers[0], n_layers[1], bnum=n_blocks, stride=1, drop_rate=drop_rate, pre_res_act=True)
        self.layer2 = self.create_layer(n_layers[1], n_layers[2], bnum=n_blocks, stride=2, drop_rate=drop_rate, pre_res_act=False)
        self.layer3 = self.create_layer(n_layers[2], n_layers[3], bnum=n_blocks, stride=2, drop_rate=drop_rate, pre_res_act=False)
        self.bn_last = BatchNorm2d(n_layers[3], momentum=0.001)
        self.relu_last = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.init_weight()
    def create_layer(self, in_chan, out_chan, bnum, stride=1, drop_rate=0, pre_res_act=False):
        """Build one residual group: a (possibly strided) first block
        followed by bnum - 1 stride-1 blocks."""
        layers = [BasicBlockPreAct(in_chan, out_chan, drop_rate=drop_rate, stride=stride, pre_res_act=pre_res_act)]
        for _ in range((bnum - 1)):
            layers.append(BasicBlockPreAct(out_chan, out_chan, drop_rate=drop_rate, stride=1, pre_res_act=False))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the final feature map for input images x."""
        feat = self.conv1(x)
        feat = self.layer1(feat)
        feat = self.layer2(feat)
        feat = self.layer3(feat)
        feat = self.bn_last(feat)
        feat = self.relu_last(feat)
        return feat
    def init_weight(self):
        """Normal-init conv weights with fan-based std; zero conv biases.

        NOTE(review): named_children() is shallow — convolutions nested
        inside layer1..layer3 are not visited here; confirm the blocks
        initialise themselves.
        """
        for (_, child) in self.named_children():
            if isinstance(child, nn.Conv2d):
                # NOTE(review): kernel_size[0] is used twice — for non-square
                # kernels this should presumably be kernel_size[0] *
                # kernel_size[1]; harmless for the square kernels used here.
                n = ((child.kernel_size[0] * child.kernel_size[0]) * child.out_channels)
                nn.init.normal_(child.weight, 0, (1.0 / ((0.5 * n) ** 0.5)))
                if (not (child.bias is None)):
                    nn.init.constant_(child.bias, 0)
def get_hole_count(full_path, comment_prefixes=None):
    """Count the non-blank, non-comment lines ("holes") in a file.

    The file is read as UTF-8 with undecodable bytes backslash-escaped.

    Args:
        full_path: path of the file to scan.
        comment_prefixes: iterable of prefixes that mark comment lines.
            Defaults to the module-level ``comments`` list, preserving the
            original behavior for existing callers.

    Returns:
        int: number of stripped lines that are non-empty and do not start
        with any comment prefix.

    Fix: the original leaked the file handle (``open`` without ``with``);
    also replaces ``np.any`` over a list comprehension with the builtin
    tuple form of ``str.startswith``.
    """
    if comment_prefixes is None:
        comment_prefixes = comments
    # startswith accepts a tuple of prefixes; an empty tuple matches nothing,
    # matching np.any([]) == False in the original.
    prefixes = tuple(comment_prefixes)
    hole_count = 0
    with open(full_path, encoding='utf8', errors='backslashreplace') as fh:
        for line in fh:
            line = line.strip()
            if line and not line.startswith(prefixes):
                hole_count += 1
    return hole_count
class SVHN(VisionDataset):
    """SVHN dataset wrapper yielding {'image', 'label'} sample dicts.

    The 'train' split keeps the first `percentage` fraction of the official
    training matrix and 'val' keeps the remainder; 'test' and 'extra' use
    their full matrices.

    Fix: the `split_list` entries were corrupted (download URLs lost and the
    'test' md5 truncated), leaving two-element lists so that indexing
    ``split_list[split][2]`` raised IndexError.  The official
    URL/filename/md5 triples from the SVHN site are restored.
    """
    # Each split maps to [download URL, local filename, md5 checksum].
    split_list = {
        'train': ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat', 'train_32x32.mat', 'e26dedcc434d2e4c54c9b2d4a06d8373'],
        'val': ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat', 'train_32x32.mat', 'e26dedcc434d2e4c54c9b2d4a06d8373'],
        'test': ['http://ufldl.stanford.edu/housenumbers/test_32x32.mat', 'test_32x32.mat', 'eb5a983be6af1b164d9cef3875696f07'],
        'extra': ['http://ufldl.stanford.edu/housenumbers/extra_32x32.mat', 'extra_32x32.mat', 'a93ce644f1a588dc4d68dda5feec44a7'],
    }
    def __init__(self, args, split: str = 'train', percentage: float = 0.8, target_transform: Optional[Callable] = None, download: bool = False) -> None:
        """
        Args:
            args: namespace providing data_dir, crop_size, pretrained_model.
            split: one of 'train', 'val', 'test', 'extra'.
            percentage: train/val split point over the training matrix.
            target_transform: optional transform applied to labels.
            download: download the .mat file if it is not present.
        """
        super().__init__(args.data_dir, transform=get_transforms(split, args.crop_size, args.pretrained_model), target_transform=target_transform)
        self.split = verify_str_arg(split, 'split', tuple(self.split_list.keys()))
        self.url = self.split_list[split][0]
        self.filename = self.split_list[split][1]
        self.file_md5 = self.split_list[split][2]
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')
        import scipy.io as sio
        # The .mat file stores X as (H, W, C, N) and y as labels 1..10.
        loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
        self.data = loaded_mat['X']
        self.labels = loaded_mat['y'].astype(np.int64).squeeze()
        # SVHN stores the digit '0' as class 10; remap it to 0.
        np.place(self.labels, (self.labels == 10), 0)
        self.data = np.transpose(self.data, (3, 2, 0, 1))  # -> (N, C, H, W)
        if split == 'train':
            self.labels = self.labels[:int(percentage * len(self.labels))]
            self.data = self.data[:int(percentage * len(self.data))]
        if split == 'val':
            self.labels = self.labels[int(percentage * len(self.labels)):]
            self.data = self.data[int(percentage * len(self.data)):]
        self.classes = [str(class_name) for class_name in sorted(list(set(self.labels)))]
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return {'image': transformed image, 'label': int} for `index`."""
        img, target = self.data[index], int(self.labels[index])
        # Back to HWC ordering for PIL.
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        sample = {'image': img, 'label': target}
        return sample
    def __len__(self) -> int:
        return len(self.data)
    def _check_integrity(self) -> bool:
        """Return True if the split's .mat file exists and matches its md5."""
        root = self.root
        md5 = self.split_list[self.split][2]
        fpath = os.path.join(root, self.filename)
        return check_integrity(fpath, md5)
    def download(self) -> None:
        """Download the split's .mat file into self.root (md5-verified)."""
        md5 = self.split_list[self.split][2]
        download_url(self.url, self.root, self.filename, md5)
    def extra_repr(self) -> str:
        return 'Split: {split}'.format(**self.__dict__)
class _AtomicContext():
    """Context manager that pins a (BModel, BModelContext) pair for the
    duration of a `with` block and drops the references on exit."""
    def __call__(self, bmodel_net: BModel, bmodel_context: BModelContext) -> Any:
        # Bind the pair; returning self enables `with ctx(net, mctx): ...`.
        self.bmodel_net, self.bmodel_context = bmodel_net, bmodel_context
        return self
    def __enter__(self):
        # State was already installed by __call__; nothing to acquire here.
        return None
    def __exit__(self, *exc_info):
        # Release both references so the objects can be garbage-collected.
        self.bmodel_net = self.bmodel_context = None
class CCPM(BaseModel):
    """Convolutional Click Prediction Model.

    Sparse-feature embeddings are stacked field-wise, passed through a
    convolution/pooling stack (ConvLayer), flattened, fed to a DNN, and the
    DNN logit is added to the linear-model logit before the output
    activation.  Dense features are not supported.
    """
    def __init__(self, linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5), conv_filters=(4, 4), dnn_hidden_units=(256,), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu', dnn_use_bn=False, dnn_activation='relu'):
        super(CCPM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        # One kernel width is required per convolutional layer.
        if (len(conv_kernel_width) != len(conv_filters)):
            raise ValueError('conv_kernel_width must have same element with conv_filters')
        filed_size = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
        self.conv_layer = ConvLayer(field_size=filed_size, conv_kernel_width=conv_kernel_width, conv_filters=conv_filters, device=device)
        # DNN input width: remaining field count after pooling, times the
        # embedding size, times the last conv layer's filter count.
        self.dnn_input_dim = ((self.conv_layer.filed_shape * self.embedding_size) * conv_filters[(- 1)])
        self.dnn = DNN(self.dnn_input_dim, dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
        # L2-regularize DNN weight matrices, excluding batch-norm parameters.
        self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)
    def forward(self, X):
        """Compute predictions for a batch of encoded feature columns X."""
        linear_logit = self.linear_model(X)
        (sparse_embedding_list, _) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict, support_dense=False)
        if (len(sparse_embedding_list) == 0):
            raise ValueError('must have the embedding feature,now the embedding feature is None!')
        # Stack embeddings along the field axis and add a channel dim so the
        # result is a single-channel "image" for the conv stack.
        conv_input = concat_fun(sparse_embedding_list, axis=1)
        conv_input_concact = torch.unsqueeze(conv_input, 1)
        pooling_result = self.conv_layer(conv_input_concact)
        flatten_result = pooling_result.view(pooling_result.size(0), (- 1))
        dnn_output = self.dnn(flatten_result)
        dnn_logit = self.dnn_linear(dnn_output)
        logit = (linear_logit + dnn_logit)
        y_pred = self.out(logit)
        return y_pred
class CTRLPreTrainedModel():
    """Import-guard placeholder used when PyTorch is not installed.

    Any attempt to construct or load the model routes through
    ``requires_pytorch``, which is expected to raise an informative error.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # Mirrors the real model's loader name so call sites fail clearly.
        requires_pytorch(self)
def _create_attention_images_summary(final_context_state):
    """Build a tf.summary image op visualising attention alignments.

    Stacks the decoder's alignment history, moves time to the last axis,
    adds a channel axis, and scales weights to pixel intensities.
    """
    # alignment_history is a TensorArray; stack() materialises it.
    imgs = final_context_state.alignment_history.stack()
    # Presumably (time, batch, src) -> (batch, src, time, 1) — image layout.
    imgs = tf.expand_dims(tf.transpose(imgs, [1, 2, 0]), -1)
    # Attention weights are fractions; scale up for visibility.
    imgs = imgs * 255
    return tf.summary.image('attention_images', imgs)
def __plot_validation__(curve, classes, area, area_method, colors, markers):
    """Validate plot inputs and create the figure/axes pair.

    Returns (fig, ax, classes), where classes defaults to curve.classes.
    Raises pycmPlotError when matplotlib is unavailable or when colors /
    markers do not match the number of classes.
    """
    try:
        from matplotlib import pyplot as plt
    except Exception:
        raise pycmPlotError(MATPLOTLIB_PLOT_LIBRARY_ERROR)
    classes = curve.classes if classes is None else classes
    if area:
        # Pre-compute the area so later plotting can annotate it.
        curve.area(method=area_method)
    # Per-class style lists, when given, must be one entry per class.
    for styles, error in ((colors, PLOT_COLORS_CLASS_MISMATCH_ERROR), (markers, PLOT_MARKERS_CLASS_MISMATCH_ERROR)):
        if styles is not None and len(classes) != len(styles):
            raise pycmPlotError(error)
    fig, ax = plt.subplots()
    return (fig, ax, classes)
class RandomDataset(Dataset):
    """Dataset yielding ``length`` independently sampled items.

    Every ``__getitem__`` call invokes ``sample_fn(size)`` afresh; the
    index is ignored, so items are not reproducible per-index.
    """

    def __init__(self, length: int, sample_fn: Callable, size: Union[(Tuple, List)]):
        # Number of samples reported by __len__.
        self.length = length
        # Callable mapping `size` to one sample (expected to return a Tensor).
        self.sample_fn = sample_fn
        self.size = size

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, idx: int) -> Tensor:
        # idx is intentionally unused: each access is a fresh random draw.
        return self.sample_fn(self.size)
class SubNodeFuser(object):
def __call__(self, graph):
nodes = graph.nodes
fused_nodes = []
for node in nodes:
if (len(node.parents) != 1):
continue
parent = node.get_only_parent()
if (len(parent.children) != 1):
continue
if (not self.is_eligible_pair(parent, node)):
continue
for child in node.children:
child.parents.remove(node)
parent.add_child(child)
parent.children.remove(node)
fused_nodes.append(node)
self.merge(parent, node)
transformed_nodes = [node for node in nodes if (node not in fused_nodes)]
return graph.replaced(transformed_nodes)
def is_eligible_pair(self, parent, child):
raise NotImplementedError('Must be implemented by subclass.')
def merge(self, parent, child):
raise NotImplementedError('Must be implemented by subclass') |
def main(data_args: DataArguments, model_args: ModelArguments, training_args: TrainingArguments, outfile: str, ckpt_num: int, max_samples: int=None, prompt: Optional[str]=None, max_new_tokens: int=2048):
    """Run audio-LM inference over the eval split and dump results to CSV.

    When `prompt` is None each example's own prompt tokens are used;
    otherwise the fixed `prompt` string is reused for every example.
    Writes one CSV row per example: id, prompt text, ground-truth
    completion, and model completion. Requires CUDA.
    """
    # Decide the prompt mode once, before `prompt` gets rebound below.
    prompt_is_provided = (prompt is not None)
    assert data_args.is_multimodal
    print('loading model and data...')
    (model, tokenizer) = load_pretrained_model(model_args.model_name_or_path, ckpt_num=ckpt_num)
    data_args.mm_use_audio_start_end = True
    data_module = make_data_module(tokenizer=tokenizer, data_args=data_args)
    dataset = data_module['eval_dataset']
    # Token sequence separating prompt from response in each example.
    end_seq = get_prompt_end_token_sequence(tokenizer, model_args.model_name_or_path)
    if (not os.path.exists(os.path.dirname(outfile))):
        os.makedirs(os.path.dirname(outfile))
    model.cuda()
    multimodal_cfg = make_mm_config(data_args)
    outputs = []
    with torch.autocast(device_type='cuda', dtype=get_autocast_type(training_args)):
        with torch.inference_mode():
            for (i, ex) in tqdm(enumerate(dataset), total=max_samples):
                if (not prompt_is_provided):
                    # Use the example's own prompt (tokens before end_seq).
                    input_ids = ex['input_ids']
                    prompt = extract_prompt_tokens(input_ids, end_seq)
                    prompt = torch.unsqueeze(prompt, 0).cuda()
                    stopping_criteria = KeywordsStoppingCriteria(keywords=['###'], tokenizer=tokenizer, input_ids=prompt)
                    outputs_i = model.generate(input_ids=prompt, audio_encodings=torch.unsqueeze(ex['audio_encoding'], 0).cuda(), max_new_tokens=max_new_tokens, stopping_criteria=[stopping_criteria])
                    prompt_text = tokenizer.decode(prompt[0])
                else:
                    print(f'[DEBUG] inferring with fixed prompt: {prompt}')
                    outputs_i = infer_with_prompt(prompt, model=model, audio_encoding=ex['audio_encoding'], multimodal_cfg=multimodal_cfg, end_seq=end_seq, tokenizer=tokenizer, audio_first=True, max_new_tokens=max_new_tokens)
                    prompt_text = prompt
                print('[PROMPT]')
                print(prompt_text)
                print('[MODEL COMPLETION]')
                model_completion_ids = extract_response_tokens(outputs_i[0], end_seq)
                model_completion_text = tokenizer.decode(model_completion_ids)
                print(model_completion_text)
                print('[ORIGINAL/GROUND TRUTH COMPLETION]')
                orig_completion_ids = extract_response_tokens(ex['input_ids'], end_seq)
                orig_completion_text = tokenizer.decode(orig_completion_ids)
                print(orig_completion_text)
                output_dict = {'example_id': ex['example_id'], 'prompt_text': prompt_text, 'original_completion_text': orig_completion_text, 'model_completion_text': model_completion_text}
                outputs.append(output_dict)
                print(('%' * 40))
                # NOTE(review): i is 0-based, so this processes max_samples+1
                # examples before stopping — confirm whether intended.
                if (max_samples and (i >= max_samples)):
                    break
    print(f'writing {len(outputs)} results to {outfile}')
    pd.DataFrame(outputs).to_csv(outfile, index=False)
def get_hash(x, bucket_size):
    """Hash x into the range [0, bucket_size).

    For an ndarray the hash is applied element-wise and an int64 array of
    the same shape is returned; otherwise a single bucket index.
    """
    if not isinstance(x, np.ndarray):
        return hashing(x) % bucket_size
    buckets = np.ndarray(x.shape, dtype='int64')
    for flat_idx in range(x.size):
        # take/put operate on the flattened array, so shape is preserved.
        buckets.put(flat_idx, hashing(x.take(flat_idx)) % bucket_size)
    return buckets
def find_head(x):
    """Return (text, lemma) of the syntactic root token of phrase x.

    Pronoun roots are returned lower-cased instead of the '-PRON-' lemma.
    Implicitly returns None if parsing yields no root token.
    """
    for tok in nlp(x):
        if (tok.head == tok):
            # spaCy lemmatizes pronouns to '-PRON-'; prefer the surface form.
            if (tok.lemma_ == u'-PRON-'):
                return (tok.text, tok.text.lower())
            return (tok.text, tok.lemma_)
class ItemAutoRecModel(keras.Model):
    """Item-based AutoRec: an autoencoder over item rating vectors.

    The encoder compresses a rating vector to `hidden_neuron` units and the
    decoder reconstructs it; training only penalises observed (non-zero)
    entries.
    """

    def __init__(self, data, num_users, num_items, lr, hidden_neuron, l_w, name='ItemAutoRec', **kwargs):
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(42)  # reproducible weight initialisation
        self.data = data
        self.num_users = num_users
        self.num_items = num_items
        self.lr = lr
        self.hidden_neuron = hidden_neuron
        self.l_w = l_w
        self.encoder = Encoder(hidden_neuron=self.hidden_neuron, regularization=self.l_w)
        self.decoder = Decoder(num_users=self.num_users, regularization=self.l_w)
        self.optimizer = tf.optimizers.Adam(self.lr)

    def get_config(self):
        # Serialization is deliberately unsupported.
        raise NotImplementedError

    def call(self, inputs, training=None, **kwargs):
        """Encode then decode a batch of rating vectors."""
        return self.decoder(self.encoder(inputs, training=training))

    def train_step(self, batch):
        """One gradient step on a batch; returns the scalar loss."""
        with tf.GradientTape() as tape:
            recon = self.call(inputs=batch, training=True)
            # Mask the reconstruction so unobserved (zero) ratings do not
            # contribute to the loss.
            masked = recon * tf.sign(batch)
            residual = batch - masked
            loss = tf.reduce_mean(tf.reduce_sum(residual ** 2, axis=1))
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss

    def predict(self, inputs, training=False, **kwargs):
        """Reconstruction scores for a batch (no gradient step)."""
        return self.call(inputs=inputs, training=training)

    def get_top_k(self, preds, train_mask, k=100):
        """Top-k scores per row; masked-out entries are forced to -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, -np.inf), k=k, sorted=True)

    def get_recs(self, inputs, training=False, **kwargs):
        """Recommendation scores — alias of predict()."""
        return self.predict(inputs)
class SRS_SENet(nn.Module):
    """SENet-style ResNet backbone with an SRS activation after the stem.

    CIFAR-style layout: 3x3 stem keeping spatial size, four stages with
    channel widths 64/128/256/512, 4x4 average pooling, linear classifier.
    """

    def __init__(self, block, num_blocks, num_classes=100):
        super(SRS_SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
        self.srs = SRS()

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first may downsample."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = self.srs(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def check_port(port: int) -> None:
    """Raise ValueError if `port` is already bound on 127.0.0.1.

    Any other socket error is re-raised unchanged. Fix over the original:
    the probe socket is now always closed (it previously leaked — and
    stayed bound — on every call).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('127.0.0.1', port))
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            raise ValueError(f'Port {port} is already in use')
        raise
    finally:
        # Release the probe socket whether the bind succeeded or not.
        s.close()
class DWFormer(nn.Module):
    """Dynamic-window transformer for sequence classification.

    A vanilla transformer block feeds three DWFormer blocks (attention
    carried forward between them, with LayerNorms in between), followed by
    a classification head.
    """

    def __init__(self, feadim, n_head, FFNdim, classnum):
        super(DWFormer, self).__init__()
        self.or1 = vanilla_transformer_block(feadim, n_head, FFNdim)
        self.dt1 = DWFormerBlock(feadim, n_head, FFNdim)
        self.dt2 = DWFormerBlock(feadim, n_head, FFNdim)
        self.dt3 = DWFormerBlock(feadim, n_head, FFNdim)
        self.classifier = classifier(feadim, classnum)
        self.PE = PositionalEncoding(feadim)
        self.ln1 = nn.LayerNorm(feadim, eps=1e-05)
        self.ln2 = nn.LayerNorm(feadim, eps=1e-05)
        self.ln3 = nn.LayerNorm(feadim, eps=1e-05)
        self.ln4 = nn.LayerNorm(feadim, eps=1e-05)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier-init weight matrices; leave 1-D params (biases, norms) alone.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, x, x_mask):
        batch, times, _ = x.shape
        halting = torch.zeros((batch, times), device=x.device)
        h, _, attn = self.or1(self.ln1(x), halting)
        h, _, attn = self.dt1(h, x_mask, attn)
        h, _, attn = self.dt2(self.ln2(h), x_mask, attn)
        h, _, attn = self.dt3(self.ln3(h), x_mask, attn)
        return self.classifier(h)

    def getNuthresholdsist(self, start, end, n):
        """Sample n distinct random integers from [start, end] inclusive."""
        picked = set()
        while len(picked) < n:
            picked.add(random.randint(start, end))
        return list(picked)
.parametrize('n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, base_reward_function, is_factorizable, evaluation_policy_logit_, description', valid_input_of_calc_ground_truth_policy_value)
def test_calc_ground_truth_policy_value_using_valid_input_data(n_rounds, n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, base_reward_function, is_factorizable, evaluation_policy_logit_, description):
    """Smoke-test calc_ground_truth_policy_value on valid parametrized inputs.

    Only asserts the result is a non-negative float; the exact value depends
    on the synthetic dataset's randomness.
    """
    dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, click_model=click_model, base_reward_function=base_reward_function, is_factorizable=is_factorizable)
    logged_bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    policy_value = dataset.calc_ground_truth_policy_value(evaluation_policy_logit_=evaluation_policy_logit_, context=logged_bandit_feedback['context'])
    assert (isinstance(policy_value, float) and (0 <= policy_value))
class SpecialHyperellipticQuotientRing(UniqueRepresentation, CommutativeAlgebra):
    """Quotient ring K[x,y] / (y^2 - Q(x)) for Monsky-Washnitzer
    cohomology computations on an elliptic or hyperelliptic curve.

    When ``invert_y`` is True the y-coefficients live in a Laurent series
    ring, so negative powers of y are available.
    """
    # Residue characteristic; None until set elsewhere.
    _p = None

    def __init__(self, Q, R=None, invert_y=True):
        """Build the ring from a curve or a monic polynomial Q (y^2 = Q(x)).

        Q may be an elliptic curve in Weierstrass normal form (a1 = a2 = 0),
        a hyperelliptic curve of the form y^2 = Q(x), or the polynomial Q
        itself (which must be monic).
        """
        if (R is None):
            R = Q.base_ring()
        x = PolynomialRing(R, 'xx').gen()
        if is_EllipticCurve(Q):
            E = Q
            if ((E.a1() != 0) or (E.a2() != 0)):
                raise NotImplementedError('curve must be in Weierstrass normal form')
            # Dehomogenize the defining cubic: Q(x) = -F(x, 0, 1).
            Q = (- E.change_ring(R).defining_polynomial()(x, 0, 1))
            self._curve = E
        elif is_HyperellipticCurve(Q):
            C = Q
            if (C.hyperelliptic_polynomials()[1] != 0):
                raise NotImplementedError('curve must be of form y^2 = Q(x)')
            Q = C.hyperelliptic_polynomials()[0].change_ring(R)
            self._curve = C
        if isinstance(Q, Polynomial):
            self._Q = Q.change_ring(R)
            self._coeffs = self._Q.coefficients(sparse=False)
            # pop() removes the leading coefficient, which must be 1.
            if (self._coeffs.pop() != 1):
                raise NotImplementedError('polynomial must be monic')
            if (not hasattr(self, '_curve')):
                # Only a polynomial was given: reconstruct the curve from Q.
                if (self._Q.degree() == 3):
                    ainvs = [0, self._Q[2], 0, self._Q[1], self._Q[0]]
                    self._curve = EllipticCurve(ainvs, check_squarefree=R.is_field())
                else:
                    self._curve = HyperellipticCurve(self._Q, check_squarefree=R.is_field())
        else:
            raise NotImplementedError(('must be an elliptic curve or polynomial Q for y^2 = Q(x)\n(Got element of %s)' % Q.parent()))
        self._n = int(Q.degree())
        # y-coefficients: Laurent series when y is invertible, else polynomials.
        self._series_ring = (LaurentSeriesRing if invert_y else PolynomialRing)(R, 'y')
        self._series_ring_y = self._series_ring.gen(0)
        self._series_ring_0 = self._series_ring.zero()
        CommutativeAlgebra.__init__(self, R)
        self._poly_ring = PolynomialRing(self._series_ring, 'x')
        self._x = self.element_class(self, self._poly_ring.gen(0))
        self._y = self.element_class(self, self._series_ring.gen(0))
        self._Q_coeffs = Q.change_ring(self._series_ring).list()
        self._dQ = Q.derivative().change_ring(self)(self._x)
        self._monsky_washnitzer = MonskyWashnitzerDifferentialRing(self)
        # Caches for monomial differentials and their coefficient pairs.
        self._monomial_diffs = {}
        self._monomial_diff_coeffs = {}

    def _repr_(self):
        """String representation, noting whether y is invertible."""
        y_inverse = (',y^-1' if is_LaurentSeriesRing(self._series_ring) else '')
        return ('SpecialHyperellipticQuotientRing K[x,y%s] / (y^2 = %s) over %s' % (y_inverse, self._Q, self.base_ring()))

    def base_extend(self, R):
        """Extend the base to R when a coercion from the current base exists."""
        if R.has_coerce_map_from(self.base_ring()):
            return self.change_ring(R)
        raise TypeError('no such base extension')

    def change_ring(self, R):
        """Return the same quotient ring constructed over base ring R."""
        return SpecialHyperellipticQuotientRing(self._Q.change_ring(R), R, is_LaurentSeriesRing(self._series_ring))

    def _element_constructor_(self, val, offset=0, check=True):
        """Coerce val into this ring, optionally shifting by y^offset."""
        if (isinstance(val, SpecialHyperellipticQuotientElement) and (val.parent() is self)):
            if (offset == 0):
                return val
            else:
                return (val << offset)
        elif isinstance(val, MonskyWashnitzerDifferential):
            return self._monsky_washnitzer(val)
        return self.element_class(self, val, offset, check)
    # NOTE(review): the bare '_method' lines below look like stripped
    # decorators (likely @cached_method) — confirm against upstream source.
    _method
    def one(self):
        """Return the multiplicative identity."""
        return self.element_class(self, self._poly_ring.one(), check=False)
    _method
    def zero(self):
        """Return the additive identity."""
        return self.element_class(self, self._poly_ring.zero(), check=False)

    def gens(self):
        """Return the pair of generators (x, y)."""
        return (self._x, self._y)

    def x(self):
        """Return the generator x."""
        return self._x

    def y(self):
        """Return the generator y."""
        return self._y

    def monomial(self, i, j, b=None):
        """Return b * x^i * y^j (b defaults to 1).

        For 0 < i < n the element is built directly by placing the y-part
        in coefficient slot i; otherwise it is assembled from powers of x
        shifted by y^j.
        """
        i = int(i)
        j = int(j)
        if ((0 < i) and (i < self._n)):
            if (b is None):
                by_to_j = (self._series_ring_y << (j - 1))
            else:
                by_to_j = (self._series_ring(b) << j)
            v = ([self._series_ring_0] * self._n)
            v[i] = by_to_j
            return self.element_class(self, v)
        if (b is not None):
            b = self.base_ring()(b)
        return (((self._x ** i) << j) if (b is None) else ((b * (self._x ** i)) << j))

    def monomial_diff_coeffs(self, i, j):
        """Return the coefficient data of d(x^i y^j).

        For i < n this is assembled from precomputed per-i vectors; larger
        i falls back to computing the differential and extracting its
        y^(j-1) and y^(j+1) coefficients (cached per (i, j)).
        """
        try:
            return self._monomial_diff_coeffs[(i, j)]
        except KeyError:
            pass
        if (i < self._n):
            try:
                (A, B, two_i_x_to_i) = self._precomputed_diff_coeffs[i]
            except AttributeError:
                # Lazily build the per-i table on first use.
                self._precomputed_diff_coeffs = self._precompute_monomial_diffs()
                (A, B, two_i_x_to_i) = self._precomputed_diff_coeffs[i]
            if (i == 0):
                return ((j * A), (j * B))
            else:
                return ((j * A), ((j * B) + two_i_x_to_i))
        else:
            dg = self.monomial(i, j).diff()
            coeffs = [dg.extract_pow_y((j - 1)), dg.extract_pow_y((j + 1))]
            self._monomial_diff_coeffs[(i, j)] = coeffs
            return coeffs

    def monomial_diff_coeffs_matrices(self):
        """Return the two n x n (transposed) matrices built from the
        precomputed monomial differential coefficient vectors."""
        # Ensure the precomputed table exists.
        self.monomial_diff_coeffs(0, 0)
        R = self.base_ring()
        mat_1 = matrix(R, self._n, self._n)
        mat_2 = matrix(R, self._n, self._n)
        for i in range(self._n):
            mat_1[i] = self._precomputed_diff_coeffs[i][1]
            mat_2[i] = self._precomputed_diff_coeffs[i][2]
        return (mat_1.transpose(), mat_2.transpose())

    def _precompute_monomial_diffs(self):
        """Precompute, for each 0 <= i < n, vector data describing d(x^i y)."""
        (x, y) = self.gens()
        R = self.base_ring()
        V = FreeModule(R, self.degree())
        As = []
        for i in range(self.degree()):
            dg = self.monomial(i, 1).diff()
            # 2*i*x^(i-1)*y^2 term (zero for i = 0).
            two_i_x_to_i = ((((R((2 * i)) * (x ** (i - 1))) * y) * y) if (i > 0) else self(0))
            A = (dg - self._monsky_washnitzer(two_i_x_to_i))
            As.append((V(A.extract_pow_y(0)), V(A.extract_pow_y(2)), V(two_i_x_to_i.extract_pow_y(2))))
        return As

    def Q(self):
        """Return the defining polynomial Q, where y^2 = Q(x)."""
        return self._Q

    def curve(self):
        """Return the underlying (hyper)elliptic curve."""
        return self._curve

    def degree(self):
        """Return deg(Q) as a Sage Integer."""
        return Integer(self._n)

    def prime(self):
        """Return the residue characteristic p (may be None)."""
        return self._p

    def monsky_washnitzer(self):
        """Return the associated ring of Monsky-Washnitzer differentials."""
        return self._monsky_washnitzer

    def is_field(self, proof=True):
        """This quotient ring is never a field."""
        return False
    # Element class used by the coercion framework.
    Element = SpecialHyperellipticQuotientElement
def __getattr__(name):
    """Module-level attribute hook (PEP 562): forward access to deprecated
    `scipy.signal.ltisys` attributes to the private `_ltisys` module,
    emitting a deprecation warning along the way."""
    return _sub_module_deprecation(sub_package='signal', module='ltisys', private_modules=['_ltisys'], all=__all__, attribute=name)
def seed_everything(seed=42):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    Also records PYTHONHASHSEED in the environment (affects subprocesses
    only) and forces deterministic cuDNN kernels.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand
    (x4 channels), with an identity or downsampled skip connection."""

    expansion = 4

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=out_channel)
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=stride, bias=False, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=out_channel)
        self.conv3 = nn.Conv2d(in_channels=out_channel, out_channels=(out_channel * self.expansion), kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(num_features=(out_channel * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Skip path: downsample when shape/stride changes, else identity.
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += shortcut
        return self.relu(h)
def init_config(config, default_config, name=None):
    """Merge a user config over defaults.

    Falls back to default_config entirely when config is None; otherwise
    fills missing keys into config in place. When `name` is given and
    config['PRINT_CONFIG'] is truthy, prints the resulting table.
    Returns the merged config.
    """
    if config is None:
        config = default_config
    else:
        for key, value in default_config.items():
            config.setdefault(key, value)
    if name and config['PRINT_CONFIG']:
        print('\n%s Config:' % name)
        for key in config:
            print('%-20s : %-30s' % (key, config[key]))
    return config
class DropboxGetItemMetadata(VirtualFunctionTool):
    """Virtual tool spec: fetch metadata for a Dropbox file or folder.

    Purely declarative — the parameters/returns/exceptions lists describe
    the simulated tool's contract; no real Dropbox call is made here.
    """
    name = 'DropboxGetItemMetadata'
    summary = "Get metadata of a file or folder in the user's Dropbox account."
    # Single required argument: the cloud path to inspect.
    parameters: List[ArgParameter] = [{'name': 'item_path', 'type': 'string', 'description': "The cloud file or folder path in the user's Dropbox account.", 'required': True}]
    returns: List[ArgReturn] = [{'name': 'metadata', 'type': 'object', 'description': "An object with fields such as 'id' (the unique identifier of the file or folder), 'name' (the name of the file or folder), 'type' (the type of the item, either 'file' or 'folder'), 'size' (the size of the file in bytes), 'created_at' (the creation timestamp), 'modified_at' (the last modification timestamp), etc."}]
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'item_path' does not exist."}]
def getFilenames():
    """Yield absolute paths of all non-solver .prototxt files found under
    the repository's `models` and `examples` directories.

    Note: despite the name this is a generator, not a list builder.
    Fixes over the original: the dead `result = []` accumulator is gone,
    and the final yield no longer re-joins `dirname` with an already
    absolute path (a no-op, since os.path.join returns the second operand
    when it is absolute).
    """
    root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    assert os.path.exists(root_dir)
    for dirname in ('models', 'examples'):
        dirname = os.path.join(root_dir, dirname)
        assert os.path.exists(dirname)
        for (cwd, _, filenames) in os.walk(dirname):
            for filename in filenames:
                # cwd is absolute (rooted at dirname), so this is absolute.
                filename = os.path.join(cwd, filename)
                if filename.endswith('.prototxt') and ('solver' not in filename):
                    yield filename
def sig(epoch):
    """Sigmoid ramp over epochs: ~0 early, 0.5 at epoch EP/2, ~1 late.

    EP is the module-level total epoch count; scale controls steepness.
    """
    scale = 5
    midpoint = EP / (scale * 2)
    return 1 / (1 + np.exp(-(epoch / scale - midpoint)))
class KRTToRCBijectionAbstract():
    """Abstract driver for the bijection from tensor products of
    Kirillov-Reshetikhin tableaux to rigged configurations.

    Subclasses supply the type-specific next_state() step.
    """

    def __init__(self, tp_krt):
        """Initialize mutable bijection state from a KR-tableaux element."""
        self.tp_krt = tp_krt
        self.n = tp_krt.parent().cartan_type().classical().rank()
        # Start from the empty rigged configuration; mutated during run().
        self.ret_rig_con = tp_krt.parent().rigged_configurations()(partition_list=([[]] * self.n))
        self.ret_rig_con._set_mutable()
        # Dimensions and path of the partially processed tableaux.
        self.cur_dims = []
        self.cur_path = []

    def __eq__(self, rhs):
        # All instances of the abstract driver compare equal by type.
        return isinstance(rhs, KRTToRCBijectionAbstract)

    def run(self, verbose=False):
        """Execute the bijection crystal-by-crystal, column-by-column,
        letter-by-letter, and return the (now immutable) result."""
        if verbose:
            from sage.combinat.rigged_configurations.tensor_product_kr_tableaux_element import TensorProductOfKirillovReshetikhinTableauxElement
        for cur_crystal in reversed(self.tp_krt):
            target = cur_crystal.parent()._r
            for (col_number, cur_column) in enumerate(reversed(cur_crystal.to_array(False))):
                # Each column starts as a fresh height-growing strip.
                self.cur_path.insert(0, [])
                self.cur_dims.insert(0, [0, 1])
                for letter in reversed(cur_column):
                    self.cur_dims[0][0] = self._next_index(self.cur_dims[0][0], target)
                    val = letter.value
                    if verbose:
                        print('')
                        print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))
                        print('')
                        print(repr(self.ret_rig_con))
                        print('\n')
                    self.cur_path[0].insert(0, [letter])
                    self.next_state(val)
                if (col_number > 0):
                    if verbose:
                        print('')
                        print(repr(TensorProductOfKirillovReshetikhinTableauxElement(self.tp_krt.parent(), self.cur_path)))
                        print('')
                        print(repr(self.ret_rig_con))
                        print('\n')
                        print('Applying column merge')
                    # Fold the just-finished column into the previous one.
                    for (i, letter_singleton) in enumerate(self.cur_path[0]):
                        self.cur_path[1][i].insert(0, letter_singleton[0])
                    self.cur_dims[1][1] += 1
                    self.cur_path.pop(0)
                    self.cur_dims.pop(0)
                    for a in range(self.n):
                        self._update_vacancy_nums(a)
        self.ret_rig_con.set_immutable()
        return self.ret_rig_con
    # NOTE(review): bare '_method' looks like a stripped decorator (likely
    # @abstract_method) — confirm against upstream source.
    _method
    def next_state(self, val):
        """Advance the bijection by one letter; implemented by subclasses."""

    def _update_vacancy_nums(self, a):
        """Recompute the vacancy numbers of the a-th rigged partition,
        reusing the current value while consecutive rows share a length."""
        if (not self.ret_rig_con[a]):
            return
        block_len = self.ret_rig_con[a][0]
        nu = self.ret_rig_con.nu()
        vac_num = self.ret_rig_con.parent()._calc_vacancy_number(nu, a, nu[a][0], dims=self.cur_dims)
        for (i, row_len) in enumerate(self.ret_rig_con[a]):
            if (block_len != row_len):
                # Row length changed: recompute for the new block.
                vac_num = self.ret_rig_con.parent()._calc_vacancy_number(nu, a, row_len, dims=self.cur_dims)
                block_len = row_len
            self.ret_rig_con[a].vacancy_numbers[i] = vac_num

    def _update_partition_values(self, a):
        """Replace None riggings with their vacancy numbers and restore the
        required ordering of riggings within equal-length rows."""
        rigged_partition = self.ret_rig_con[a]
        for (index, value) in enumerate(rigged_partition.rigging):
            if (value is None):
                rigged_partition.rigging[index] = rigged_partition.vacancy_numbers[index]
                if ((index > 0) and (rigged_partition[(index - 1)] == rigged_partition[index]) and (rigged_partition.rigging[(index - 1)] < rigged_partition.rigging[index])):
                    # The freshly filled rigging is out of order: find where
                    # it belongs among earlier rows and move it there.
                    pos = 0
                    width = rigged_partition[index]
                    val = rigged_partition.rigging[index]
                    for i in reversed(range((index - 1))):
                        if ((rigged_partition[i] > width) or (rigged_partition.rigging[i] >= val)):
                            pos = (i + 1)
                            break
                    rigged_partition.rigging.pop(index)
                    rigged_partition.rigging.insert(pos, val)

    def _next_index(self, r, target):
        """Return the next row index while building a column (just r + 1)."""
        return (r + 1)
def adjust_lr(optimizer, init_lr, epoch, decay_rate=0.1, decay_epoch=30):
    """Step-decay every param group's learning rate.

    lr = init_lr * decay_rate ** (epoch // decay_epoch)

    Returns the new learning rate. Fix over the original: `lr` is computed
    once up front, so an optimizer with zero param groups no longer raises
    NameError on the return statement.
    """
    lr = init_lr * (decay_rate ** (epoch // decay_epoch))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
class Node():
    """Detection node for a tracking graph: a bounding box observed at a
    given frame, optionally linked to its match in a later frame."""

    def __init__(self, bbox, frame_id, next_frame_id=(- 1)):
        self.bbox = bbox
        # Frame index where this detection was observed.
        self.frame_id = frame_id
        # Frame index of the linked successor detection; -1 means unlinked.
        self.next_frame_id = next_frame_id
def validate_context_and_answer_and_hops(example, pred, trace=None):
    """DSPy metric: accept a prediction only when the answer matches exactly,
    appears in the retrieved passages, every hop query is at most 100
    characters, and no hop is a near-duplicate (frac=0.8) of an earlier one.
    """
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    # The question plus every generated hop query from the trace.
    hops = [example.question]
    hops.extend(outputs.query for *_, outputs in trace if 'query' in outputs)
    if any(len(h) > 100 for h in hops):
        return False
    for idx in range(2, len(hops)):
        if dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8):
            return False
    return True
class SysStdLogger(object):
    """Tee-style logger mirroring writes to a stream and an append-only file.

    Session start/end timestamps are written to the file; a known noisy
    ffmpeg message ('deprecated pixel format used') is filtered out of both
    outputs.
    """

    def __init__(self, filename='terminal log.txt', stream=sys.stdout):
        self.terminal = stream
        self.log = open(filename, 'a')
        # Stamp session start in the log file.
        self.log.write(''.join([time.strftime('%y-%m-%d %H:%M:%S'), '\n\n']))

    def write(self, message):
        # Drop the noisy ffmpeg warning; mirror everything else.
        if 'deprecated pixel format used' not in message:
            self.terminal.write(message)
            self.log.write(message)

    def flush(self):
        # Required by the stream protocol; buffering is left to the file.
        pass

    def __del__(self):
        # Stamp session end. NOTE(review): __del__ is not guaranteed at
        # interpreter shutdown; an explicit close() would be safer.
        self.log.write(''.join(['\n', time.strftime('%y-%m-%d %H:%M:%S')]))
        self.log.close()
def process_evaluation_result(outdir, filename):
    """Forward an evaluation result file to the registered callback, if any.

    No-op when no 'process_evaluation_result' callback is registered.
    """
    callback = _get_callback('process_evaluation_result')
    if callback:
        callback.process_evaluation_result(outdir, filename)
class MultiPassOptimizerTests(tf.test.TestCase):
    """Tests for MultiPassOptimizer: gradients from multiple passes must be
    averaged before a single wrapped-optimizer update is applied."""

    def test_basic(self):
        # Both aggregation backends must yield identical results.
        for aggregate_method in ['cumsum', 'storage']:
            with tf.Graph().as_default(), tf.Session() as sess, log.verbose_level(2):
                opt = tf.train.GradientDescentOptimizer(0.1)
                # Accumulate over 2 passes before applying the update.
                mp_opt = MultiPassOptimizer(opt, 2, aggregate_method=aggregate_method)
                a = tf.get_variable('a', shape=[10, 12], initializer=tf.constant_initializer(0.0))
                b = tf.get_variable('b', shape=[11, 13], initializer=tf.constant_initializer(0.0))
                da1 = (tf.ones([10, 12]) * 0.4)
                da2 = (tf.ones([10, 12]) * 0.6)
                db1 = (tf.ones([11, 13]) * 0.8)
                db2 = (tf.ones([11, 13]) * 1.0)
                gv1 = [(da1, a), (db1, b)]
                gv2 = [(da2, a), (db2, b)]
                op1 = mp_opt.apply_gradients(gv1)
                op2 = mp_opt.apply_gradients(gv2)
                sess.run(tf.global_variables_initializer())
                sess.run([op1])
                sess.run([op2])
                (a, b) = sess.run([a, b])
                # Expected: -lr * mean(grads) = -0.1 * 0.5 = -0.05 for a,
                # and -0.1 * 0.9 = -0.09 for b.
                np.testing.assert_allclose(a, ((- np.ones([10, 12])) * 0.05))
                np.testing.assert_allclose(b, ((- np.ones([11, 13])) * 0.09))
class VertexCube(VertexBase):
    """Vertex for hypercube triangulations with symmetric neighbour links."""

    def __init__(self, x, nn=None, index=None):
        super().__init__(x, nn=nn, index=index)

    def connect(self, v):
        """Link v and self as mutual neighbours (no self-loops; idempotent)."""
        if v is self or v in self.nn:
            return
        self.nn.add(v)
        v.nn.add(self)

    def disconnect(self, v):
        """Remove the mutual link with v, if present."""
        if v in self.nn:
            self.nn.remove(v)
            v.nn.remove(self)
def get_char_embed(word, model, device):
    """Return the model's character-level embedding for `word` on `device`.

    Thin convenience wrapper around model.get_char_embeds.
    """
    char_vec = model.get_char_embeds(word, device)
    return char_vec
def convert_tag_vocab(state_dict):
    """Rebuild an NER tag vocabulary as a CompositeVocab from a state dict.

    Verifies the rebuilt vocab has the same tags, in the same order, as the
    original (raising AssertionError otherwise) and returns it.
    """
    if state_dict['lower']:
        raise AssertionError("Did not expect an NER vocab with 'lower' set to True")
    # Drop the reserved prefix entries (PAD/UNK/...); CompositeVocab re-adds
    # them. Each tag must be nested three lists deep for its data format.
    tags = state_dict['_id2unit'][len(VOCAB_PREFIX):]
    nested = [[[[tag]]] for tag in tags]
    vocab = CompositeVocab(data=nested, lang=state_dict['lang'], idx=0, sep=None)
    if len(vocab._id2unit[0]) != len(state_dict['_id2unit']):
        raise AssertionError('Failed to construct a new vocab of the same length as the original')
    if vocab._id2unit[0] != state_dict['_id2unit']:
        raise AssertionError('Failed to construct a new vocab in the same order as the original')
    return vocab
class GaussianTailProbabilityCalibrator(BasePostprocessor):
    """Calibrate anomaly scores to probabilities under a Gaussian fit.

    Maintains (optionally sliding-window) running mean and variance of the
    observed scores and maps each score s to P(X <= s) = 1 - Q((s-mean)/std).
    """

    def __init__(self, running_statistics=True, window_size=6400):
        self.running_statistics = running_statistics
        self.window_size = window_size
        if self.running_statistics:
            # Windowed statistics over the last `window_size` scores.
            self.avg_meter = RunningStatistic(AverageMeter, self.window_size)
            self.var_meter = RunningStatistic(VarianceMeter, self.window_size)
        else:
            self.avg_meter = AverageMeter()
            # Bug fix: the non-windowed branch previously still wrapped the
            # variance meter in RunningStatistic, so mean and variance were
            # computed over different histories. Use a plain VarianceMeter
            # to mirror the AverageMeter above.
            self.var_meter = VarianceMeter()

    def fit_partial(self, score):
        """Update both meters with one score; returns self for chaining."""
        self.avg_meter.update(score)
        self.var_meter.update(score)
        return self

    def transform_partial(self, score):
        """Return the calibrated probability P(X <= score)."""
        mean = self.avg_meter.get()
        var = self.var_meter.get()
        # Guard against zero/negative variance early in the stream.
        std = np.sqrt(var) if var > 0 else 1.0
        return 1 - self._qfunction(score, mean, std)

    def _qfunction(self, x, mean, std):
        """Gaussian Q-function: P(X > x) for X ~ N(mean, std^2)."""
        z = (x - mean) / std
        return 0.5 * math.erfc(z / math.sqrt(2))
def single_wall_mobility_trans_times_force_pycuda(r_vectors, force, eta, a, *args, **kwargs):
    """Compute blob velocities u = M @ f on the GPU (single-wall mobility).

    r_vectors: blob positions, flattened to shape (3N,).
    force: blob forces, same layout.
    eta: fluid viscosity; a: blob radius.
    kwargs['periodic_length']: optional periodic box lengths (zeros = none).
    Returns the flat velocity array with the same shape/dtype as the
    flattened force.
    """
    number_of_blobs = np.int32(len(r_vectors))
    (threads_per_block, num_blocks) = set_number_of_threads_and_blocks(number_of_blobs)
    L = kwargs.get('periodic_length', np.array([0.0, 0.0, 0.0]))
    # `real` presumably casts to the kernel's float precision — confirm.
    x = real(np.reshape(r_vectors, (number_of_blobs * 3)))
    f = real(np.reshape(force, (number_of_blobs * 3)))
    # Device buffers: positions, forces, and the output velocities.
    x_gpu = cuda.mem_alloc(x.nbytes)
    f_gpu = cuda.mem_alloc(f.nbytes)
    u_gpu = cuda.mem_alloc(f.nbytes)
    # NOTE(review): this buffer is allocated but never written or passed to
    # the kernel (the host int is passed directly) — confirm it can go.
    number_of_blobs_gpu = cuda.mem_alloc(number_of_blobs.nbytes)
    cuda.memcpy_htod(x_gpu, x)
    cuda.memcpy_htod(f_gpu, f)
    mobility = mod.get_function('velocity_from_force')
    mobility(x_gpu, f_gpu, u_gpu, number_of_blobs, real(eta), real(a), real(L[0]), real(L[1]), real(L[2]), block=(threads_per_block, 1, 1), grid=(num_blocks, 1))
    # Copy the computed velocities back to the host.
    u = np.empty_like(f)
    cuda.memcpy_dtoh(u, u_gpu)
    return u
def prepare_download():
    """Download (with resume), verify and decompress every dataset blob.

    For each blob URL: derives its split folder, creates the compressed and
    decompressed directories, resumes any partial download, skips files that
    already validate, then decompresses. Finally fetches the blind-test RIRs.
    """
    for file_url in BLOB_NAMES:
        # Derive the split folder from the URL. NOTE(review): assumes every
        # blob URL contains one of SPLIT_LIST; otherwise split_name is
        # unbound (or stale from a previous iteration).
        for split in SPLIT_LIST:
            if split in file_url:
                split_name = split
        split_path = os.path.join(COMPRESSED_PATH, split_name)
        if not os.path.exists(split_path):
            os.makedirs(split_path)
        if not os.path.exists(DECOMPRESSED_PATH):
            os.makedirs(DECOMPRESSED_PATH)
        filename = file_url.split('/')[-1]
        download_path = os.path.join(split_path, filename)
        download_url = (AZURE_URL + '/') + file_url
        if validate_file(download_url, download_path):
            print(', \tDownload complete. Skipping')
        else:
            # Resume from the current byte offset when a partial file exists.
            resume_byte_pos = os.path.getsize(download_path) if os.path.exists(download_path) else None
            download_file(download_url, download_path, split_name, filename, resume_byte_pos=resume_byte_pos)
        decompress_file(download_path, DECOMPRESSED_PATH, split_name)
    rir_blind_test_download()
class GatewayObjStoreOperator(GatewayOperator):
    """Gateway operator bound to one object-store bucket, with a lazy
    per-(region, bucket) cache of ObjectStoreInterface instances."""

    def __init__(self, handle: str, region: str, bucket_name: str, bucket_region: str, input_queue: GatewayQueue, output_queue: GatewayQueue, error_event, error_queue: Queue, n_processes: Optional[int]=1, chunk_store: Optional[ChunkStore]=None):
        super().__init__(handle, region, input_queue, output_queue, error_event, error_queue, chunk_store, n_processes)
        self.bucket_name = bucket_name
        self.bucket_region = bucket_region
        self.src_requester_pays = cloud_config.get_flag('requester_pays')
        self.worker_id: Optional[int] = None
        # Lazily populated cache keyed by "region:bucket".
        self.obj_store_interfaces: Dict[(str, ObjectStoreInterface)] = {}

    def get_obj_store_interface(self, region: str, bucket: str) -> ObjectStoreInterface:
        """Return the interface for region/bucket, creating and caching it
        on first use (raises ValueError when creation fails)."""
        cache_key = f'{region}:{bucket}'
        if cache_key in self.obj_store_interfaces:
            return self.obj_store_interfaces[cache_key]
        logger.warning(f'[gateway_daemon] ObjectStoreInterface not cached for {cache_key}')
        try:
            self.obj_store_interfaces[cache_key] = ObjectStoreInterface.create(region, bucket)
        except Exception as e:
            raise ValueError(f'Failed to create obj store interface {str(e)}')
        return self.obj_store_interfaces[cache_key]
class DeepAttention(nn.Module):
    """Additive-style attention over a context sequence.

    Scores each context position against the query via an elementwise
    product of ReLU-projected vectors, then returns the attention-weighted
    context fused with the query (or just the weights when attn_only).
    """

    def __init__(self, dim):
        super(DeepAttention, self).__init__()
        self.linear_in = nn.Linear(dim, dim, bias=False)
        self.linear_v = nn.Linear(dim, 1, bias=False)
        self.linear_out = nn.Linear(dim * 2, dim, bias=False)
        self.relu = nn.ReLU()
        self.sm = nn.Softmax(dim=1)
        self.tanh = nn.Tanh()
        self.mask = None

    def forward(self, input, context, mask=None, attn_only=False):
        """input: (B, D); context: (B, L, D); mask: optional (B, L) bool.

        Returns (h_tilde, attn) with shapes (B, D) and (B, L), or attn
        alone when attn_only is True.
        """
        n_batch = context.size(0)
        src_len = context.size(1)
        hid = context.size(2)
        # Project the query (broadcast over positions) and the context.
        q = self.relu(self.linear_in(input.unsqueeze(1).expand_as(context).contiguous().view(-1, hid)))
        k = self.relu(self.linear_in(context.contiguous().view(-1, hid)))
        scores = self.linear_v(q.mul(k)).view(n_batch, src_len)
        if mask is not None:
            assert (mask.size() == scores.size()), 'Mask size must match the attention size!'
            scores.masked_fill_(mask, -constant.INFINITY_NUMBER)
        weights = self.sm(scores)
        if attn_only:
            return weights
        # Weighted sum of the context, then fuse with the query.
        summary = torch.bmm(weights.view(n_batch, 1, src_len), context).squeeze(1)
        fused = self.tanh(self.linear_out(torch.cat((summary, input), 1)))
        return (fused, weights)
def move_file(src: str, dest: str):
    """Move a local file to a local path, or upload it to a gs:// URL.

    Local destinations use shutil.move; GCS destinations upload the source
    (the local copy is not deleted in that case).
    """
    assert os.path.exists(src), f'source file {src} does not exist.'
    if not dest.startswith('gs://'):
        shutil.move(src, dest)
        return
    bucket_dest, filepath_dest = split_gcs_bucket_and_filepath(dest)
    gcs_bucket(bucket_dest).blob(filepath_dest).upload_from_filename(src)
def make_plots(statistics_file):
    """Render the per-activation statistics JSON as a LaTeX comparison table.

    Reads `statistics_file`, builds a tabular with SSIM/LPIPS columns per
    config (best value per dataset/metric set in bold), writes it to
    ActivationFunctions.tex next to the statistics file, and prints it.
    Depends on module-level configX / activationX tables.
    """
    print('\n Make Plots')
    with open(statistics_file, 'r') as f:
        stats = json.load(f)
    output_folder = os.path.split(statistics_file)[0]
    statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
    statTags = ['ssim', 'lpips']
    # Higher SSIM is better; lower LPIPS is better.
    statAggregation = [max, min]
    latex = io.StringIO()
    latex.write(('\\begin{tabular}{r%s}\n' % ('cc' * len(configX))))
    latex.write('\\toprule\n')
    latex.write('\\multirow{2}{*}{Activation}')
    for config in configX:
        latex.write((' & \\multicolumn{2}{c}{%s}' % config[2]))
    latex.write('\\\\\n')
    for config in configX:
        latex.write((' & %s & %s' % tuple(statNames)))
    latex.write('\\\\\n')
    latex.write('\n\\midrule\n')
    # First pass: best formatted value per (dataset, metric).
    # NOTE(review): max/min compare the *formatted strings*; lexicographic
    # order matches numeric order only while all values share the '%.4f'
    # format and sign — confirm this invariant holds for the data.
    best_per_dataset = dict()
    for config in configX:
        cfg_index = stats[config[0]]['cfg_index']
        for (tag, aggr) in zip(statTags, statAggregation):
            values = []
            for activation in activationX:
                (_, _, n) = get_args_and_hdf5_file(activation, configX[cfg_index])
                v = ('%.4f' % stats[config[0]][n][tag][0])
                values.append(v)
            best_per_dataset[(cfg_index, tag)] = aggr(values)
    # Second pass: one table row per activation, bolding the best entries.
    for activation in activationX:
        latex.write(activation.split(':')[0])
        for config in configX:
            cfg_index = stats[config[0]]['cfg_index']
            (_, _, n) = get_args_and_hdf5_file(activation, configX[cfg_index])
            for tag in statTags:
                v = ('%.4f' % stats[config[0]][n][tag][0])
                if (v == best_per_dataset[(cfg_index, tag)]):
                    latex.write((' & $\\bm{%s}$' % v))
                else:
                    latex.write((' & $%s$' % v))
        latex.write('\\\\\n')
    latex.write('\n\\bottomrule\n')
    latex.write('\\end{tabular}\n')
    latex = latex.getvalue()
    with open(os.path.join(output_folder, 'ActivationFunctions.tex'), 'w') as f:
        f.write(latex)
    print(latex)
    print('Done')
def get_token(doc, doc_id):
    """Flatten a parsed document into a column-oriented token table.

    Sentence ids and token ids are 1-based. ``tid_source`` is a token's
    head id: 0 for the sentence root, otherwise the 1-based position of
    the head token within the same sentence.
    """
    columns = ('doc_id', 'sid', 'tid', 'token', 'token_with_ws', 'lemma',
               'upos', 'xpos', 'tid_source', 'relation')
    token = {name: [] for name in columns}
    for sent_no, sent in enumerate(doc.sents, start=1):
        # Offset of the sentence's first token within the whole document.
        sent_offset = sent[0].i
        for tok_no, word in enumerate(sent, start=1):
            # Root tokens point at the virtual head 0.
            head_id = 0 if word.dep_ == 'ROOT' else word.head.i - sent_offset + 1
            row = (doc_id, sent_no, tok_no, word.text, word.text_with_ws,
                   word.lemma_, word.pos_, word.tag_, head_id, word.dep_)
            for name, value in zip(columns, row):
                token[name].append(value)
    return token
def test_axis_none():
    """fill_none with axis=None should replace missing leaves anywhere."""
    record = ak.zip({'x': [1, None], 'y': [2, 3]})
    filled = ak.fill_none(record, 0, axis=None)
    expected = [{'x': 1, 'y': 2}, {'x': 0, 'y': 3}]
    assert filled.to_list() == expected
class LinkCollector(object):
    """Collects Link objects for a project from index pages and any
    configured ``--find-links`` locations.

    Fix: ``find_links`` is read without parentheses in ``collect_links``
    and ``create`` takes ``cls`` — the ``@property`` and ``@classmethod``
    decorators were evidently stripped; they are restored here.
    """

    def __init__(self, session, search_scope):
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(cls, session, options, suppress_no_index=False):
        """Build a LinkCollector from parsed command-line options."""
        index_urls = ([options.index_url] + options.extra_index_urls)
        if (options.no_index and (not suppress_no_index)):
            logger.debug('Ignoring indexes: %s', ','.join((redact_auth_from_url(url) for url in index_urls)))
            index_urls = []
        find_links = (options.find_links or [])
        search_scope = SearchScope.create(find_links=find_links, index_urls=index_urls)
        link_collector = LinkCollector(session=session, search_scope=search_scope)
        return link_collector

    @property
    def find_links(self):
        """Configured find-links locations (delegated to the search scope)."""
        return self.search_scope.find_links

    def fetch_page(self, location):
        """Fetch and return the HTML page for the given link location."""
        return _get_html_page(location, session=self.session)

    def collect_links(self, project_name):
        """Return CollectedLinks for every file/URL location of *project_name*."""
        search_scope = self.search_scope
        index_locations = search_scope.get_index_urls_locations(project_name)
        (index_file_loc, index_url_loc) = group_locations(index_locations)
        (fl_file_loc, fl_url_loc) = group_locations(self.find_links, expand_dir=True)
        file_links = [Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)]
        # Tag find-links origins so error messages can show "-f".
        find_link_links = [Link(url, '-f') for url in self.find_links]
        url_locations = [
            link for link in itertools.chain(
                # Index pages are not cached as link-parsing sources.
                (Link(url, cache_link_parsing=False) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
            )
            if self.session.is_secure_origin(link)
        ]
        url_locations = _remove_duplicate_links(url_locations)
        lines = ['{} location(s) to search for versions of {}:'.format(len(url_locations), project_name)]
        for link in url_locations:
            lines.append('* {}'.format(link))
        logger.debug('\n'.join(lines))
        return CollectedLinks(files=file_links, find_links=find_link_links, project_urls=url_locations)
def add_joints_to_image(img_demo, joints):
    """Draw each joint as a small white filled circle on *img_demo*.

    Each joint is an (x, y, confidence) triple; the confidence entry is
    not used for drawing. Returns the mutated image.
    """
    for x, y, _sure in joints:
        cv2.circle(img_demo, (x, y), radius=2, color=(255, 255, 255), thickness=2)
    return img_demo
def get_exact_set_match_metrics(examples, pred_list, verbose=False, vocabs=None, schema_graphs=None, clauses=None):
    """Compute exact-set-match accuracy for whole queries and per SQL clause.

    Returns a dict with per-clause accuracies, whole-query accuracy under
    'sql', and the mean of the clause accuracies under 'average'.

    Fix: the accumulator was missing the 'sql' key, so the first fully
    correct prediction raised KeyError at ``metrics['sql'] += 1.0``.
    """
    assert len(examples) == len(pred_list)
    esm = ExactSetMatch(vocabs)
    metrics = {'sql': 0.0, 'select': 0.0, 'groupBy': 0.0, 'orderBy': 0.0,
               'from': 0.0, 'where': 0.0, 'having': 0.0, 'limit': 0.0}
    for i, (example, example_pred) in enumerate(zip(examples, pred_list)):
        schema_graph = schema_graphs.get_schema(example.db_id)
        (sql_correct, select_correct, group_by_correct, order_by_correct,
         from_correct, where_correct, having_correct, limit_correct) = esm.eval_example(
            example_pred, example, verbose=verbose, example_id=i, schema_graph=schema_graph)
        # Tally each component that matched exactly.
        if sql_correct:
            metrics['sql'] += 1.0
        if select_correct:
            metrics['select'] += 1.0
        if group_by_correct:
            metrics['groupBy'] += 1.0
        if order_by_correct:
            metrics['orderBy'] += 1.0
        if from_correct:
            metrics['from'] += 1.0
        if where_correct:
            metrics['where'] += 1.0
        if having_correct:
            metrics['having'] += 1.0
        if limit_correct:
            metrics['limit'] += 1.0
    avg_metrics = 0
    if clauses is None:
        clauses = metrics.keys()
    # Convert counts to accuracies for the requested clauses; the average
    # is taken over clause metrics only (whole-query 'sql' is excluded,
    # matching the (len(metrics) - 1) divisor).
    for key in clauses:
        metrics[key] /= len(examples)
        if key != 'sql':
            avg_metrics += metrics[key]
    avg_metrics /= (len(metrics) - 1)
    metrics['average'] = avg_metrics
    return metrics
# NOTE(review): the bare `_torch` / `_sentencepiece` / `_tokenizers` lines
# here and between methods look like mangled `@require_*` decorators from
# the original transformers test file — confirm upstream.
_torch
_sentencepiece
_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    """Integration tests for the facebook/m2m100_418M tokenizer checkpoint.

    Exercises language-code handling, batch encoding, decoding, save/load
    round-trips, source/target mode switching, and translation-input
    building. Requires network access to download the checkpoint.
    """
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = ['In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence']
    tgt_text = ['Selon moi, il y a deux niveaux de reponse de la part du gouvernement francais.', "L'affaire NSA souligne l'absence totale de debat sur le renseignement"]
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # NOTE(review): appears intended as a classmethod (takes `cls` and
    # returns it) — the `@classmethod` decorator may have been stripped.
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        # Known stable ids of a few language tokens in the 418M checkpoint.
        self.assertEqual(self.tokenizer.get_lang_id('ar'), 128006)
        self.assertEqual(self.tokenizer.get_lang_id('en'), 128022)
        self.assertEqual(self.tokenizer.get_lang_id('ro'), 128076)
        self.assertEqual(self.tokenizer.get_lang_id('mr'), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab['<unk>'], 3)
        self.assertIn(self.tokenizer.get_lang_token('en'), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = 'en'
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        # Decoding with skip_special_tokens must strip the leading lang code.
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffacted_by_save_load(self):
        # Round-trip the tokenizer to disk; lang-token ids must be preserved.
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    _torch
    def test_batch_fairseq_parity(self):
        # Tokenization must match fairseq: lang code first, EOS (2) last.
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        assert (batch.input_ids[1][0] == EN_CODE)
        assert (batch.input_ids[1][(- 1)] == 2)
        assert (batch.labels[1][0] == FR_CODE)
        assert (batch.labels[1][(- 1)] == 2)
        assert (batch.decoder_input_ids[1][:2] == [2, FR_CODE])
    _torch
    def test_src_lang_setter(self):
        # Changing src_lang must update the prefix token immediately.
        self.tokenizer.src_lang = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    _torch
    def test_tokenizer_target_mode(self):
        # Target mode prefixes with the target language; input mode restores
        # the source-language prefix.
        self.tokenizer.tgt_lang = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    _torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
        self.assertEqual(nested_simplify(inputs), {'input_ids': [[128022, 58, 4183, 2]], 'attention_mask': [[1, 1, 1, 1]], 'forced_bos_token_id': 128006})
def aggregate_graph(input_matrix: sparse.csr_matrix, labels: Optional[np.ndarray]=None, labels_row: Optional[np.ndarray]=None, labels_col: Optional[np.ndarray]=None) -> sparse.csr_matrix:
    """Aggregate a (bi)adjacency matrix by cluster labels.

    Entry (i, j) of the result sums all edges between row-cluster i and
    column-cluster j. Row memberships fall back to the shared ``labels``;
    column memberships fall back to the row memberships (square case).
    """
    membership_row = get_membership(labels_row if labels_row is not None else labels)
    membership_col = get_membership(labels_col) if labels_col is not None else membership_row
    return membership_row.T.dot(input_matrix).dot(membership_col)
# NOTE(review): the bare `_module` line below looks like a mangled
# registration decorator (e.g. `@HEADS.register_module`) — confirm upstream.
_module
class ConvFCBBoxHead(BBoxHead):
    """Bounding-box head with configurable shared/cls/reg conv+fc branches.

    Layout::

                                    /-> cls convs -> cls fcs -> fc_cls
        shared convs -> shared fcs
                                    \\-> reg convs -> reg fcs -> fc_reg
    """
    def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, *args, **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # At least one conv/fc layer must be requested overall.
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        # Branch-specific convs are incompatible with shared fcs.
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Shared trunk, then separate classification / regression branches.
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        # Without shared fcs or pooling, branch heads see flattened RoI maps.
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= self.roi_feat_area
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= self.roi_feat_area
        self.relu = nn.ReLU(inplace=True)
        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
        if self.with_reg:
            # Class-agnostic regression predicts one box; otherwise one per class.
            out_dim_reg = (4 if self.reg_class_agnostic else (4 * self.num_classes))
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Build a conv stack followed by an fc stack; return (convs, fcs, out_dim)."""
        last_layer_dim = in_channels
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # First fc sees the flattened feature map when no pooling is used.
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)
    def init_weights(self):
        """Xavier-init all fc layers on top of the base-class initialization."""
        super(ConvFCBBoxHead, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Return (cls_score, bbox_pred); either may be None if disabled."""
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.view(x.size(0), (- 1))
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # Branch point: both heads start from the shared representation.
        x_cls = x
        x_reg = x
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.view(x_reg.size(0), (- 1))
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
def model_form(model, db_session=None, base_class=Form, only=None, exclude=None, field_args=None, converter=None, exclude_pk=True, exclude_fk=True, type_name=None):
    """Create a wtforms Form subclass for a SQLAlchemy mapped *model*.

    Raises TypeError when *model* is not a mapped class.
    """
    if not hasattr(model, '_sa_class_manager'):
        raise TypeError('model must be a sqlalchemy mapped model')
    form_name = type_name or str(model.__name__ + 'Form')
    fields = model_fields(model, db_session, only, exclude, field_args,
                          converter, exclude_pk=exclude_pk, exclude_fk=exclude_fk)
    return type(form_name, (base_class,), fields)
def check_varenv(env: str='', args: dict=None):
    """Copy the value of environment variable *env* into *args* when set.

    A fresh dict is created when *args* is None; the (possibly updated)
    mapping is returned.
    """
    args = {} if args is None else args
    value = environ.get(env)
    # Only record the variable when a non-empty name resolves to a value.
    if env and value is not None:
        args[env] = value
    return args
class CTRL(nn.Module):
    """Cross-task relation layer: fuses per-task prediction entropies.

    Converts semantic-segmentation, SRH and depth predictions into
    probability maps, maps each to a per-pixel entropy map, and
    concatenates the three along the channel dimension.
    """
    def __init__(self, cfg):
        super(CTRL, self).__init__()
        print('ctrl/model/cross_task_relation.py --> class CTRL --> __init__()')
        # Project helpers: depth -> probability, probability -> entropy.
        self.get_depth_prob = DepthProb(cfg)
        self.prob_to_entropy = Prob2Entropy()
    def forward(self, semseg_pred, srh_pred, depth_pred):
        """Return the channel-wise concatenation of the three entropy maps."""
        depth_prob = self.get_depth_prob(depth_pred)
        semseg_prob = F.softmax(semseg_pred, dim=1)
        srh_prob = F.softmax(srh_pred, dim=1)
        # Renormalize; the epsilon guards against division by zero.
        srh_prob = srh_prob.div((srh_prob.sum(dim=1, keepdim=True) + 1e-30))
        semseg_entropy = self.prob_to_entropy(semseg_prob)
        srh_entropy = self.prob_to_entropy(srh_prob)
        depth_entropy = self.prob_to_entropy(depth_prob)
        FusedE = torch.cat((semseg_entropy, depth_entropy, srh_entropy), dim=1)
        return FusedE
class MethodStatement(ParametrizedStatement):
    """Statement that models a method call on a callee object.

    NOTE(review): ``callee`` is defined twice below (getter- and
    setter-shaped). Without the presumably stripped ``@property`` /
    ``@callee.setter`` decorators, the second definition silently
    overwrites the first — confirm against the original source.
    """
    def __init__(self, test_case: tc.TestCase, generic_callable: gao.GenericMethod, callee: vr.VariableReference, args: (dict[(str, vr.VariableReference)] | None)=None):
        super().__init__(test_case, generic_callable, args)
        self._callee = callee
    def accessible_object(self) -> gao.GenericMethod:
        """The method this statement invokes."""
        return cast(gao.GenericMethod, self._generic_callable)
    def _mutable_argument_count(self) -> int:
        # The callee counts as an additional mutable argument.
        return (super()._mutable_argument_count() + 1)
    def _mutate_special_parameters(self, p_per_param: float) -> bool:
        """With probability *p_per_param*, swap the callee for another object."""
        if (randomness.next_float() < p_per_param):
            callee = self.callee
            # Sometimes pick any object, not just type-compatible ones.
            typ = (ANY if (randomness.next_float() < config.configuration.test_creation.use_random_object_for_call) else callee.type)
            objects = self.test_case.get_objects(typ, self.get_position())
            if (callee in objects):
                objects.remove(callee)
            if (len(objects) > 0):
                self.callee = randomness.choice(objects)
                return True
        return False
    def get_variable_references(self) -> set[vr.VariableReference]:
        references = super().get_variable_references()
        references.add(self._callee)
        return references
    def replace(self, old: vr.VariableReference, new: vr.VariableReference) -> None:
        super().replace(old, new)
        if (self._callee == old):
            self._callee = new
    def callee(self) -> vr.VariableReference:
        return self._callee
    def callee(self, new_callee: vr.VariableReference) -> None:
        self._callee = new_callee
    def clone(self, test_case: tc.TestCase, memo: dict[(vr.VariableReference, vr.VariableReference)]) -> Statement:
        """Deep-copy this statement into *test_case*, remapping references via *memo*."""
        return MethodStatement(test_case, self.accessible_object(), self._callee.clone(memo), self._clone_args(memo))
    def accept(self, visitor: StatementVisitor) -> None:
        visitor.visit_method_statement(self)
    def structural_hash(self, memo: dict[(vr.VariableReference, int)]) -> int:
        return hash((super().structural_hash(memo), self._callee.structural_hash(memo)))
    def structural_eq(self, other: Any, memo: dict[(vr.VariableReference, vr.VariableReference)]) -> bool:
        return (super().structural_eq(other, memo) and self._callee.structural_eq(other._callee, memo))
    def __repr__(self) -> str:
        return f'MethodStatement({self._test_case}, {self._generic_callable}, {self._callee.type}, args={self._args})'
    def __str__(self) -> str:
        return f'{self._generic_callable}(args={self._args}) -> {self._generic_callable.generated_type()}'
# NOTE(review): the `(0.5)` and `_service.route(...)` lines below look like
# mangled decorators (likely a rate limit and a Flask-style route
# registration) — confirm against the original service module.
(0.5)
_service.route('/add_more_to_order', methods=['POST'])
def add_more_to_order():
    """POST endpoint: increase the quantity of an existing order.

    Expects JSON ``entities`` containing ``oid`` and ``quantity``; returns
    a JSON success/failure response via ``json_resp``.
    """
    try:
        entities = request.json['entities']
        oid = int(entities['oid'])
        value = int(entities['quantity'])
        add_more_res = simple_db.add_more_to_order(oid, value)
        if (add_more_res != 'ERROR'):
            return json_resp(True, msg=add_more_res)
        else:
            return json_resp(False, msg=add_more_res)
    except Exception:
        # Best-effort endpoint: any parsing/DB failure becomes a generic error.
        return json_resp(False, msg='ERROR')
class MultiStepLR_Restart(_LRScheduler):
    """MultiStepLR with warm restarts.

    At each epoch listed in ``restarts`` the learning rate is reset to
    ``initial_lr * weight`` (and the optimizer state optionally cleared);
    between restarts the LR is decayed by ``gamma`` at each milestone.
    """
    def __init__(self, optimizer, milestones, restarts=None, weights=None, gamma=0.1, clear_state=False, last_epoch=(- 1)):
        # Counter allows a milestone epoch to appear multiple times; the
        # decay is then applied once per occurrence.
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.clear_state = clear_state
        self.restarts = (restarts if restarts else [0])
        self.restart_weights = (weights if weights else [1])
        assert (len(self.restarts) == len(self.restart_weights)), 'restarts and their weights do not match.'
        super(MultiStepLR_Restart, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Return the per-group learning rates for the current epoch."""
        if (self.last_epoch in self.restarts):
            if self.clear_state:
                # Drop optimizer momentum/statistics at the restart.
                self.optimizer.state = defaultdict(dict)
            weight = self.restart_weights[self.restarts.index(self.last_epoch)]
            return [(group['initial_lr'] * weight) for group in self.optimizer.param_groups]
        if (self.last_epoch not in self.milestones):
            return [group['lr'] for group in self.optimizer.param_groups]
        return [(group['lr'] * (self.gamma ** self.milestones[self.last_epoch])) for group in self.optimizer.param_groups]
def phone2prono(phones, rule_in, rule_out):
    """Apply each (pattern, replacement) rewrite rule, in order, to *phones*.

    Rules are regex substitutions; later rules see the output of earlier
    ones. Returns the fully rewritten string.
    """
    for rule_pattern, rule_repl in zip(rule_in, rule_out):
        phones = re.sub(rule_pattern, rule_repl, phones)
    return phones
def test_state_transition_array():
    """Branch on an interstate condition: with A[0] == 10 only the 'ok'
    state (s1) should execute, never the 'BAD' state (s2)."""
    sdfg = dace.SDFG('sta_test')
    state_init = sdfg.add_state()
    state_ok = sdfg.add_state()
    state_bad = sdfg.add_state()

    # s0: copy inp[0] into A[0] through a tasklet.
    src = state_init.add_array('inp', [1], dace.float32)
    dst = state_init.add_array('A', [1], dace.float32)
    tasklet = state_init.add_tasklet('seta', {'a'}, {'b'}, 'b = a')
    state_init.add_edge(src, None, tasklet, 'a', dace.Memlet.from_array(src.data, src.desc(sdfg)))
    state_init.add_edge(tasklet, 'b', dst, None, dace.Memlet.from_array(dst.data, dst.desc(sdfg)))

    # s1: branch taken when A[0] > 3 — prints "ok".
    arr_ok = state_ok.add_array('A', [1], dace.float32)
    task_ok = state_ok.add_tasklet('geta', {'a'}, {}, 'printf("ok %f\\n", a + 1)')
    state_ok.add_edge(arr_ok, None, task_ok, 'a', dace.Memlet.from_array(arr_ok.data, arr_ok.desc(sdfg)))

    # s2: fall-through branch — prints "BAD".
    arr_bad = state_bad.add_array('A', [1], dace.float32)
    task_bad = state_bad.add_tasklet('geta', {'a'}, {}, 'printf("BAD %f\\n", a - 1)')
    state_bad.add_edge(arr_bad, None, task_bad, 'a', dace.Memlet.from_array(arr_bad.data, arr_bad.desc(sdfg)))

    sdfg.add_edge(state_init, state_ok, dace.InterstateEdge('A[0] > 3'))
    sdfg.add_edge(state_init, state_bad, dace.InterstateEdge('A[0] <= 3'))

    inp_buf = np.ndarray([1], np.float32)
    inp_buf[0] = 10
    out_buf = np.ndarray([1], np.float32)
    out_buf[0] = 10
    sdfg(inp=inp_buf, A=out_buf)
class ConcatDataset(data.Dataset):
    """Dataset concatenating multiple datasets back to back.

    Indexing is delegated to the member datasets through a table of
    cumulative sizes.

    Fixes: ``cumsum`` is a pure helper that was called as
    ``self.cumsum(self.datasets)`` without ``@staticmethod``, so the
    instance was bound as ``sequence`` and the call raised TypeError.
    ``lens`` / ``X`` / ``Y`` / ``cummulative_sizes`` are likewise restored
    as properties, matching their accessor shape (evidently stripped
    decorators).
    """

    @staticmethod
    def cumsum(sequence):
        """Return the running totals of len(e) for each dataset e."""
        totals, running = [], 0
        for e in sequence:
            running += len(e)
            totals.append(running)
        return totals

    def __init__(self, datasets, **kwargs):
        super(ConcatDataset, self).__init__()
        assert (len(datasets) > 0), 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        # True only when every member dataset is a lazy loader.
        self.is_lazy = (sum([isinstance(ds, lazy_array_loader) for ds in self.datasets]) == len(self.datasets))
        self.cumulative_sizes = self.cumsum(self.datasets)
        # Lazily-built caches for the X / Y / lens properties.
        self._X = None
        self._Y = None
        self._lens = None

    def SetTokenizer(self, tokenizer):
        for ds in self.datasets:
            ds.SetTokenizer(tokenizer)

    def GetTokenizer(self):
        return self.datasets[0].GetTokenizer()

    def __len__(self):
        return self.cumulative_sizes[(- 1)]

    def __getitem__(self, idx):
        # Locate the dataset containing idx, then the local offset within it.
        dataset_idx = bisect_right(self.cumulative_sizes, idx)
        if (dataset_idx == 0):
            sample_idx = idx
        else:
            sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
        return self.datasets[dataset_idx][sample_idx]

    @property
    def lens(self):
        """Per-sample text lengths, concatenated across member datasets."""
        if (self._lens is None):
            self._lens = []
            if self.is_lazy:
                for ds in self.datasets:
                    self._lens.extend(ds.lens)
            else:
                for ds in self.datasets:
                    self._lens.extend([(len(d['text']) if isinstance(d, dict) else len(d)) for d in ds])
        return self._lens

    @property
    def X(self):
        if (self._X is None):
            self._X = []
            for ds in self.datasets:
                self._X.extend(ds.X)
        return self._X

    @property
    def Y(self):
        if (self._Y is None):
            self._Y = []
            for ds in self.datasets:
                self._Y.extend(list(ds.Y))
            self._Y = np.array(self._Y)
        return self._Y

    @property
    def cummulative_sizes(self):
        # Misspelled legacy alias, kept for backward compatibility.
        warnings.warn('cummulative_sizes attribute is renamed to cumulative_sizes', DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
def save_config(cfg, path):
    """Dump *cfg* to *path* on the main process only, so distributed
    workers do not write the file concurrently."""
    if not is_main_process():
        return
    with open(path, 'w') as f:
        f.write(cfg.dump())
def test_balanced_batch_generator_class_no_return_indices(data):
    """A sampler that cannot return sample indices must be rejected."""
    sampler = ClusterCentroids(estimator=KMeans(n_init=1))
    with pytest.raises(ValueError, match='needs to have an attribute'):
        BalancedBatchGenerator(*data, sampler=sampler, batch_size=10)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with a Swish
    nonlinearity and a projection shortcut when shapes change."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        width = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.swish = Swish()
        # Identity shortcut unless the spatial size or channel count changes.
        if stride != 1 or in_planes != width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(width),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = self.swish(self.bn1(self.conv1(x)))
        h = self.swish(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # Residual connection, then the final activation.
        h = h + self.shortcut(x)
        return self.swish(h)
# NOTE(review): the bare `_utils.test()` line below looks like a mangled
# `@test_utils.test()` decorator, and `compute_y` is likely missing its
# `@ti.kernel` decorator — confirm against the original taichi test file.
_utils.test()
def test_multiple_ib_deeper_non_scalar():
    # Forward-mode autodiff through nested inner loops of varying depth:
    # each y[j] accumulates x[j] a total of j + 3*j + 3*2*j = 10*j times.
    N = 10
    x = ti.field(float, shape=N, needs_dual=True)
    y = ti.field(float, shape=N, needs_dual=True)
    def compute_y():
        for j in range(N):
            for i in range(j):
                y[j] += x[j]
            for i in range(3):
                for ii in range(j):
                    y[j] += x[j]
            for i in range(3):
                for ii in range(2):
                    for iii in range(j):
                        y[j] += x[j]
    x.fill(1.0)
    with ti.ad.FwdMode(loss=y, param=x, seed=[1.0 for _ in range(N)]):
        compute_y()
    for i in range(N):
        # With x == 1 everywhere, both the value and the dual equal 10*i.
        assert (y[i] == (i * 10.0))
        assert (y.dual[i] == (i * 10.0))
def _format(val: Any, output_format: str='standard', split: bool=False, errors: str='coarse') -> Any:
    """Normalize a single AL-NIPT value into a one-element list.

    Null-like inputs yield ``[nan]``. Invalid inputs raise, pass through,
    or become ``[nan]`` depending on *errors* ('raise' / 'ignore' /
    anything else). Valid inputs are compacted.

    NOTE: ``output_format`` and ``split`` are currently unused; they are
    kept for signature parity with sibling formatters.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_al_nipt(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    return [nipt.compact(val)]
class FiveCrop(object):
    """Crop an image into its four corners and the center.

    ``size`` may be a single number (square crop) or an (h, w) pair.

    Fix: ``__init__`` assigned ``self.size = size`` unconditionally and
    then immediately reassigned it in both branches; the redundant first
    assignment is removed.
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            # A scalar means a square (size, size) crop.
            self.size = (int(size), int(size))
        else:
            assert (len(size) == 2), 'Please provide only two dimensions (h, w) for size.'
            self.size = size

    def __call__(self, img):
        return F.five_crop(img, self.size)

    def __repr__(self):
        return (self.__class__.__name__ + '(size={0})'.format(self.size))
def read_via_csv(path):
    """Parse a VIA (VGG Image Annotator) CSV export into a point table.

    Returns a DataFrame with columns ``image_name``, ``x_coord``,
    ``y_coord`` and, when the annotations carry a 'score' attribute,
    ``score``. Rows without region annotations are dropped.

    Fix: ``DataFrame.drop`` was called with a positional axis argument
    (``.drop([...], 1)``), deprecated in pandas 1.0 and removed in
    pandas 2.0; it now uses the ``columns=`` keyword.
    """
    table = pd.read_csv(path)
    table['image_name'] = table['filename'].apply(strip_ext)
    table = table.drop(columns=['filename'])
    # Keep only rows that actually contain a region annotation.
    table = table.loc[(table['region_count'] > 0)]
    regions = table['region_shape_attributes']
    x_coord = np.zeros(len(table), dtype=int)
    y_coord = np.zeros(len(table), dtype=int)
    for i in range(len(regions)):
        region = json.loads(regions.iloc[i])
        x_coord[i] = region['cx']
        y_coord[i] = region['cy']
    scores = None
    attributes = table['region_attributes']
    if (len(table) > 0):
        # Presence of 'score' in the first row decides whether to collect scores.
        att = json.loads(attributes.iloc[0])
        if ('score' in att):
            # Default missing scores to -inf so they sort last.
            scores = (np.zeros(len(table), dtype=np.float32) - np.inf)
            for i in range(len(attributes)):
                att = json.loads(attributes.iloc[i])
                if ('score' in att):
                    scores[i] = float(att['score'])
    table = table.drop(columns=['file_size', 'file_attributes', 'region_count', 'region_id', 'region_shape_attributes', 'region_attributes'])
    table['x_coord'] = x_coord
    table['y_coord'] = y_coord
    if (scores is not None):
        table['score'] = scores
    return table
def init_cnn(m):
    """Recursively initialize module *m*: zero every bias and apply
    Kaiming-normal init to conv/linear weights, including all children."""
    bias = getattr(m, 'bias', None)
    if bias is not None:
        nn.init.constant_(bias, 0)
    if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
    for child in m.children():
        init_cnn(child)
class HighResolutionNet(nn.Module):
    """HRNet backbone with a sigmoid single-channel upsampling head.

    Builds the four HRNet stages from ``config.MODEL.EXTRA`` and fuses the
    multi-resolution outputs into a dense prediction map. ``forward``
    returns ``(fused_features, prediction)``.

    Fix: ``np.int`` (removed in NumPy 1.24) replaced with the builtin
    ``int``.
    """
    def __init__(self, config, **kwargs):
        extra = config.MODEL.EXTRA
        super(HighResolutionNet, self).__init__()
        # Stem: two stride-2 3x3 convs -> 1/4 resolution, 64 channels.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stage1_cfg = extra['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = (block.expansion * num_channels)
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        # Fix: np.int was removed in NumPy 1.24 — use the builtin int.
        last_inp_channels = int(np.sum(pre_stage_channels))
        # NOTE(review): the 720-channel input of the first ConvTranspose2d is
        # hard-coded and must equal last_inp_channels for the default HRNet-W48
        # config — confirm if other widths are used.
        self.last_layer = nn.Sequential(nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0), BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True), nn.ConvTranspose2d(720, 64, 4, stride=2, padding=1, output_padding=0, bias=True), nn.ReLU(inplace=True), nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1, output_padding=0, bias=True), nn.Sigmoid())
    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapt the previous stage's branches to the next stage's widths,
        creating new lower-resolution branches with stride-2 convs."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                # Existing branch: adjust channel count only if it changed.
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: downsample from the last existing branch.
                conv3x3s = []
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), BatchNorm2d(outchannels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks, projecting the shortcut when needed."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
        """Build a stage as a sequence of HighResolutionModules."""
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # Only the last module may collapse to single-scale output.
            if ((not multi_scale_output) and (i == (num_modules - 1))):
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)
    def forward(self, x):
        """Return (fused multi-scale features, sigmoid prediction map)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if (self.transition2[i] is not None):
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        # Upsample every branch to the highest resolution and concatenate.
        # (F.upsample is deprecated in favor of F.interpolate; kept for
        # behavioral parity with the checkpointed model.)
        (x0_h, x0_w) = (x[0].size(2), x[0].size(3))
        x1 = F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear')
        x2 = F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear')
        x3 = F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear')
        f = torch.cat([x[0], x1, x2, x3], 1)
        x = self.last_layer(f)
        return (f, x)
    def init_weights(self, pretrained=''):
        """Normal/constant init, then optionally overlay a pretrained checkpoint."""
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            # Keep only checkpoint entries that exist in this model.
            pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict.keys())}
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
            print('load pre_trained parameters for HR_Net')
def get_lable(image_path, is_grayscale=False):
    """Load an image and scale its pixel values into [0, 1].

    (The historical 'lable' spelling is kept for caller compatibility.)
    """
    return imread(image_path, is_grayscale) / 255.0
def default_flist_reader(flist):
    """Read an image-list file of ``<path> <int-label>`` lines.

    Returns a list of (path, label) tuples with labels cast to int.
    """
    with open(flist, 'r') as handle:
        entries = [line.strip().split() for line in handle.readlines()]
    return [(impath, int(imlabel)) for impath, imlabel in entries]
class Threshold(Module):
    """Element-wise threshold: values greater than ``threshold`` pass
    through unchanged, all others are replaced by ``value``.

    With ``inplace=True`` the input tensor is modified directly.
    """

    __constants__ = ['threshold', 'value', 'inplace']
    threshold: float
    value: float
    inplace: bool

    def __init__(self, threshold: float, value: float, inplace: bool=False) -> None:
        super(Threshold, self).__init__()
        self.threshold = threshold
        self.value = value
        self.inplace = inplace

    def forward(self, input: Tensor) -> Tensor:
        return F.threshold(input, self.threshold, self.value, self.inplace)

    def extra_repr(self):
        suffix = ', inplace=True' if self.inplace else ''
        return f'threshold={self.threshold}, value={self.value}{suffix}'
def get_transform(opt):
    """Build the image preprocessing pipeline from an options namespace.

    Only the 'resize_and_crop' mode is supported: resize to
    (loadSizeH, loadSizeW) with bicubic interpolation, then take a random
    (fineSizeH, fineSizeW) crop. During training (unless ``no_flip``) a
    random horizontal flip is added. Output is a tensor normalized to
    roughly [-1, 1] per channel.

    Raises:
        ValueError: if ``opt.resize_or_crop`` is any other mode.
    """
    ops = []
    if opt.resize_or_crop == 'resize_and_crop':
        load_size = [opt.loadSizeH, opt.loadSizeW]
        fine_size = [opt.fineSizeH, opt.fineSizeW]
        ops.append(transforms.Resize(load_size, Image.BICUBIC))
        ops.append(transforms.RandomCrop(fine_size))
    else:
        raise ValueError(('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop))
    if opt.isTrain and (not opt.no_flip):
        ops.append(transforms.RandomHorizontalFlip())
    ops.append(transforms.ToTensor())
    ops.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(ops)
def conv(in_channels, out_channels, kernel_size, bias=False, padding=1, stride=1):
    """Create a 2-D convolution with 'same'-style padding.

    NOTE(review): the ``padding`` argument is accepted but IGNORED — the
    layer always uses ``kernel_size // 2``. Confirm callers expect this
    before honoring the argument.
    """
    same_pad = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=same_pad, bias=bias, stride=stride)
def set_dict_key(dict, path, value):
    """Set ``value`` in a nested dict at the key sequence ``path``.

    All intermediate dictionaries along ``path`` must already exist; a
    missing key raises KeyError, just like direct indexing would.
    (Parameter name ``dict`` shadows the builtin but is kept for
    caller compatibility.)
    """
    node = dict
    # Walk down to the parent mapping of the final key.
    for key in path[:-1]:
        node = node[key]
    node[path[-1]] = value
class NoiseModeling(object):
    """Callable transform that generates a random noise tensor.

    Each call draws a random scale ``sigma`` uniformly from
    ``[0, var ** pov]`` and returns a tensor of the same shape as the
    input, filled with uniform [0, 1) samples scaled by ``sigma`` and
    shifted by ``mean``.
    """

    def __init__(self, mean=0.0, var=0.1, pov=0.6):
        self.var = var    # upper bound base for the per-call noise scale
        self.mean = mean  # additive offset of the generated noise
        self.pov = pov    # exponent applied to var when bounding sigma

    def __call__(self, tensor):
        # Per-call noise level in [0, var ** pov].
        sigma = random.uniform(0, (self.var ** self.pov))
        # NOTE(review): the randn output is immediately overwritten by
        # uniform_, so the result is uniform — not Gaussian — noise.
        # Confirm this is intended.
        noise = ((torch.randn(tensor.size()).uniform_(0, 1.0) * sigma) + self.mean)
        # BUG FIX: the original returned the undefined name `noise` while
        # storing the result in `noiseModel`, raising NameError on every
        # call. Return the generated noise tensor.
        return noise

    def __repr__(self):
        return (self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.var))
class AgentPair(AgentGroup):
    """An AgentGroup constrained to exactly two agents.

    Supports the degenerate case where both slots hold the *same* agent
    object, in which case the agent index is set before each action query.
    """

    def __init__(self, *agents, allow_duplicate_agents=False):
        super().__init__(*agents, allow_duplicate_agents=allow_duplicate_agents)
        assert (self.n == 2)
        self.a0, self.a1 = self.agents
        # Advisory only: two separate coupled planners duplicate work.
        if (type(self.a0) is CoupledPlanningAgent) and (type(self.a1) is CoupledPlanningAgent):
            print('If the two planning agents have same params, consider using CoupledPlanningPair instead to reduce computation time by a factor of 2')

    def joint_action(self, state):
        # Distinct agent objects: the generic group implementation applies.
        if self.a0 is not self.a1:
            return super().joint_action(state)
        # Shared agent object: point it at the right seat before each query.
        self.a0.set_agent_index(0)
        first_action = self.a0.action(state)
        self.a1.set_agent_index(1)
        second_action = self.a1.action(state)
        return (first_action, second_action)
class TextEncoderTypes(Enum):
    """Closed set of supported text-encoder kinds.

    Values are the string identifiers used externally (e.g. in configs),
    so ``TextEncoderTypes('transformer')`` round-trips from config text.
    """
    identity = 'identity'
    transformer = 'transformer'
    embedding = 'embedding'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.