code
stringlengths
101
5.91M
def test_track_progress_list():
    """track_progress over a list must render the 3-char bar and return results."""
    stream = StringIO()
    results = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=stream)
    expected = '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n'
    assert stream.getvalue() == expected
    assert results == [1, 2, 3]
def kl_anealing(i, high=0.1, low=0.0):
    """Sigmoid-shaped annealing schedule.

    Returns a weight that rises from ``low`` (around i << 0.5) to ``high``
    (around i >> 0.5), with the transition centred at i = 0.5.
    """
    upper = 1 - low
    lower = 1 - high
    # Logistic gate in [0, 1], steep around i = 0.5.
    gate = 1.0 / (1.0 + np.exp(10 * (i - 0.5)))
    return 1 - (lower + (upper - lower) * gate)
def get_data():
    """Load the nyc_taxi public dataset, impute/scale it, and roll windows.

    The scaler is fit only on the training split and reused for val/test.
    Returns (tsdata_train, tsdata_test), both rolled with lookback=48,
    horizon=1.
    """
    from bigdl.chronos.data import get_public_dataset
    from sklearn.preprocessing import StandardScaler

    tsdata_train, tsdata_val, tsdata_test = get_public_dataset(name='nyc_taxi')
    scaler = StandardScaler()
    for split in (tsdata_train, tsdata_val, tsdata_test):
        split.impute().scale(scaler, fit=split is tsdata_train)
    tsdata_train.roll(lookback=48, horizon=1)
    tsdata_test.roll(lookback=48, horizon=1)
    return (tsdata_train, tsdata_test)
def make_dataset(input_dir, split, net_name, target_dir=None):
    """Collect (input, target) sample file lists for the given network type.

    Args:
        input_dir: root directory; samples live in per-class subdirectories.
        split: split spec forwarded to ``split2list``; if None, the full list
            is returned as both train and test.
        net_name: one of 'GAN' (.npy inputs only), 'auto_encoder'
            (.ply input == target) or 'shape_completion' (.ply pairs; the
            target name is the input name minus its 9-char suffix + '.ply').
        target_dir: target root for 'shape_completion' (unused otherwise).

    Returns:
        (train_list, test_list) of relative 'subdir/file' entries.
    """
    plyfiles = []
    if net_name == 'GAN':
        for dirs in os.listdir(input_dir):
            tempDir = os.path.join(input_dir, dirs)
            for in_path in glob.iglob(os.path.join(tempDir, '*.npy')):
                # '[:-4]' strips the '.npy' extension.
                root_filename = os.path.basename(in_path)[:-4]
                plyfiles.append([dirs + '/' + root_filename + '.npy'])
    if net_name == 'auto_encoder':
        target_dir = input_dir
        for dirs in os.listdir(target_dir):
            tempDir = os.path.join(input_dir, dirs)
            for tgt_path in glob.iglob(os.path.join(tempDir, '*.ply')):
                root_filename = os.path.basename(tgt_path)[:-4]
                plytarget = dirs + '/' + root_filename + '.ply'
                # Autoencoder: input and target are the same file.
                plyfiles.append([[plytarget], [plytarget]])
    if net_name == 'shape_completion':
        for dirs in os.listdir(input_dir):
            temp_In_Dir = os.path.join(input_dir, dirs)
            for tgt_path in glob.iglob(os.path.join(temp_In_Dir, '*.ply')):
                target = os.path.basename(tgt_path)
                # '[:-9]' drops the partial-scan suffix before '.ply'.
                root_filename = target[:-9]
                plytarget = dirs + '/' + root_filename + '.ply'
                plyin = dirs + '/' + target
                plyfiles.append([[plyin], [plytarget]])
    # Fixed: compare to None with 'is', not '=='.
    if split is None:
        return (plyfiles, plyfiles)
    return split2list(plyfiles, split, default_split=split)
class NLayerDiscriminator(nn.Module):
    """Conv-stack discriminator: n_layers stride-2 convs, then two stride-1
    convs producing a 1-channel score map (optionally passed through Sigmoid).
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        # Channel width doubles each downsampling step, capped at 8 * ndf.
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=2, padding=padw),
                norm_layer(ndf * mult, affine=True),
                nn.LeakyReLU(0.2, True),
            ]
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kw, stride=1, padding=padw),
            norm_layer(ndf * mult, affine=True),
            nn.LeakyReLU(0.2, True),
        ]
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw))
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # Split across GPUs only for CUDA float inputs when gpu_ids is set.
        if len(self.gpu_ids) and isinstance(x.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
        return self.model(x)
class QuantLinear(nn.Linear):
    """nn.Linear with LSQ+ fake quantization of activations and weights.

    With quant_inference=True the stored weight is used unquantized (it is
    assumed to already reflect inference-time values).
    """

    def __init__(self, in_features, out_features, bias=True, a_bits=8, w_bits=8, quant_inference=False, all_positive=False, per_channel=False, batch_init=20):
        super(QuantLinear, self).__init__(in_features, out_features, bias)
        self.quant_inference = quant_inference
        self.activation_quantizer = LSQPlusActivationQuantizer(a_bits=a_bits, all_positive=all_positive, batch_init=batch_init)
        self.weight_quantizer = LSQPlusWeightQuantizer(w_bits=w_bits, all_positive=all_positive, per_channel=per_channel, batch_init=batch_init)

    def forward(self, input):
        # Keep the raw/quantized tensors on self for external inspection.
        self.input = input
        self.quant_input = self.activation_quantizer(self.input)
        self.quant_weight = self.weight if self.quant_inference else self.weight_quantizer(self.weight)
        return F.linear(self.quant_input, self.quant_weight, self.bias)
def abundance_to_mass_fraction(all_elements, all_masses, all_abundances, abundances, symbols):
    """Convert log-abundances (A(X) = log eps + 12) to mass fractions.

    For each symbol, weight = 10**(abundance - 12) * atomic mass (looked up in
    all_elements / all_masses); weights are then normalized to sum to 1.
    ``all_abundances`` is accepted for signature compatibility but unused.
    """
    raw = []
    for i, sym in enumerate(symbols):
        weight = np.power(10, abundances[i] - 12) * all_masses[np.where(all_elements == sym)]
        raw.append(weight)
    total = sum(raw)
    return np.hstack([w / total for w in raw])
def deeplabv3_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
    """DeepLabv3 with a ResNet(D)-50b backbone for Pascal VOC (21 classes).

    Remaining keyword arguments are forwarded to the generic builder.
    """
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, multi_output=True).features
    del features[-1]  # drop the backbone's final stage
    return get_deeplabv3(backbone=features, num_classes=num_classes, aux=aux,
                         model_name='deeplabv3_resnetd50b_voc', **kwargs)
def get_model(point_cloud, is_training, bn_decay=None, num_class=NUM_CLASSES):
    """PointNet++ classification network (TF1 graph-mode).

    Args:
        point_cloud: xyz point tensor — assumes shape (batch, num_point, 3);
            TODO confirm against callers.
        is_training: bool tensor toggling dropout / batch-norm behaviour.
        bn_decay: batch-norm decay schedule tensor, or None.
        num_class: number of output classes.

    Returns:
        (net, end_points): logits of shape (batch, num_class) and a dict of
        intermediate tensors (only 'l0_xyz' is populated here).
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value  # NOTE: currently unused
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None  # no per-point features beyond coordinates
    end_points['l0_xyz'] = l0_xyz
    # Three set-abstraction levels: 512 centroids -> 128 centroids -> global.
    (l1_xyz, l1_points, l1_indices) = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    (l2_xyz, l2_points, l2_indices) = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    (l3_xyz, l3_points, l3_indices) = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256, 512, 1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Fully-connected classification head with dropout after fc1 and fc2.
    net = tf.reshape(l3_points, [batch_size, (- 1)])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')
    return (net, end_points)
def _get_cosine_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, min_lr_ratio: float): if (current_step < num_warmup_steps): return (float(current_step) / float(max(1, num_warmup_steps))) progress = (float((current_step - num_warmup_steps)) / float(max(1, (num_training_steps - num_warmup_steps)))) out = max(0.0, (0.5 * (1.0 + math.cos((((math.pi * float(num_cycles)) * 2.0) * progress))))) assert ((out >= 0.0) and (out <= 1.0)) if (min_lr_ratio > 0): assert (min_lr_ratio < 1.0) out = (((1 - min_lr_ratio) * out) + min_lr_ratio) return out
@operator_registry(operator_type='MatMulWithBiasTanh')
class MatMulWithBiasTanh(Operator):
    """Operator node for the fused MatMul + bias + Tanh pattern.

    NOTE(review): the decorator was garbled in the source ('_registry(...)'
    with no '@'); restored to the registry decorator form used for operator
    classes — confirm the exact registry name against sibling operators.
    """

    def __init__(self):
        super().__init__()
def check_dataset(dataset):
    """Validate that *dataset* yields dict batches containing a 'views' key.

    Only the first batch is inspected, then the loop exits.

    Raises:
        ValueError: if the first batch has no 'views' key.
    """
    dataloader = DataLoader(dataset)
    for batch in dataloader:
        if 'views' not in batch:
            # Fixed: the message previously named a 'representations' key,
            # but the actual requirement checked here is the 'views' key.
            raise ValueError("The dataset must return a dictionary with a 'views' key containing a list of tensors")
        break
def check_one_contract_on_ether_lock(contract_bytecode, contract_address, debug=False, read_from_blockchain=False):
    """Symbolically check whether a contract can lock Ether ("greedy" check).

    Returns True when the contract appears greedy (it can receive Ether but no
    path sending Ether out was found), False otherwise. Relies heavily on
    module-level state in MyGlobals — not reentrant.
    """
    print('\x1b[94m[ ] Check if contract is GREEDY\x1b[0m\n')
    print(('[ ] Contract address : %s' % contract_address))
    print(('[ ] Contract bytecode : %s...' % contract_bytecode[:50]))
    print(('[ ] Bytecode length : %d' % len(contract_bytecode)))
    print(('[ ] Debug : %s' % debug))
    global MAX_CALL_DEPTH, symbolic_vars, symbolic_sha
    ops = parse_code(contract_bytecode, debug)
    # Phase 1: concrete run (no symbolic vars, depth 1) to test whether the
    # contract can receive Ether at all; a 100-wei call value is simulated.
    MyGlobals.symbolic_vars = []
    initialize_params(read_from_blockchain, contract_address)
    set_params('call_value', '', '100')
    clear_globals()
    MyGlobals.MAX_CALL_DEPTH = 1
    storage = {}
    stack = []
    mmemory = {}
    data = {}
    trace = []
    configurations = {}
    execute_one_block(ops, stack, 0, trace, storage, mmemory, data, configurations, ['STOP', 'RETURN'], ether_lock_can_recieve, 0, 0, debug, read_from_blockchain)
    print((('\x1b[91m[-]' if (not MyGlobals.stop_search) else '\x1b[92m[+]') + '\x1b[0m \x1b[1mContract can receive Ether\x1b[0m'))
    if (not MyGlobals.stop_search):
        # Cannot receive Ether, so nothing can be locked.
        print('\n\x1b[92m[-] No lock vulnerability found because the contract cannot receive Ether \x1b[0m')
        return False
    # If no value-transfer opcode exists at all, Ether is trivially locked.
    if (not code_has_instruction(ops, ['CALL', 'CALLCODE', 'DELEGATECALL', 'SUICIDE'])):
        print('\x1b[91m[-] The code does not have CALL/SUICIDE/DELEGATECALL/CALLCODE thus is greedy !\x1b[0m')
        return True
    if debug:
        print_code(contract_bytecode, ops)
    # Phase 2: fully symbolic search (environment opcodes symbolic, symbolic
    # SHA/loads enabled) for any reachable path that sends Ether out.
    MyGlobals.symbolic_vars = ['CALLVALUE', 'CALLER', 'NUMBER', 'TIMESTAMP', 'BLOCKHASH', 'BALANCE', 'ADDRESS', 'ORIGIN', 'EXTCODESIZE']
    MyGlobals.symbolic_sha = True
    MyGlobals.symbolic_load = True
    for i in range(1, (MyGlobals.max_calldepth_in_normal_search + 1)):
        run_one_check(i, ops, contract_address, debug, read_from_blockchain)
        # stop_search is set once a sending path is found -> not greedy.
        if MyGlobals.stop_search:
            print('\n\x1b[92m[+] No locking vulnerability found \x1b[0m')
            return False
    print('\n\n\x1b[91m[-] Locking vulnerability found! \x1b[0m')
    return True
def pytest_configure(config):
    """Register the custom pytest markers used by this test suite."""
    custom_markers = (
        'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested',
        'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested',
        'is_pipeline_test: mark test to run only when pipelines are tested',
        'is_staging_test: mark test to run only in the staging environment',
    )
    for marker in custom_markers:
        config.addinivalue_line('markers', marker)
class DPRQuestionEncoder(metaclass=DummyObject):
    """Import-time placeholder: instantiation fails via requires_backends
    unless the 'torch' backend is available."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def create_example_autopilot(image, path, ctrl_cmd):
    """Build a tf.train.Example for one autopilot sample.

    Left/right control values are normalised from [0, 255] to [0, 1]; the
    third control entry is stored as-is.
    """
    left_cmd, right_cmd, cmd = ctrl_cmd[0], ctrl_cmd[1], ctrl_cmd[2]
    feature = {
        'image': image_feature(image),
        'path': bytes_feature(path),
        'left': float_feature(float(left_cmd) / 255.0),
        'right': float_feature(float(right_cmd) / 255.0),
        'cmd': float_feature(float(cmd)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
class Seq2SeqSequenceClassifierOutput(ModelOutput):
    """Output container for sequence classification with an encoder-decoder
    model. All fields default to None; which ones are populated depends on
    the model's configuration flags.
    """

    # Loss value (present when labels are supplied).
    loss: Optional[torch.FloatTensor] = None
    # Raw classification scores.
    logits: torch.FloatTensor = None
    # Cached key/value states reusable for faster decoding.
    past_key_values: Optional[List[torch.FloatTensor]] = None
    # Per-layer decoder hidden states.
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer decoder self-attention weights.
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer decoder-over-encoder attention weights.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Final encoder hidden-state sequence.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # Per-layer encoder hidden states.
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer encoder self-attention weights.
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
class DummyActorPolicy():
    """Minimal stand-in actor policy for tests: always emits a fixed action."""

    def __init__(self, action=1.0):
        self._action = action

    def __call__(self, observation):
        action = torch.Tensor([self._action])
        return (_MockDistribution(action), {})

    def action(self, unused_observation):
        del unused_observation
        # Fixed: torch.Tensor(...) does not accept a dtype keyword and raised
        # TypeError here; torch.tensor(...) is the documented factory for that.
        return torch.tensor([self._action], dtype=torch.float32)

    def parameters(self):
        # Dummy parameter vector so optimizer plumbing has something to hold.
        return torch.zeros(5)
def warning_suppress(func):
    """Decorator that silences all warnings raised while *func* executes.

    The warning filter change is scoped via warnings.catch_warnings(), so the
    process-wide filters are restored afterwards.
    """
    from functools import wraps

    # Fixed: the source had a stray '(func)' where the decorator belonged;
    # @wraps preserves the wrapped function's name/docstring.
    @wraps(func)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            return func(*args, **kwargs)
    return wrapper
def tail2label(tails):
    """Map emotion 'tail' strings to integer labels via a lazily-loaded table.

    The table is read once from config['path']['Tail2Emotion'] and cached in
    the module-global ``tail2label_table``. Unknown tails map to -1.
    """
    global tail2label_table
    if tail2label_table is None:  # fixed: 'is None' instead of '== None'
        # Fixed: the file handle was previously leaked; use a context manager.
        with open(config['path']['Tail2Emotion'], encoding='utf8') as f:
            tail2label_table = json.load(f)
    # dict.get replaces the explicit 'in .keys()' membership test.
    return [tail2label_table.get(tail, -1) for tail in tails]
class Decoder(nn.Module):
    """Reconstructs signals from codes by multiplying with a dictionary built
    from the stored pole parameters (rr, theta) over T time steps."""

    def __init__(self, rr, theta, T, gpu_id):
        super(Decoder, self).__init__()
        self.rr = rr
        self.theta = theta
        self.T = T
        self.gid = gpu_id

    def forward(self, x):
        # Rebuild the dictionary each call (rr/theta may be learnable).
        dictionary = creatRealDictionary(self.T, self.rr, self.theta, self.gid)
        return torch.matmul(dictionary, x)
def test_typechange(conf_dict):
    """Values updated with a different type must appear in `typechanged`
    as (old type, new type) pairs."""
    cfg = conf_dict({'a': 'bar', 'b': 'foo', 'c': 1})
    expected = {
        'a': (int, str),
        'b': (float, str),
        'c': (bool, int),
    }
    assert cfg.typechanged == expected
class Content_Density(object):
    """Content density: content words (nouns, verbs, adjectives, adverbs)
    divided by function words (determiners, adpositions, pronouns,
    conjunctions), totalled over all sentence objects."""

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        content_tags = (NOUN, VERB, ADJECTIVE, ADVERB)
        function_tags = (DETERMINER, ADPOSITION, PRONOUN, CONJUNCTION)
        content = sum(
            so.pos_tag_counter.get_pos_tag_count(tag)
            for so in self.sentence_objs
            for tag in content_tags
        )
        function = sum(
            so.pos_tag_counter.get_pos_tag_count(tag)
            for so in self.sentence_objs
            for tag in function_tags
        )
        if function == 0:
            # Undefined ratio when no function words occur.
            return NOT_AVAILABLE
        return content / function
class SuperResK1KX(PlainNetBasicBlockClass):
    """Stack of residual sub-layers, each a 1x1 -> KxK conv pair.

    ``expansion`` scales the hidden (1x1 output) channel count; only the
    first sub-layer applies ``stride`` and changes the channel count, later
    sub-layers keep out_channels with stride 1.
    """

    def __init__(self, in_channels=0, out_channels=0, kernel_size=3, stride=1, expansion=1.0, sublayers=1, no_create=False, block_name=None, **kwargs):
        super(SuperResK1KX, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.expansion = expansion
        self.stride = stride
        self.sublayers = sublayers
        self.no_create = no_create
        self.block_name = block_name
        self.shortcut_list = nn.ModuleList()
        self.conv_list = nn.ModuleList()
        for layerID in range(self.sublayers):
            if (layerID == 0):
                # First sub-layer: may change channels and apply the stride.
                current_in_channels = self.in_channels
                current_out_channels = self.out_channels
                current_stride = self.stride
                current_kernel_size = self.kernel_size
            else:
                # Subsequent sub-layers preserve shape.
                current_in_channels = self.out_channels
                current_out_channels = self.out_channels
                current_stride = 1
                current_kernel_size = self.kernel_size
            current_expansion_channel = int(round((current_out_channels * self.expansion)))
            # 1x1 (expand) -> BN -> ReLU -> KxK (project, same-padding) -> BN.
            the_conv_block = nn.Sequential(nn.Conv2d(current_in_channels, current_expansion_channel, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(current_expansion_channel), nn.ReLU(), nn.Conv2d(current_expansion_channel, current_out_channels, kernel_size=current_kernel_size, stride=current_stride, padding=((current_kernel_size - 1) // 2), bias=False), nn.BatchNorm2d(current_out_channels))
            self.conv_list.append(the_conv_block)
            # Identity shortcut when the shape is preserved, else 1x1 + BN.
            if ((current_stride == 1) and (current_in_channels == current_out_channels)):
                shortcut = nn.Sequential()
            else:
                shortcut = nn.Sequential(nn.Conv2d(current_in_channels, current_out_channels, kernel_size=1, stride=current_stride, padding=0, bias=False), nn.BatchNorm2d(current_out_channels))
            self.shortcut_list.append(shortcut)
        pass

    def forward(self, x):
        # For each sub-layer: residual add, then ReLU.
        output = x
        for (block, shortcut) in zip(self.conv_list, self.shortcut_list):
            conv_output = block(output)
            output = (conv_output + shortcut(output))
            output = F.relu(output)
        return output

    def create_from_str(s, no_create=False):
        # Parse 'SuperResK1KX([name|]in,out,k,stride,expansion,sublayers)…'
        # and return (block instance, remainder of the string). Intended to
        # be called via the class (no self parameter).
        assert SuperResK1KX.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert (idx is not None)
        param_str = s[len('SuperResK1KX('):idx]
        tmp_idx = param_str.find('|')
        if (tmp_idx < 0):
            # No explicit block name given: generate a unique one.
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        param_str_split = param_str.split(',')
        in_channels = int(param_str_split[0])
        out_channels = int(param_str_split[1])
        kernel_size = int(param_str_split[2])
        stride = int(param_str_split[3])
        expansion = float(param_str_split[4])
        sublayers = int(param_str_split[5])
        return (SuperResK1KX(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, expansion=expansion, sublayers=sublayers, block_name=tmp_block_name, no_create=no_create), s[(idx + 1):])

    def is_instance_from_str(s):
        # String-dispatch predicate; also called via the class, no instance.
        if (s.startswith('SuperResK1KX(') and (s[(- 1)] == ')')):
            return True
        else:
            return False
def generate_combined_transform_function(transform_funcs, indices=None):
    """Compose the transforms at the given indices into a single callable.

    Args:
        transform_funcs: sequence of one-argument transform callables.
        indices: positions of the transforms to apply, in order
            (defaults to [0], i.e. just the first transform).

    Returns:
        A function applying the selected transforms left-to-right to a sample.
    """
    # Fixed: the default was a shared mutable list ([0]); use a None sentinel.
    if indices is None:
        indices = [0]
    for index in indices:
        print(transform_funcs[index])

    def combined_transform_func(sample):
        for index in indices:
            sample = transform_funcs[index](sample)
        return sample

    return combined_transform_func
def eval(path):
    """Evaluate one sample directory and pickle the resulting metrics.

    Mode comes from the module-level ``args``: surface reconstruction
    (--reconst), voxelization (--voxels) or point cloud (default).
    NOTE(review): shadows the builtin ``eval``; ``min``/``max`` passed to
    eval_mesh are module globals here (presumably evaluation bounds), not the
    builtins — confirm upstream.
    """
    if args.reconst:
        eval_file_name = '/eval.pkl'
    elif args.voxels:
        eval_file_name = '/eval_voxelization_{}.pkl'.format(args.res)
    else:
        eval_file_name = '/eval_pointcloud_{}.pkl'.format(args.points)
    try:
        if os.path.exists((path + eval_file_name)):
            # Already evaluated: skip, which makes batch reruns idempotent.
            print('File exists. Done.')
            return
        else:
            # Last two path components identify the class folder and sample.
            path = os.path.normpath(path)
            folder = path.split(os.sep)[(- 2)]
            file_name = path.split(os.sep)[(- 1)]
            if args.reconst:
                # Compare the reconstructed surface mesh against the GT mesh.
                pred_mesh_path = (path + '/surface_reconstruction.off')
                pred_mesh = trimesh.load(pred_mesh_path, process=False)
                gt_mesh_path = (data_path + '/{}/{}/isosurf_scaled.off'.format(folder, file_name))
                gt_mesh = trimesh.load(gt_mesh_path, process=False)
                eval = eval_mesh(pred_mesh, gt_mesh, min, max)
            elif args.voxels:
                # Re-mesh the voxel occupancy grid, export it, compare to GT.
                voxel_path = (path + '/voxelization_{}.npy'.format(args.res))
                occ = np.unpackbits(np.load(voxel_path))
                voxels = np.reshape(occ, ((args.res,) * 3))
                off_path = (path + '/voxelization_{}.off'.format(args.res))
                input_mesh = VoxelGrid(voxels, [0, 0, 0], 1).to_mesh()
                input_mesh.export(off_path)
                gt_mesh_path = (data_path + '/{}/{}/isosurf_scaled.off'.format(folder, file_name))
                gt_mesh = trimesh.load(gt_mesh_path, process=False)
                eval = eval_mesh(input_mesh, gt_mesh, min, max)
            else:
                # Point-cloud mode: stored input points vs. GT surface samples.
                input_points_path = (path + '/voxelized_point_cloud_128res_{}points.npz'.format(args.points))
                input_points = np.load(input_points_path)['point_cloud'].astype(np.float32)
                gt_mesh_path = (data_path + '/{}/{}/isosurf_scaled.off'.format(folder, file_name))
                gt_mesh = trimesh.load(gt_mesh_path, process=False)
                (pointcloud_gt, idx) = gt_mesh.sample(100000, return_index=True)
                pointcloud_gt = pointcloud_gt.astype(np.float32)
                eval = eval_pointcloud(input_points, pointcloud_gt)
            pkl.dump(eval, open((path + eval_file_name), 'wb'))
            print('Finished {}'.format(path))
    except Exception as err:
        # Best-effort batch processing: report the failure and carry on.
        print('Error with {}: {}'.format(path, traceback.format_exc()))
class Flatten(KerasLayer):
    """Keras-style Flatten layer wrapper; forwards an optional input shape."""

    def __init__(self, input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(Flatten, self).__init__(None, shape, **kwargs)
class TestKerasInKerasOut(unittest.TestCase):
    """End-to-end test: quantize a Keras model via neural_compressor with the
    ITEX backend and verify the saved artifact is still a Keras model that
    contains quantize/dequantize layers, then benchmark it.

    NOTE(review): setUpClass/tearDownClass are written as instance methods;
    unittest expects them as @classmethod — decorators may have been lost,
    confirm upstream.
    """

    def setUpClass(self):
        # Enable oneDNN graph optimizations required by the ITEX backend.
        os.environ['ITEX_ONEDNN_GRAPH'] = '1'

    def tearDownClass(self):
        # Clean up model artifacts produced by the test run.
        shutil.rmtree('baseline_model', ignore_errors=True)
        shutil.rmtree('itex_qdq_keras_model', ignore_errors=True)

    def test_keras_in_keras_out(self):
        logger.info('Run test_keras_in_keras_out case...')
        global test_mode
        test_mode = 'accuracy'
        build_model()
        from neural_compressor import set_random_seed
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data.dataloaders.dataloader import DataLoader
        from neural_compressor.quantization import fit
        set_random_seed(9527)
        config = PostTrainingQuantConfig(backend='itex')
        logger.info('Run Quantization...')
        q_model = fit(keras.models.load_model('./baseline_model'), conf=config, calib_dataloader=DataLoader(framework='tensorflow', dataset=Dataset()), eval_func=eval_func)
        q_model.save('itex_qdq_keras_model')
        self.assertEqual(q_model.framework(), 'keras')
        framework_config = {'framework': 'keras', 'approach': 'post_training_static_quant'}
        q_model.q_config = framework_config
        self.assertEqual(q_model.q_config['framework'], 'keras')
        self.assertEqual(q_model.graph_info, None)
        self.assertEqual(q_model.framework(), 'keras')
        self.assertEqual(isinstance(q_model.model, tf.keras.Model), True)
        # Reload the saved quantized model and look for Q/DQ layers by name.
        model = keras.models.load_model('./itex_qdq_keras_model')
        model.summary()
        found_quantize = False
        found_dequantize = False
        for layer in model.layers:
            if ('quantize' in layer.name):
                found_quantize = True
            if ('dequantize' in layer.name):
                found_dequantize = True
        self.assertEqual(found_quantize, True)
        self.assertEqual(found_dequantize, True)
        # This second 'fit' import deliberately shadows the quantization
        # 'fit' above: it is the benchmark entry point.
        from neural_compressor.benchmark import fit
        from neural_compressor.config import BenchmarkConfig
        conf = BenchmarkConfig(backend='itex', iteration=100, cores_per_instance=1, num_of_instance=1)
        logger.info('Run BenchMark...')
        test_mode = 'performance'
        fit(model, conf, b_func=eval_func)
def evaluate(args, task_dataloader_val, task_cfg, device, task_id, model, task_losses, log_f):
    """Run validation for one task and append per-metric averages to log_f.

    Metric accumulation is driven by name via exec/eval over the entries in
    ``returned_variables`` (stripping the 'batch_' prefix) — fragile, but it
    relies on CPython sharing the locals() mapping between exec and eval in
    the same frame. Returns the accumulated per-sample result dicts.
    """
    from vilbert.vilbert_mavex import VILBertForVLTasks
    from vilbert.task_utils import LoadDatasets, LoadLosses, ForwardModelsTrain, ForwardModelsVal
    model.eval()  # disable dropout / switch BN to eval statistics
    returned_variables = ['batch_score', 'batch_score_w', 'batch_score_c', 'batch_score_i', 'batch_score_k', 'batch_size', 'batch_score_final_pred', 'results', 'batch_score_mavex_k', 'batch_score_mavex_w', 'batch_score_mavex_c', 'batch_score_mavex_i']
    # Initialise an accumulator (e.g. 'score', 'size') for each batch_* name.
    for item in returned_variables:
        if ('batch' in item):
            exec((item[6:] + '=0.'))
    results = []
    for batch in tqdm(iter(task_dataloader_val[task_id])):
        returned_dicts = ForwardModelsVal(args, task_cfg, device, task_id, batch, model, task_losses)
        results += returned_dicts['results']
        # Add this batch's value into the corresponding stripped-name counter.
        for item in returned_variables:
            if ('batch' in item):
                exec((item[6:] + ('+=returned_dicts["%s"]' % item)))
    string = 'VALIDATION: '
    for item in returned_variables:
        if ('score' in item):
            # Average score = accumulated score / accumulated sample count.
            string += ('%s: %.4f, ' % (item[6:], (eval(item[6:]) / (eval('size') + 1e-06))))
    string = (string[:(- 2)] + '\n')  # drop trailing ', ' and newline-terminate
    log_f.writelines(string)
    log_f.flush()
    model.train()  # restore training mode before returning
    return results
def main():
    """Initialise the GUI application, show the example window and run the
    event loop until the window closes."""
    app = gui.Application.instance
    app.initialize()
    w = ExampleWindow()  # keeps the window object alive for the loop
    app.run()
class ContextFilter():
    """logging filter that rewrites logger names before records are emitted.

    'BASELINE.x' / 'MAIN.x' become 'x'; 'TESTING.x' becomes 'TESTING'.
    """

    def filter(self, record):
        split_name = record.name.split('.', 1)
        if split_name[0] in ('BASELINE', 'MAIN'):
            if len(split_name) > 1:
                record.name = split_name[1]
        if split_name[0] == 'TESTING':
            if len(split_name) > 1:
                record.name = split_name[0]
        # Always admit the record. The flattened source only clearly returned
        # True on the TESTING branch; a filter returning None elsewhere would
        # silently drop all other records, so make the pass-through explicit.
        return True
def create_tri_parametric_color_ramp_node(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
    """Add a 'Tri Parametric Color Ramp' group node to *node_tree*.

    The shared node group is created on first use and reused afterwards.
    """
    group_key = 'Tri Parametric Color Ramp'
    if group_key in bpy.data.node_groups:
        ramp_group = bpy.data.node_groups[group_key]
    else:
        ramp_group = add_tri_parametric_color_ramp()
    node = node_tree.nodes.new(type='ShaderNodeGroup')
    node.name = group_key
    node.node_tree = ramp_group
    return node
def _visit_dict_config(cfg, func):
    """Pre-order traversal of an OmegaConf tree, calling *func* on every
    DictConfig node (including nested ones under lists)."""
    if isinstance(cfg, DictConfig):
        func(cfg)
        for child in cfg.values():
            _visit_dict_config(child, func)
    elif isinstance(cfg, ListConfig):
        for child in cfg:
            _visit_dict_config(child, func)
def split_train_test(anno_list, train_ratio_hard=0.5, train_num=1500):
    """Split annotations into train/test lists by data source.

    Sources are identified from the file-name prefix: 'nf' (NF-V1), 'GOG'
    (Google) and NZL/SGP/UK (OpenFood). OpenFood and Google are split by
    ``train_ratio_hard``; NF-V1 then tops the train set up to ``train_num``
    samples. Both splits are checked to cover exactly the PRE_DEFINE_KEY
    entity set and to share no file names. Uses the global ``random`` state.

    Returns:
        (train_list, test_list)
    """
    nf_data = []
    google_data = []
    openfood_data = []
    # Bucket annotations by source, inferred from the file-name prefix.
    for anno_ in anno_list:
        file_name = anno_['file_name'].split('/')[(- 1)]
        if (file_name[:2] == 'nf'):
            nf_data.append(anno_)
        elif (file_name[:3] == 'GOG'):
            google_data.append(anno_)
        elif (file_name.split('_')[0] in ['NZL', 'SGP', 'UK']):
            openfood_data.append(anno_)
        else:
            raise ValueError(f'Not match: {anno_}')
    # Shuffle within each source before taking split prefixes.
    random.shuffle(nf_data)
    random.shuffle(google_data)
    random.shuffle(openfood_data)
    print(f'After split:')
    print(f'NF_V1: {len(nf_data)}')
    print(f'Google: {len(google_data)}')
    print(f'OpenFood: {len(openfood_data)}')
    train_list = []
    test_list = []
    num_train = int((len(openfood_data) * train_ratio_hard))
    train_list += openfood_data[:num_train]
    test_list += openfood_data[num_train:]
    print(f'OpenFood:')
    print(f' train:{num_train}, test:{(len(openfood_data) - num_train)}')
    if (len(google_data) > 0):
        num_train = int((len(google_data) * train_ratio_hard))
        train_list += google_data[:num_train]
        test_list += google_data[num_train:]
        print(f'Google:')
        print(f' train:{num_train}, test:{(len(google_data) - num_train)}')
    # NF-V1 fills the train set to exactly train_num; the rest goes to test.
    num_train = (train_num - len(train_list))
    train_list += nf_data[:num_train]
    test_list += nf_data[num_train:]
    print(f'NF-V1:')
    print(f' train:{num_train}, test:{(len(nf_data) - num_train)}')
    # Sanity check: the train split must contain every pre-defined entity.
    train_cls = []
    for info_ in tqdm(train_list):
        for ins_ in info_['annotations']:
            assert (len(ins_['text']) == len(ins_['entity']))
            for entity_tag in ins_['entity']:
                if (entity_tag[0] in ['I', 'B']):
                    cur_type = entity_tag[2:]  # strip the 'B-'/'I-' prefix
                else:
                    cur_type = 'O'
                if (cur_type == 'O'):
                    continue
                if (cur_type not in train_cls):
                    train_cls.append(cur_type)
    assert (len(train_cls) == len(PRE_DEFINE_KEY)), f'{train_cls}'
    for cls in train_cls:
        if (cls not in PRE_DEFINE_KEY):
            raise RuntimeError(f'Unexpected entity: {cls}')
    # Same entity-coverage check for the test split.
    test_cls = []
    for info_ in tqdm(test_list):
        for ins_ in info_['annotations']:
            assert (len(ins_['text']) == len(ins_['entity']))
            for entity_tag in ins_['entity']:
                if (entity_tag[0] in ['I', 'B']):
                    cur_type = entity_tag[2:]
                else:
                    cur_type = 'O'
                if (cur_type == 'O'):
                    continue
                if (cur_type not in test_cls):
                    test_cls.append(cur_type)
    assert (len(test_cls) == len(PRE_DEFINE_KEY)), f'{test_cls}'
    for cls in test_cls:
        if (cls not in PRE_DEFINE_KEY):
            raise RuntimeError(f'Unexpected entity: {cls}')
    # No file may appear in both splits.
    test_file_names = [x['file_name'] for x in test_list]
    for x in train_list:
        assert (x['file_name'] not in test_file_names), f"Exists duplicate: {x['file_name']}"
    print(f'In total, Num_train:{len(train_list)}, Num_test:{len(test_list)}')
    return (train_list, test_list)
class Mask2FormerPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: instantiation fails via requires_backends
    unless the 'torch' backend is available."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class KandinskyPipeline(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error unless both 'torch'
    and 'transformers' backends are available."""

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    # Fixed: these alternate constructors take `cls` and are called on the
    # class; without @classmethod the first positional argument would be
    # misbound.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
from functools import lru_cache


# Fixed: the decorator was garbled ('_cache()'); the availability probe is
# stable per-process, so cache its result.
@lru_cache()
def is_torch_tpu_available(check_device=True):
    """Return True if torch_xla is importable and, when check_device is set,
    an XLA device can actually be acquired.

    Returns False immediately when torch itself is unavailable.
    """
    if not _torch_available:
        return False
    if importlib.util.find_spec('torch_xla') is not None:
        if check_device:
            # Probe for a real device; torch_xla raises RuntimeError when
            # no XLA device is reachable.
            try:
                import torch_xla.core.xla_model as xm
                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
        return True
    return False
class InputInjection(nn.Module):
    """Downsamples the input by chaining `num_downsampling` 3x3 stride-2
    average-pooling stages (each halves the spatial resolution)."""

    def __init__(self, num_downsampling):
        super(InputInjection, self).__init__()
        self.pool = nn.ModuleList(
            nn.AvgPool2d(3, stride=2, padding=1) for _ in range(num_downsampling)
        )

    def forward(self, x):
        for stage in self.pool:
            x = stage(x)
        return x
def hungarian_match(flat_preds, flat_targets, preds_k, targets_k) -> Tuple[(torch.Tensor, Dict[(int, int)])]:
    """Optimally relabel predicted cluster ids to match ground-truth labels.

    Builds a num_k x num_k agreement matrix and solves the assignment problem
    (via ``linear_assignment`` on the disagreement matrix), then rewrites the
    predictions through the resulting mapping.

    Args:
        flat_preds: 1-D tensor of predicted cluster ids.
        flat_targets: 1-D tensor of ground-truth labels, same shape/device.
        preds_k / targets_k: number of clusters; must be equal.

    Returns:
        (relabelled predictions on flat_preds' device, {pred_id: gt_id} map).
    """
    assert (isinstance(flat_preds, torch.Tensor) and isinstance(flat_targets, torch.Tensor) and (flat_preds.is_cuda == flat_targets.is_cuda))
    assert (flat_preds.shape == flat_targets.shape)
    num_samples = flat_targets.shape[0]
    assert (preds_k == targets_k)
    num_k = preds_k
    num_correct = np.zeros((num_k, num_k))
    # Count agreements for every (predicted cluster, target label) pair.
    for c1 in range(num_k):
        for c2 in range(num_k):
            votes = int(((flat_preds == c1) * (flat_targets == c2)).sum())
            num_correct[(c1, c2)] = votes
    with warnings.catch_warnings():
        # linear_assignment is deprecated upstream; silence only that warning.
        warnings.simplefilter('ignore', DeprecationWarning)
        # Minimise disagreement == maximise agreement.
        match = linear_assignment((num_samples - num_correct))
    res = {}
    for (out_c, gt_c) in match:
        res[out_c] = gt_c
    # Rewrite each predicted id through the optimal mapping.
    flat_preds_reorder = torch.zeros_like(flat_preds)
    for (k, v) in res.items():
        flat_preds_reorder[(flat_preds == k)] = torch.Tensor([v])
    return (flat_preds_reorder.to(flat_preds.device), res)
class JavaParser(Parser):
    """Parser subclass for Java source; parsing is not implemented yet."""

    def __init__(self, *args, **kwargs):
        super(JavaParser, self).__init__(*args, **kwargs)

    def parse(self, code):
        # Placeholder until Java support lands.
        raise NotImplementedError('Not yet implemented')
def set_seed(seed):
    """Seed python, numpy and torch RNGs and record the value in the
    module-global ``seed_``."""
    global seed_
    # NOTE(review): the modulo operand was lost in the source ('seed %= ');
    # 2**32 keeps the value within the range accepted by numpy's legacy
    # seeding — confirm the original bound upstream.
    seed %= 2 ** 32
    seed_ = seed
    import random
    random.seed(seed)
    np.random.seed(seed)
    import torch
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    print(colorize(f'using seed {seed}', 'green'))
def test_digits_corr_naive_init():
    """The naive optimizer warm-started with the first five ranked items must
    reproduce the reference ranking, gains and subset for the remainder."""
    warm_start = digits_corr_ranking[:5]
    model = SaturatedCoverageSelection(100, 'corr', optimizer='naive', initial_subset=warm_start)
    model.fit(X_digits)
    assert_array_equal(model.ranking[:-5], digits_corr_ranking[5:])
    assert_array_almost_equal(model.gains[:-5], digits_corr_gains[5:], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
class WeightPruningConfig():
    """Configuration holder for weight pruning.

    All scalar options are folded into a DotDict exposed via the
    ``weight_compression`` property; extra kwargs are merged in as-is.
    NOTE(review): the mutable defaults (pruning_configs=[{}], op_names=[],
    ...) mirror the original signature and are kept for compatibility.
    """

    def __init__(self, pruning_configs=[{}], target_sparsity=0.9, pruning_type='snip_momentum', pattern='4x1', op_names=[], excluded_op_names=[], start_step=0, end_step=0, pruning_scope='global', pruning_frequency=1, min_sparsity_ratio_per_op=0.0, max_sparsity_ratio_per_op=0.98, sparsity_decay_type='exp', pruning_op_types=['Conv', 'Linear'], **kwargs):
        self.pruning_configs = pruning_configs
        self._weight_compression = DotDict({'target_sparsity': target_sparsity, 'pruning_type': pruning_type, 'pattern': pattern, 'op_names': op_names, 'excluded_op_names': excluded_op_names, 'start_step': start_step, 'end_step': end_step, 'pruning_scope': pruning_scope, 'pruning_frequency': pruning_frequency, 'min_sparsity_ratio_per_op': min_sparsity_ratio_per_op, 'max_sparsity_ratio_per_op': max_sparsity_ratio_per_op, 'sparsity_decay_type': sparsity_decay_type, 'pruning_op_types': pruning_op_types})
        self._weight_compression.update(kwargs)

    # Fixed: the getter lost its @property and the setter decorator was
    # garbled ('_compression.setter'); restore the property pair.
    @property
    def weight_compression(self):
        return self._weight_compression

    @weight_compression.setter
    def weight_compression(self, weight_compression):
        self._weight_compression = weight_compression
class DepthEvaluator(Harness):
    """Harness that evaluates depth predictions per domain, printing metric
    tables and saving comparison images."""

    def _init_validation(self, opt):
        # Cache the evaluation-relevant options.
        self.fixed_depth_scaling = opt.depth_validation_fixed_scaling
        self.ratio_on_validation = opt.depth_ratio_on_validation
        self.val_num_log_images = opt.eval_num_images

    def evaluate(self):
        """Run depth validation; print scaling-ratio stats and a metric row
        per domain, then save (color, pred-depth, gt-depth) image strips."""
        print('Evaluate depth predictions:', flush=True)
        (scores, ratios, images) = self._run_depth_validation(self.val_num_log_images)
        for domain in scores:
            print(f' - Results for domain {domain}:')
            if (len(ratios[domain]) > 0):
                ratios_np = np.array(ratios[domain])
                if self.ratio_on_validation:
                    # Use only the first quarter of the ratios — presumably
                    # the validation portion; confirm in _run_depth_validation.
                    dataset_split_pos = int((len(ratios_np) / 4))
                else:
                    dataset_split_pos = int(len(ratios_np))
                ratio_median = np.median(ratios_np[:dataset_split_pos])
                # Std of ratios normalised by their median.
                ratio_norm_std = np.std((ratios_np[:dataset_split_pos] / ratio_median))
                print(' Scaling ratios | med: {:0.3f} | std: {:0.3f}'.format(ratio_median, ratio_norm_std))
            metrics = scores[domain].get_scores()
            # Header row, then a LaTeX-friendly '&'-separated value row.
            print(('\n ' + ('{:>8} | ' * 7).format('abs_rel', 'sq_rel', 'rmse', 'rmse_log', 'a1', 'a2', 'a3')))
            print((('&{: 8.3f} ' * 7).format(metrics['abs_rel'], metrics['sq_rel'], metrics['rmse'], metrics['rmse_log'], metrics['delta1'], metrics['delta2'], metrics['delta3']) + '\\\\'))
        for domain in images:
            # Save side-by-side (color, predicted depth, GT depth) strips.
            domain_dir = os.path.join(self.log_path, 'eval_images', domain)
            os.makedirs(domain_dir, exist_ok=True)
            for (i, (color_gt, depth_gt, depth_pred)) in enumerate(images[domain]):
                image_path = os.path.join(domain_dir, f'img_{i}.png')
                logged_images = (color_gt, colors.depth_norm_image(depth_pred), colors.depth_norm_image(depth_gt))
                save_image(torch.cat(logged_images, 2).clamp(0, 1), image_path)
        self._log_gpu_memory()
def cast_ndarray_type(x):
    """Narrow 64-bit numpy arrays to 32-bit (int64→int32, float64→float32);
    arrays of any other dtype are returned unchanged."""
    if x.dtype == np.int64:
        target = np.int32
    elif x.dtype == np.float64:
        target = np.float32
    else:
        return x
    return x.astype(target)
def test_log_scalar_metric_with_implicit_step(ex):
    """Metrics logged without an explicit step get monotonically increasing
    auto-assigned steps starting at 0."""
    messages = {}

    # Fixed: the decorator was lost in the source; sacred only invokes a
    # function from ex.run() when it is registered as the experiment main.
    @ex.main
    def main_function(_run):
        for i in range(10):
            val = i * i
            ex.log_scalar('training.loss', val)
        messages['messages'] = ex.current_run._metrics.get_last_metrics()

    ex.run()
    assert ex.current_run is not None
    messages = messages['messages']
    assert len(messages) == 10
    for i in range(len(messages) - 1):
        assert messages[i].step < messages[i + 1].step
        assert messages[i].step == i
        assert messages[i].timestamp <= messages[i + 1].timestamp
def get_parser():
    """Build the command-line parser (-c: path to the config file)."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-c', help='Config file path.')
    return cli
def kl_divergence(mu, log_sigma, device='cpu'):
    """Mean (over the batch) KL divergence of N(mu, exp(log_sigma)) from the
    standard normal, summed over the last dimension.

    ``device`` is unused; kept for call-site compatibility.
    """
    inner = 1.0 + log_sigma - mu ** 2 - torch.exp(log_sigma)
    per_sample = -0.5 * torch.sum(inner, dim=-1)
    return torch.mean(per_sample)
class XconfigRes2Block(XconfigLayerBase):
    """Xconfig layer 'res2-block': a ResNet-style residual block built from
    TimeHeightConvolutionComponents with batchnorm, scale/offset and ReLU.

    With num-bottleneck-filters unset (<= 0) it generates two 3x3 convolutions
    (output is ``<name>.relu2``); otherwise a 1x1 / 3x3 / 1x1 bottleneck stack
    (output is ``<name>.relu3``).  When the input cannot be used directly as
    the residual bypass (height subsampling or a filter-count change), a 1x1
    bypass convolution is added.
    """

    def __init__(self, first_token, key_to_value, prev_names=None):
        assert (first_token == 'res2-block')
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        # -1 / '' mean "unset": either derived later or filled with component defaults.
        self.config = {'input': '[-1]',
                       'height': (- 1),
                       'height-in': (- 1),
                       'height-out': (- 1),
                       'num-filters': (- 1),
                       'num-bottleneck-filters': (- 1),
                       'time-period': 1,
                       'self-repair-scale': 2e-05,
                       'self-repair-lower-threshold1': 0.05,
                       'self-repair-lower-threshold2': 0.05,
                       'self-repair-lower-threshold3': 0.05,
                       'max-change': 0.75,
                       'allow-zero-padding': True,
                       'param-stddev': '',
                       'bias-stddev': '',
                       'use-natural-gradient': '',
                       'rank-in': '',
                       'rank-out': '',
                       'num-minibatches-history': '',
                       'alpha-in': '',
                       'alpha-out': '',
                       'l2-regularize': ''}

    def set_derived_configs(self):
        """Fill in height-in/height-out from 'height', and derive num-filters
        from the input dim when the user did not specify it."""
        input_dim = self.descriptors['input']['dim']
        if (not (((self.config['height'] > 0) and (self.config['height-in'] == (- 1)) and (self.config['height-out'] == (- 1))) or ((self.config['height-out'] > 0) and (self.config['height-in'] > 0)))):
            raise RuntimeError('You must specify height, or height-in and height-out, for res2-block.')
        if (not ((self.config['height-in'] > 0) and (self.config['height-out'] > 0))):
            height = self.config['height']
            if (not (height > 0)):
                raise RuntimeError('You must specify either height, or height-in and height-out, for res2-block.')
            self.config['height-in'] = height
            self.config['height-out'] = height
        height_in = self.config['height-in']
        if ((input_dim % height_in) != 0):
            raise RuntimeError('Specified input image height {0} does not divide the input dim {1}'.format(height_in, input_dim))
        # FIX: the original unconditionally computed `input_dim / height`, which
        #  (a) referenced the local 'height' that is unbound when the user gave
        #      height-in/height-out instead of height (NameError),
        #  (b) clobbered a user-specified num-filters, making the -1 check in
        #      check_configs unreachable, and
        #  (c) used true division, so a float leaked into the generated configs.
        if (self.config['num-filters'] == (- 1)):
            self.config['num-filters'] = input_dim // height_in

    def check_configs(self):
        if (self.config['num-filters'] == (- 1)):
            raise RuntimeError('You must specify num-filters for res2-block.')

    def auxiliary_outputs(self):
        return []

    def output_name(self, auxiliary_output=None):
        # The final ReLU is relu2 for the plain block, relu3 for the bottleneck.
        b = self.config['num-bottleneck-filters']
        return ('{0}.relu2' if (b <= 0) else '{0}.relu3').format(self.name)

    def output_dim(self, auxiliary_output=None):
        assert (auxiliary_output is None)
        return (self.config['height-out'] * self.config['num-filters'])

    def get_full_config(self):
        ans = []
        b = self.config['num-bottleneck-filters']
        if (b <= 0):
            config_lines = self._generate_normal_resblock_config()
        else:
            config_lines = self._generate_bottleneck_resblock_config()
        for line in config_lines:
            for config_name in ['ref', 'final']:
                ans.append((config_name, line))
        return ans

    def _generate_normal_resblock_config(self):
        """Generate config lines for the two-conv (non-bottleneck) variant."""
        configs = []
        name = self.name
        assert (self.config['num-bottleneck-filters'] == (- 1))
        input_dim = self.descriptors['input']['dim']
        height_in = self.config['height-in']
        height_out = self.config['height-out']
        time_period_out = self.config['time-period']
        if (not ((input_dim % height_in) == 0)):
            raise RuntimeError('input-dim {0} does not divide height-in {1}'.format(input_dim, height_in))
        # FIX: integer division; a float filter count would corrupt the
        # num-filters-in/out fields in the generated component configs.
        num_filters_in = input_dim // height_in
        num_filters_out = self.config['num-filters']
        if (height_out != height_in):
            # Only ~2x height subsampling (or none) is supported.
            if ((height_out < ((height_in / 2) - 1)) or (height_out > ((height_in / 2) + 1))):
                raise RuntimeError('Expected height-out to be about half height-in, or the same: height-in={0} height-out={1}'.format(height_in, height_out))
            if (not ((time_period_out % 2) == 0)):
                raise RuntimeError('Expected time-period to be a multiple of 2 if you are subsampling on height.')
            # FIX: integer division keeps the time period integral.
            time_period_in = time_period_out // 2
            height_subsample = 2
        else:
            time_period_in = time_period_out
            height_subsample = 1
        cur_time_period = time_period_in
        cur_num_filters = num_filters_in
        cur_height = height_in
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        # The input can be used directly as the residual bypass only when
        # neither the height nor the filter count changes.
        if ((height_subsample == 1) and (num_filters_in == num_filters_out)):
            bypass_descriptor = input_descriptor
        else:
            bypass_descriptor = '{0}.conv_bypass'.format(name)
        cur_descriptor = input_descriptor
        # Collect pass-through convolution options the user actually set.
        a = []
        for opt_name in ['param-stddev', 'bias-stddev', 'use-natural-gradient', 'max-change', 'rank-in', 'rank-out', 'num-minibatches-history', 'alpha-in', 'alpha-out', 'l2-regularize']:
            value = self.config[opt_name]
            if (value != ''):
                a.append('{0}={1}'.format(opt_name, value))
        misc_conv_opts = ' '.join(a)
        for n in [1, 2]:
            # 3x3 convolution; height subsampling (if any) happens in conv1.
            conv_opts = 'height-in={hi} height-out={ho} height-offsets=-1,0,1 height-subsample-out={hs} time-offsets=-{p},0,{p} num-filters-in={fi} num-filters-out={fo} {r} {o}'.format(hi=cur_height, ho=height_out, p=cur_time_period, hs=(height_subsample if (n == 1) else 1), fi=cur_num_filters, fo=num_filters_out, r=('required-time-offsets=0' if allow_zero_padding else ''), o=misc_conv_opts)
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent {2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.conv{1}'.format(name, n)
            cur_num_filters = num_filters_out
            cur_height = height_out
            cur_time_period = time_period_out
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} block-dim={3}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)
            configs.append('component name={0}.scaleoffset{1} type=ScaleAndOffsetComponent dim={2} block-dim={3}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters))
            configs.append('component-node name={0}.scaleoffset{1} component={0}.scaleoffset{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.scaleoffset{1}'.format(name, n)
            if (n == 2):
                # Residual connection: add the bypass before the final ReLU.
                cur_descriptor = 'Sum({0}, {1})'.format(cur_descriptor, bypass_descriptor)
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent dim={2} block-dim={3} self-repair-scale={4} self-repair-lower-threshold={5}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters, self.config['self-repair-scale'], self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)
        if (bypass_descriptor != input_descriptor):
            # 1x1 bypass convolution to match filter count / height subsampling.
            conv_opts = 'height-in={hi} height-out={ho} height-offsets=0 time-offsets=0 height-subsample-out={hs} num-filters-in={fi} num-filters-out={fo} {o}'.format(hi=height_in, ho=height_out, hs=height_subsample, fi=num_filters_in, fo=num_filters_out, o=misc_conv_opts)
            configs.append('component name={0}.conv_bypass type=TimeHeightConvolutionComponent {1}'.format(name, conv_opts))
            configs.append('component-node name={0}.conv_bypass component={0}.conv_bypass input={1}'.format(name, input_descriptor))
        return configs

    def _generate_bottleneck_resblock_config(self):
        """Generate config lines for the 1x1 / 3x3 / 1x1 bottleneck variant."""
        configs = []
        name = self.name
        num_bottleneck_filters = self.config['num-bottleneck-filters']
        assert (num_bottleneck_filters > 0)
        input_dim = self.descriptors['input']['dim']
        height_in = self.config['height-in']
        height_out = self.config['height-out']
        input_descriptor = self.descriptors['input']['final-string']
        allow_zero_padding = self.config['allow-zero-padding']
        time_period_out = self.config['time-period']
        if (not ((input_dim % height_in) == 0)):
            raise RuntimeError('input-dim={0} does not divide height-in={1}'.format(input_dim, height_in))
        # FIX: integer division (see _generate_normal_resblock_config).
        num_filters_in = input_dim // height_in
        num_filters_out = self.config['num-filters']
        if (height_out != height_in):
            if ((height_out < ((height_in / 2) - 1)) or (height_out > ((height_in / 2) + 1))):
                raise RuntimeError('Expected height-out to be about half height-in, or the same: height-in={0} height-out={1}'.format(height_in, height_out))
            height_subsample = 2
        else:
            height_subsample = 1
        cur_descriptor = input_descriptor
        cur_num_filters = num_filters_in
        cur_height = height_in
        if ((height_subsample == 1) and (num_filters_in == num_filters_out)):
            bypass_descriptor = input_descriptor
        else:
            bypass_descriptor = '{0}.conv_bypass'.format(name)
        a = []
        for opt_name in ['param-stddev', 'bias-stddev', 'use-natural-gradient', 'max-change', 'rank-in', 'rank-out', 'num-minibatches-history', 'alpha-in', 'alpha-out', 'l2-regularize']:
            value = self.config[opt_name]
            if (value != ''):
                a.append('{0}={1}'.format(opt_name, value))
        misc_conv_opts = ' '.join(a)
        for n in [1, 2, 3]:
            # n == 2 is the 3x3 conv with temporal context; n == 1 and n == 3
            # are 1x1 convs into/out of the bottleneck filter count.
            height_offsets = ('-1,0,1' if (n == 2) else '0')
            this_height_subsample = (height_subsample if (n == 1) else 1)
            time_offsets = ('-{t},0,{t}'.format(t=time_period_out) if (n == 2) else '0')
            next_num_filters = (num_filters_out if (n == 3) else num_bottleneck_filters)
            conv_opts = 'height-in={h_in} height-out={h_out} height-offsets={ho} time-offsets={to} num-filters-in={fi} num-filters-out={fo} height-subsample-out={hs} {r} {o}'.format(h_in=cur_height, h_out=height_out, to=time_offsets, ho=height_offsets, hs=this_height_subsample, fi=cur_num_filters, fo=next_num_filters, r=('required-time-offsets=0' if allow_zero_padding else ''), o=misc_conv_opts)
            configs.append('component name={0}.conv{1} type=TimeHeightConvolutionComponent {2}'.format(name, n, conv_opts))
            configs.append('component-node name={0}.conv{1} component={0}.conv{1} input={2}'.format(name, n, cur_descriptor))
            cur_num_filters = next_num_filters
            cur_height = height_out
            cur_descriptor = '{0}.conv{1}'.format(name, n)
            configs.append('component name={0}.batchnorm{1} type=BatchNormComponent dim={2} block-dim={3}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters))
            configs.append('component-node name={0}.batchnorm{1} component={0}.batchnorm{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.batchnorm{1}'.format(name, n)
            configs.append('component name={0}.scaleoffset{1} type=ScaleAndOffsetComponent dim={2} block-dim={3}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters))
            configs.append('component-node name={0}.scaleoffset{1} component={0}.scaleoffset{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.scaleoffset{1}'.format(name, n)
            if (n == 3):
                # Residual connection before the final ReLU.
                cur_descriptor = 'Sum({0}, {1})'.format(cur_descriptor, bypass_descriptor)
            configs.append('component name={0}.relu{1} type=RectifiedLinearComponent dim={2} block-dim={3} self-repair-scale={4} self-repair-lower-threshold={5}'.format(name, n, (cur_num_filters * cur_height), cur_num_filters, self.config['self-repair-scale'], self.config['self-repair-lower-threshold{0}'.format(n)]))
            configs.append('component-node name={0}.relu{1} component={0}.relu{1} input={2}'.format(name, n, cur_descriptor))
            cur_descriptor = '{0}.relu{1}'.format(name, n)
        if (bypass_descriptor != input_descriptor):
            conv_opts = 'height-in={hi} height-out={ho} height-offsets=0 time-offsets=0 height-subsample-out={hs} num-filters-in={fi} num-filters-out={fo} {o}'.format(hi=height_in, ho=height_out, hs=height_subsample, fi=num_filters_in, fo=num_filters_out, o=misc_conv_opts)
            configs.append('component name={0}.conv_bypass type=TimeHeightConvolutionComponent {1}'.format(name, conv_opts))
            configs.append('component-node name={0}.conv_bypass component={0}.conv_bypass input={1}'.format(name, input_descriptor))
        return configs
def vgg11_bn(pretrained=False, **kwargs):
    """Construct a VGG-11 network with batch normalization.

    When ``pretrained`` is True, random weight initialization is skipped and
    ImageNet-pretrained weights are loaded from the model zoo instead.
    """
    if pretrained:
        kwargs['init_weights'] = False
    net = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['vgg11_bn'])
        net.load_state_dict(state)
    return net
# NOTE(review): '_module()' looks like the truncated tail of a registry
# decorator (e.g. '@MODELS.register_module()') -- confirm against the original file.
_module()
class HourglassNet(BaseModule):
    """Stacked-hourglass backbone: a stem followed by ``num_stacks`` hourglass
    modules, with intermediate supervision features returned for every stack.
    """

    def __init__(self, downsample_times: int=5, num_stacks: int=2, stage_channels: Sequence=(256, 256, 384, 384, 384, 512), stage_blocks: Sequence=(2, 2, 2, 2, 2, 4), feat_channel: int=256, norm_cfg: ConfigType=dict(type='BN', requires_grad=True), init_cfg: OptMultiConfig=None) -> None:
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg)
        self.num_stacks = num_stacks
        assert (self.num_stacks >= 1)
        assert (len(stage_channels) == len(stage_blocks))
        # One channel spec is needed per downsampling level plus the top level.
        assert (len(stage_channels) > downsample_times)
        cur_channel = stage_channels[0]
        # Stem: 7x7 stride-2 conv followed by a stride-2 residual layer (4x downsample).
        self.stem = nn.Sequential(ConvModule(3, (cur_channel // 2), 7, padding=3, stride=2, norm_cfg=norm_cfg), ResLayer(BasicBlock, (cur_channel // 2), cur_channel, 1, stride=2, norm_cfg=norm_cfg))
        self.hourglass_modules = nn.ModuleList([HourglassModule(downsample_times, stage_channels, stage_blocks) for _ in range(num_stacks)])
        # Inter-stack residual layers (one fewer than the number of stacks).
        self.inters = ResLayer(BasicBlock, cur_channel, cur_channel, (num_stacks - 1), norm_cfg=norm_cfg)
        self.conv1x1s = nn.ModuleList([ConvModule(cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        # Per-stack output heads producing ``feat_channel`` features.
        self.out_convs = nn.ModuleList([ConvModule(cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) for _ in range(num_stacks)])
        # Map output features back to the trunk channel count for the next stack.
        self.remap_convs = nn.ModuleList([ConvModule(feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        self.relu = nn.ReLU(inplace=True)

    def init_weights(self) -> None:
        """Re-initialize all Conv2d layers with their default initialization."""
        super().init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Return one feature map per stack (intermediate supervision)."""
        inter_feat = self.stem(x)
        out_feats = []
        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]
            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)
            if (ind < (self.num_stacks - 1)):
                # Fuse the trunk with the remapped stack output before the next stack.
                inter_feat = (self.conv1x1s[ind](inter_feat) + self.remap_convs[ind](out_feat))
                inter_feat = self.inters[ind](self.relu(inter_feat))
        return out_feats
class custom_dataset(torch.nn.Module):
    """Graph dataset holder that loads an edge list (txt or npz), creates random
    node embeddings, constant labels, and train/val/test masks on the GPU.

    NOTE(review): subclasses nn.Module although it only stores data; presumably
    for device/state handling -- confirm.  Masks overlap (train covers all
    nodes, val/test are prefixes), so splits are not disjoint.
    """

    def __init__(self, path, dim, num_class, load_from_txt=True):
        super(custom_dataset, self).__init__()
        self.nodes = set()
        self.load_from_txt = load_from_txt
        self.num_nodes = 0
        self.num_features = dim
        self.num_classes = num_class
        self.edge_index = None
        self.edge_attr = None
        self.init_edges(path)
        self.init_embedding(dim)
        self.init_labels(num_class)
        # Mask fractions: train uses every node; val/test are leading fractions.
        train = 1
        val = 0.3
        test = 0.1
        self.train_mask = (([1] * int((self.num_nodes * train))) + ([0] * (self.num_nodes - int((self.num_nodes * train)))))
        self.val_mask = (([1] * int((self.num_nodes * val))) + ([0] * (self.num_nodes - int((self.num_nodes * val)))))
        self.test_mask = (([1] * int((self.num_nodes * test))) + ([0] * (self.num_nodes - int((self.num_nodes * test)))))
        self.train_mask = torch.BoolTensor(self.train_mask).cuda()
        self.val_mask = torch.BoolTensor(self.val_mask).cuda()
        self.test_mask = torch.BoolTensor(self.test_mask).cuda()

    def init_edges(self, path):
        """Load the edge list from ``path`` (whitespace-separated txt pairs, or
        an .npz with 'src_li', 'dst_li', 'num_nodes') into ``edge_index``."""
        self.g = dgl.DGLGraph()
        if self.load_from_txt:
            # NOTE(review): file handle is never closed -- consider a 'with' block.
            fp = open(path, 'r')
            src_li = []
            dst_li = []
            start = time.perf_counter()
            for line in fp:
                (src, dst) = line.strip('\n').split()
                (src, dst) = (int(src), int(dst))
                src_li.append(src)
                dst_li.append(dst)
                self.nodes.add(src)
                self.nodes.add(dst)
            self.num_edges = len(src_li)
            # Node ids are assumed to be dense from 0 to max id.
            self.num_nodes = (max(self.nodes) + 1)
            self.edge_index = torch.stack([torch.Tensor(src_li), torch.Tensor(dst_li)], dim=0).long().cuda()
            dur = (time.perf_counter() - start)
            print('=> Loading (txt) {:.3f}s '.format(dur))
        else:
            if (not path.endswith('.npz')):
                raise ValueError('graph file must be a .npz file')
            start = time.perf_counter()
            graph_obj = np.load(path)
            src_li = graph_obj['src_li']
            dst_li = graph_obj['dst_li']
            self.num_nodes = graph_obj['num_nodes']
            self.num_edges = len(src_li)
            self.edge_index = torch.stack([torch.Tensor(src_li), torch.Tensor(dst_li)], dim=0).long().cuda()
            dur = (time.perf_counter() - start)
            print('=> Loading (npz): {:.3f}s '.format(dur))

    def init_embedding(self, dim):
        # Random (non-trainable here) node features of width ``dim``.
        self.x = torch.randn(self.num_nodes, dim).cuda()

    def init_labels(self, num_class):
        # Placeholder labels: every node gets class 1.
        self.y = torch.ones(self.num_nodes).long().cuda()

    def forward(*input, **kwargs):
        # No-op; ``self`` is absorbed by *input.
        pass
def get_example_outputs(agent, EnvCls, env_kwargs, examples, subprocess=False, env=None):
    """Run one env reset/step and one agent step, storing example structures
    (observation, reward, done, env_info, action, agent_info) in ``examples``.
    """
    if subprocess:
        # Running inside a worker process: keep torch single-threaded there.
        import torch
        torch.set_num_threads(1)
    if (env is None):
        env = EnvCls(**env_kwargs)
    if (not hasattr(env, 'spaces')):
        env = MVPWrapper(env)
    obs = env.reset()
    act = env.action_space.sample()
    (obs, reward, done, env_info) = env.step(act)
    reward = np.asarray(reward, dtype='float32')
    agent.reset()
    (act, agent_info) = agent.step(*torchify_buffer(AgentInputs(obs, act, reward)))
    if ('prev_rnn_state' in agent_info):
        # Keep only the leading element of the stored rnn state.
        agent_info = agent_info._replace(prev_rnn_state=agent_info.prev_rnn_state[0])
    examples['observation'] = obs
    examples['reward'] = reward
    examples['done'] = done
    examples['env_info'] = env_info
    examples['action'] = act
    examples['agent_info'] = agent_info
def register_video_dataset(name, dataset):
    """Add ``dataset`` to the module-level video-dataset registry under ``name``.

    Raises ValueError if the name is already taken.
    """
    global __video_datasets
    existing_names = list(__video_datasets.keys())
    if (name in existing_names):
        raise ValueError('The given name already exists, please choose another name excluding {}'.format(existing_names))
    __video_datasets[name] = dataset
class UserScatteredDataParallel(DictGatherDataParallel):
    """Data-parallel variant whose scatter expects a single pre-split input
    list (one element per device) and no keyword arguments."""

    def scatter(self, inputs, kwargs, device_ids):
        # Exactly one positional argument: the caller-provided per-device list.
        assert (len(inputs) == 1)
        per_device = _async_copy_stream(inputs[0], device_ids)
        per_device = [[chunk] for chunk in per_device]
        assert (len(kwargs) == 0)
        empty_kwargs = [{} for _ in range(len(per_device))]
        return (per_device, empty_kwargs)
def get_sample_bernoulli(p):
    """Return a sampler that keeps each list element independently with
    probability ``p`` (Bernoulli subsampling)."""
    def _sample(lst):
        return [item for item in lst if (random.random() < p)]
    return _sample
def insertUser(user):
    """Insert a new user row, set its categories/topics, and return the row id.

    Side effects: mutates ``user`` (hashed_password, registered timestamp) and
    commits the transaction.
    """
    # Store only a salted PBKDF2 hash of the plaintext password.
    user.hashed_password = pbkdf2_sha256.hash(user.password.encode('utf-8'))
    user.registered = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
    conn = getDb()
    with closing(conn.cursor()) as cur:
        sql = 'INSERT INTO users(email, salted_hash, firstname, lastname,\n notification_interval, registered, organization, \n dblp_profile, google_scholar_profile, \n semantic_scholar_profile, personal_website, unsubscribe_trace) \n VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
        # unsubscribe_trace gets a fresh random UUID per user.
        cur.execute(sql, (user.email, user.hashed_password, user.firstname, user.lastname, user.notification_interval, user.registered, user.organization, user.dblp_profile, user.google_scholar_profile, user.semantic_scholar_profile, user.personal_website, str(uuid4())))
        # NOTE(review): ``lastrowid`` is driver-specific (reliable for MySQL;
        # not meaningful on PostgreSQL without RETURNING) -- confirm the driver.
        user_id = cur.lastrowid
        set_user_categories(user_id, user)
        set_user_topics(user_id, user)
        conn.commit()
        return user_id
def encode_dense_query(queries: Dict[Union[str, int], str], model: Union[BertDense, RobertaDense], tokenizer, max_seq_length: int, eval_args: TrainingArguments):
    """Encode query texts into dense embeddings.

    Returns a tuple ``(embeddings, query_ids)`` where rows of ``embeddings``
    align with the sorted ``query_ids``.
    """
    logger.info('Encoding Queries...')
    query_ids = sorted(list(queries.keys()))
    texts = [queries[qid] for qid in query_ids]
    dataset = TextDataset(texts)
    collator = get_collator_func(tokenizer, max_seq_length, input_text_type='query')
    evaluater = DenseEvaluater(model=model, args=eval_args, data_collator=collator, tokenizer=tokenizer)
    embeddings = evaluater.predict(dataset).predictions
    assert (len(embeddings) == len(texts))
    return (embeddings, np.array(query_ids))
def prepare_data(args, train=True):
    """Build (train, val, test) DFR datasets for ``args.dataset`` with their
    transforms and helper methods attached.

    NOTE(review): the ``train`` parameter is unused here -- confirm whether it
    was meant to gate anything.
    """
    data_args = DFR_DATA_ARGS[args.dataset]
    if (data_args.data_transform == 'None'):
        # String sentinel 'None': use a factory that yields no transform.
        transform_cls = (lambda *args, **kwargs: None)
    else:
        transform_cls = getattr(dfr_data, data_args.data_transform)
    train_transform = transform_cls(train=True)
    test_transform = transform_cls(train=False)
    dataset_cls = getattr(dfr_data, data_args.dataset)
    trainset = dataset_cls(basedir=args.root_dir, split='train', transform=train_transform)
    testset = dataset_cls(basedir=args.root_dir, split='test', transform=test_transform)
    valset = dataset_cls(basedir=args.root_dir, split='val', transform=test_transform)
    # Attach loader/group helpers as bound methods on each split.
    trainset.get_loader = types.MethodType(get_loader, trainset)
    testset.get_loader = types.MethodType(get_loader, testset)
    valset.get_loader = types.MethodType(get_loader, valset)
    trainset.group_str = types.MethodType(group_str, trainset)
    testset.group_str = types.MethodType(group_str, testset)
    valset.group_str = types.MethodType(group_str, valset)
    def group_counts(self):
        # NOTE(review): this local helper is never referenced below; it was
        # possibly meant to be bound via types.MethodType like the helpers
        # above -- confirm against the original source.
        return self.group_counts_
    rename_group_counts(trainset)
    rename_group_counts(testset)
    rename_group_counts(valset)
    return (trainset, valset, testset)
def Basic_Fourier_model():
    """Default hyper-parameter configuration for the basic Fourier LSTM model."""
    config = dict(model='LSTM',
                  hidden_depth=3,
                  hidden_width=20,
                  recurrent_layers=2,
                  state_size=32)
    return config
class Generator(abc.ABC): def __init__(self, num_jobs: int, num_machines: int, max_num_ops: int, max_op_duration: int): self.num_jobs = num_jobs self.num_machines = num_machines self.max_num_ops = max_num_ops self.max_op_duration = max_op_duration def __call__(self, key: chex.PRNGKey) -> State:
class StartEndDataset(Dataset):
    """Moment-retrieval dataset: returns query/video features plus span and
    saliency labels for each annotation loaded from a jsonl file.

    One item is ``dict(meta=<raw jsonl entry>, model_inputs=<tensor dict>)``.
    """

    Q_FEAT_TYPES = ['pooler_output', 'last_hidden_state']

    def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir, q_feat_type='last_hidden_state', max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode='video', normalize_v=True, normalize_t=True, load_labels=True, clip_len=2, max_windows=5, span_loss_type='l1', txt_drop_ratio=0):
        self.dset_name = dset_name
        self.data_path = data_path
        self.data_ratio = data_ratio
        # Accept a single feature dir or a list of dirs (features get concatenated).
        self.v_feat_dirs = (v_feat_dirs if isinstance(v_feat_dirs, list) else [v_feat_dirs])
        self.q_feat_dir = q_feat_dir
        self.q_feat_type = q_feat_type
        self.max_q_l = max_q_l
        self.max_v_l = max_v_l
        self.ctx_mode = ctx_mode
        self.use_tef = ('tef' in ctx_mode)
        self.use_video = ('video' in ctx_mode)
        self.normalize_t = normalize_t
        self.normalize_v = normalize_v
        self.load_labels = load_labels
        self.clip_len = clip_len
        self.max_windows = max_windows
        self.span_loss_type = span_loss_type
        self.txt_drop_ratio = txt_drop_ratio
        # Text dropout is a train-only augmentation.
        if (('val' in data_path) or ('test' in data_path)):
            assert (txt_drop_ratio == 0)
        assert (q_feat_type in self.Q_FEAT_TYPES)
        self.data = self.load_data()

    def load_data(self):
        """Load the jsonl annotations, optionally truncated to ``data_ratio``."""
        datalist = load_jsonl(self.data_path)
        if (self.data_ratio != 1):
            n_examples = int((len(datalist) * self.data_ratio))
            datalist = datalist[:n_examples]
            logger.info('Using {}% of the data: {} examples'.format((self.data_ratio * 100), n_examples))
        return datalist

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        meta = self.data[index]
        model_inputs = dict()
        model_inputs['query_feat'] = self._get_query_feat_by_qid(meta['qid'])
        if self.use_video:
            model_inputs['video_feat'] = self._get_video_feat_by_vid(meta['vid'])
            ctx_l = len(model_inputs['video_feat'])
        else:
            ctx_l = self.max_v_l
        if self.use_tef:
            # Temporal endpoint features: (start, end) of each clip in [0, 1].
            tef_st = (torch.arange(0, ctx_l, 1.0) / ctx_l)
            tef_ed = (tef_st + (1.0 / ctx_l))
            tef = torch.stack([tef_st, tef_ed], dim=1)
            if self.use_video:
                model_inputs['video_feat'] = torch.cat([model_inputs['video_feat'], tef], dim=1)
            else:
                model_inputs['video_feat'] = tef
        if self.load_labels:
            model_inputs['span_labels'] = self.get_span_labels(meta['relevant_windows'], ctx_l)
            if ('subs_train' not in self.data_path):
                (model_inputs['saliency_pos_labels'], model_inputs['saliency_neg_labels']) = self.get_saliency_labels_w_annot(meta['relevant_clip_ids'], meta['saliency_scores'], ctx_l)
            else:
                # FIX: the original call omitted the ``duration`` argument
                # (TypeError at runtime); pass the annotated video duration.
                # (assumes each jsonl entry carries a 'duration' field -- TODO confirm)
                (model_inputs['saliency_pos_labels'], model_inputs['saliency_neg_labels']) = self.get_saliency_labels_sub_as_query(meta['relevant_windows'][0], meta['duration'], ctx_l)
        return dict(meta=meta, model_inputs=model_inputs)

    def get_saliency_labels_sub_as_query(self, windows, duration, ctx_l, max_n=2):
        """Derive ``max_n`` positive clip indices inside the GT window and
        ``max_n`` negatives outside it (used when subtitles act as queries)."""
        clip_len = (duration / ctx_l)
        gt_st = int((windows[0] / clip_len))
        gt_ed = max(0, (min(int((windows[1] / clip_len)), ctx_l) - 1))
        if (gt_st > gt_ed):
            gt_st = gt_ed
        if (gt_st != gt_ed):
            pos_clip_indices = random.sample(range(gt_st, (gt_ed + 1)), k=max_n)
        else:
            pos_clip_indices = [gt_st, gt_st]
        neg_pool = (list(range(0, gt_st)) + list(range((gt_ed + 1), ctx_l)))
        if (len(neg_pool) < max_n):
            # Not enough clips outside the window; fall back to index 0.
            neg_clip_indices = [0, 0]
        else:
            neg_clip_indices = random.sample(neg_pool, k=max_n)
        return (pos_clip_indices, neg_clip_indices)

    def get_saliency_labels_w_annot(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):
        """Pick hard positives/negatives from the annotated saliency scores and,
        optionally, easy ones sampled from inside/outside the relevant clips."""
        scores = np.array(scores)
        # Aggregate per-clip scores across annotators, then rank.
        agg_scores = np.sum(scores, 1)
        sort_indices = np.argsort(agg_scores)
        hard_pos_clip_indices = [min(rel_clip_ids[idx], (ctx_l - 1)) for idx in sort_indices[(- max_n):]]
        hard_neg_clip_indices = [min(rel_clip_ids[idx], (ctx_l - 1)) for idx in sort_indices[:max_n]]
        easy_pos_clip_indices = []
        easy_neg_clip_indices = []
        if add_easy_negative:
            easy_neg_pool = list((set(range(ctx_l)) - set(rel_clip_ids)))
            if (len(easy_neg_pool) >= max_n):
                easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)
                easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)
            else:
                # Every clip is relevant; reuse the hard examples.
                easy_pos_clip_indices = hard_pos_clip_indices
                easy_neg_clip_indices = hard_neg_clip_indices
        pos_clip_indices = (hard_pos_clip_indices + easy_pos_clip_indices)
        neg_clip_indices = (hard_neg_clip_indices + easy_neg_clip_indices)
        return (pos_clip_indices, neg_clip_indices)

    def get_span_labels(self, windows, ctx_l):
        """Convert second-based GT windows into training targets.

        'l1': normalized (center, width) floats; 'ce': clip-index pairs.
        """
        if (len(windows) > self.max_windows):
            random.shuffle(windows)
            windows = windows[:self.max_windows]
        if (self.span_loss_type == 'l1'):
            windows = (torch.Tensor(windows) / (ctx_l * self.clip_len))
            windows = span_xx_to_cxw(windows)
        elif (self.span_loss_type == 'ce'):
            windows = torch.Tensor([[int((w[0] / self.clip_len)), (min(int((w[1] / self.clip_len)), ctx_l) - 1)] for w in windows]).long()
        else:
            raise NotImplementedError
        return windows

    def _get_query_feat_by_qid(self, qid):
        """Load (and optionally normalize / row-drop) the query feature.

        NOTE(review): always reads 'last_hidden_state' regardless of
        ``q_feat_type`` -- confirm whether 'pooler_output' should be honored.
        """
        q_feat_path = join(self.q_feat_dir, f'qid{qid}.npz')
        q_feat = np.load(q_feat_path)['last_hidden_state'].astype(np.float32)
        q_feat = q_feat[:self.max_q_l]
        if self.normalize_t:
            q_feat = l2_normalize_np_array(q_feat)
        if (self.txt_drop_ratio > 0):
            q_feat = self.random_drop_rows(q_feat)
        return torch.from_numpy(q_feat)

    def random_drop_rows(self, embeddings):
        """Zero out ``txt_drop_ratio`` of the embedding rows (data augmentation)."""
        num_drop_rows = round((len(embeddings) * self.txt_drop_ratio))
        if (num_drop_rows > 0):
            row_indices = np.random.choice(len(embeddings), size=num_drop_rows, replace=False)
            embeddings[row_indices] = 0
        return embeddings

    def _get_video_feat_by_vid(self, vid):
        """Load video features from every feature dir, truncate to the shortest,
        and concatenate along the feature dimension."""
        v_feat_list = []
        for _feat_dir in self.v_feat_dirs:
            _feat_path = join(_feat_dir, f'{vid}.npz')
            _feat = np.load(_feat_path)['features'][:self.max_v_l].astype(np.float32)
            if self.normalize_v:
                _feat = l2_normalize_np_array(_feat)
            v_feat_list.append(_feat)
        min_len = min([len(e) for e in v_feat_list])
        v_feat_list = [e[:min_len] for e in v_feat_list]
        v_feat = np.concatenate(v_feat_list, axis=1)
        return torch.from_numpy(v_feat)
def parse_set_parameter_strings(set_para_array):
    """Parse set-parameter strings of the form 'filename' or 'probability,filename'.

    Returns the parsed entries (objects with ``filename`` and ``probability``
    attributes) passed through ``smooth_probability_distribution``.
    Raises Exception if a referenced file does not exist.
    """
    from types import SimpleNamespace

    set_list = []
    for set_para in set_para_array:
        # FIX: use a SimpleNamespace instead of the original lambda-with-setattr
        # attribute-holder hack (which also shadowed the builtin 'set').
        entry = SimpleNamespace(filename=None, probability=None)
        parts = set_para.split(',')
        if (len(parts) == 2):
            entry.probability = float(parts[0])
            entry.filename = parts[1].strip()
        else:
            entry.filename = parts[0].strip()
        if (not os.path.isfile(entry.filename)):
            raise Exception((entry.filename + ' not found'))
        set_list.append(entry)
    return smooth_probability_distribution(set_list)
def _target_samples_dict(y, n_target_samples, sampling_type): target_stats = dict(Counter(y)) set_diff_sampling_strategy_target = (set(n_target_samples.keys()) - set(target_stats.keys())) if (len(set_diff_sampling_strategy_target) > 0): raise ValueError(f'The {set_diff_sampling_strategy_target} target class is/are not present in the data.') if any(((n_samples < 0) for n_samples in n_target_samples.values())): raise ValueError(f"The number of samples in a class cannot be negative.'n_target_samples' contains some negative value: {n_target_samples}") if (sampling_type == 'under-sampling'): target_distr = copy(target_stats) for (class_label, n_target_sample) in n_target_samples.items(): n_origin_sample = target_stats[class_label] if (n_target_sample > n_origin_sample): raise ValueError(f' The target number of samples of class {class_label} should be < {n_origin_sample} (number of samples in class {class_label}) to perform under-sampling, got {n_target_sample}.') else: target_distr[class_label] = n_target_sample return target_distr elif (sampling_type == 'over-sampling'): target_distr = copy(target_stats) for (class_label, n_target_sample) in n_target_samples.items(): n_origin_sample = target_stats[class_label] if (n_target_sample < n_origin_sample): raise ValueError(f' The target number of samples of class {class_label} should be > {n_origin_sample} (number of samples in class {class_label}) to perform over-sampling, got {n_target_sample}.') else: target_distr[class_label] = n_target_sample return target_distr elif (sampling_type == 'multi-class-hybrid-sampling'): target_distr = copy(target_stats) if all(((n_target_samples[label] <= target_stats[label]) for label in n_target_samples.keys())): raise Warning(f'The target number of samples is smaller than the number of original samples for all classes. 
ONLY under-sampling will be carried out.') elif all(((n_target_samples[label] >= target_stats[label]) for label in n_target_samples.keys())): raise Warning(f'The target number of samples is greater than the number of original samples for all classes. ONLY over-sampling will be carried out.') target_distr.update(n_target_samples) return target_distr else: raise SamplingKindError
class _BaseQuantizationConfig():
    """Base container for quantization settings (model inputs/outputs,
    backend/device, tuning recipes, accuracy and tuning criteria,
    calibration sampling, per-op overrides).

    NOTE(review): this block appears to have lost its ``@property`` /
    ``@<name>.setter`` decorators during extraction — the repeated method
    pairs (e.g. two ``domain`` definitions) and the stray
    ``_name.setter``-style expression statements below are almost certainly
    remnants of ``@property`` getters plus ``@domain.setter``-style setters.
    As written, each second definition simply shadows the first and the
    stray expressions raise NameError at class-body execution.  Confirm
    against the upstream source before relying on attribute semantics here.
    """

    def __init__(self, inputs=[], outputs=[], backend='default', domain='auto', model_name='', recipes={}, quant_format='default', device='cpu', calibration_sampling_size=[100], example_inputs=None, op_type_dict=None, op_name_dict=None, reduce_range=None, excluded_precisions=[], quant_level='auto', accuracy_criterion=accuracy_criterion, tuning_criterion=tuning_criterion, diagnosis=False, ni_workload_name='quantization'):
        # NOTE(review): mutable defaults ([], {}) are shared across calls;
        # ``accuracy_criterion``/``tuning_criterion`` default to module-level
        # singletons defined elsewhere in this file.
        self.inputs = inputs
        self.outputs = outputs
        self.backend = backend
        self.domain = domain
        self.model_name = model_name
        self.recipes = recipes
        self.quant_format = quant_format
        self.device = device
        self.op_type_dict = op_type_dict
        self.op_name_dict = op_name_dict
        self.reduce_range = reduce_range
        self.excluded_precisions = excluded_precisions
        # bf16 is implicitly enabled unless explicitly excluded.
        self.use_bf16 = ('bf16' not in self.excluded_precisions)
        self.accuracy_criterion = accuracy_criterion
        self.tuning_criterion = tuning_criterion
        self.calibration_sampling_size = calibration_sampling_size
        self.quant_level = quant_level
        self._framework = None
        self.diagnosis = diagnosis
        self.ni_workload_name = ni_workload_name
        self._example_inputs = example_inputs

    # --- domain: getter + validating setter (decorators likely stripped) ---
    def domain(self):
        return self._domain

    def domain(self, domain):
        if _check_value('domain', domain, str, ['auto', 'cv', 'object_detection', 'nlp', 'recommendation_system']):
            self._domain = domain

    # --- model_name ---
    def model_name(self):
        return self._model_name
    _name.setter  # NOTE(review): remnant of ``@model_name.setter`` — confirm upstream

    def model_name(self, model_name):
        if _check_value('model_name', model_name, str):
            self._model_name = model_name

    # --- recipes: validating setter; unknown keys fall back to defaults ---
    def recipes(self):
        return self._recipes

    def recipes(self, recipes):
        if ((recipes is not None) and (not isinstance(recipes, dict))):
            raise ValueError('recipes should be a dict.')

        # Each validator returns a truthy check result when given a value,
        # or the recipe's default when called with no argument.
        def smooth_quant(val=None):
            if (val is not None):
                return _check_value('smooth_quant', val, bool)
            else:
                return False

        def smooth_quant_args(val=None):
            # NOTE(review): nesting below is reconstructed from mangled
            # whitespace — verify the placement of the list-bounds assert.
            if (val is not None):
                _check_value('smooth_quant_args', val, dict)
                for (k, v) in val.items():
                    if (k == 'alpha'):
                        if isinstance(v, str):
                            assert (v == 'auto'), "the alpha of sq only supports float, list and 'auto'"
                        elif (isinstance(v, float) or isinstance(v, int) or isinstance(v, list)):
                            continue
                        else:
                            logger.warning("Ignore the alpha as it's not a list, int or float.")
                    if isinstance(val[k], list):
                        assert all([((vv >= 0.0) and (vv <= 1.0)) for vv in val[k]]), 'The candidate value of smooth quantization alpha should be between 0 and 1.'
                return True
            else:
                return {}

        def layer_wise_quant(val=None):
            if (val is not None):
                return _check_value('layer_wise_quant', val, bool)
            else:
                return False

        def layer_wise_quant_args(val=None):
            if (val is not None):
                return _check_value('layer_wise_quant_args', val, dict)
            else:
                return {}

        def rtn_args(val=None):
            if (val is not None):
                return _check_value('rtn_args', val, dict)
            else:
                return {}

        def awq_args(val=None):
            if (val is not None):
                return _check_value('awq_args', val, dict)
            else:
                return {}

        def gptq_args(val=None):
            if (val is not None):
                return _check_value('gptq_args', val, dict)
            else:
                return {}

        def teq_args(val=None):
            if (val is not None):
                return _check_value('teq_args', val, dict)
            else:
                return {}

        def fast_bias_correction(val=None):
            if (val is not None):
                return _check_value('fast_bias_correction', val, bool)
            else:
                return False

        def weight_correction(val=None):
            if (val is not None):
                return _check_value('weight_correction', val, bool)
            else:
                return False

        def gemm_to_matmul(val=None):
            if (val is not None):
                return _check_value('gemm_to_matmul', val, bool)
            else:
                return True

        def graph_optimization_level(val=None):
            if (val is not None):
                return _check_value('graph_optimization_level', val, str, ['DISABLE_ALL', 'ENABLE_BASIC', 'ENABLE_EXTENDED', 'ENABLE_ALL'])
            else:
                return None

        def first_conv_or_matmul_quantization(val=None):
            if (val is not None):
                return _check_value('first_conv_or_matmul_quantization', val, bool)
            else:
                return True

        def last_conv_or_matmul_quantization(val=None):
            if (val is not None):
                return _check_value('last_conv_or_matmul_quantization', val, bool)
            else:
                return True

        def pre_post_process_quantization(val=None):
            if (val is not None):
                return _check_value('pre_post_process_quantization', val, bool)
            else:
                return True

        def add_qdq_pair_to_weight(val=None):
            if (val is not None):
                return _check_value('add_qdq_pair_to_weight', val, bool)
            else:
                return False

        def optypes_to_exclude_output_quant(val=None):
            if (val is not None):
                return isinstance(val, list)
            else:
                return []

        def dedicated_qdq_pair(val=None):
            if (val is not None):
                return _check_value('dedicated_qdq_pair', val, bool)
            else:
                return False

        # Registry of recipe name -> validator/default provider.
        RECIPES = {'smooth_quant': smooth_quant, 'smooth_quant_args': smooth_quant_args, 'layer_wise_quant': layer_wise_quant, 'layer_wise_quant_args': layer_wise_quant_args, 'fast_bias_correction': fast_bias_correction, 'weight_correction': weight_correction, 'gemm_to_matmul': gemm_to_matmul, 'graph_optimization_level': graph_optimization_level, 'first_conv_or_matmul_quantization': first_conv_or_matmul_quantization, 'last_conv_or_matmul_quantization': last_conv_or_matmul_quantization, 'pre_post_process_quantization': pre_post_process_quantization, 'add_qdq_pair_to_weight': add_qdq_pair_to_weight, 'optypes_to_exclude_output_quant': optypes_to_exclude_output_quant, 'dedicated_qdq_pair': dedicated_qdq_pair, 'rtn_args': rtn_args, 'awq_args': awq_args, 'gptq_args': gptq_args, 'teq_args': teq_args}
        self._recipes = {}
        # Accept a user value only when its validator passes; otherwise
        # store the validator's default.
        for k in RECIPES.keys():
            if ((k in recipes) and RECIPES[k](recipes[k])):
                self._recipes.update({k: recipes[k]})
            else:
                self._recipes.update({k: RECIPES[k]()})

    # --- accuracy_criterion ---
    def accuracy_criterion(self):
        return self._accuracy_criterion
    _criterion.setter  # NOTE(review): remnant of ``@accuracy_criterion.setter``

    def accuracy_criterion(self, accuracy_criterion):
        if _check_value('accuracy_criterion', accuracy_criterion, AccuracyCriterion):
            self._accuracy_criterion = accuracy_criterion

    # --- tuning_criterion ---
    def tuning_criterion(self):
        return self._tuning_criterion
    _criterion.setter  # NOTE(review): remnant of ``@tuning_criterion.setter``

    def tuning_criterion(self, tuning_criterion):
        if _check_value('tuning_criterion', tuning_criterion, TuningCriterion):
            self._tuning_criterion = tuning_criterion

    # --- excluded_precisions: also refreshes the bf16 flag ---
    def excluded_precisions(self):
        return self._excluded_precisions
    _precisions.setter  # NOTE(review): remnant of ``@excluded_precisions.setter``

    def excluded_precisions(self, excluded_precisions):
        if _check_value('excluded_precisions', excluded_precisions, str, ['bf16', 'fp16']):
            self._excluded_precisions = excluded_precisions
            self._use_bf16 = ('bf16' not in excluded_precisions)

    # --- quant_level (no validation) ---
    def quant_level(self):
        return self._quant_level
    _level.setter  # NOTE(review): remnant of ``@quant_level.setter``

    def quant_level(self, quant_level):
        self._quant_level = quant_level

    # --- reduce_range: None means "auto-detect" ---
    def reduce_range(self):
        return self._reduce_range
    _range.setter  # NOTE(review): remnant of ``@reduce_range.setter``

    def reduce_range(self, reduce_range):
        if ((reduce_range is None) or _check_value('reduce_range', reduce_range, bool)):
            self._reduce_range = reduce_range

    # --- op_name_dict: schema-validated per-op-name overrides ---
    def op_name_dict(self):
        return self._op_name_dict
    _name_dict.setter  # NOTE(review): remnant of ``@op_name_dict.setter``

    def op_name_dict(self, op_name_dict):
        if (op_name_dict is None):
            self._op_name_dict = op_name_dict
        elif isinstance(op_name_dict, dict):
            for (k, v) in op_name_dict.items():
                v = _list_wrapper(v)
                ops_schema.validate(v)
            self._op_name_dict = op_name_dict
        else:
            assert False, 'Type of op_name_dict should be dict but not {}, '.format(type(op_name_dict))

    # --- op_type_dict: schema-validated per-op-type overrides ---
    def op_type_dict(self):
        return self._op_type_dict
    _type_dict.setter  # NOTE(review): remnant of ``@op_type_dict.setter``

    def op_type_dict(self, op_type_dict):
        if (op_type_dict is None):
            self._op_type_dict = op_type_dict
        elif isinstance(op_type_dict, dict):
            for (k, v) in op_type_dict.items():
                v = _list_wrapper(v)
                ops_schema.validate(v)
            self._op_type_dict = op_type_dict
        else:
            assert False, 'Type of op_type_dict should be dict but not {}'.format(type(op_type_dict))

    # --- calibration_sampling_size: normalized to a list of ints ---
    def calibration_sampling_size(self):
        return self._calibration_sampling_size
    _sampling_size.setter  # NOTE(review): remnant of ``@calibration_sampling_size.setter``

    def calibration_sampling_size(self, sampling_size):
        if _check_value('calibration_sampling_size', sampling_size, int):
            if isinstance(sampling_size, int):
                sampling_size = [sampling_size]
            self._calibration_sampling_size = sampling_size

    # --- device ---
    def device(self):
        return self._device

    def device(self, device):
        if _check_value('device', device, str, ['cpu', 'gpu', 'npu', 'xpu']):
            self._device = device

    # --- quant_format ---
    def quant_format(self):
        return self._quant_format
    _format.setter  # NOTE(review): remnant of ``@quant_format.setter``

    def quant_format(self, quant_format):
        if _check_value('quant_format', quant_format, str, ['default', 'QDQ', 'QOperator']):
            self._quant_format = quant_format

    # --- backend ---
    def backend(self):
        return self._backend

    def backend(self, backend):
        if _check_value('backend', backend, str, ['default', 'itex', 'ipex', 'onnxrt_trt_ep', 'onnxrt_cuda_ep', 'onnxrt_dnnl_ep', 'onnxrt_dml_ep']):
            self._backend = backend

    # --- outputs / inputs ---
    def outputs(self):
        return self._outputs

    def outputs(self, outputs):
        if _check_value('outputs', outputs, str):
            self._outputs = outputs

    def inputs(self):
        return self._inputs

    def inputs(self, inputs):
        if _check_value('inputs', inputs, str):
            self._inputs = inputs

    # --- framework (no validation) ---
    def framework(self):
        return self._framework

    def framework(self, framework):
        self._framework = framework

    # --- example_inputs ---
    def example_inputs(self):
        return self._example_inputs
    _inputs.setter  # NOTE(review): remnant of ``@example_inputs.setter``

    def example_inputs(self, example_inputs):
        self._example_inputs = example_inputs
def densenet169(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> DenseNet:
    """Build torchvision's DenseNet-169 and wrap it in the project's
    ``DenseNet`` adapter.

    Args:
        pretrained: load ImageNet-pretrained weights.
        progress: show a download progress bar.
        **kwargs: forwarded to ``torchvision.models.densenet169``.
    """
    backbone = torchvision.models.densenet169(pretrained, progress, **kwargs)
    return DenseNet(backbone)
def _reload_meta_parameter(module, tensor_name, device, value=None, ckpt_name=None):
    """Re-materialize a (possibly meta-device) parameter or buffer on ``device``.

    Walks a dotted ``tensor_name`` down to the owning submodule, then replaces
    the named parameter/buffer in place with ``value`` (or the old tensor
    moved to ``device`` when no value is given).

    Args:
        module: root ``torch.nn.Module`` to modify in place.
        tensor_name: possibly dotted name, e.g. ``"encoder.layer.0.weight"``.
        device: target device for the new tensor.
        value: optional replacement data; required when the old tensor lives
            on the ``meta`` device (meta tensors hold no data to copy).
        ckpt_name: optional checkpoint name attached to the rebuilt parameter
            as a ``checkpoint_name`` attribute.

    Raises:
        ValueError: on a bad dotted path, an unknown tensor name, or a
            meta-device tensor with no ``value`` supplied.
    """
    # Resolve the dotted path so ``module``/``tensor_name`` address the leaf.
    if ('.' in tensor_name):
        splits = tensor_name.split('.')
        for split in splits[:(- 1)]:
            new_module = getattr(module, split)
            if (new_module is None):
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[(- 1)]
    if ((tensor_name not in module._parameters) and (tensor_name not in module._buffers)):
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = (tensor_name in module._buffers)
    old_value = getattr(module, tensor_name)
    # A meta tensor has no storage, so moving it off-meta needs real data.
    if ((old_value.device == torch.device('meta')) and (device not in ['meta', torch.device('meta')]) and (value is None)):
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    with torch.no_grad():
        if (value is None):
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        elif ((value is not None) or (torch.device(device) != module._parameters[tensor_name].device)):
            # Rebuild using the original Parameter subclass and its extra
            # attributes so custom parameter types survive the move; a stale
            # checkpoint_name is dropped before reconstruction.
            param_cls = type(module._parameters[tensor_name])
            kwargs = module._parameters[tensor_name].__dict__
            if ('checkpoint_name' in kwargs):
                del kwargs['checkpoint_name']
            new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)
            if (ckpt_name is not None):
                setattr(new_value, 'checkpoint_name', ckpt_name)
            module._parameters[tensor_name] = new_value
class DistMult(torch.nn.Module):
    """DistMult knowledge-graph scorer: score(h, r, t) = <E[h] * R[r], E[t]>,
    squashed through a sigmoid over all candidate tail entities."""

    def __init__(self, d, d1, d2, **kwargs):
        super(DistMult, self).__init__()
        # Entity / relation embedding tables (index 0 reserved for padding).
        self.E = torch.nn.Embedding(len(d.entities), d1, padding_idx=0)
        self.R = torch.nn.Embedding(len(d.relations), d2, padding_idx=0)
        self.inp_drop = torch.nn.Dropout(kwargs['input_dropout'])
        self.loss = torch.nn.BCELoss()
        self.bn0 = torch.nn.BatchNorm1d(d1)

    def init(self):
        """Xavier-initialize both embedding tables in place."""
        for table in (self.E, self.R):
            xavier_normal_(table.weight.data)

    def forward(self, e1_idx, r_idx):
        # Normalize + regularize the head embedding, then score against
        # every entity embedding at once.
        head = self.inp_drop(self.bn0(self.E(e1_idx)))
        rel = self.R(r_idx)
        scores = (head * rel).mm(self.E.weight.transpose(1, 0))
        return torch.sigmoid(scores)
def resolve_backend_name(name, backends, deprecated, aliased):
    """Resolve a user-supplied backend name against the available backends.

    Deprecated names take precedence over aliases; an alias may map to an
    ordered preference list, in which case the first available entry wins.

    Args:
        name: name requested by the caller.
        backends: backend objects exposing a ``name()`` method.
        deprecated: mapping of deprecated name -> replacement.
        aliased: mapping of alias -> name or preference list of names.

    Returns:
        The resolved, available backend name.

    Raises:
        LookupError: when the name resolves to nothing available.
    """
    available = [backend.name() for backend in backends]
    candidate = deprecated.get(name, aliased.get(name, name))
    if isinstance(candidate, list):
        # Preference list: first entry actually present wins; '' if none.
        candidate = next((option for option in candidate if (option in available)), '')
    if (candidate not in available):
        raise LookupError("backend '{}' not found.".format(name))
    if (name in deprecated):
        logger.warning("WARNING: '%s' is deprecated. Use '%s'.", name, candidate)
    return candidate
class RecurrentDecoder(Decoder):
    """RNN decoder over a latent sequence ``z``, with global attention over
    the source encoding, producing per-position vocabulary logits via the
    base class's ``readout`` (and ``criterion`` for the loss)."""

    def __init__(self, vocab_size, latent_dim, rnn_mode, num_layers, hidden_size, bidirectional=True, dropout=0.0, dropword=0.0, label_smoothing=0.0, _shared_weight=None):
        # rnn_mode selects the recurrent cell; hidden_size must be even so a
        # bidirectional RNN can split it across directions.
        super(RecurrentDecoder, self).__init__(vocab_size, latent_dim, label_smoothing=label_smoothing, _shared_weight=_shared_weight)
        if (rnn_mode == 'RNN'):
            RNN = nn.RNN
        elif (rnn_mode == 'LSTM'):
            RNN = nn.LSTM
        elif (rnn_mode == 'GRU'):
            RNN = nn.GRU
        else:
            raise ValueError(('Unknown RNN mode: %s' % rnn_mode))
        assert ((hidden_size % 2) == 0)
        if bidirectional:
            self.rnn = RNN(latent_dim, (hidden_size // 2), num_layers=num_layers, batch_first=True, bidirectional=True)
        else:
            self.rnn = RNN(latent_dim, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=False)
        self.attn = GlobalAttention(latent_dim, hidden_size, latent_dim, hidden_features=hidden_size)
        # Projects [attention context ; rnn state] back to latent_dim.
        self.ctx_proj = nn.Sequential(nn.Linear((latent_dim + hidden_size), latent_dim), nn.ELU())
        self.dropout = dropout
        # "Word dropout": 2D dropout applied to z — presumably zeroing whole
        # feature maps per position; confirm intended axis upstream.
        self.dropout2d = (nn.Dropout2d(dropword) if (dropword > 0.0) else None)

    def forward(self, z, mask, src, src_mask):
        # z: (batch, len, latent_dim); mask marks valid target positions.
        lengths = mask.sum(dim=1).long()
        if (self.dropout2d is not None):
            z = self.dropout2d(z)
        # Pack so the RNN skips padding; restore full length afterwards.
        packed_z = pack_padded_sequence(z, lengths, batch_first=True, enforce_sorted=False)
        (packed_enc, _) = self.rnn(packed_z)
        (enc, _) = pad_packed_sequence(packed_enc, batch_first=True, total_length=mask.size(1))
        # Attend over the source; masked source positions are excluded.
        ctx = self.attn(enc, src, key_mask=src_mask.eq(0))
        ctx = torch.cat([ctx, enc], dim=2)
        ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
        return self.readout(ctx)

    def init(self, z, mask, src, src_mask, init_scale=1.0):
        # Data-dependent init hook: a plain no-grad forward pass.
        with torch.no_grad():
            return self(z, mask, src, src_mask)

    def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Greedy decode: per-position argmax tokens and the summed
        log-probability of the chosen sequence (padding zeroed out)."""
        log_probs = F.log_softmax(self(z, mask, src, src_mask), dim=2)
        (log_probs, dec) = log_probs.max(dim=2)
        dec = (dec * mask.long())
        log_probs = log_probs.mul(mask).sum(dim=1)
        return (dec, log_probs)

    def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
        """Per-sentence reconstruction loss (criterion expects class dim 1,
        hence the transpose); padding positions are masked out."""
        logits = self(z, mask, src, src_mask).transpose(1, 2)
        loss = self.criterion(logits, target).mul(mask)
        return loss.sum(dim=1)

    def from_params(cls, params: Dict) -> 'RecurrentDecoder':
        # NOTE(review): takes ``cls`` but carries no @classmethod decorator —
        # likely stripped during extraction; confirm against upstream.
        return RecurrentDecoder(**params)
def _add_property_function(func_name):
    """Install method ``func_name`` on ``CUDALongTensor``, forwarding the
    call (with all arguments) to the wrapped ``_tensor`` attribute."""

    def forwarder(self, *args, **kwargs):
        return getattr(self._tensor, func_name)(*args, **kwargs)

    setattr(CUDALongTensor, func_name, forwarder)
class CNN(nn.Module):
    """Small convolutional feature extractor for persistence-image windows;
    produces flat ``(N, dim_out)`` feature vectors."""

    def __init__(self, dim_out):
        super(CNN, self).__init__()
        self.dim_out = dim_out
        # Two strided conv+ReLU+maxpool stages: 1 -> 8 -> dim_out channels.
        stages = [
            nn.Conv2d(1, 8, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(8, dim_out, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*stages)
        # Final spatial squeeze before flattening.
        self.maxpool = nn.MaxPool2d(5, 5)

    def forward(self, zigzag_window_PI):
        pooled = self.maxpool(self.features(zigzag_window_PI))
        return pooled.view((- 1), self.dim_out)
class Vocab(object):
    """Token vocabulary built from a ``token<TAB>count`` file.

    Tokens with count >= ``min_occur_cnt`` enter the vocabulary after the
    reserved ``PAD``/``UNK`` (and optional ``specials``) entries;
    ``self.coverage`` records the fraction of corpus tokens covered by the
    retained vocabulary.
    """

    def __init__(self, filename, min_occur_cnt, specials=None):
        idx2token = ([PAD, UNK] + (specials if (specials is not None) else []))
        self._priority = dict()
        num_tot_tokens = 0
        num_vocab_tokens = 0
        # FIX: use a context manager (the original leaked the file handle)
        # and skip malformed lines entirely — the original printed them but
        # then fell through, reusing the previous line's count (or raising
        # NameError if the first line was malformed).
        with open(filename) as fh:
            for line in fh:
                try:
                    (token, cnt) = line.rstrip('\n').split('\t')
                    cnt = int(cnt)
                except ValueError:
                    print(line)
                    continue
                num_tot_tokens += cnt
                if (cnt >= min_occur_cnt):
                    idx2token.append(token)
                    num_vocab_tokens += cnt
                self._priority[token] = cnt
        # FIX: guard against an empty/unparseable file (division by zero).
        self.coverage = ((num_vocab_tokens / num_tot_tokens) if num_tot_tokens else 0.0)
        self._token2idx = dict(zip(idx2token, range(len(idx2token))))
        self._idx2token = idx2token
        self._padding_idx = self._token2idx[PAD]
        self._unk_idx = self._token2idx[UNK]

    def priority(self, x):
        """Corpus count of token ``x`` (0 if unseen)."""
        return self._priority.get(x, 0)

    def size(self):
        """Number of entries, including PAD/UNK/specials."""
        return len(self._idx2token)

    def unk_idx(self):
        """Index of the UNK token."""
        return self._unk_idx

    def padding_idx(self):
        """Index of the PAD token."""
        return self._padding_idx

    def idx2token(self, x):
        """Map an index (or nested list of indices) to token(s)."""
        if isinstance(x, list):
            return [self.idx2token(i) for i in x]
        return self._idx2token[x]

    def token2idx(self, x):
        """Map a token (or nested list of tokens) to index(es); unknown
        tokens map to the UNK index."""
        if isinstance(x, list):
            return [self.token2idx(i) for i in x]
        # FIX: default to the stored integer index — the original passed the
        # *bound method* ``self.unk_idx`` as the default, so unknown tokens
        # returned a method object instead of an int (these accessors were
        # probably @property upstream; decorators appear stripped here).
        return self._token2idx.get(x, self._unk_idx)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 sharable convolution without bias, optionally strided."""
    conv_kwargs = dict(kernel_size=1, stride=stride, bias=False)
    return nl.SharableConv2d(in_planes, out_planes, **conv_kwargs)
def F1_score(pred_prob, true_prob):
    """F1 and related metrics, treating label 0 as the positive class.

    A sample is predicted "positive" when ``pred_prob[i] <= 0.5`` (i.e. the
    model leans toward class 0).

    Args:
        pred_prob: per-sample predicted probability of class 1.
        true_prob: per-sample ground-truth labels (0 or 1).

    Returns:
        ``(f1_score, (precision, recall, accuracy, TP/N, FP/N, TN/N, FN/N))``.

    All ratio denominators are guarded, so degenerate inputs (e.g. no
    positive-class samples) yield 0 instead of raising ZeroDivisionError
    (which the original did for recall and for empty input).
    """
    (TP, FP, FN, TN) = (0, 0, 0, 0)
    for (i, label) in enumerate(true_prob):
        predicted_positive = (pred_prob[i] <= 0.5)
        if ((label == 0) and predicted_positive):
            TP += 1
        elif ((label == 0) and (not predicted_positive)):
            FN += 1
        elif ((label == 1) and predicted_positive):
            FP += 1
        elif ((label == 1) and (not predicted_positive)):
            TN += 1
    total_num = len(true_prob)
    # Sanity check kept from the original: every sample must have landed in
    # exactly one bucket (labels other than 0/1 would be dropped silently).
    assert ((((TP + TN) + FP) + FN) == total_num)
    precision = ((TP / (TP + FP)) if ((TP + FP) > 0) else 0)
    # FIX: guard recall the same way precision already was.
    recall = ((TP / (TP + FN)) if ((TP + FN) > 0) else 0)
    # FIX: guard accuracy (and the per-class rates) for empty input.
    accu = (((TP + TN) / total_num) if (total_num > 0) else 0)
    if ((precision + recall) == 0):
        f1_score = 0
    else:
        f1_score = (((2 * precision) * recall) / (precision + recall))
    if (total_num > 0):
        other_metrics = (precision, recall, accu, (TP / total_num), (FP / total_num), (TN / total_num), (FN / total_num))
    else:
        other_metrics = (precision, recall, accu, 0, 0, 0, 0)
    return (f1_score, other_metrics)
def seed_all(seed):
    """Seed every RNG the project touches (Python, NumPy, PyTorch CPU and
    all CUDA devices) and pin PYTHONHASHSEED for reproducible runs."""
    seed_text = str(seed)
    os.environ['PYTHONHASHSEED'] = seed_text
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def meshgrid(*tensors: Union[(torch.Tensor, List[torch.Tensor])], indexing: Optional[str]=None) -> Tuple[(torch.Tensor, ...)]:
    """Version-portable wrapper around ``torch.meshgrid``.

    torch < 1.10 has no ``indexing`` keyword and always behaves as ``'ij'``,
    so on old versions only ``indexing='ij'`` is accepted.
    """
    if not is_torch_greater_or_equal_than_1_10:
        if (indexing != 'ij'):
            raise ValueError('torch.meshgrid only supports `indexing="ij"` for torch<1.10.')
        return torch.meshgrid(*tensors)
    return torch.meshgrid(*tensors, indexing=indexing)
class Audio():
    """Spectrogram helpers around librosa: mel features, dB-normalized
    magnitude/phase round-trips, and the elementary dB conversions."""

    def __init__(self, hyper_params):
        self.hyper_params = hyper_params
        # Mel filterbank, reused by every get_mel_spec call.
        self.mel_basis_matrix = librosa.filters.mel(sr=hyper_params.sample_rate, n_fft=hyper_params.n_fft, n_mels=hyper_params.embedder_num_mels)

    def get_mel_spec(self, wave):
        """Log10 mel power spectrogram of a waveform."""
        hp = self.hyper_params
        spec = librosa.core.stft(y=wave, n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length, window='hann')
        power = (np.abs(spec) ** 2)
        return np.log10(np.dot(self.mel_basis_matrix, power) + 1e-06)

    def wave2spec(self, wave):
        """Waveform -> (normalized dB magnitude, phase), both time-major."""
        hp = self.hyper_params
        spec = librosa.core.stft(y=wave, n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length)
        magnitude_db = self.normalize(self.amp2db(np.abs(spec)))
        return (magnitude_db.T, np.angle(spec).T)

    def spec2wave(self, spec_db_norm, phase):
        """Inverse of ``wave2spec``: rebuild the complex STFT and invert it."""
        hp = self.hyper_params
        amplitude = self.db2amp(self.denormalize(spec_db_norm.T))
        complex_spec = amplitude * np.exp(1j * phase.T)
        return librosa.core.istft(complex_spec, hop_length=hp.hop_length, win_length=hp.win_length)

    def amp2db(self, mat):
        """Amplitude -> dB, floored at 1e-5 and shifted by ref_level_db."""
        return (20.0 * np.log10(np.maximum(1e-05, mat))) - self.hyper_params.ref_level_db

    def db2amp(self, mat):
        """dB -> amplitude (inverse of ``amp2db``)."""
        return np.power(10.0, (mat + self.hyper_params.ref_level_db) * 0.05)

    def normalize(self, mat):
        """Map dB values in [min_level_db, 0] onto [0, 1] (clipped)."""
        floor = self.hyper_params.min_level_db
        return np.clip((mat - floor) / (- floor), 0.0, 1.0)

    def denormalize(self, mat):
        """Inverse of ``normalize`` (input clipped to [0, 1] first)."""
        floor = self.hyper_params.min_level_db
        return (np.clip(mat, 0.0, 1.0) * (- floor)) + floor
def get_bio_expression(opinion):
    """Turn an opinion's polar expression span(s) into tag updates.

    Each update is ``(begin_char_offset, [label, ...])`` with one
    ``exp-<Polarity>`` label per whitespace token of the span text.
    Malformed opinions (unpackable ``Polar_expression``) yield ``[]``.
    """
    try:
        (text, idxs) = opinion['Polar_expression']
    except TypeError:
        return []
    except ValueError:
        return []
    if (len(text) > 1):
        # Several discontinuous spans: one update per span.
        updates = []
        for (span_text, span_idx) in zip(text, idxs):
            (begin, end) = span_idx.split(':')
            begin = int(begin)
            end = int(end)
            label = 'exp-{0}'.format(opinion['Polarity'])
            tags = [label for _token in span_text.split()]
            updates.append((begin, tags))
        return updates
    else:
        (begin, end) = idxs[0].split(':')
        begin = int(begin)
        end = int(end)
        label = 'exp-{0}'.format(opinion['Polarity'])
        tags = [label for _token in text[0].split()]
        return [(begin, tags)]
def analyze_grid_data(acc_threshold=0.01):
    """Pick, per model, the best hyper-parameter row from the slack-prop
    grid CSV and report its early-stopping statistics.

    A row qualifies when its accuracy-loss column stays below
    ``acc_threshold``; among qualifying rows, the one with the largest
    epoch-saving value wins.  Relies on the module-level ``model_names``,
    ``model_parameter_map`` and ``early_stopping_of_dataset``.
    """
    fh = open('hyperparameter_grid_models_slackprop.csv', 'r')
    grid_data = []
    for line in fh:
        parsed = line.split(',')
        # Columns 1 and 5-9 are numeric; the rest stay as strings
        # (column 0 is the model name).
        parsed[1] = float(parsed[1])
        parsed[5] = float(parsed[5])
        parsed[6] = float(parsed[6])
        parsed[7] = float(parsed[7])
        parsed[8] = float(parsed[8])
        parsed[9] = float(parsed[9])
        grid_data.append(parsed)
    fh.close()
    # Group rows by model name, dropping the name column — note all later
    # indices are therefore shifted down by one relative to the CSV.
    grid_groups = {model: [] for model in model_names}
    for dat in grid_data:
        grid_groups[dat[0]].append(dat[1:])
    print('Model,\tParameters,\tGamma,\tCount,\tNumData,\tLocalMaxima,\tSlackProp,\tAvgStdEpoch,\tAvgASWTEpoch,\tAvgStdAcc,\tAvgASWTAcc')
    for model in grid_groups:
        # Select the qualifying row with the largest epoch-saving value.
        epoc_max = (- 100000)
        data_max = None
        for dat in grid_groups[model]:
            if ((dat[6] < acc_threshold) and (dat[5] > epoc_max)):
                data_max = dat
                epoc_max = dat[5]
        if (data_max is not None):
            # Re-run early stopping with the winning hyper-parameters to get
            # the averaged epoch/accuracy statistics for the report row.
            (avg_standard_epochs, avg_new_epochs, avg_standard_acc, avg_new_acc) = early_stopping_of_dataset(gamma=float(data_max[0]), model=model, num_data=int(data_max[2]), count=int(data_max[1]), local_maxima=int(data_max[3]), slack_prop=float(data_max[4]), dataset='')
            print(model, ',', model_parameter_map[model], ',', str(data_max[0]), ',', str(data_max[1]), ',', str(data_max[2]), ',', str(data_max[3]), ',', str(data_max[4]), ',', str(avg_standard_epochs), ',', str(avg_new_epochs), ',', str(avg_standard_acc), ',', str(avg_new_acc))
        else:
            print(model, ',NoSolution')
def main_lower(x_minus, x_plus, y_minus, y_plus, print_info=True):
    """Fit a lower bounding plane ``a*x + b*y + c`` over the given box.

    Pipeline: find a feasible starting point, optimize it with
    ``train_lower`` (fixed learning rates, 200 iterations), then tighten the
    offset via ``second.lower_plane`` and return the detached coefficients.

    Returns:
        Tuple ``(a, b, c)`` of detached tensors describing the plane.
    """
    (u0, v0, ka0, kb0) = find_initial_feasible_solution(x_minus, x_plus, y_minus, y_plus)
    (x, y, ka, kb, a, b, c, v) = train_lower(u0, v0, ka0, kb0, x_minus, x_plus, y_minus, y_plus, lr_x=0.01, lr_k=0.01, max_iter=200, print_info=print_info)
    # NOTE(review): x_plus and y_minus are zeroed before the adjustment
    # step — presumably ``second.lower_plane`` only needs the other two
    # corners here; confirm against its contract.
    increase = second.lower_plane(a, b, c, x, y, x_minus, (x_plus * 0), (y_minus * 0), y_plus, print_info=print_info)
    c = (c + increase)
    return (a.detach(), b.detach(), c.detach())
def define_model_inputs_outputs(num_classes, img_size):
    """Build the Keras input/output tensors for a frozen-ResNet50 classifier.

    Returns:
        ``(inputs, outputs)`` suitable for ``tf.keras.Model(inputs, outputs)``.
    """
    inputs = tf.keras.layers.Input(shape=(img_size, img_size, 3))
    # Cast + ResNet50-specific preprocessing of the raw image batch.
    preprocessed = tf.keras.applications.resnet50.preprocess_input(tf.cast(inputs, tf.float32))
    # ImageNet-pretrained backbone, kept frozen.
    backbone = ResNet50(weights='imagenet')
    backbone.trainable = False
    hidden = layers.Dense(512, activation='relu')(backbone(preprocessed))
    outputs = layers.Dense(num_classes, activation='softmax')(hidden)
    return (inputs, outputs)
def dfs(current_id, node_dict, id_visited):
    """Depth-first walk from ``current_id`` that records reverse edges.

    For every successor of an unvisited node, appends the node's id to the
    successor's ``'pre_nodes'`` list (creating it on first use), marks the
    node visited, then recurses into each successor.  Empty-string ids act
    as sentinels and are skipped.
    """
    successors = node_dict[current_id]['next_nodes']
    if not successors:
        return
    if not id_visited[current_id]:
        # First pass: register this node as a predecessor of each successor.
        for succ_id in successors:
            if succ_id == '':
                continue
            succ = node_dict[succ_id]
            if 'pre_nodes' not in succ:
                succ['pre_nodes'] = [current_id]
            else:
                succ['pre_nodes'].append(current_id)
        id_visited[current_id] = True
        # Second pass: descend into each real successor.
        for succ_id in successors:
            if succ_id != '':
                dfs(succ_id, node_dict, id_visited)
def to_sparse(hg, weight_nodes='const', weight_edges='log'):
    """Flatten hypergraph ``hg`` into CSR-like arrays inside its weight info.

    Adds ``'hyperedges'`` (all edge members concatenated in edge-list order)
    and ``'hyperedge_indices'`` (start offsets per edge, plus a final
    terminator) to the dict returned by ``hg.compute_weights`` and returns it.
    """
    winfo = hg.compute_weights(weight_nodes=weight_nodes, weight_edges=weight_edges)
    members = []
    offsets = []
    for edge_id in winfo['edge_list']:
        offsets.append(len(members))
        members.extend(hg.edges[edge_id])
    offsets.append(len(members))
    winfo['hyperedge_indices'] = offsets
    winfo['hyperedges'] = members
    return winfo
def GetArgs():
    """Parse and validate the command-line arguments for sub-segment
    extraction (ctm + vocab inputs, subsegments + text outputs).

    The raw invocation is echoed to stderr for logging, and the parsed
    namespace is post-processed by the module-level ``CheckArgs`` helper.
    """
    # NOTE(review): several help/description strings have words run
    # together ("fileto", "sub-utterancesare") — runtime text is left
    # byte-identical here; fix upstream if desired.
    parser = argparse.ArgumentParser(description='The purpose of this script is to use a ctm and a vocab fileto extract sub-utterances and a sub-segmentation. Extracted sub-utterancesare all the strings of consecutive in-vocab words from the ctmsurrounded by an out-of-vocab word at each end if present.', epilog='e.g. steps/dict/internal/get_subsegments.py exp/tri3_lex_0.4_work/phonetic_decoding/word.ctm \\exp/tri3_lex_0.4_work/learn_vocab.txt exp/tri3_lex_0.4_work/resegmentation/subsegments \\exp/tri3_lex_0.4_work/resegmentation/textSee steps/dict/learn_lexicon_greedy.sh for an example.')
    # Positional arguments: two inputs (ctm, vocab), two outputs.
    parser.add_argument('ctm', metavar='<ctm>', type=str, help='Input ctm file.each line must be <utt-id> <chanel> <start-time> <duration> <word>')
    parser.add_argument('vocab', metavar='<vocab>', type=str, help='Vocab file.each line must be <word>')
    parser.add_argument('subsegment', metavar='<subsegtment>', type=str, help='Subsegment file. Each line is in format:<new-utt> <old-utt> <start-time-within-old-utt> <end-time-within-old-utt>')
    parser.add_argument('text', metavar='<text>', type=str, help='Text file. Each line is in format: <new-utt> <word1> <word2> ... <wordN>.')
    # Echo the invocation so it shows up in run logs.
    print(' '.join(sys.argv), file=sys.stderr)
    args = parser.parse_args()
    args = CheckArgs(args)
    return args
# NOTE(review): the bare ``_grad()`` below looks like the tail of a
# ``@torch.no_grad()`` decorator mangled during extraction — confirm
# against the upstream source.
_grad()
def rescore_with_n_best_list(lats: k2.Fsa, G: k2.Fsa, num_paths: int) -> k2.Fsa:
    """Rescore a decoding lattice with an n-gram LM over an n-best list.

    Samples ``num_paths`` random paths per supervision from ``lats``,
    deduplicates their word sequences, scores each unique sequence with the
    acoustic scores already on the lattice plus LM scores from ``G``, and
    returns the best-scoring path of each supervision as a linear FSA.

    Args:
        lats: 3-axis lattice with ``aux_labels`` (word ids) and ``lm_scores``.
        G: single LM FSA on the same device, without ``aux_labels``.
        num_paths: number of random paths to draw.

    Returns:
        Linear FSAs (one per supervision) holding the rescored best paths,
        with word-level ``aux_labels`` attached.
    """
    device = lats.device
    assert (len(lats.shape) == 3)
    assert hasattr(lats, 'aux_labels')
    assert hasattr(lats, 'lm_scores')
    assert (G.shape == (1, None, None))
    assert (G.device == device)
    assert (hasattr(G, 'aux_labels') is False)
    paths = k2.random_paths(lats, num_paths=num_paths, use_double_scores=True)
    # Word sequence along each sampled path; drop epsilons/padding (<= 0).
    word_seqs = k2.index(lats.aux_labels, paths)
    word_seqs = k2.ragged.remove_values_leq(word_seqs, 0)
    # Deduplicate sequences; keep the mapping back to the sampled paths.
    (unique_word_seqs, num_repeats, new2old) = k2.ragged.unique_sequences(word_seqs, need_num_repeats=True, need_new2old_indexes=True)
    seq_to_path_shape = k2.ragged.get_layer(unique_word_seqs.shape(), 0)
    path_to_seq_map = seq_to_path_shape.row_ids(1)
    unique_word_seqs = k2.ragged.remove_axis(unique_word_seqs, 0)
    # Linear FSA per unique word sequence, with self-loops so it can be
    # intersected against epsilon-bearing automata.
    word_fsas = k2.linear_fsa(unique_word_seqs)
    word_fsas_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsas)
    am_scores = compute_am_scores(lats, word_fsas_with_epsilon_loops, path_to_seq_map)
    # Single G on side a -> every word FSA maps to index 0.
    b_to_a_map = torch.zeros_like(path_to_seq_map)
    lm_path_lats = _intersect_device(G, word_fsas_with_epsilon_loops, b_to_a_map=b_to_a_map, sorted_match_a=True)
    # top_sort/connect run on CPU, hence the round-trip.
    lm_path_lats = k2.top_sort(k2.connect(lm_path_lats.to('cpu'))).to(device)
    lm_scores = lm_path_lats.get_tot_scores(True, True)
    tot_scores = (am_scores + lm_scores)
    # Per supervision, pick the path with the best combined AM+LM score.
    ragged_tot_scores = k2.RaggedFloat(seq_to_path_shape, tot_scores.to(torch.float32))
    argmax_indexes = k2.ragged.argmax_per_sublist(ragged_tot_scores)
    best_path_indexes = k2.index(new2old, argmax_indexes)
    paths = k2.ragged.remove_axis(paths, 0)
    best_paths = k2.index(paths, best_path_indexes)
    # Rebuild linear FSAs from the winning arc sequences (-1 = final arc).
    labels = k2.index(lats.labels.contiguous(), best_paths)
    labels = k2.ragged.remove_values_eq(labels, (- 1))
    aux_labels = k2.index(lats.aux_labels, best_paths.values())
    best_path_fsas = k2.linear_fsa(labels)
    best_path_fsas.aux_labels = aux_labels
    return best_path_fsas
class MappingRule(object):
    """Abstract interface for key/value mapping rules: ``matches`` decides
    whether a rule handles a key, ``apply`` performs the transformation."""

    def matches(self, key):
        # Subclasses must override: return True if this rule handles ``key``.
        raise NotImplementedError()

    def apply(self, key, value):
        # Subclasses must override: transform the ``(key, value)`` pair.
        raise NotImplementedError()
class LRPolicy():
    """Learning-rate schedule helper supporting several decay policies
    ('multi_step', 'step', 'linear', 'exp', 'inv', 'const').

    Usage: construct, call ``set_params`` once with the policy-specific
    parameter dict, then query ``get_lr(epoch)`` every epoch.
    """

    def __init__(self, lr, n_epochs, lr_policy='multi_step'):
        # lr: base learning rate; n_epochs: total schedule length.
        self.lr_policy = lr_policy
        self.params_dict = {}
        self.n_epochs = n_epochs
        self.base_lr = lr
        self.lr = lr

    def set_params(self, params_dict=None):
        """Derive and cache the per-policy constants from ``params_dict``.

        Expected keys per policy:
          * multi_step: 'decay_rate', 'step' (epoch milestones; values <= 1
            are treated as fractions of n_epochs)
          * step: 'end_lr', 'step' (and 'decay_rate' when end_lr == -1)
          * linear: 'end_lr', 'step'
          * exp: 'end_lr'
          * inv: 'end_lr', 'power'
          * const: ignored
        """
        if (self.lr_policy == 'multi_step'):
            self.params_dict['decay_rate'] = params_dict['decay_rate']
            self.params_dict['step'] = sorted(params_dict['step'])
            # Milestones given as fractions of the run -> absolute epochs.
            if (max(self.params_dict['step']) <= 1):
                new_step_list = []
                for ratio in self.params_dict['step']:
                    new_step_list.append(int((self.n_epochs * ratio)))
                self.params_dict['step'] = new_step_list
        elif (self.lr_policy == 'step'):
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['step'] = params_dict['step']
            max_iter = math.floor(((self.n_epochs - 1.0) / self.params_dict['step']))
            # end_lr == -1 means "use the explicit decay rate"; otherwise
            # solve for the rate that reaches end_lr by the final step.
            if (self.params_dict['end_lr'] == (- 1)):
                self.params_dict['gamma'] = params_dict['decay_rate']
            else:
                self.params_dict['gamma'] = math.pow((self.params_dict['end_lr'] / self.base_lr), (1.0 / max_iter))
        elif (self.lr_policy == 'linear'):
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['step'] = params_dict['step']
        elif (self.lr_policy == 'exp'):
            self.params_dict['end_lr'] = params_dict['end_lr']
            # Geometric decay reaching end_lr at the last epoch.
            self.params_dict['gamma'] = math.pow((self.params_dict['end_lr'] / self.base_lr), (1.0 / (self.n_epochs - 1)))
        elif (self.lr_policy == 'inv'):
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['power'] = params_dict['power']
            # Inverse-decay coefficient reaching end_lr at the last epoch.
            self.params_dict['gamma'] = ((math.pow((self.base_lr / self.params_dict['end_lr']), (1.0 / self.params_dict['power'])) - 1.0) / (self.n_epochs - 1.0))
        elif (self.lr_policy == 'const'):
            self.params_dict = None
        else:
            assert False, ('invalid lr_policy' + self.lr_policy)

    def get_lr(self, epoch):
        """Return (and cache in ``self.lr``) the learning rate for ``epoch``."""
        if (self.lr_policy == 'multi_step'):
            # Decay once per milestone already passed.
            gamma = 0
            for step in self.params_dict['step']:
                if ((epoch + 1.0) > step):
                    gamma += 1
            lr = (self.base_lr * math.pow(self.params_dict['decay_rate'], gamma))
        elif (self.lr_policy == 'step'):
            lr = (self.base_lr * math.pow(self.params_dict['gamma'], math.floor(((epoch * 1.0) / self.params_dict['step']))))
        elif (self.lr_policy == 'linear'):
            # Piecewise-linear interpolation from base_lr toward end_lr.
            k = ((self.params_dict['end_lr'] - self.base_lr) / math.ceil((self.n_epochs / self.params_dict['step'])))
            lr = ((k * math.ceil(((epoch + 1) / self.params_dict['step']))) + self.base_lr)
        elif (self.lr_policy == 'inv'):
            lr = (self.base_lr * math.pow((1 + (self.params_dict['gamma'] * epoch)), (- self.params_dict['power'])))
        elif (self.lr_policy == 'exp'):
            lr = (self.base_lr * math.pow(self.params_dict['gamma'], epoch))
        elif (self.lr_policy == 'const'):
            lr = self.base_lr
        else:
            assert False, ('invalid lr_policy: ' + self.lr_policy)
        self.lr = lr
        return lr
class AutoModelForImageClassification(_BaseAutoModelClass):
    """Auto-class that instantiates the architecture-specific
    image-classification model registered for a given config."""
    # Mapping consulted by the base class's from_config/from_pretrained.
    _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
def vgg11_bn(pretrained=False, dataset_history=None, dataset2num_classes=None, **kwargs):
    """VGG-11 with batch normalization (configuration 'A').

    Args:
        pretrained: load ImageNet weights (and skip random init).
        dataset_history: list of datasets the model has been trained on.
        dataset2num_classes: mapping dataset name -> number of classes.
        **kwargs: forwarded to the ``VGG`` constructor.

    Returns:
        The constructed ``VGG`` model.
    """
    # FIX: the original used mutable default arguments ([], {}), which are
    # shared across calls and silently accumulate state; create fresh
    # containers per call instead (backward-compatible for all callers that
    # pass their own values).
    if dataset_history is None:
        dataset_history = []
    if dataset2num_classes is None:
        dataset2num_classes = {}
    if pretrained:
        # Pretrained weights replace the random init entirely.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), dataset_history, dataset2num_classes, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    return model
class CosineDistance(Layer):
    """Thin Python wrapper for the BigDL ``CosineDistance`` layer — the
    actual computation lives JVM-side; this class only forwards
    construction by name through the base ``Layer``."""

    def __init__(self, bigdl_type='float'):
        # No extra constructor args: the JVM layer is created by class name.
        super(CosineDistance, self).__init__(None, bigdl_type)
def get_time_str(trycnt=0):
    """Return a timestamp string suffixed with the zero-padded retry count.

    NOTE(review): this returns a HARD-CODED timestamp — it looks like a
    leftover debug/pinning stub that shadowed the real implementation
    (``time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())``), which sat
    unreachable after the first ``return`` and has been removed as dead
    code.  Restore the strftime version once the pinned value is no longer
    needed.
    """
    return ('2023-06-01-12-00-' + str(trycnt).zfill(2))
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'):
    """Balanced L1 loss from Libra R-CNN, computed elementwise.

    NOTE(review): the mangled tokens ``(derivate=True, coderize=True)
    _loss`` that preceded this def are almost certainly remnants of
    ``@mmcv.jit(derivate=True, coderize=True)`` and ``@weighted_loss``
    decorators stripped during extraction; they were invalid syntax and
    have been removed.  ``reduction`` is accepted but unused here —
    upstream it is consumed by the @weighted_loss wrapper.

    Args:
        pred / target: tensors of identical shape.
        beta: |diff| threshold between the smooth and linear regimes.
        alpha / gamma: balancing parameters of the loss.
        reduction: kept for interface compatibility (see note above).

    Returns:
        Elementwise loss tensor shaped like ``pred`` (a zero scalar-like
        value when ``target`` is empty, keeping the graph connected).
    """
    assert (beta > 0)
    if (target.numel() == 0):
        return (pred.sum() * 0)
    assert (pred.size() == target.size())
    diff = torch.abs((pred - target))
    # Continuity constant: makes the two regimes meet smoothly at diff==beta.
    b = ((np.e ** (gamma / alpha)) - 1)
    loss = torch.where((diff < beta), ((((alpha / b) * ((b * diff) + 1)) * torch.log((((b * diff) / beta) + 1))) - (alpha * diff)), (((gamma * diff) + (gamma / b)) - (alpha * beta)))
    return loss
def train(args, train_loader, num_train, model, criterion, optimizer):
    """Run one training epoch and return ``(avg_loss, avg_top1, avg_top5)``.

    Progress is rendered in place on stdout by backspacing over the previous
    status line every ``args.log_interval`` steps; an epoch summary goes
    through the module-level ``logging`` helper.  ``create_graph`` is enabled
    for AdaHessian, which needs second-order gradients in backward().
    """
    model.train()
    start_time = time.time()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    num_back = 0  # length of the status string currently on screen
    device = args.device
    create_graph = (args.opt == 'adahessian')
    if args.cuda:
        torch.cuda.empty_cache()
    for (step, (data, y)) in enumerate(train_loader):
        optimizer.zero_grad()
        data = data.to(device, non_blocking=True)
        y = y.to(device, non_blocking=True)
        outputs = model(data)
        loss = criterion(outputs, y)
        (prec1, prec5) = accuracy(outputs.data, y.data, topk=(1, 5))
        # Running averages weighted by batch size.
        losses.update(loss.item(), data.size(0))
        top1.update(prec1.item(), data.size(0))
        top5.update(prec5.item(), data.size(0))
        loss.backward(create_graph=create_graph)
        optimizer.step()
        if ((step % args.log_interval) == 0):
            # Erase the previous in-place status line (\x08 = backspace),
            # then rewrite it with the current running metrics.
            sys.stdout.write(('\x08' * num_back))
            sys.stdout.write((' ' * num_back))
            sys.stdout.write(('\x08' * num_back))
            log_info = '[{}/{} ({:.0f}%)] loss: {:.4f}, top1: {:.2f}%, top5: {:.2f}%'.format(losses.count, num_train, ((100.0 * losses.count) / num_train), losses.avg, top1.avg, top5.avg)
            sys.stdout.write(log_info)
            sys.stdout.flush()
            num_back = len(log_info)
    # Clear the status line before emitting the epoch summary.
    sys.stdout.write(('\x08' * num_back))
    sys.stdout.write((' ' * num_back))
    sys.stdout.write(('\x08' * num_back))
    logging('Average loss: {:.4f}, top1: {:.2f}%, top5: {:.2f}%, time: {:.1f}s'.format(losses.avg, top1.avg, top5.avg, (time.time() - start_time)), args.log)
    return (losses.avg, top1.avg, top5.avg)
class LinearSumTrainer(Trainer):
    """Trainer wiring a LinearSumClassifier over visual (VGG) and textual features.

    Builds the symbolic theano graph in __init__ and exposes the BCE cost
    as ``self.cost`` for the base Trainer to optimize.
    """

    def __init__(self, params):
        super(LinearSumTrainer, self).__init__(params)
        # Symbolic inputs: visual features, textual features, and genre labels.
        self.x_v = tensor.matrix('vgg_features', dtype='float32')
        self.x_t = tensor.matrix('features', dtype='float32')
        self.y = tensor.matrix('genres', dtype='int32')
        classifier = LinearSumClassifier(
            params['visual_dim'],
            params['textual_dim'],
            params['n_classes'],
            params['hidden_size'],
            params['init_ranges'],
        )
        classifier.initialize()
        # Apply the model inside the batch-normalization context so BN
        # population statistics are handled by the graph.
        with batch_normalization(classifier):
            self.y_hat = classifier.apply(self.x_v, self.x_t)
        self.cost = BinaryCrossEntropy().apply(self.y, self.y_hat)

    def get_targets(self, stream):
        """Return (predicted probabilities, ground-truth labels) for one epoch batch.

        NOTE: the compiled prediction function is called with arguments in the
        order (textual, visual), while the stream yields (labels, visual,
        textual) -- this matches the original call order.
        """
        predict = ComputationGraph(self.y_hat).get_theano_function()
        y_true, X_visual, X_textual = next(stream.get_epoch_iterator())
        probabilities = predict(X_textual, X_visual)[0]
        return (probabilities, y_true)
# NOTE(review): several decorators were lost in the source flattening and are
# restored here: the task-registration decorator (garbled to "_task(...)"),
# @staticmethod on add_args, @classmethod on setup_task/update_args/prepare,
# and @property on source_dictionary/target_dictionary.  The @property
# restoration is required by this class itself: build_dataset_for_inference
# and inference_step read self.source_dictionary / self.target_dictionary as
# attributes, not as method calls.
@register_task('multilingual_translation')
class MultilingualTranslationTask(LegacyFairseqTask):
    """Multilingual translation over several language pairs.

    Trains one FairseqMultiModel in a round-robin fashion over all
    ``--lang-pairs``; at inference time a single pair is selected via
    ``--source-lang``/``--target-lang``.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific command-line arguments."""
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
                            help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left (default: False)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str,
                            choices=['src', 'tgt'], metavar='SRCTGT',
                            help='replace beginning-of-sentence in source sentence with source or target language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true',
                            help='replace beginning-of-sentence in target sentence with target language token')

    def __init__(self, args, dicts, training):
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # Inference: a single pair derived from -s/-t.
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        self.eval_lang_pairs = self.lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Fairseq entry point: build dictionaries, then the task instance."""
        dicts, training = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def update_args(cls, args):
        """Normalize string/bool args in place; validate --lang-pairs."""
        args.left_pad_source = utils.eval_bool(args.left_pad_source)
        args.left_pad_target = utils.eval_bool(args.left_pad_target)
        if args.lang_pairs is None:
            raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(',')

    @classmethod
    def prepare(cls, args, **kargs):
        """Load one dictionary per language and decide training vs inference mode."""
        cls.update_args(args)
        sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
        # Presence of -s/-t means single-pair inference mode.
        if (args.source_lang is not None) or (args.target_lang is not None):
            training = False
        else:
            training = True
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert len(paths) > 0
            dicts[lang] = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
            # All dictionaries must agree on the special-symbol indices.
            if len(dicts) > 0:
                assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
                assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
                assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
            if (args.encoder_langtok is not None) or args.decoder_langtok:
                # Reserve a __lang__ token per language in every dictionary.
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
        return dicts, training

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Encoder-side BOS replacement: a language token, or plain EOS when disabled."""
        if self.args.encoder_langtok is None:
            return self.dicts[src_lang].eos()
        if self.args.encoder_langtok == 'src':
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Decoder-side BOS replacement: target language token, or plain EOS when disabled."""
        if not self.args.decoder_langtok:
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(self, lang_pair_dataset,
                              src_eos=None, src_lang=None,
                              tgt_eos=None, tgt_lang=None):
        """Wrap a dataset so EOS/BOS tokens are swapped for language tokens as configured."""
        if (self.args.encoder_langtok is None) and (not self.args.decoder_langtok):
            return lang_pair_dataset
        new_src_eos = None
        if (self.args.encoder_langtok is not None and src_eos is not None
                and src_lang is not None and tgt_lang is not None):
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            src_eos = None
        new_tgt_bos = None
        if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None
        return TransformEosLangPairDataset(
            lang_pair_dataset,
            src_eos=src_eos,
            new_src_eos=new_src_eos,
            tgt_bos=tgt_eos,
            new_tgt_bos=new_tgt_bos,
        )

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load a split as a RoundRobinZipDatasets over all language pairs."""
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # Shard data paths round-robin by epoch.
        data_path = paths[(epoch - 1) % len(paths)]

        def language_pair_dataset(lang_pair):
            src, tgt = lang_pair.split('-')
            langpair_dataset = load_langpair_dataset(
                data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
                combine=True,
                dataset_impl=self.args.dataset_impl,
                upsample_primary=self.args.upsample_primary,
                left_pad_source=self.args.left_pad_source,
                left_pad_target=self.args.left_pad_target,
                max_source_positions=self.args.max_source_positions,
                max_target_positions=self.args.max_target_positions,
            )
            return self.alter_dataset_langtok(
                langpair_dataset,
                src_eos=self.dicts[src].eos(), src_lang=src,
                tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt,
            )

        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict([(lang_pair, language_pair_dataset(lang_pair))
                         for lang_pair in self.lang_pairs]),
            eval_key=(None if self.training
                      else ('%s-%s' % (self.args.source_lang, self.args.target_lang))),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Build a single-pair dataset for interactive/inference decoding."""
        if constraints is not None:
            raise NotImplementedError('Constrained decoding with the multilingual_translation task is not supported')
        lang_pair = '%s-%s' % (self.args.source_lang, self.args.target_lang)
        return RoundRobinZipDatasets(
            OrderedDict([(lang_pair, self.alter_dataset_langtok(
                LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary),
                src_eos=self.source_dictionary.eos(),
                src_lang=self.args.source_lang,
                tgt_eos=self.target_dictionary.eos(),
                tgt_lang=self.args.target_lang,
            ))]),
            eval_key=lang_pair,
        )

    def build_model(self, args):
        """Build and validate the multilingual model for this task."""
        def check_args():
            # Verify the model args agree with the task args.
            messages = []
            if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
                messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
            if self.args.encoder_langtok != args.encoder_langtok:
                messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
            if self.args.decoder_langtok != args.decoder_langtok:
                messages.append('--decoder-langtok should {} be set.'.format('' if args.decoder_langtok else 'not'))
            if len(messages) > 0:
                raise ValueError(' '.join(messages))

        self.update_args(args)
        check_args()
        from fairseq import models
        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
        return model

    def _per_lang_pair_train_loss(self, lang_pair, model, update_num,
                                  criterion, sample, optimizer, ignore_grad):
        """Forward/backward for one language pair; zeroes the loss when ignoring grads."""
        loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """One optimizer step aggregated over all language pairs present in the sample."""
        model.train()
        from collections import defaultdict
        agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
        curr_lang_pairs = [
            lang_pair for lang_pair in self.model_lang_pairs
            if (sample[lang_pair] is not None) and (len(sample[lang_pair]) != 0)
        ]
        for idx, lang_pair in enumerate(curr_lang_pairs):
            def maybe_no_sync():
                # Skip gradient all-reduce for all but the last pair so the
                # reduction happens once per step in distributed training.
                if (self.args.distributed_world_size > 1
                        and hasattr(model, 'no_sync')
                        and idx < len(curr_lang_pairs) - 1):
                    return model.no_sync()
                else:
                    return contextlib.ExitStack()
            with maybe_no_sync():
                loss, sample_size, logging_output = self._per_lang_pair_train_loss(
                    lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad)
            agg_loss += loss.detach().item()
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
        """Validation loss for one language pair."""
        return criterion(model.models[lang_pair], sample[lang_pair])

    def valid_step(self, sample, model, criterion):
        """Validation aggregated over all evaluation language pairs."""
        model.eval()
        with torch.no_grad():
            from collections import defaultdict
            agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
            for lang_pair in self.eval_lang_pairs:
                if (lang_pair not in sample or sample[lang_pair] is None
                        or len(sample[lang_pair]) == 0):
                    continue
                loss, sample_size, logging_output = self._per_lang_pair_valid_loss(
                    lang_pair, model, criterion, sample)
                agg_loss += loss.data.item()
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f'{lang_pair}:{k}'] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
        """Generate translations; BOS is the target language token when --decoder-langtok."""
        with torch.no_grad():
            if self.args.decoder_langtok:
                bos_token = _lang_token_index(self.target_dictionary, self.args.target_lang)
            else:
                bos_token = self.target_dictionary.eos()
            return generator.generate(models, sample, prefix_tokens=prefix_tokens,
                                      constraints=constraints, bos_token=bos_token)

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate per-pair logging outputs into global scalars."""
        with metrics.aggregate():
            super().reduce_metrics(logging_outputs, criterion)
            for k in ['sample_size', 'nsentences', 'ntokens']:
                metrics.log_scalar(k, sum(l[k] for l in logging_outputs))

    @property
    def source_dictionary(self):
        """Any dictionary when training (all share special symbols); the source one otherwise."""
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        """Any dictionary when training; the target-language one at inference."""
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Max (source, target) positions, keyed by language pair."""
        if len(self.datasets.values()) == 0:
            return {
                '%s-%s' % (self.args.source_lang, self.args.target_lang):
                    (self.args.max_source_positions, self.args.max_target_positions)
            }
        return OrderedDict([
            (key, (self.args.max_source_positions, self.args.max_target_positions))
            for split in self.datasets.keys()
            for key in self.datasets[split].datasets.keys()
        ])
def hash_file(file_name):
    """Return the SHA-1 hex digest of the file at *file_name*.

    The file is streamed in BUF_SIZE-byte chunks so arbitrarily large
    files can be hashed without loading them fully into memory.
    """
    digest = hashlib.sha1()
    with open(file_name, 'rb') as stream:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for chunk in iter(lambda: stream.read(BUF_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
class VGG(nn.Module):
    """VGG-style network: a feature extractor followed by one linear classifier.

    ``forward`` returns ``[logits, flattened_features]`` so callers can use
    the penultimate representation as well as the class scores.
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # The extractor is expected to produce 512 values per sample once
        # flattened -- presumably a 512-channel 1x1 map; confirm with callers.
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()

    def forward(self, x):
        feature_maps = self.features(x)
        flat = feature_maps.view(feature_maps.size(0), -1)
        logits = self.classifier(flat)
        return [logits, flat]

    def _initialize_weights(self):
        """Init: He-normal for convs, unit/zero for batch-norm, N(0, 0.01) for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
def _read_breakdown_rows(path, tthread, use_pat_locator=False):
    """Read *tthread* tab-separated breakdown rows from *path*.

    Opens the file with a context manager (the original leaked file handles
    via bare ``open(f).readlines()``) and locates the start row with
    ``locateIdxPAT`` for PAT output files, ``locateIdx`` otherwise.
    """
    with open(path) as f:
        lines = f.readlines()
    idx = locateIdxPAT(lines) if use_pat_locator else locateIdx(lines)
    return [line.split('\t') for line in lines[idx:idx + tthread]]


def ReadFileSL(tthread, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness,
               overlap_ratio, abort_ratio, isCyclic, complexity):
    """Build a 5x3 percentage breakdown matrix for the SL workload.

    Columns: 0 = (OP)GSA (OPGSA when cyclic, GSA otherwise), 1 = TStream,
    2 = PAT.  Rows are breakdown categories accumulated from per-thread
    values, then each column is normalized to percentages of its sum.

    Returns:
        list[list[float]]: the 5x3 matrix (also printed, as before).
    """
    w, h = 3, 5
    y = [[0 for _ in range(w)] for _ in range(h)]
    y_sum = [0 for _ in range(w)]
    inputEvents = tthread * batchInterval

    def path_for(scheduler):
        # All getPathSL calls share every parameter except the scheduler name.
        return getPathSL(scheduler, inputEvents, tthread, NUM_ITEMS, deposit_ratio,
                         key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity)

    # Column 0: scheduler depends on whether the workload is cyclic.  The two
    # original branches were identical except for the scheduler name.
    if isCyclic == 'true':
        gs_scheduler = 'OPGSA'
    elif isCyclic == 'false':
        gs_scheduler = 'GSA'
    else:
        # Preserved original behavior: report the bad flag but still read
        # the TStream and PAT files below.
        print('error')
        gs_scheduler = None
    if gs_scheduler is not None:
        for row in _read_breakdown_rows(path_for(gs_scheduler), tthread):
            y[0][0] = 0
            y[1][0] = 0
            y[2][0] += float(row[1]) + float(row[6])
            y_sum[0] += float(row[1]) + float(row[6])
            y[3][0] += float(row[5])
            y_sum[0] += float(row[5])
            y[4][0] += float(row[3])
            y_sum[0] += float(row[3])

    # Column 1: TStream.
    for row in _read_breakdown_rows(path_for('TStream'), tthread):
        y[0][1] += float(row[1]) + float(row[6])
        y_sum[1] += float(row[1]) + float(row[6])
        y[1][1] = 0
        y[2][1] = 0
        y[3][1] += float(row[5])
        y_sum[1] += float(row[5])
        y[4][1] += float(row[3])
        y_sum[1] += float(row[3])

    # Column 2: PAT (uses its own index locator and column layout).
    for row in _read_breakdown_rows(path_for('PAT'), tthread, use_pat_locator=True):
        y[0][2] += float(row[3])
        y_sum[2] += float(row[3])
        y[1][2] += float(row[4])
        y_sum[2] += float(row[4])
        y[2][2] = 0
        y[3][2] = 0
        y[4][2] += float(row[2])
        y_sum[2] += float(row[2])

    # Normalize each column to percentages of its total.
    for i in range(h):
        for j in range(w):
            if y_sum[j] != 0:
                y[i][j] = (y[i][j] / y_sum[j]) * 100
    print(y)
    return y
def dcnn_nodelta(bands=60, frames=31, n_classes=10, channels=1,
                 fully_connected=5000, filters=80, activation='relu'):
    """Build a Keras DCNN classifier over a single mel-spectrogram input.

    "nodelta" = no delta-feature branch: only the mel input feeds the shared
    convolutional stem (``dcnn_head``), followed by two dense layers and a
    softmax output.

    Args:
        bands: mel bands (input height).
        frames: time frames (input width).
        n_classes: number of output classes.
        channels: input channels (default 1).
        fully_connected: width of the two hidden dense layers.
        filters: filter count passed to ``dcnn_head``.
        activation: activation for the hidden dense layers.

    Returns:
        keras.models.Model mapping the mel input to class probabilities.
    """
    # Only the layers actually used are imported; the original also imported
    # Sequential, Dropout, Activation, Concatenate and keras.layers unused.
    from keras.models import Model
    from keras.layers import Dense, Input

    input_shape = (bands, frames, channels)

    def head(layer_input, name):
        # dcnn_head is the shared convolutional stem (defined elsewhere).
        return dcnn_head(layer_input, name, filters)

    mel_input = Input(shape=input_shape, name='mel_input')
    m = head(mel_input, 'mel')
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(fully_connected, activation=activation)(m)
    m = Dense(n_classes, activation='softmax')(m)
    return Model(mel_input, m)