code
stringlengths
101
5.91M
def ter_ref_files_gen(b_reduced, param, three_ref_only=False):
    """Write a TER-format reference file for the entries of `b_reduced`.

    Each reference sentence is tokenized (spaces around non-word chars),
    lower-cased, transliterated to ASCII, and tagged with its entry id as
    '... (idN)'. With three_ref_only=True at most three references per entry
    are kept and the output goes to the '-3ref' file instead.
    """
    lines = []
    for entry_idx, entry in enumerate(b_reduced.entries):
        sent_id = 'id' + str(entry_idx + 1)
        for ref_idx, lex in enumerate(entry.lexs):
            # Put a space around every non-word character, then collapse
            # runs of whitespace into single spaces.
            tokenized = ' '.join(re.split('(\\W)', lex.lex))
            tokenized = ' '.join(tokenized.split())
            if three_ref_only and ref_idx > 2:
                break
            lines.append(unidecode(tokenized.lower()) + ' (' + sent_id + ')\n')
    suffix = '-reference-3ref.ter' if three_ref_only else '-reference.ter'
    with open('references/gold-' + param + suffix, 'w+') as f:
        f.write(''.join(lines))
def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): desired_a = '' desired_b = '' new_shape_a = [] new_shape_b = [] for ix in out: if (ix in a_term): desired_a += ix new_shape_a.append(shape_a[a_term.index(ix)]) else: new_shape_a.append(1) if (ix in b_term): desired_b += ix new_shape_b.append(shape_b[b_term.index(ix)]) else: new_shape_b.append(1) if (desired_a != a_term): eq_a = f'{a_term}->{desired_a}' else: eq_a = None if (desired_b != b_term): eq_b = f'{b_term}->{desired_b}' else: eq_b = None return (eq_a, eq_b, new_shape_a, new_shape_b, None, None, True)
class BasicFuseMotion(nn.Module):
    """Fuse a 2-channel flow field with motion features (plus a context map)
    into a single `query_latent_dim`-channel feature map.

    Two branches: a flow branch (2 -> 64 channels) and a feature branch
    (motion features concatenated with context, -> 256 channels); their
    concatenation is projected by a final 1x1 conv.
    """

    def __init__(self, args):
        super(BasicFuseMotion, self).__init__()
        cor_planes = args.motion_feature_dim
        out_planes = args.query_latent_dim
        # Flow branch: 2-channel flow -> 64-channel embedding.
        self.normf1 = nn.InstanceNorm2d(128)
        self.normf2 = nn.InstanceNorm2d(128)
        self.convf1 = nn.Conv2d(2, 128, 3, padding=1)
        self.convf2 = nn.Conv2d(128, 128, 3, padding=1)
        self.convf3 = nn.Conv2d(128, 64, 3, padding=1)
        s = 1
        width = 256 * s
        # Feature branch: motion features + context (128 ch) -> width channels.
        self.normc1 = nn.InstanceNorm2d(width)
        self.normc2 = nn.InstanceNorm2d(width)
        self.normc3 = nn.InstanceNorm2d(width)
        self.convc1 = nn.Conv2d(cor_planes + 128, width, 1, padding=0)
        self.convc2 = nn.Conv2d(width, width, 3, padding=1)
        self.convc3 = nn.Conv2d(width, width, 3, padding=1)
        self.convc4 = nn.Conv2d(width, width, 3, padding=1)
        # Final 1x1 projection of the fused (width + 64)-channel map.
        self.conv = nn.Conv2d(width + 64, out_planes, 1, padding=0)

    def forward(self, flow, feat, context1=None):
        # Flow branch.
        flow_emb = F.relu(self.normf1(self.convf1(flow)))
        flow_emb = F.relu(self.normf2(self.convf2(flow_emb)))
        flow_emb = self.convf3(flow_emb)
        # Feature branch: context is concatenated along channels first.
        fused = torch.cat([feat, context1], dim=1)
        fused = F.relu(self.normc1(self.convc1(fused)))
        fused = F.relu(self.normc2(self.convc2(fused)))
        fused = F.relu(self.normc3(self.convc3(fused)))
        fused = self.convc4(fused)
        # Merge both branches and project.
        fused = torch.cat([flow_emb, fused], dim=1)
        return F.relu(self.conv(fused))
def run_experiment(experiment, configs, args, mods=None, **kwargs):
    """Assemble a Config from file/defaults/mods/CLI, expand an optional grid
    search, and run `experiment` once per grid combination.

    Args:
        experiment: experiment class to instantiate and run.
        configs: dict of named base configs; must contain args.default_config
            (and 'DEFAULTS' when args.automatic_description is set).
        args: parsed CLI namespace (config, mods, grid, resume, test, ...).
        mods: optional dict of named config modifications.
        **kwargs: forwarded to the experiment constructor; logger/checkpoint
            defaults are filled in below when absent.
    """
    # Default experiment-logger and resume settings, only when not supplied.
    if ('explogger_kwargs' not in kwargs):
        kwargs['explogger_kwargs'] = dict(folder_format='{experiment_name}_%Y%m%d-%H%M%S')
    if ('explogger_freq' not in kwargs):
        kwargs['explogger_freq'] = 1
    if ('resume_save_types' not in kwargs):
        kwargs['resume_save_types'] = ('model', 'simple', 'th_vars', 'results')
    # Precedence: config file < named default config < mods < argv overrides.
    config = (Config(file_=args.config) if (args.config is not None) else Config())
    config.update_missing(configs[args.default_config].deepcopy())
    if ((args.mods is not None) and (mods is not None)):
        for mod in args.mods:
            config.update(mods[mod])
    config = Config(config=config, update_from_argv=True)
    if args.skip_existing:
        # Collect configs of previously run experiments so duplicates can be
        # skipped; unreadable config files are ignored (best effort).
        existing_configs = []
        for exp in os.listdir(args.base_dir):
            try:
                existing_configs.append(Config(file_=os.path.join(args.base_dir, exp, 'config', 'config.json')))
            except Exception as e:
                pass
    if (args.grid is not None):
        grid = GridSearch().read(args.grid)
    else:
        # No grid file: run exactly once with an empty parameter combination.
        grid = [{}]
    for combi in grid:
        config.update(combi)
        if args.skip_existing:
            # Skip combinations already covered by an existing experiment.
            skip_this = False
            for existing_config in existing_configs:
                if existing_config.contains(config):
                    skip_this = True
                    break
            if skip_this:
                continue
        if ('backup_every' in config):
            kwargs['save_checkpoint_every_epoch'] = config['backup_every']
        # Optional visdom / tensorboard / telegram loggers.
        loggers = {}
        if args.visdomlogger:
            loggers['v'] = ('visdom', {}, 1)
        if (args.tensorboardxlogger is not None):
            if (args.tensorboardxlogger == 'same'):
                loggers['tx'] = ('tensorboard', {}, 1)
            else:
                loggers['tx'] = ('tensorboard', {'target_dir': args.tensorboardxlogger}, 1)
        if args.telegramlogger:
            kwargs['use_telegram'] = True
        if args.automatic_description:
            # Auto-describe the run as the diff against the DEFAULTS config.
            difference_to_default = Config.difference_config_static(config, configs['DEFAULTS']).flat(keep_lists=True, max_split_size=0, flatten_int=True)
            description_str = ''
            for (key, val) in difference_to_default.items():
                val = val[0]
                description_str = '{} = {}\n{}'.format(key, val, description_str)
            config.description = description_str
        exp = experiment(config=config, base_dir=args.base_dir, resume=args.resume, ignore_resume_config=args.ignore_resume_config, loggers=loggers, **kwargs)
        trained = False
        # Train unless this is a resume that only wants testing.
        if ((args.resume is None) or (args.test is False)):
            exp.run()
            trained = True
        if args.test:
            # setup=True only when no training run prepared the experiment.
            exp.run_test(setup=(not trained))
        if (isinstance(args.resume, str) and (exp.elog is not None) and args.copy_test):
            # Copy test artifacts back into the resumed experiment's save dir.
            for f in glob.glob(os.path.join(exp.elog.save_dir, 'test*')):
                if os.path.isdir(f):
                    shutil.copytree(f, os.path.join(args.resume, 'save', os.path.basename(f)))
                else:
                    shutil.copy(f, os.path.join(args.resume, 'save'))
class HierarchicalDataset():
    """Wrap a labelled image dataset with super-categories: builds one
    balanced sub-dataset (and 50/50 train/test dataloaders) per super-category.

    Expects `dataset_directory` to contain info.json (column names and flags)
    and labels.csv (one row per image).
    """

    def __init__(self, dataset_directory, dataset_name, batch_size):
        # Fixed seed so per-category sampling (and thus splits) is reproducible.
        self.seed = 33
        self.batch_size = batch_size
        self.dataset_directory = dataset_directory
        self.dataset_name = dataset_name
        assert os.path.exists(dataset_directory), '[-] Dataset path {} not found.'.format(dataset_directory)
        self.read_info_json()
        self.read_labels_csv()
        assert self.is_valid_for_hierarchical_classification(), '[-] Dataset {} has no super categories.'.format(self.dataset_name)
        self.construct_dataset()

    def construct_dataset(self):
        """Build one dataset/dataloader/size triple per super-category."""
        self.super_categories = sorted(self.df[self.super_category_column_name].unique())
        self.super_categories_datasets = []
        self.super_categories_dataloaders = []
        self.super_categories_datasizes = []
        for super_category in self.super_categories:
            # Class balance: sample exactly min_images_per_category rows from
            # every category of this super-category.
            super_category_df = self.df[(self.df[self.super_category_column_name] == super_category)].groupby(self.category_column_name).sample(n=self.min_images_per_category, random_state=self.seed)
            transform = transforms.Compose([transforms.ToTensor()])
            category_dataset = HierarchicalMiniDataset(df=super_category_df, dataset_directory=self.dataset_directory, image_column_name=self.image_column_name, category_column_name=self.category_column_name, transform=transform)
            dataset_size = len(category_dataset)
            # 50/50 train/test split.
            test_size = int((0.5 * dataset_size))
            train_size = (dataset_size - test_size)
            (train_dataset, test_dataset) = random_split(category_dataset, [train_size, test_size])
            super_category_dataloaders = {'test': DataLoader(test_dataset, batch_size=self.batch_size, shuffle=True), 'train': DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)}
            super_category_datasetsizes = {'test': len(test_dataset), 'train': len(train_dataset)}
            self.super_categories_dataloaders.append(super_category_dataloaders)
            self.super_categories_datasizes.append(super_category_datasetsizes)
            self.super_categories_datasets.append(category_dataset)

    def read_info_json(self) -> None:
        """Load dataset metadata (column names and flags) from info.json."""
        info_json_path = os.path.join(self.dataset_directory, 'info.json')
        with open(info_json_path, 'r') as f:
            info_json = json.load(f)
        self.image_column_name = info_json['image_column_name']
        self.category_column_name = info_json['category_column_name']
        self.super_category_column_name = info_json['super_category_column_name']
        self.has_super_categories = info_json['has_super_categories']
        self.min_images_per_category = info_json['minimum_images_per_category']

    def read_labels_csv(self):
        """Load the per-image label table from labels.csv into self.df."""
        csv_path = os.path.join(self.dataset_directory, 'labels.csv')
        self.df = pd.read_csv(csv_path, sep=',', encoding='utf-8')

    def is_valid_for_hierarchical_classification(self):
        # Hierarchical classification requires super-categories to exist.
        return self.has_super_categories

    def get_super_categories(self):
        return self.super_categories

    def get_categories(self, super_category):
        index = self.super_categories.index(super_category)
        return self.super_categories_datasets[index].categories

    def get_super_category_dataloaders(self, super_category):
        index = self.super_categories.index(super_category)
        return self.super_categories_dataloaders[index]

    def get_super_category_dataset(self, super_category):
        index = self.super_categories.index(super_category)
        return self.super_categories_datasets[index]

    def get_super_category_datasizes(self, super_category):
        index = self.super_categories.index(super_category)
        return self.super_categories_datasizes[index]

    def get_super_categories_count(self):
        """Return a banner-style summary string with the super-category count."""
        str0 = '# #'
        str1 = '# Total super-categories : {}'.format(len(self.super_categories))
        str2 = '# #'
        return '{0}\n{1}\n{2}\n'.format(str0, str1, str2)

    def get_super_category_statistics(self, super_category):
        """Return a multi-line statistics string for one super-category."""
        index = self.super_categories.index(super_category)
        dataset = self.super_categories_datasets[index]
        str0 = '# #'
        str1 = 'Super Category : {}'.format(super_category)
        str2 = 'Total Categories : {}'.format(dataset.nb_classes)
        str3 = 'Categories:'
        str4 = ''
        for cat in dataset.categories:
            str4 += '\t{}\n'.format(cat)
        str5 = 'Total data points : {}'.format(len(dataset))
        # NOTE(review): these counts assume the 50/50 split made in
        # construct_dataset rather than reading the actual split sizes.
        str6 = 'Train data points : {}'.format(int((len(dataset) / 2)))
        str7 = 'Test data points : {}'.format(int((len(dataset) / 2)))
        str8 = '# #'
        return '{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n{8}\n'.format(str0, str1, str2, str3, str4, str5, str6, str7, str8)
class MyGroupNorm(nn.GroupNorm):
    """GroupNorm taking `num_channels` first (BatchNorm-style signature),
    with 8 groups by default."""

    def __init__(self, num_channels, eps=1e-05, affine=True, num_groups=8):
        # nn.GroupNorm expects (num_groups, num_channels, ...); this wrapper
        # reorders the arguments so channel count comes first.
        super().__init__(num_groups, num_channels, eps, affine)
class BaselineClassifierAlgorithm(SklearnAlgorithm):
    """Baseline classifier: wraps sklearn's DummyClassifier('prior'), which
    always predicts the class priors observed in the training data."""

    algorithm_name = 'Baseline Classifier'
    algorithm_short_name = 'Baseline'

    def __init__(self, params):
        super(BaselineClassifierAlgorithm, self).__init__(params)
        logger.debug('BaselineClassifierAlgorithm.__init__')
        self.library_version = sklearn.__version__
        # NOTE(review): `additional` is presumably a module-level defaults
        # dict defined elsewhere in this file — confirm it exists here.
        self.max_iters = additional.get('max_steps', 1)
        self.model = DummyClassifier(strategy='prior', random_state=params.get('seed', 1))

    def file_extension(self):
        # File extension used when persisting this model to disk.
        return 'baseline'

    def is_fitted(self):
        # DummyClassifier exposes n_outputs_ only after a successful fit().
        return (hasattr(self.model, 'n_outputs_') and (self.model.n_outputs_ is not None) and (self.model.n_outputs_ > 0))
def get_atom_feature_dims():
    """Return the cardinality of each atom-level categorical feature, in the
    canonical feature order used by the atom encoder."""
    feature_keys = (
        'possible_atomic_num_list',
        'possible_chirality_list',
        'possible_degree_list',
        'possible_formal_charge_list',
        'possible_numH_list',
        'possible_number_radical_e_list',
        'possible_hybridization_list',
        'possible_is_aromatic_list',
        'possible_is_in_ring_list',
    )
    return [len(allowable_features[key]) for key in feature_keys]
class TFBartPretrainedModel(metaclass=DummyObject):
    """Placeholder for TFBartPretrainedModel when TensorFlow is not installed;
    instantiating it raises a helpful missing-backend error instead."""

    # Backends that must be available for the real class to be usable.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def get_out_bins(start, end, id_date, n_bins, time_bin_labels=None):
    """Expand global time-bin indices [start, end) into per-bin date metadata.

    Args:
        start, end: half-open range of global bin indices.
        id_date: date identifier formatted '%Y%j' (year + zero-padded
            day-of-year, e.g. '2020001').
        n_bins: bins per day; whenever a global index is a multiple of
            n_bins, the day-of-year in id_date is advanced by one.
        time_bin_labels: labels indexed by (idx_bin % n_bins). Defaults to
            get_bin_labels(); resolved lazily inside the call (the original
            evaluated it once as a default argument, at import time).

    Returns:
        dict mapping 0..(end-start-1) to dicts with keys 'id_day', 'id_bin',
        'time_bin' and 'date' (formatted '%Y%m%d').
    """
    if time_bin_labels is None:
        # Lazy default: avoids calling get_bin_labels() at module import.
        time_bin_labels = get_bin_labels()
    bins_holder = {}
    for i, idx_bin in enumerate(range(start, end)):
        if (idx_bin % n_bins) == 0:
            # New day: bump the 3-digit day-of-year suffix of id_date.
            day = int(id_date[-3:]) + 1
            zeros_before = '0' * (3 - (len(str(day)) % 4))
            id_day = zeros_before + str(day)
            id_date = id_date[:-3] + id_day
        bins_holder[i] = {
            'id_day': id_date[-3:],
            'id_bin': idx_bin % n_bins,
            'time_bin': time_bin_labels[idx_bin % n_bins],
            'date': datetime.datetime.strptime(id_date[:-3] + ' ' + id_date[-3:], '%Y %j').strftime('%Y%m%d'),
        }
    return bins_holder
def query_yes_no(question, default='yes'):
    """Ask `question` on stdout and read a yes/no answer from stdin.

    `default` ('yes', 'no' or None) is returned when the user just hits
    Enter; the prompt highlights it. Loops until a valid answer is given.
    Returns True for yes, False for no.
    """
    valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
    if default is None:
        prompt = ' [y/n] '
    elif default == 'yes':
        prompt = ' [Y/n] '
    elif default == 'no':
        prompt = ' [y/N] '
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        # Python 2 compatibility: raw_input on old interpreters.
        if sys.version_info[0] == 3:
            choice = input().lower()
        else:
            choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def process_labels(labels: Optional[Dict[(str, Any)]]=None, onehot: bool=False) -> Tuple[(Optional[Union[(Dict[(str, Any)], pd.DataFrame)]], Optional[np.ndarray], Optional[np.ndarray], int)]:
    """Normalize tile labels given as a dict, a DataFrame, or a parquet path.

    Returns (labels, unique_labels, label_prob, num_outcomes); the latter
    three are (None, None, 1) for DataFrame/path/None inputs.
    """
    # DataFrame / parquet-path / None inputs take the simple path.
    if labels is None or isinstance(labels, (str, pd.DataFrame)):
        if labels is None:
            return (None, None, None, 1)
        df = pd.read_parquet(labels) if isinstance(labels, str) else labels
        if 'label' not in df.columns:
            raise ValueError(f'Could not find column "label" in the tile labels dataframe at {labels}.')
        return (df, None, None, 1)
    # Dict-like labels.
    if onehot:
        # Encode every value as a one-hot vector over 0..max_label.
        raw_values = np.array(list(labels.values()))
        max_label = np.max(np.unique(raw_values))
        labels = {key: to_onehot(value, (max_label + 1)) for (key, value) in labels.items()}
        num_outcomes = 1
    else:
        # Multi-outcome labels are given as lists; scalars mean one outcome.
        first_label = list(labels.values())[0]
        num_outcomes = len(first_label) if isinstance(first_label, list) else 1
    all_values = np.array(list(labels.values()))
    unique_labels = np.unique(all_values, axis=0)
    counts = np.array([np.sum((all_values == u)) for u in unique_labels])
    label_prob = counts / len(all_values)
    return (labels, unique_labels, label_prob, num_outcomes)
def dot_product_attention(tensor1, tensor2, with_bias=False):
    """Return dot-product attention logits: tensor1 @ tensor2^T, optionally
    plus a learned scalar bias (TF1-style get_variable)."""
    logits = tf.matmul(tensor1, tensor2, transpose_b=True)
    if not with_bias:
        return logits
    bias = tf.get_variable('bias', shape=(), dtype=tf.float32)
    return logits + bias
# NOTE(review): this bare call looks like a decorator that lost its '@' in
# formatting — presumably `@_materialize('core')` on the class below; confirm
# against the original source.
_materialize('core')


class Conv1d(UnaryOpBase):
    """Symbolic 1-D convolution op: shape-transfer rule plus the z3
    constraints that make the op well-formed."""

    in_dtypes = [(DType.float32,)]
    out_dtypes = [(DType.float32,)]

    def __init__(self, in_channels: Union[(int, z3.ExprRef)], out_channels: Union[(int, z3.ExprRef)], kernel_size: Union[(int, z3.ExprRef)], stride: Union[(int, z3.ExprRef)], padding: Union[(int, z3.ExprRef)], dilation: Union[(int, z3.ExprRef)]):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        # Conv1d consumes and produces rank-3 tensors.
        self.inp_ranks = [(3,)]
        self.out_ranks = [(3,)]

    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """Compute the abstract output tensor from the abstract input."""
        abs_tensor = AbsTensor([input_shapes[0].shape[0], self.out_channels], dtype=input_shapes[0].dtype)
        # Effective kernel extent under dilation: k + (d-1)*(k-1).
        mimic_k = (self.kernel_size + ((self.dilation - 1) * (self.kernel_size - 1)))
        # Standard conv output length: (L - k_eff + 2p) / s + 1 (symbolic ops).
        abs_tensor.shape.append((nnsmith_div(nnsmith_add(nnsmith_sub(input_shapes[0].shape[2], mimic_k), (2 * self.padding)), self.stride) + 1))
        return [abs_tensor]

    def requires(self, input_shapes):
        """z3 constraints making this op valid for the given input shape."""
        cons = []
        cons.append(nnsmith_eq(self.in_channels, input_shapes[0].shape[1]))
        cons.append(nnsmith_ge(self.out_channels, 1))
        cons.append(nnsmith_ge(self.dilation, 1))
        mimic_k = (self.kernel_size + ((self.dilation - 1) * (self.kernel_size - 1)))
        cons.append(nnsmith_ge(mimic_k, 1))
        cons.append(nnsmith_ge(self.stride, 1))
        cons.append(nnsmith_ge(self.padding, 0))
        # The dilated kernel must fit inside the padded input length.
        cons.append(nnsmith_le(mimic_k, nnsmith_add(input_shapes[0].shape[2], (2 * self.padding))))
        # Keep padding bounded so generated models stay reasonable.
        cons.append(nnsmith_le(self.padding, 255))
        return cons

    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        # Input is rank 3 with the same dtype as the output.
        return [(3, out_abs_tensor[0].dtype)]

    def __repr__(self) -> str:
        repr = f'Conv1d({self.in_channels}, {self.out_channels}, k={self.kernel_size}'
        # Print stride/padding/dilation only when symbolic or non-default.
        if ((not isinstance(self.stride, int)) or (self.stride != 1)):
            repr += f', s={self.stride}'
        if ((not isinstance(self.padding, int)) or (self.padding != 0)):
            repr += f', p={self.padding}'
        if ((not isinstance(self.dilation, int)) or (self.dilation != 1)):
            repr += f', d={self.dilation}'
        repr += ')'
        return repr
def measure_semiorthogonality(model: nn.Module) -> Dict[(str, float)]:
    """Score how far each constrained submodule's conv weight is from being
    semi-orthogonal.

    For every submodule carrying a `constrain_orthonormal` attribute, the
    weight is flattened to (rows, -1), its Gram matrix P = w w^T is compared
    against the best-fitting scaled identity, and ||P - alpha*I||_F^2 is
    reported (0 means exactly semi-orthogonal up to scale).
    """
    scores: Dict[str, float] = {}
    with torch.no_grad():
        for name, module in model.named_modules():
            if not hasattr(module, 'constrain_orthonormal'):
                continue
            weight = module.state_dict()['conv.weight']
            rows = weight.shape[0]
            w = weight.reshape(rows, -1)
            gram = torch.mm(w, w.t())
            # Best-fit scale: alpha = tr(P P^T) / tr(P).
            alpha = torch.trace(torch.mm(gram, gram.t()) / torch.trace(gram))
            identity = torch.eye(rows, dtype=gram.dtype, device=gram.device)
            deviation = gram - alpha * identity
            scores[name] = torch.trace(torch.mm(deviation, deviation.t())).item()
    return scores
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, data_format, freeze_bn=False):
    """Batch normalization for 2-D convolution outputs.

    Thin wrapper around batch_norm_template that normalizes over the batch,
    height and width axes ([0, 1, 2]).
    """
    moments_axes = [0, 1, 2]
    return batch_norm_template(inputs, is_training, scope, moments_axes, bn_decay, data_format=data_format, freeze_bn=freeze_bn)
def split_citations_iter(citation_elements):
    """Yield lists of citation elements, splitting the flat element stream
    into individual citations whenever split_needed() says so.

    Tracks the field types seen in the current citation, counts AUTH
    elements, and may move a trailing author of one citation to the front of
    the next (postpone_last_auth).
    """
    current_citation = []
    current_types = set()
    last_type = None
    num_auth = 0
    postponed_auth = None
    prev_split_reason = None
    for el in citation_elements:
        split_reason = split_needed(el, current_types, last_type)
        if split_reason:
            if (split_reason == 'semicolon'):
                # Keep the text before the ';' with the citation being closed.
                (misc, el['misc_txt']) = el['misc_txt'].split(';', 1)
                current_citation.append({'type': 'MISC', 'misc_txt': misc})
            # Re-attach an author postponed from the previous citation when
            # this one has none of its own (or the split was a repeated field).
            if (postponed_auth and ((num_auth == 0) or (prev_split_reason == 'repeated field'))):
                current_citation.insert(0, postponed_auth)
                num_auth += 1
            # A trailing author may actually belong to the NEXT citation.
            postponed_auth = postpone_last_auth(current_citation, num_auth)
            (yield current_citation)
            # Reset state for the next citation.
            current_citation = []
            current_types = set()
            last_type = None
            num_auth = 0
            prev_split_reason = split_reason
        current_citation.append(el)
        # MISC elements do not influence type tracking.
        if (el['type'] == 'MISC'):
            continue
        if (el['type'] == 'AUTH'):
            num_auth += 1
            continue
        # arXiv reports are tracked under their own type label.
        last_type = ('ARXIV' if el.get('is_arxiv') else el['type'])
        current_types.add(last_type)
    # Flush the final citation, applying the same postponed-author rule.
    if (postponed_auth and ((num_auth == 0) or (prev_split_reason == 'repeated field'))):
        current_citation.insert(0, postponed_auth)
    (yield current_citation)
def _read(path, encoding='utf-8', comment=';;;'):
    """Yield cleaned lines from a file path, a block of text, or an iterable.

    Args:
        path: an existing file path, a (multi-line) string, or any iterable
            of lines; falsy values yield nothing.
        encoding: passed to decode_utf8 for each line.
        comment: prefix marking lines to skip (falsy disables the check).

    Yields:
        Non-empty, non-comment lines, BOM-stripped on the first line.
    """
    if not path:
        return
    f = None
    try:
        if isinstance(path, str) and os.path.exists(path):
            # An existing file: iterate lazily and close it when done.
            f = open(path, 'r', encoding='utf-8')
            lines = f
        elif isinstance(path, str):
            # A literal block of text.
            lines = path.splitlines()
        else:
            # Already an iterable of lines.
            lines = path
        for i, line in enumerate(lines):
            # Strip a UTF-8 BOM from the first line only.
            line = line.strip(BOM_UTF8) if (i == 0 and isinstance(line, str)) else line
            line = line.strip()
            line = decode_utf8(line, encoding)
            if not line or (comment and line.startswith(comment)):
                continue
            yield line
    finally:
        # Fixes a resource leak: the original never closed the file handle
        # (and ended with a meaningless `return StopIteration`).
        if f is not None:
            f.close()
def count(dic, fname):
    """Accumulate whitespace-separated word counts from file `fname` into
    `dic` (mutated in place) and return it.

    Note: lines are joined with single spaces and split on ' ' only, so runs
    of spaces produce empty-string tokens which are counted too (matches the
    original behaviour).
    """
    with open(fname, 'r') as handle:
        text = ' '.join(handle.read().splitlines())
    for token in text.split(' '):
        dic[token] = dic.get(token, 0) + 1
    return dic
def test_osipkovmerritt_selfconsist_dehnencore_meanvr_directint():
    """Check the mean radial velocity via direct integration for the
    self-consistent Osipkov-Merritt DFs in a cored Dehnen potential."""
    pot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    tol = 1e-08
    anisotropy_radii = [2.3, 5.7]
    # NOTE(review): only the second radius/DF is exercised (the [1:] slices
    # skip the first) — presumably covered elsewhere; confirm.
    for ra, dfh in zip(anisotropy_radii[1:], osipkovmerritt_dfs_selfconsist[1:]):
        check_meanvr_directint(dfh, pot, tol, rmin=(pot._scale / 10.0), rmax=(pot._scale * 10.0), bins=3)
    return None
def build_trainer(args, device_id, model, optim):
    """Assemble a Trainer with gradient accumulation, GPU-rank bookkeeping and
    a tensorboard-backed report manager.

    Args:
        args: namespace with accum_count, world_size, gpu_ranks, model_path,
            report_every.
        device_id: GPU index for this process, or negative for CPU.
        model: model to train (may be falsy; parameter count is only logged
            when present).
        optim: optimizer passed through to the Trainer.
    """
    grad_accum_count = args.accum_count
    n_gpu = args.world_size
    if (device_id >= 0):
        gpu_rank = int(args.gpu_ranks[device_id])
    else:
        # CPU run: rank 0 and no GPUs.
        gpu_rank = 0
        n_gpu = 0
    print(('gpu_rank %d' % gpu_rank))
    # Tensorboard events are written next to the model checkpoints.
    tensorboard_log_dir = args.model_path
    writer = SummaryWriter(tensorboard_log_dir, comment='Unmt')
    report_manager = ReportMgr(args.report_every, start_time=(- 1), tensorboard_writer=writer)
    trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)
    if model:
        n_params = _tally_parameters(model)
        logger.info(('* number of parameters: %d' % n_params))
    return trainer
class InfiniteGroupBatchSampler(Sampler):
    """Sampler yielding a never-ending stream of batches whose samples all
    share the same group (dataset.flag), sharded across distributed ranks.
    """

    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Fall back to the current distributed context when not given.
        (_rank, _world_size) = get_dist_info()
        if (world_size is None):
            world_size = _world_size
        if (rank is None):
            rank = _rank
        self.rank = rank
        self.world_size = world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = (seed if (seed is not None) else 0)
        self.shuffle = shuffle
        # `flag` assigns each sample to a group; batches never mix groups.
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # Per-group buffer of indices awaiting a full batch.
        self.buffer_per_group = {k: [] for k in range(len(self.group_sizes))}
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Endless stream of dataset indices, reshuffled (seeded) each pass."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                (yield from torch.randperm(self.size, generator=g).tolist())
            else:
                (yield from torch.arange(self.size).tolist())

    def _indices_of_rank(self):
        """Each rank takes every world_size-th index, offset by its rank."""
        (yield from itertools.islice(self._infinite_indices(), self.rank, None, self.world_size))

    def __iter__(self):
        for idx in self.indices:
            flag = self.flag[idx]
            group_buffer = self.buffer_per_group[flag]
            group_buffer.append(idx)
            if (len(group_buffer) == self.batch_size):
                # Yield a copy of the full batch, then reset the buffer
                # in place.
                (yield group_buffer[:])
                del group_buffer[:]

    def __len__(self):
        # Nominal epoch length; iteration itself never terminates.
        return self.size

    def set_epoch(self, epoch):
        # Infinite sampler: epoch-based reshuffling is deliberately unsupported.
        raise NotImplementedError
# NOTE(review): this bare string looks like the argument of a deprecation
# decorator whose '@decorator(' prefix was lost in formatting — confirm
# against the original source.
('Please use `bigdl.orca.automl.hp` instead.')


class GridRandomRecipe(Recipe):
    """Hyper-parameter search recipe mixing grid search (layer sizes) with
    random sampling (dropout, learning rate) over LSTM/Seq2seq models."""

    def __init__(self, num_rand_samples=1, look_back=2, epochs=5, training_iteration=10):
        super(self.__class__, self).__init__()
        self.num_samples = num_rand_samples
        self.training_iteration = training_iteration
        # Translate `look_back` into a past-sequence-length search space.
        self.past_seq_config = PastSeqParamHandler.get_past_seq_config(look_back)
        self.epochs = epochs

    def search_space(self):
        """Return the search space dict consumed by the tuner."""
        return {
            'model': hp.choice(['LSTM', 'Seq2seq']),
            'lstm_1_units': hp.grid_search([16, 32]),
            'dropout_1': 0.2,
            'lstm_2_units': hp.grid_search([16, 32]),
            'dropout_2': hp.uniform(0.2, 0.5),
            'latent_dim': hp.grid_search([32, 64]),
            'dropout': hp.uniform(0.2, 0.5),
            'lr': hp.uniform(0.001, 0.01),
            'batch_size': hp.choice([32, 64]),
            'epochs': self.epochs,
            'past_seq_len': self.past_seq_config
        }
class Net(nn.Module):
    """ResNet-50 backbone with a 20-way 1x1-conv classification head; the
    stage2 output is detached in forward (gradients stop there)."""

    def __init__(self):
        super(Net, self).__init__()
        # Strides (2, 2, 2, 1): the final stage keeps spatial resolution.
        self.resnet50 = resnet50.resnet50(pretrained=True, strides=(2, 2, 2, 1))
        self.stage1 = nn.Sequential(self.resnet50.conv1, self.resnet50.bn1, self.resnet50.relu, self.resnet50.maxpool, self.resnet50.layer1)
        self.stage2 = nn.Sequential(self.resnet50.layer2)
        self.stage3 = nn.Sequential(self.resnet50.layer3)
        self.stage4 = nn.Sequential(self.resnet50.layer4)
        self.classifier = nn.Conv2d(2048, 20, 1, bias=False)
        # Parameter groups consumed by trainable_parameters().
        self.backbone = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4])
        self.newly_added = nn.ModuleList([self.classifier])

    def forward(self, x):
        x = self.stage1(x)
        # detach(): no gradient flows back through stages 1-2 from here.
        x = self.stage2(x).detach()
        x = self.stage3(x)
        x = self.stage4(x)
        # Global average pooling, then the 1x1 conv acts as a linear layer.
        x = torchutils.gap2d(x, keepdims=True)
        x = self.classifier(x)
        x = x.view((- 1), 20)
        return x

    def train(self, mode=True):
        # NOTE(review): overrides nn.Module.train but ignores `mode` and
        # never calls super(); it only freezes the stem conv/bn parameters.
        # Confirm this is intentional.
        for p in self.resnet50.conv1.parameters():
            p.requires_grad = False
        for p in self.resnet50.bn1.parameters():
            p.requires_grad = False

    def trainable_parameters(self):
        # (backbone params, newly-added head params) for per-group LRs.
        return (list(self.backbone.parameters()), list(self.newly_added.parameters()))
def pyconvresnet18(pretrained=False, **kwargs):
    """Construct a PyConvResNet-18 model (2 basic blocks per stage).

    Pretrained weights are not published yet, so pretrained=True raises
    NotImplementedError.
    """
    layers_per_stage = [2, 2, 2, 2]
    model = PyConvResNet(PyConvBasicBlock2, layers_per_stage, **kwargs)
    if pretrained:
        raise NotImplementedError('Not available the pretrained model yet!')
    return model
def _plot_poseaug(tmp_inputs_3d, tmp_inputs_2d, tmp_outputs_3d_ba, tmp_outputs_2d_ba, tmp_outputs_3d_bl, tmp_outputs_2d_bl, tmp_outputs_3d_rt, tmp_outputs_2d_rt, epoch, iter, args):
    """Save a 2x4 grid visualising the PoseAug augmentation stages.

    Columns: input, bone-angle ('ba'), bone-length ('bl') and rigid
    transform ('rt'); top row 3D poses, bottom row 2D projections. The image
    is written to <args.checkpoint>/poseaug_viz/epoch_XXXX_iter_XXXX.png.

    NOTE(review): the `iter` parameter shadows the builtin of the same name.
    """
    fig3d = plt.figure(figsize=(16, 8))
    # Column 1: raw input pose.
    ax3din = fig3d.add_subplot(2, 4, 1, projection='3d')
    ax3din.set_title('input 3D')
    show3Dpose(tmp_inputs_3d, ax3din, gt=False)
    ax2din = fig3d.add_subplot(2, 4, 5)
    ax2din.set_title('input 2d')
    show2Dpose(tmp_inputs_2d, ax2din)
    # Column 2: bone-angle augmentation overlaid on the input.
    ax3dba = fig3d.add_subplot(2, 4, 2, projection='3d')
    ax3dba.set_title('input/ba 3d')
    show3DposePair(tmp_inputs_3d, tmp_outputs_3d_ba, ax3dba)
    ax2dba = fig3d.add_subplot(2, 4, 6)
    ax2dba.set_title('ba 2d')
    show2Dpose(tmp_outputs_2d_ba, ax2dba)
    # Column 3: bone-length augmentation overlaid on the bone-angle result.
    ax3dbl = fig3d.add_subplot(2, 4, 3, projection='3d')
    ax3dbl.set_title('ba/bl 3d')
    show3DposePair(tmp_outputs_3d_ba, tmp_outputs_3d_bl, ax3dbl)
    ax2dbl = fig3d.add_subplot(2, 4, 7)
    ax2dbl.set_title('bl 2d')
    show2Dpose(tmp_outputs_2d_bl, ax2dbl)
    # Column 4: rigid-transformed pose.
    ax3drt = fig3d.add_subplot(2, 4, 4, projection='3d')
    ax3drt.set_title('modify 3d - rt')
    show3Dpose(tmp_outputs_3d_rt, ax3drt, gt=False)
    ax2d = fig3d.add_subplot(2, 4, 8)
    ax2d.set_title('rt 2d')
    show2Dpose(tmp_outputs_2d_rt, ax2d)
    os.makedirs('{}/poseaug_viz'.format(args.checkpoint), exist_ok=True)
    image_name = '{}/poseaug_viz/epoch_{:0>4d}_iter_{:0>4d}.png'.format(args.checkpoint, epoch, iter)
    plt.savefig(image_name)
    plt.close('all')
def optimizer_factory_two_groups(name: str, initial_lr1: float, initial_lr2: float, model1: Module, model2: Module, batch_size: Optional[int]=None, num_steps_per_epoch: Optional[int]=None, exclude_wd_norm: bool=False, exclude_wd_bias: bool=False, scaler: Optional[str]=None, params: DictConfig={}, scheduler: Optional[DictConfig]=None) -> Tuple[(Optimizer, Optional[_LRScheduler])]:
    """Build a single optimizer with two learning-rate groups (one per model),
    optionally excluding norm-layer and bias parameters from weight decay,
    plus an optional hydra-instantiated LR scheduler.

    NOTE(review): `params: DictConfig={}` is a mutable default argument; it
    is only unpacked into the optimizer here, but confirm it is never mutated.
    """
    optimizer_class = _OPTIMIZERS[name]
    # Scale the base LRs for the effective batch size (rule chosen by `scaler`).
    lr1 = scale_learning_rate(initial_lr1, scaler, batch_size)
    lr2 = scale_learning_rate(initial_lr2, scaler, batch_size)
    # Module types / parameter-name fragments to exclude from weight decay.
    modules_without_decay = []
    keys_without_decay = []
    if exclude_wd_norm:
        modules_without_decay.extend(_NORM_LAYERS)
    if exclude_wd_bias:
        keys_without_decay.append('bias')
    (no_wd_parameters1, wd_parameters1) = retrieve_model_params(model1, modules_without_decay, keys_without_decay)
    (no_wd_parameters2, wd_parameters2) = retrieve_model_params(model2, modules_without_decay, keys_without_decay)
    # Keep only parameters that are actually learnable.
    wd_parameters1 = filter_learnable_params(wd_parameters1, model1)
    no_wd_parameters1 = filter_learnable_params(no_wd_parameters1, model1)
    wd_parameters2 = filter_learnable_params(wd_parameters2, model2)
    no_wd_parameters2 = filter_learnable_params(no_wd_parameters2, model2)
    # Human-readable parameter names, only used for the log messages below.
    # (NOTE(review): the comprehension variable `name` shadows the `name`
    # argument, which has already been consumed above.)
    named_wd_parameters1 = [name for (name, param) in model1.named_parameters() if any([(param is wd_param) for wd_param in wd_parameters1])]
    named_no_wd_parameters1 = [name for (name, param) in model1.named_parameters() if any([(param is no_wd_param) for no_wd_param in no_wd_parameters1])]
    named_wd_parameters2 = [name for (name, param) in model2.named_parameters() if any([(param is wd_param) for wd_param in wd_parameters2])]
    named_no_wd_parameters2 = [name for (name, param) in model2.named_parameters() if any([(param is no_wd_param) for no_wd_param in no_wd_parameters2])]
    # Two decayed groups with their own LRs; the no-decay groups are appended
    # only when non-empty.
    # NOTE(review): the no-decay groups carry no 'lr' key, so they fall back
    # to the optimizer's default learning rate — confirm that is intended.
    list_optim = [{'params': wd_parameters1, 'lr': lr1}, {'params': wd_parameters2, 'lr': lr2}]
    if (no_wd_parameters1 != []):
        list_optim.append({'params': no_wd_parameters1, 'weight_decay': 0.0})
    if (no_wd_parameters2 != []):
        list_optim.append({'params': no_wd_parameters2, 'weight_decay': 0.0})
    optimizer = optimizer_class(list_optim, **params)
    rank_zero_info(f'''{model1._get_name()} optimizer's: With weight decay: num parameters={len(wd_parameters1)}, name parameters: {named_wd_parameters1} Without weight decay: num parameters={len(no_wd_parameters1)}, name parameters:{named_no_wd_parameters1}''')
    rank_zero_info(f'''{model2._get_name()} optimizer's: With weight decay: num parameters={len(wd_parameters2)}, name parameters: {named_wd_parameters2} Without weight decay: num parameters={len(no_wd_parameters2)}, name parameters:{named_no_wd_parameters2}''')
    if (scheduler is not None):
        scheduler = hydra.utils.instantiate(scheduler, num_steps_per_epoch=num_steps_per_epoch, optimizer=optimizer, scaler=scaler, batch_size=batch_size)
    return (optimizer, scheduler)
def initNormal(mean, std, name, shape):
    """Initialise one parameter tensor according to its MXNet naming suffix.

    Weights get N(mean, std); biases, betas and moving means get zeros;
    gammas and moving variances get ones. Raises ValueError for any other
    suffix.
    """
    suffix_to_factory = (
        ('_weight', lambda: mx.nd.normal(mean, std, shape)),
        ('_bias', lambda: mx.nd.zeros(shape)),
        ('_gamma', lambda: mx.nd.ones(shape)),
        ('_beta', lambda: mx.nd.zeros(shape)),
        ('_moving_mean', lambda: mx.nd.zeros(shape)),
        ('_moving_var', lambda: mx.nd.ones(shape)),
    )
    for suffix, factory in suffix_to_factory:
        if name.endswith(suffix):
            return factory()
    raise ValueError('Unknown name type for `{}`'.format(name))
def wide_resnet50_2(pth_path, pretrained=False, **kwargs):
    """Wide ResNet-50-2: same depth as ResNet-50 but with the bottleneck
    inner width doubled (width_per_group = 64 * 2 = 128)."""
    layers = [3, 4, 6, 3]
    kwargs['width_per_group'] = 128  # 64 * 2 -> the "wide" variant
    return _resnet('wide_resnet50_2', Bottleneck, layers, pretrained, pth_path, **kwargs)
def inference_detector(model, img):
    """Run a detector on a single image (path or loaded array) and return the
    raw model result; includes a CPU fallback for RoI ops.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # Reuse the configured test pipeline, but load directly from `img`.
    test_pipeline = ([LoadImage()] + cfg.data.test.pipeline[1:])
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # Move the collated batch to the model's GPU.
        data = scatter(data, [device])[0]
    else:
        # CPU inference: unaligned RoI ops must fall back to torchvision.
        for m in model.modules():
            if isinstance(m, (RoIPool, RoIAlign)):
                if (not m.aligned):
                    m.use_torchvision = True
                    warnings.warn('We set use_torchvision=True in CPU mode.')
        # Unwrap DataContainer metadata for the CPU path.
        data['img_metas'] = data['img_metas'][0].data
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
class AverageEpochMeter(object):
    """Accumulates a weighted running sum over an epoch; compute() returns
    (and caches) the mean, and str() formats it using `fmt`."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear the accumulated sum, count and cached average."""
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Add value `val` observed `n` times."""
        self.sum += val * n
        self.count += n

    def compute(self):
        """Return the weighted mean of all updates, caching it in self.avg."""
        self.avg = self.sum / self.count
        return self.avg

    def __str__(self):
        template = '{name}: {avg' + self.fmt + '}'
        return template.format(**self.__dict__)
def katsura6():
    """Return the seven polynomial strings of the katsura-6 benchmark system
    in solver input format (';'-terminated, variables x1..x7)."""
    return [
        '1*x1+2*x2+2*x3+2*x4+2*x5+2*x6+2*x7-1;',
        '2*x4*x3+2*x5*x2+2*x6*x1+2*x7*x2-1*x6;',
        '1*x3^2+2*x4*x2+2*x5*x1+2*x6*x2+2*x7*x3-1*x5;',
        '2*x3*x2+2*x4*x1+2*x5*x2+2*x6*x3+2*x7*x4-1*x4;',
        '1*x2^2+2*x3*x1+2*x4*x2+2*x5*x3+2*x6*x4+2*x7*x5-1*x3;',
        '2*x2*x1+2*x3*x2+2*x4*x3+2*x5*x4+2*x6*x5+2*x7*x6-1*x2;',
        '1*x1^2+2*x2^2+2*x3^2+2*x4^2+2*x5^2+2*x6^2+2*x7^2-1*x1;',
    ]
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build a Single-Path NAS style EfficientNet-family model.

    The architecture is encoded as block-definition strings, one inner list
    per network stage, decoded by decode_arch_def.
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
        ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, channel_multiplier=channel_multiplier, norm_kwargs=resolve_bn_args(kwargs), **kwargs)
    return _create_effnet(variant, pretrained, **model_kwargs)
class RandomRotation():
    """Randomly rotate 3-D point coordinates.

    Applies one rotation of up to `max_theta` degrees about `axis` (a fresh
    random axis when None) and, when `max_theta2` is set, a second rotation
    of up to `max_theta2` degrees about another random axis.
    """

    def __init__(self, axis=None, max_theta=180, max_theta2=None):
        self.axis = axis
        self.max_theta = max_theta
        self.max_theta2 = max_theta2

    def _M(self, axis, theta):
        # Rotation matrix about `axis` by `theta` radians, via the matrix
        # exponential of the cross-product (skew-symmetric) matrix.
        return expm(np.cross(np.eye(3), ((axis / norm(axis)) * theta))).astype(np.float32)

    def __call__(self, coords):
        if (self.axis is not None):
            axis = self.axis
        else:
            axis = (np.random.rand(3) - 0.5)
        # Random angle uniform in [-max_theta, max_theta) degrees, in radians.
        R = self._M(axis, ((((np.pi * self.max_theta) / 180.0) * 2.0) * (np.random.rand(1) - 0.5)))
        if (self.max_theta2 is None):
            # Fix: the matrix products lost their '@' operators in the
            # corrupted source ("coords R").
            coords = coords @ R
        else:
            R_n = self._M((np.random.rand(3) - 0.5), ((((np.pi * self.max_theta2) / 180.0) * 2.0) * (np.random.rand(1) - 0.5)))
            coords = (coords @ R) @ R_n
        return coords
class DatasetGen(object):
    """Task generator for continual learning on miniImageNet: partitions the
    100 classes into `ntasks` random disjoint subsets and builds
    train/valid/test (and fisher) dataloaders per task on demand."""

    def __init__(self, args):
        super(DatasetGen, self).__init__()
        self.seed = args.seed
        self.sbatch = args.sbatch
        self.pc_valid = args.pc_valid
        self.root = args.data_dir
        self.num_tasks = args.ntasks
        self.num_classes = 100
        # (channels, height, width) of the input images.
        self.inputsize = [3, 84, 84]
        # ImageNet normalisation statistics.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        self.train_transformation = transforms.Compose([transforms.Resize((self.inputsize[1], self.inputsize[2])), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        self.test_transformation = transforms.Compose([transforms.Resize((self.inputsize[1], self.inputsize[2])), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        # (task id, classes-per-task) pairs.
        self.taskcla = [[t, int((self.num_classes / self.num_tasks))] for t in range(self.num_tasks)]
        self.indices = {}
        self.dataloaders = {}
        self.idx = {}
        self.num_workers = args.workers
        self.pin_memory = True
        # Deterministic random class-to-task assignment.
        np.random.seed(self.seed)
        task_ids = np.split(np.random.permutation(self.num_classes), self.num_tasks)
        self.task_ids = [list(arr) for arr in task_ids]
        self.train_set = {}
        self.train_split = {}
        self.test_set = {}

    def get(self, task_id):
        """Build (and cache) the dataloaders for `task_id`; returns the full
        dataloader cache dict."""
        self.dataloaders[task_id] = {}
        sys.stdout.flush()
        self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], train=True, transform=self.train_transformation)
        self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], train=False, transform=self.test_transformation)
        # Hold out a pc_valid fraction of the training data for validation.
        split = int(np.floor((self.pc_valid * len(self.train_set[task_id]))))
        (train_split, valid_split) = torch.utils.data.random_split(self.train_set[task_id], [(len(self.train_set[task_id]) - split), split])
        self.train_split[task_id] = train_split
        train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.sbatch, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=True)
        # Small fixed-batch loader used for Fisher-information estimation.
        fisher_loader = torch.utils.data.DataLoader(train_split, batch_size=10, num_workers=1, pin_memory=self.pin_memory, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=self.sbatch, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=True)
        test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.sbatch, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=True)
        self.dataloaders[task_id]['train'] = train_loader
        self.dataloaders[task_id]['fisher'] = fisher_loader
        self.dataloaders[task_id]['valid'] = valid_loader
        self.dataloaders[task_id]['test'] = test_loader
        self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id, self.task_ids[task_id])
        print('Task ID: ', task_id)
        print('Training set size: {} images of {}x{}'.format(len(train_loader.dataset), self.inputsize[1], self.inputsize[1]))
        print('Validation set size: {} images of {}x{}'.format(len(valid_loader.dataset), self.inputsize[1], self.inputsize[1]))
        print('Train+Val set size: {} images of {}x{}'.format((len(valid_loader.dataset) + len(train_loader.dataset)), self.inputsize[1], self.inputsize[1]))
        print('Test set size: {} images of {}x{}'.format(len(test_loader.dataset), self.inputsize[1], self.inputsize[1]))
        return self.dataloaders
class FeedfreeInput(InputData): def get_input_tensors(self): return self._get_input_tensors() def _get_input_tensors(self):
def read_src_and_trg_files(src_file, trg_file, is_train, remove_title_eos=True, max_src_len=400, max_trg_num=14):
    """Read a parallel source/target corpus into tokenized pairs.

    Each source line is ``title <eos> context`` (or just ``context``); each
    aligned target line is a ``;``-separated list of keyphrases.

    Args:
        src_file: path to the source text file, one record per line.
        trg_file: path to the target file, aligned line-by-line with src_file.
        is_train: when True, blank source lines are skipped and over-long
            records are filtered out.
        remove_title_eos: when True, drop the '<eos>' separator between title
            and context from the source token list; otherwise keep it.
        max_src_len: training-time cap on source token count
            (previously hard-coded to 400).
        max_trg_num: training-time cap on the number of target keyphrases
            (previously hard-coded to 14).

    Returns:
        list of (src_word_list, trg_word_lists) tuples, where trg_word_lists
        is a list of token lists (one per keyphrase).

    Raises:
        ValueError: if a source line contains more than one '<eos>'.
    """
    tokenized_train_src = []
    tokenized_train_trg = []
    filtered_cnt = 0
    # BUGFIX: open both files via context managers so the handles are closed
    # even on error (the original `zip(open(...), open(...))` leaked them).
    with open(src_file, 'r') as src_f, open(trg_file, 'r') as trg_f:
        for (line_idx, (src_line, trg_line)) in enumerate(zip(src_f, trg_f)):
            if ((len(src_line.strip()) == 0) and is_train):
                continue  # skip blank source lines during training
            title_and_context = src_line.strip().split('<eos>')
            if (len(title_and_context) == 1):
                # No title present: the whole line is the context.
                [context] = title_and_context
                src_word_list = context.strip().split(' ')
            elif (len(title_and_context) == 2):
                [title, context] = title_and_context
                title_word_list = title.strip().split(' ')
                context_word_list = context.strip().split(' ')
                if remove_title_eos:
                    src_word_list = (title_word_list + context_word_list)
                else:
                    src_word_list = ((title_word_list + ['<eos>']) + context_word_list)
            else:
                raise ValueError('The source text contains more than one title')
            trg_list = trg_line.strip().split(';')
            trg_word_list = [trg.split(' ') for trg in trg_list]
            if is_train:
                # Filter over-long records only when building the training set.
                if ((len(src_word_list) > max_src_len) or (len(trg_word_list) > max_trg_num)):
                    filtered_cnt += 1
                    continue
            tokenized_train_src.append(src_word_list)
            tokenized_train_trg.append(trg_word_list)
    assert (len(tokenized_train_src) == len(tokenized_train_trg)), 'the number of records in source and target are not the same'
    logging.info(('%d rows filtered' % filtered_cnt))
    tokenized_train_pairs = list(zip(tokenized_train_src, tokenized_train_trg))
    return tokenized_train_pairs
class PEARL(MetaRLAlgorithm):
    """PEARL meta-RL algorithm (Probabilistic Embeddings for Actor-critic RL).

    Off-policy meta-RL that augments a SAC-style actor/critic with a
    probabilistic context encoder: transitions from the current task are
    encoded into a latent ``z`` on which the policy and the value functions
    are conditioned.  Separate replay buffers per task are kept for RL
    batches and for context (encoder) batches.
    """

    def __init__(self, env, inner_policy, qf, vf, num_train_tasks, num_test_tasks, latent_dim, encoder_hidden_sizes, test_env_sampler, policy_class=ContextConditionedPolicy, encoder_class=MLPEncoder, policy_lr=0.0003, qf_lr=0.0003, vf_lr=0.0003, context_lr=0.0003, policy_mean_reg_coeff=0.001, policy_std_reg_coeff=0.001, policy_pre_activation_coeff=0.0, soft_target_tau=0.005, kl_lambda=0.1, optimizer_class=torch.optim.Adam, use_information_bottleneck=True, use_next_obs_in_context=False, meta_batch_size=64, num_steps_per_epoch=1000, num_initial_steps=100, num_tasks_sample=100, num_steps_prior=100, num_steps_posterior=0, num_extra_rl_steps_posterior=100, batch_size=1024, embedding_batch_size=1024, embedding_mini_batch_size=1024, max_path_length=1000, discount=0.99, replay_buffer_size=1000000, reward_scale=1, update_post_train=1):
        self._env = env
        self._qf1 = qf
        # Twin Q-networks (clipped double-Q): the second is a deep copy of the first.
        self._qf2 = copy.deepcopy(qf)
        self._vf = vf
        self._num_train_tasks = num_train_tasks
        self._num_test_tasks = num_test_tasks
        self._latent_dim = latent_dim
        self._policy_mean_reg_coeff = policy_mean_reg_coeff
        self._policy_std_reg_coeff = policy_std_reg_coeff
        self._policy_pre_activation_coeff = policy_pre_activation_coeff
        self._soft_target_tau = soft_target_tau      # Polyak rate for the target V-network
        self._kl_lambda = kl_lambda                  # weight of the KL(information-bottleneck) loss
        self._use_information_bottleneck = use_information_bottleneck
        self._use_next_obs_in_context = use_next_obs_in_context
        self._meta_batch_size = meta_batch_size
        self._num_steps_per_epoch = num_steps_per_epoch
        self._num_initial_steps = num_initial_steps
        self._num_tasks_sample = num_tasks_sample
        self._num_steps_prior = num_steps_prior
        self._num_steps_posterior = num_steps_posterior
        self._num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
        self._batch_size = batch_size
        self._embedding_batch_size = embedding_batch_size
        self._embedding_mini_batch_size = embedding_mini_batch_size
        self.max_path_length = max_path_length
        self._discount = discount
        self._replay_buffer_size = replay_buffer_size
        self._reward_scale = reward_scale
        self._update_post_train = update_post_train
        self._task_idx = None
        self._is_resuming = False
        worker_args = dict(deterministic=True, accum_context=True)
        self._evaluator = MetaEvaluator(test_task_sampler=test_env_sampler, max_path_length=max_path_length, worker_class=PEARLWorker, worker_args=worker_args, n_test_tasks=num_test_tasks)
        # Build the context encoder from the spec of the first training env.
        encoder_spec = self.get_env_spec(env[0](), latent_dim, 'encoder')
        encoder_in_dim = int(np.prod(encoder_spec.input_space.shape))
        encoder_out_dim = int(np.prod(encoder_spec.output_space.shape))
        context_encoder = encoder_class(input_dim=encoder_in_dim, output_dim=encoder_out_dim, hidden_sizes=encoder_hidden_sizes)
        self._policy = policy_class(latent_dim=latent_dim, context_encoder=context_encoder, policy=inner_policy, use_information_bottleneck=use_information_bottleneck, use_next_obs=use_next_obs_in_context)
        # One RL buffer and one context buffer per training task.
        self._replay_buffers = {i: PathBuffer(replay_buffer_size) for i in range(num_train_tasks)}
        self._context_replay_buffers = {i: PathBuffer(replay_buffer_size) for i in range(num_train_tasks)}
        self.target_vf = copy.deepcopy(self._vf)
        self.vf_criterion = torch.nn.MSELoss()
        # networks[0] is the context encoder, networks[1] the inner policy
        # (see ContextConditionedPolicy) -- they get separate optimizers.
        self._policy_optimizer = optimizer_class(self._policy.networks[1].parameters(), lr=policy_lr)
        self.qf1_optimizer = optimizer_class(self._qf1.parameters(), lr=qf_lr)
        self.qf2_optimizer = optimizer_class(self._qf2.parameters(), lr=qf_lr)
        self.vf_optimizer = optimizer_class(self._vf.parameters(), lr=vf_lr)
        self.context_optimizer = optimizer_class(self._policy.networks[0].parameters(), lr=context_lr)

    def __getstate__(self):
        """Pickle support: drop the (large, unpicklable) replay buffers."""
        data = self.__dict__.copy()
        del data['_replay_buffers']
        del data['_context_replay_buffers']
        return data

    def __setstate__(self, state):
        """Unpickle support: recreate empty replay buffers and flag a resume."""
        self.__dict__.update(state)
        self._replay_buffers = {i: PathBuffer(self._replay_buffer_size) for i in range(self._num_train_tasks)}
        self._context_replay_buffers = {i: PathBuffer(self._replay_buffer_size) for i in range(self._num_train_tasks)}
        self._is_resuming = True

    def train(self, runner):
        """Main training loop: collect samples per task, then optimise, then evaluate."""
        for _ in runner.step_epochs():
            epoch = (runner.step_itr / self._num_steps_per_epoch)
            # On the very first epoch (or after resuming with empty buffers),
            # warm up every task buffer with prior-sampled transitions.
            if ((epoch == 0) or self._is_resuming):
                for idx in range(self._num_train_tasks):
                    self._task_idx = idx
                    self._obtain_samples(runner, epoch, self._num_initial_steps, np.inf)
                self._is_resuming = False
            # Refresh a random subset of task buffers each epoch.
            for _ in range(self._num_tasks_sample):
                idx = np.random.randint(self._num_train_tasks)
                self._task_idx = idx
                self._context_replay_buffers[idx].clear()
                if (self._num_steps_prior > 0):
                    # Samples collected with z drawn from the prior.
                    self._obtain_samples(runner, epoch, self._num_steps_prior, np.inf)
                if (self._num_steps_posterior > 0):
                    # Samples collected with z inferred from context (posterior).
                    self._obtain_samples(runner, epoch, self._num_steps_posterior, self._update_post_train)
                if (self._num_extra_rl_steps_posterior > 0):
                    # Extra RL-only samples: not added to the encoder buffer.
                    self._obtain_samples(runner, epoch, self._num_extra_rl_steps_posterior, self._update_post_train, add_to_enc_buffer=False)
            logger.log('Training...')
            self._train_once()
            runner.step_itr += 1
            logger.log('Evaluating...')
            self._policy.reset_belief()
            self._evaluator.evaluate(self)

    def _train_once(self):
        """Run one epoch of gradient steps over random meta-batches of tasks."""
        for _ in range(self._num_steps_per_epoch):
            indices = np.random.choice(range(self._num_train_tasks), self._meta_batch_size)
            self._optimize_policy(indices)

    def _optimize_policy(self, indices):
        """One joint update of encoder, Q-functions, V-function and policy.

        Args:
            indices: task indices forming this meta-batch.
        """
        num_tasks = len(indices)
        context = self._sample_context(indices)
        # Clear z so it is re-inferred from the freshly sampled context.
        self._policy.reset_belief(num_tasks=num_tasks)
        (obs, actions, rewards, next_obs, terms) = self._sample_data(indices)
        (policy_outputs, task_z) = self._policy(obs, context)
        (new_actions, policy_mean, policy_log_std, log_pi) = policy_outputs[:4]
        # Flatten the (task, batch) axes for the critics.
        (t, b, _) = obs.size()
        obs = obs.view((t * b), (- 1))
        actions = actions.view((t * b), (- 1))
        next_obs = next_obs.view((t * b), (- 1))
        q1_pred = self._qf1(torch.cat([obs, actions], dim=1), task_z)
        q2_pred = self._qf2(torch.cat([obs, actions], dim=1), task_z)
        v_pred = self._vf(obs, task_z.detach())
        with torch.no_grad():
            target_v_values = self.target_vf(next_obs, task_z)
        # The encoder is trained by both the KL term and the Q-function loss
        # (gradients flow into task_z), hence the shared zero_grad here.
        self.context_optimizer.zero_grad()
        if self._use_information_bottleneck:
            kl_div = self._policy.compute_kl_div()
            kl_loss = (self._kl_lambda * kl_div)
            kl_loss.backward(retain_graph=True)
        self.qf1_optimizer.zero_grad()
        self.qf2_optimizer.zero_grad()
        rewards_flat = rewards.view((self._batch_size * num_tasks), (- 1))
        rewards_flat = (rewards_flat * self._reward_scale)
        terms_flat = terms.view((self._batch_size * num_tasks), (- 1))
        # Bellman target: r + gamma * (1 - done) * V_target(s').
        q_target = (rewards_flat + (((1.0 - terms_flat) * self._discount) * target_v_values))
        qf_loss = (torch.mean(((q1_pred - q_target) ** 2)) + torch.mean(((q2_pred - q_target) ** 2)))
        qf_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.step()
        self.context_optimizer.step()
        # Value / policy updates use a detached z (no encoder gradients).
        q1 = self._qf1(torch.cat([obs, new_actions], dim=1), task_z.detach())
        q2 = self._qf2(torch.cat([obs, new_actions], dim=1), task_z.detach())
        min_q = torch.min(q1, q2)
        # SAC value target: V(s) = E[min Q(s,a) - log pi(a|s)].
        v_target = (min_q - log_pi)
        vf_loss = self.vf_criterion(v_pred, v_target.detach())
        self.vf_optimizer.zero_grad()
        vf_loss.backward()
        self.vf_optimizer.step()
        self._update_target_network()
        # Policy loss: log pi - min Q, plus regularisers on mean/std/pre-tanh.
        log_policy_target = min_q
        policy_loss = (log_pi - log_policy_target).mean()
        mean_reg_loss = (self._policy_mean_reg_coeff * (policy_mean ** 2).mean())
        std_reg_loss = (self._policy_std_reg_coeff * (policy_log_std ** 2).mean())
        pre_tanh_value = policy_outputs[(- 1)]
        pre_activation_reg_loss = (self._policy_pre_activation_coeff * (pre_tanh_value ** 2).sum(dim=1).mean())
        policy_reg_loss = ((mean_reg_loss + std_reg_loss) + pre_activation_reg_loss)
        policy_loss = (policy_loss + policy_reg_loss)
        self._policy_optimizer.zero_grad()
        policy_loss.backward()
        self._policy_optimizer.step()

    def _obtain_samples(self, runner, itr, num_samples, update_posterior_rate, add_to_enc_buffer=True):
        """Collect ``num_samples`` transitions for the current task.

        When ``update_posterior_rate`` is finite, the posterior over z is
        re-inferred from the context buffer after every
        ``update_posterior_rate * max_path_length`` samples.
        """
        self._policy.reset_belief()
        total_samples = 0
        if (update_posterior_rate != np.inf):
            num_samples_per_batch = (update_posterior_rate * self.max_path_length)
        else:
            num_samples_per_batch = num_samples
        while (total_samples < num_samples):
            paths = runner.obtain_samples(itr, num_samples_per_batch, self._policy, self._env[self._task_idx])
            total_samples += sum([len(path['rewards']) for path in paths])
            for path in paths:
                p = {'observations': path['observations'], 'actions': path['actions'], 'rewards': path['rewards'].reshape((- 1), 1), 'next_observations': path['next_observations'], 'dones': path['dones'].reshape((- 1), 1)}
                self._replay_buffers[self._task_idx].add_path(p)
                if add_to_enc_buffer:
                    self._context_replay_buffers[self._task_idx].add_path(p)
            if (update_posterior_rate != np.inf):
                context = self._sample_context(self._task_idx)
                self._policy.infer_posterior(context)

    def _sample_data(self, indices):
        """Sample one RL batch per task and stack along a leading task axis.

        Returns:
            (obs, actions, rewards, next_obs, dones) float tensors, each with
            a leading axis of len(indices).
        """
        initialized = False
        for idx in indices:
            batch = self._replay_buffers[idx].sample_transitions(self._batch_size)
            if (not initialized):
                o = batch['observations'][np.newaxis]
                a = batch['actions'][np.newaxis]
                r = batch['rewards'][np.newaxis]
                no = batch['next_observations'][np.newaxis]
                d = batch['dones'][np.newaxis]
                initialized = True
            else:
                o = np.vstack((o, batch['observations'][np.newaxis]))
                a = np.vstack((a, batch['actions'][np.newaxis]))
                r = np.vstack((r, batch['rewards'][np.newaxis]))
                no = np.vstack((no, batch['next_observations'][np.newaxis]))
                d = np.vstack((d, batch['dones'][np.newaxis]))
        o = torch.as_tensor(o, device=global_device()).float()
        a = torch.as_tensor(a, device=global_device()).float()
        r = torch.as_tensor(r, device=global_device()).float()
        no = torch.as_tensor(no, device=global_device()).float()
        d = torch.as_tensor(d, device=global_device()).float()
        return (o, a, r, no, d)

    def _sample_context(self, indices):
        """Sample encoder context (o, a, r[, o']) batches for the given task(s)."""
        if (not hasattr(indices, '__iter__')):
            indices = [indices]
        initialized = False
        for idx in indices:
            batch = self._context_replay_buffers[idx].sample_transitions(self._embedding_batch_size)
            o = batch['observations']
            a = batch['actions']
            r = batch['rewards']
            context = np.hstack((np.hstack((o, a)), r))
            if self._use_next_obs_in_context:
                context = np.hstack((context, batch['next_observations']))
            if (not initialized):
                final_context = context[np.newaxis]
                initialized = True
            else:
                final_context = np.vstack((final_context, context[np.newaxis]))
        final_context = torch.as_tensor(final_context, device=global_device()).float()
        if (len(indices) == 1):
            # NOTE(review): final_context already has a leading task axis from
            # np.newaxis, so this adds a second one for the single-task case --
            # matches upstream garage; confirm downstream consumers expect it.
            final_context = final_context.unsqueeze(0)
        return final_context

    def _update_target_network(self):
        """Polyak-average the V-network parameters into the target V-network."""
        for (target_param, param) in zip(self.target_vf.parameters(), self._vf.parameters()):
            target_param.data.copy_(((target_param.data * (1.0 - self._soft_target_tau)) + (param.data * self._soft_target_tau)))

    # NOTE(review): upstream garage declares `policy` and `networks` as
    # @property -- the decorators appear lost in extraction here.  `to()`
    # below iterates `self.networks` without calling it, which only works
    # when this is a property; confirm against the original file.
    def policy(self):
        """The context-conditioned policy."""
        return self._policy

    def networks(self):
        """All trainable networks (for device placement, checkpointing, ...)."""
        return ((self._policy.networks + [self._policy]) + [self._qf1, self._qf2, self._vf, self.target_vf])

    def get_exploration_policy(self):
        """Policy used to gather exploration trajectories on a new task."""
        return self._policy

    def adapt_policy(self, exploration_policy, exploration_trajectories):
        """Infer the task posterior z from exploration data and return the policy."""
        total_steps = sum(exploration_trajectories.lengths)
        o = exploration_trajectories.observations
        a = exploration_trajectories.actions
        r = exploration_trajectories.rewards.reshape(total_steps, 1)
        ctxt = np.hstack((o, a, r)).reshape(1, total_steps, (- 1))
        context = torch.as_tensor(ctxt, device=global_device()).float()
        self._policy.infer_posterior(context)
        return self._policy

    def to(self, device=None):
        """Move every network to ``device`` (defaults to the global device)."""
        device = (device or global_device())
        for net in self.networks:
            net.to(device)

    # NOTE(review): takes `cls` -- presumably a @classmethod upstream whose
    # decorator was lost in extraction; `cls` is unused so instance calls
    # still work.  Confirm against the original file.
    def augment_env_spec(cls, env_spec, latent_dim):
        """Env spec with the latent z concatenated onto the observation space."""
        obs_dim = int(np.prod(env_spec.observation_space.shape))
        action_dim = int(np.prod(env_spec.action_space.shape))
        aug_obs = akro.Box(low=(- 1), high=1, shape=((obs_dim + latent_dim),), dtype=np.float32)
        aug_act = akro.Box(low=(- 1), high=1, shape=(action_dim,), dtype=np.float32)
        return EnvSpec(aug_obs, aug_act)

    # NOTE(review): same stripped-@classmethod situation as augment_env_spec.
    def get_env_spec(cls, env_spec, latent_dim, module):
        """Build the in/out spec for the 'encoder' or 'vf' module.

        Encoder input is (obs, action, reward); its output is 2*latent_dim
        (mean and std of z).  The vf maps obs to latent_dim.
        """
        obs_dim = int(np.prod(env_spec.observation_space.shape))
        action_dim = int(np.prod(env_spec.action_space.shape))
        if (module == 'encoder'):
            in_dim = ((obs_dim + action_dim) + 1)
            out_dim = (latent_dim * 2)
        elif (module == 'vf'):
            in_dim = obs_dim
            out_dim = latent_dim
        in_space = akro.Box(low=(- 1), high=1, shape=(in_dim,), dtype=np.float32)
        out_space = akro.Box(low=(- 1), high=1, shape=(out_dim,), dtype=np.float32)
        if (module == 'encoder'):
            spec = InOutSpec(in_space, out_space)
        elif (module == 'vf'):
            spec = EnvSpec(in_space, out_space)
        return spec
class DummyCVDataset_dict(DummyCVDataset):
    """Dummy CV dataset whose samples are dicts of the form ``{'input': ndarray}``."""

    def __init__(self, shape):
        super().__init__(shape)
        self.process()

    def process(self):
        """Append one random float32 array per entry of ``self.shape`` to ``self.dataset``."""
        n_items = len(self.shape)
        for i in range(n_items):
            sample = np.random.uniform(low=self.low[i], high=self.high[i], size=self.shape[i]).astype(np.float32)
            self.dataset.append({'input': sample})
class RegNet(AnyNet):
    """RegNet backbone (Radosavovic et al., "Designing Network Design Spaces").

    Per-stage widths and depths are generated from the (w_a, w_0, w_m, depth)
    parameterisation, adjusted for group/bottleneck compatibility, and handed
    to the generic AnyNet constructor.
    """

    def __init__(self, *, stem_class, stem_width, block_class, depth, w_a, w_0, w_m, group_width, stride=2, bottleneck_ratio=1.0, se_ratio=0.0, activation_class=None, freeze_at=0, norm='BN', out_features=None):
        # Per-stage widths (ws) and depths (ds) from the RegNet linear rule.
        (ws, ds) = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2]
        ss = [stride for _ in ws]            # same stride for every stage
        bs = [bottleneck_ratio for _ in ws]  # same bottleneck ratio for every stage
        gs = [group_width for _ in ws]       # same group width for every stage
        # Round widths/groups so each stage width is divisible by its group count.
        (ws, bs, gs) = adjust_block_compatibility(ws, bs, gs)

        def default_activation_class():
            # Fallback nonlinearity when the caller supplies none.
            return nn.ReLU(inplace=True)
        super().__init__(stem_class=stem_class, stem_width=stem_width, block_class=block_class, depths=ds, widths=ws, strides=ss, group_widths=gs, bottleneck_ratios=bs, se_ratio=se_ratio, activation_class=(default_activation_class if (activation_class is None) else activation_class), freeze_at=freeze_at, norm=norm, out_features=out_features)
class GptneoxState():
    """Snapshot of a GPT-NeoX inference session.

    Bundles the already-evaluated tokens, their logits, and the opaque
    C-level model state so a session can be saved and restored.
    """

    def __init__(self, eval_tokens: Deque[gptneox_cpp.gptneox_token], eval_logits: Deque[List[float]], gptneox_state, gptneox_state_size: int):
        self.eval_tokens = eval_tokens      # tokens already evaluated by the model
        # Per-step logits -- presumably parallel to eval_tokens; confirm at call site.
        self.eval_logits = eval_logits
        self.gptneox_state = gptneox_state  # opaque state blob from gptneox_cpp
        self.gptneox_state_size = gptneox_state_size  # size of gptneox_state (bytes)
class Darknet(nn.Module):
    """PyTorch implementation of a Darknet (YOLO) network defined by a .cfg file.

    The .cfg is parsed into ``self.blocks``; ``create_network`` turns each
    block into an ``nn.Module`` stored in order in ``self.models``.  Weights
    can be loaded from / saved to the binary Darknet ``.weights`` format.
    """

    def getLossLayers(self):
        """Return the detection (region/yolo) layers, which carry the losses."""
        loss_layers = []
        for m in self.models:
            if (isinstance(m, RegionLayer) or isinstance(m, YoloLayer)):
                loss_layers.append(m)
        return loss_layers

    def __init__(self, cfgfile, use_cuda=True):
        super(Darknet, self).__init__()
        self.use_cuda = use_cuda
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks)
        self.loss_layers = self.getLossLayers()
        if (len(self.loss_layers) > 0):
            # Mirror the anchor/class configuration of the last detection layer.
            last = (len(self.loss_layers) - 1)
            self.anchors = self.loss_layers[last].anchors
            self.num_anchors = self.loss_layers[last].num_anchors
            self.anchor_step = self.loss_layers[last].anchor_step
            self.num_classes = self.loss_layers[last].num_classes
        # Darknet weight-file header (version) and images-seen counter.
        self.header = torch.IntTensor([0, 1, 0, 0])
        self.seen = 0

    def forward(self, x):
        """Run the network.

        Returns the final tensor when there are no detection layers, otherwise
        a dict mapping detection-layer ordinal to that layer's mask boxes.
        """
        ind = (- 2)  # block 0 is [net]; the first real layer gets index 0
        self.loss_layers = None
        outputs = dict()   # per-layer activations, needed by route/shortcut
        out_boxes = dict()
        outno = 0
        for block in self.blocks:
            ind = (ind + 1)
            if (block['type'] == 'net'):
                continue
            elif (block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']):
                x = self.models[ind](x)
                outputs[ind] = x
            elif (block['type'] == 'route'):
                layers = block['layers'].split(',')
                # Negative layer indices are relative to the current layer.
                layers = [(int(i) if (int(i) > 0) else (int(i) + ind)) for i in layers]
                if (len(layers) == 1):
                    x = outputs[layers[0]]
                elif (len(layers) == 2):
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    x = torch.cat((x1, x2), 1)
                outputs[ind] = x
            elif (block['type'] == 'shortcut'):
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = (from_layer if (from_layer > 0) else (from_layer + ind))
                x1 = outputs[from_layer]
                x2 = outputs[(ind - 1)]
                x = (x1 + x2)
                if (activation == 'leaky'):
                    x = F.leaky_relu(x, 0.1, inplace=True)
                elif (activation == 'relu'):
                    x = F.relu(x, inplace=True)
                outputs[ind] = x
            elif (block['type'] in ['region', 'yolo']):
                # Detection layers do not transform x; they emit candidate boxes.
                boxes = self.models[ind].get_mask_boxes(x)
                out_boxes[outno] = boxes
                outno += 1
                outputs[ind] = None
            elif (block['type'] == 'cost'):
                continue
            else:
                print(('unknown type %s' % block['type']))
        return (x if (outno == 0) else out_boxes)

    def print_network(self):
        """Pretty-print the parsed .cfg structure."""
        print_cfg(self.blocks)

    def create_network(self, blocks):
        """Instantiate one nn.Module per cfg block, tracking filters and strides.

        Args:
            blocks: list of dicts produced by ``parse_cfg``.

        Returns:
            nn.ModuleList aligned with ``blocks`` (minus the leading [net]).
        """
        models = nn.ModuleList()
        prev_filters = 3
        out_filters = []
        prev_stride = 1
        out_strides = []
        conv_id = 0
        ind = (- 2)
        for block in blocks:
            ind += 1
            if (block['type'] == 'net'):
                prev_filters = int(block['channels'])
                self.width = int(block['width'])
                self.height = int(block['height'])
                continue
            elif (block['type'] == 'convolutional'):
                conv_id = (conv_id + 1)
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (((kernel_size - 1) // 2) if is_pad else 0)
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    # Conv bias is redundant when followed by BatchNorm.
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
                    model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
                else:
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
                if (activation == 'leaky'):
                    model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
                elif (activation == 'relu'):
                    model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = (stride * prev_stride)
                out_strides.append(prev_stride)
                models.append(model)
            elif (block['type'] == 'maxpool'):
                pool_size = int(block['size'])
                stride = int(block['stride'])
                if (stride > 1):
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    # Stride-1 maxpool needs special padding to keep the size.
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                prev_stride = (stride * prev_stride)
                out_strides.append(prev_stride)
                models.append(model)
            elif (block['type'] == 'avgpool'):
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                # NOTE(review): no out_strides entry is appended here (as in the
                # original), so the stride bookkeeping drifts after an avgpool
                # block -- confirm no cfg routes across an avgpool.
                models.append(model)
            elif (block['type'] == 'softmax'):
                model = nn.Softmax()
                out_strides.append(prev_stride)
                out_filters.append(prev_filters)
                models.append(model)
            elif (block['type'] == 'cost'):
                if (block['_type'] == 'sse'):
                    model = nn.MSELoss(size_average=True)
                elif (block['_type'] == 'L1'):
                    model = nn.L1Loss(size_average=True)
                elif (block['_type'] == 'smooth'):
                    model = nn.SmoothL1Loss(size_average=True)
                out_filters.append(1)
                out_strides.append(prev_stride)
                models.append(model)
            elif (block['type'] == 'reorg'):
                stride = int(block['stride'])
                # Reorg trades spatial resolution for channels (stride^2 x filters).
                prev_filters = ((stride * stride) * prev_filters)
                out_filters.append(prev_filters)
                prev_stride = (prev_stride * stride)
                out_strides.append(prev_stride)
                models.append(Reorg(stride))
            elif (block['type'] == 'upsample'):
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = (prev_stride / stride)
                out_strides.append(prev_stride)
                models.append(Upsample(stride))
            elif (block['type'] == 'route'):
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [(int(i) if (int(i) > 0) else (int(i) + ind)) for i in layers]
                if (len(layers) == 1):
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif (len(layers) == 2):
                    assert (layers[0] == (ind - 1))
                    prev_filters = (out_filters[layers[0]] + out_filters[layers[1]])
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                # Placeholder module: the actual routing happens in forward().
                models.append(EmptyModule())
            elif (block['type'] == 'shortcut'):
                ind = len(models)
                prev_filters = out_filters[(ind - 1)]
                out_filters.append(prev_filters)
                prev_stride = out_strides[(ind - 1)]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif (block['type'] == 'connected'):
                filters = int(block['output'])
                if (block['activation'] == 'linear'):
                    model = nn.Linear(prev_filters, filters)
                elif (block['activation'] == 'leaky'):
                    model = nn.Sequential(nn.Linear(prev_filters, filters), nn.LeakyReLU(0.1, inplace=True))
                elif (block['activation'] == 'relu'):
                    model = nn.Sequential(nn.Linear(prev_filters, filters), nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif (block['type'] == 'region'):
                region_layer = RegionLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                region_layer.anchors = [float(i) for i in anchors]
                region_layer.num_classes = int(block['classes'])
                region_layer.num_anchors = int(block['num'])
                region_layer.anchor_step = (len(region_layer.anchors) // region_layer.num_anchors)
                region_layer.rescore = int(block['rescore'])
                region_layer.object_scale = float(block['object_scale'])
                region_layer.noobject_scale = float(block['noobject_scale'])
                region_layer.class_scale = float(block['class_scale'])
                region_layer.coord_scale = float(block['coord_scale'])
                region_layer.thresh = float(block['thresh'])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(region_layer)
            elif (block['type'] == 'yolo'):
                yolo_layer = YoloLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = (len(yolo_layer.anchors) // yolo_layer.num_anchors)
                # 'rescore' is optional in yolo blocks; keep the layer default
                # when absent/invalid.  (Narrowed from a bare `except:` that
                # also hid unrelated errors.)
                try:
                    yolo_layer.rescore = int(block['rescore'])
                except (KeyError, ValueError):
                    pass
                yolo_layer.ignore_thresh = float(block['ignore_thresh'])
                yolo_layer.truth_thresh = float(block['truth_thresh'])
                yolo_layer.stride = prev_stride
                yolo_layer.nth_layer = ind
                yolo_layer.net_width = self.width
                yolo_layer.net_height = self.height
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print(('unknown type %s' % block['type']))
        return models

    def load_binfile(self, weightfile):
        """Read a Darknet .weights file; sets header/seen, returns the float payload."""
        # `with` guarantees the handle closes (the original used open/close).
        with open(weightfile, 'rb') as fp:
            version = np.fromfile(fp, count=3, dtype=np.int32)
            version = [int(i) for i in version]
            # From format version 0.2 on, 'seen' is stored as int64.
            if ((((version[0] * 10) + version[1]) >= 2) and (version[0] < 1000) and (version[1] < 1000)):
                seen = np.fromfile(fp, count=1, dtype=np.int64)
            else:
                seen = np.fromfile(fp, count=1, dtype=np.int32)
            self.header = torch.from_numpy(np.concatenate((version, seen), axis=0))
            self.seen = int(seen)
            body = np.fromfile(fp, dtype=np.float32)
        return body

    def load_weights(self, weightfile):
        """Copy weights from a .weights file into the matching modules, in cfg order."""
        buf = self.load_binfile(weightfile)
        start = 0
        ind = (- 2)
        for block in self.blocks:
            if (start >= buf.size):
                break  # the file may contain fewer layers than the cfg
            ind = (ind + 1)
            if (block['type'] == 'net'):
                continue
            elif (block['type'] == 'convolutional'):
                model = self.models[ind]
                batch_normalize = int(block['batch_normalize'])
                if batch_normalize:
                    start = load_conv_bn(buf, start, model[0], model[1])
                else:
                    start = load_conv(buf, start, model[0])
            elif (block['type'] == 'connected'):
                model = self.models[ind]
                # Non-linear activations wrap the Linear in a Sequential.
                if (block['activation'] != 'linear'):
                    start = load_fc(buf, start, model[0])
                else:
                    start = load_fc(buf, start, model)
            elif (block['type'] in ['maxpool', 'reorg', 'upsample', 'route', 'shortcut', 'region', 'yolo', 'avgpool', 'softmax', 'cost']):
                pass  # parameter-free layers: nothing to load
            else:
                print(('unknown type %s' % block['type']))

    def save_weights(self, outfile, cutoff=0):
        """Serialise weights to Darknet .weights format.

        Args:
            outfile: destination path.
            cutoff: last block index (inclusive) to save; <= 0 means all blocks.
        """
        if (cutoff <= 0):
            cutoff = (len(self.blocks) - 1)
        with open(outfile, 'wb') as fp:
            self.header[3] = self.seen
            header = np.array(self.header[0:3].numpy(), np.int32)
            header.tofile(fp)
            if (((self.header[0] * 10) + self.header[1]) >= 2):
                seen = np.array(self.seen, np.int64)
            else:
                seen = np.array(self.seen, np.int32)
            seen.tofile(fp)
            ind = (- 1)
            for blockId in range(1, (cutoff + 1)):
                ind = (ind + 1)
                block = self.blocks[blockId]
                if (block['type'] == 'convolutional'):
                    model = self.models[ind]
                    batch_normalize = int(block['batch_normalize'])
                    if batch_normalize:
                        save_conv_bn(fp, model[0], model[1])
                    else:
                        save_conv(fp, model[0])
                elif (block['type'] == 'connected'):
                    model = self.models[ind]
                    # BUGFIX: the original called save_fc(fc, ...) with an
                    # undefined name `fc` (NameError at runtime) and had the
                    # Sequential/Linear cases swapped relative to load_weights.
                    # Mirror load_weights: non-linear activations wrap the
                    # Linear in a Sequential, so save model[0] in that case.
                    if (block['activation'] != 'linear'):
                        save_fc(fp, model[0])
                    else:
                        save_fc(fp, model)
                elif (block['type'] in ['maxpool', 'reorg', 'upsample', 'route', 'shortcut', 'region', 'yolo', 'avgpool', 'softmax', 'cost']):
                    pass  # parameter-free layers: nothing to save
                else:
                    print(('unknown type %s' % block['type']))
def generate_extra_cols(df):
    """One-hot encode the numeric 'Origin' column in place.

    Removes 'Origin' from ``df`` and appends float indicator columns
    'USA' (1), 'Europe' (2) and 'Japan' (3).  Returns the mutated frame.
    """
    origin = df.pop('Origin')
    for col_name, code in (('USA', 1), ('Europe', 2), ('Japan', 3)):
        df[col_name] = ((origin == code) * 1.0)
    return df
def train_on_batch(data, model, optimizer, criterion_traj, criterion_intend, params, print_result=False, epoch=0, iter=0):
    """Run one optimisation step of the joint trajectory/intent model on a batch.

    Computes a weighted sum of trajectory and intent losses, backpropagates
    with gradient clipping, and (optionally) prints and appends metrics to
    ``params['log_dir'] + 'train.tsv'``.

    Args:
        data: one batch, in the format expected by ``get_prediction_on_batch``.
        model: the network being trained.
        optimizer: its torch optimizer.
        criterion_traj: loss for the predicted trajectory.
        criterion_intend: loss for the predicted intent class.
        params: dict with 'traj_intent_loss_ratio', 'data_mean', 'data_std',
            'log_dir'.
        print_result: when True, also compute/print/log MSE and accuracy.
        epoch: current epoch (logging only).
        iter: current iteration (logging only; note it shadows builtin iter).

    Returns:
        The combined (scalar tensor) loss.
    """
    optimizer.zero_grad()
    # `device` is a module-level global used by get_prediction_on_batch.
    (x, pred_traj, y_traj, pred_intent, y_intent, pred_start_pos) = get_prediction_on_batch(data, model, device)
    loss_traj = criterion_traj(pred_traj, y_traj)
    loss_intent = criterion_intend(pred_intent, y_intent)
    # Weighted sum of the two task losses; weights come from params.
    loss = ((params['traj_intent_loss_ratio'][0] * loss_traj) + (params['traj_intent_loss_ratio'][1] * loss_intent))
    loss.backward()
    # Clip the global grad norm at 10 to stabilise training.
    _ = torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
    optimizer.step()
    if print_result:
        data_stats = {'data_mean': params['data_mean'], 'data_std': params['data_std']}
        out_str = ('epoch: %d, iter: %d, loss: %.4f ' % (epoch, iter, loss.detach().cpu().numpy()))
        # De-normalise predictions back to absolute positions before the MSE.
        pred_traj = get_position(pred_traj, pred_start_pos, data_stats)
        y_traj = get_position(y_traj, pred_start_pos, data_stats)
        mse = ((pred_traj - y_traj).pow(2).sum().float() / (pred_traj.size(0) * pred_traj.size(1)))
        mse = mse.cpu().detach().numpy()
        loss_traj_val = loss_traj.cpu().detach().numpy()
        out_str += ('trajectory_loss: %.4f, trajectory_mse: %.4f, ' % (loss_traj_val, mse))
        # Intent accuracy from the argmax over class logits.
        (_, pred_intent_cls) = pred_intent.max(1)
        label_cls = y_intent
        acc = ((pred_intent_cls == label_cls).sum().float() / label_cls.size(0))
        acc = acc.cpu().detach().numpy()
        loss_intent_val = loss_intent.cpu().detach().numpy()
        out_str += ('intent_loss: %.4f, intent_acc: %.4f, ' % (loss_intent_val, acc))
        print(out_str)
        log_path = (params['log_dir'] + 'train.tsv')
        # Write the TSV header only once, when the file does not exist yet.
        if (not os.path.exists(log_path)):
            with open(log_path, 'a') as f:
                f.write('epoch\titer\ttraj_loss\tintent_loss\tmse\tacc\n')
        with open(log_path, 'a') as f:
            f.write(('%05d\t%05d\t%f\t%f\t%f\t%f\n' % (epoch, iter, loss_traj_val, loss_intent_val, mse, acc)))
    return loss
class TerminateOnNaNCallback(Callback):
    """Stop training as soon as the loss becomes NaN."""

    def __init__(self):
        self.stop = False  # set once a NaN loss has been seen

    def on_batch_end(self, last_loss, epoch, num_batch, **kwargs: Any):
        """Check ``last_loss`` after each batch; request a stop on NaN.

        Returns True when a stop was already requested, or a dict of stop
        flags the first time a NaN loss is observed.
        """
        if self.stop:
            return True
        if torch.isnan(last_loss):
            print(f'Epoch/Batch ({epoch}/{num_batch}): Invalid loss, terminating training.')
            # BUGFIX: the original never set self.stop, so the early-return
            # guard above was dead code and the message could repeat.
            self.stop = True
            return {'stop_epoch': True, 'stop_training': True, 'skip_validate': True}
# NOTE(review): in the original text the decorators were garbled into bare
# names ('_sentencepiece _tokenizers' before the class and '_property' before
# big_tokenizer), which is a syntax error.  Restored below as the decorators
# used by the upstream transformers test suite (require_sentencepiece /
# require_tokenizers from transformers.testing_utils, and cached_property) --
# confirm they are imported at the top of this file.
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLM-RoBERTa (slow SentencePiece and fast Rust versions)."""

    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Persist a tokenizer built from the sample vocab so the mixin can reload it.
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        """Round-trip tokens <-> ids on the sample SentencePiece vocab."""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        # fairseq_offset shifts SentencePiece ids to make room for special tokens.
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [(value + tokenizer.fairseq_offset) for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [(value + tokenizer.fairseq_offset) for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]])
        # Converting back maps out-of-vocab ids to '<unk>'.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])

    @cached_property
    def big_tokenizer(self):
        """The full pretrained tokenizer (downloaded once, then cached)."""
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')

    def test_rust_and_python_full_tokenizers(self):
        """Slow (Python) and fast (Rust) tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is false.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_tokenization_base_easy_symbols(self):
        """Golden encoding for a trivial input (includes <s>/</s> specials 0 and 2)."""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    def test_tokenization_base_hard_symbols(self):
        """Golden encoding for punctuation-heavy text with an unknown word."""
        symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        original_tokenizer_encodings = [0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, 3, 6, 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
class BertJapaneseTokenizer(BertTokenizer):
    """BERT tokenizer for Japanese: word-level split (basic or MeCab) followed
    by subword split (WordPiece or per-character)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, do_word_tokenize=True, do_subword_tokenize=True, word_tokenizer_type='basic', subword_tokenizer_type='wordpiece', never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        """Build vocab and the configured word/subword tokenizers.

        Raises:
            ValueError: if vocab_file is missing or a tokenizer type is unknown.
        """
        # Deliberately skips BertTokenizer.__init__ (grandparent call): the
        # parent would install its own English tokenizers, which are replaced
        # by the Japanese-specific ones below.
        super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        # Reserve room for [CLS]/[SEP] (single) and [CLS]/[SEP]/[SEP] (pair).
        self.max_len_single_sentence = (self.max_len - 2)
        self.max_len_sentences_pair = (self.max_len - 3)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping for id -> token decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_word_tokenize = do_word_tokenize
        if do_word_tokenize:
            if (word_tokenizer_type == 'basic'):
                # tokenize_chinese_chars=False: Japanese must not be split
                # around CJK characters the way Chinese is.
                self.word_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False)
            elif (word_tokenizer_type == 'mecab'):
                self.word_tokenizer = MecabTokenizer(do_lower_case=do_lower_case, never_split=never_split)
            else:
                raise ValueError("Invalid word_tokenizer_type '{}' is specified.".format(word_tokenizer_type))
        self.do_subword_tokenize = do_subword_tokenize
        if do_subword_tokenize:
            if (subword_tokenizer_type == 'wordpiece'):
                self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            elif (subword_tokenizer_type == 'character'):
                self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            else:
                raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type))

    def _tokenize(self, text):
        """Word-tokenize (if enabled) then subword-tokenize each word."""
        if self.do_word_tokenize:
            # Special tokens must survive word splitting intact.
            tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
        else:
            tokens = [text]
        if self.do_subword_tokenize:
            split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
        else:
            split_tokens = tokens
        return split_tokens
def get_model(mode):
    """Return the model class registered under *mode*.

    Only the 'flow' mode is supported.

    Raises:
        ValueError: if *mode* is not a known mode name.
    """
    # Guard clause: reject unknown modes up front.
    if mode != 'flow':
        raise ValueError('Mode {} not found.'.format(mode))
    return Model_flow
class TinyImagenetFederatedTask(FederatedTask):
    """Federated-learning task over Tiny-ImageNet-200 (64x64 images, 200 classes)."""

    def __init__(self, params: Params):
        super(TinyImagenetFederatedTask, self).__init__(params)
        # Standard ImageNet channel statistics for normalization.
        self.means = (0.485, 0.456, 0.406)
        self.lvars = (0.229, 0.224, 0.225)
        self.normalize = transforms.Normalize(self.means, self.lvars)
        self.data_dir = './tiny-imagenet-200/'

    def load_imagenet_data(self):
        """Build train/test datasets and loaders; augmentation is optional."""
        if self.params.transform_train:
            transform_train = transforms.Compose([transforms.RandomCrop(64, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), self.normalize])
        else:
            transform_train = transforms.Compose([transforms.ToTensor(), self.normalize])
        transform_test = transforms.Compose([transforms.ToTensor(), self.normalize])
        self.train_dataset = TinyImageNet(self.data_dir, train=True, transform=transform_train)
        self.test_dataset = TinyImageNet(self.data_dir, train=False, transform=transform_test)
        self.train_loader = DataLoader(self.train_dataset, batch_size=self.params.batch_size, shuffle=True, num_workers=0)
        self.test_loader = DataLoader(self.test_dataset, batch_size=self.params.test_batch_size, shuffle=False, num_workers=0)
        self.classes = [i for i in range(200)]

    def load_data(self) -> None:
        self.load_imagenet_data()

    def build_model(self) -> Module:
        """Create the classifier configured by params (resnet18 or simple).

        Raises:
            NotImplementedError: for unsupported pretrained/simple combination.
        """
        if (self.params.model == 'resnet18'):
            if self.params.pretrained:
                # Replace the ImageNet-1k head with a 200-way classifier.
                model = resnet18(pretrained=True)
                model.fc = nn.Linear(512, len(self.classes))
            else:
                model = resnet18(pretrained=False, num_classes=len(self.classes))
            print('build resnet18')
            return model
        elif (self.params.model == 'simple'):
            if self.params.pretrained:
                # BUG FIX: original raised the NotImplemented constant, which
                # itself triggers a TypeError; raise the exception class instead.
                raise NotImplementedError
            else:
                model = SimpleNet(num_classes=len(self.classes))
                return model
# NOTE(review): the line below is not valid Python -- it is almost certainly
# the tail of a mangled decorator such as `@cupy.memoize(for_each_device=True)`
# lost during extraction; confirm against the upstream source.
(for_each_device=True)
def cupy_launch(strFunction, strKernel):
    """Compile *strKernel* (CUDA source) with cupy and return the named kernel
    function handle."""
    return cupy.cuda.compile_with_cache(strKernel).get_function(strFunction)
class ConvBlock(nn.Module):
    """Stack of `layer_num` conv layers (1D or 2D).

    The first `layer_num - 1` layers keep `in_channels` and use `activation`;
    the final layer maps to `out_channels` with `stride`, `mode`, and
    `last_activation`.
    """

    def __init__(self, dimension, layer_num, in_channels, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, bias=True, norm='batch', activation='relu', last_activation='relu', mode='conv'):
        super(ConvBlock, self).__init__()
        layers = []
        # Pick the layer constructor by dimensionality; unknown dimensions
        # yield an empty (identity) Sequential, matching prior behavior.
        layer_cls = {1: Conv1d_layer, 2: Conv2d_layer}.get(dimension)
        if layer_cls is not None:
            shared = dict(padding=padding, dilation=dilation, bias=bias, norm=norm)
            for _ in range(layer_num - 1):
                layers.append(layer_cls(in_channels, in_channels, kernel_size, activation=activation, **shared))
            layers.append(layer_cls(in_channels, out_channels, kernel_size, stride=stride, activation=last_activation, mode=mode, **shared))
        self.conv_block = nn.Sequential(*layers)

    def forward(self, input):
        """Apply the whole convolutional stack."""
        return self.conv_block(input)
def register_loader(loader_class):
    """Register *loader_class* in the loader registry under a derived name and
    return a wrapper that layers shuffle / sequence-size / resize adapters.

    The registry key is the lowercased class name with the trailing 'Loader'
    suffix stripped (6 characters removed).
    """
    name = loader_class.__name__.lower()[:(- len('Loader'))]

    class _Wrapped(loader_class):
        # NOTE(review): construction happens entirely in __new__, which
        # returns a plain loader_class (possibly wrapped) rather than a
        # _Wrapped instance -- so __init__ is never invoked by Python.
        # The raise below documents that; confirm this is intentional.
        def __init__(self, shuffle_sequences: Optional[bool]=None, shuffle_sequence_items: Optional[bool]=None, shuffle: Optional[bool]=None, sequence_size: Optional[int]=None, image_size: int=None, seed: int=None, **kwargs):
            raise NotImplementedError()

        def __new__(self, shuffle_sequences: Optional[bool]=None, shuffle_sequence_items: Optional[bool]=None, shuffle: Optional[bool]=None, sequence_size: Optional[int]=None, image_size: int=None, seed: int=None, **kwargs):
            # Forward an explicit seed to the loader; default to 42 for the
            # shuffling adapters when none is given.
            if (seed is not None):
                kwargs['seed'] = seed
            seed = (seed if (seed is not None) else 42)
            # Classes may opt out of the generic adapters by declaring that
            # they implement the feature themselves.
            custom_resize = getattr(loader_class, '_custom_resize', False)
            custom_shuffle = getattr(loader_class, '_custom_shuffle', False)
            custom_sequence_size = getattr(loader_class, '_custom_sequence_size', False)
            if custom_resize:
                kwargs['image_size'] = image_size
            if custom_sequence_size:
                kwargs['sequence_size'] = sequence_size
            # `shuffle` is a shorthand for setting both fine-grained flags;
            # mixing it with either fine-grained flag is an error.
            if (shuffle is not None):
                assert (shuffle_sequence_items is None)
                assert (shuffle_sequences is None)
                shuffle_sequence_items = shuffle_sequences = shuffle
            else:
                assert (shuffle is None)
                shuffle_sequence_items = (shuffle_sequence_items or False)
                shuffle_sequences = (shuffle_sequences or False)
            if custom_shuffle:
                loader = loader_class(shuffle_sequences=shuffle_sequences, shuffle_sequence_items=shuffle_sequence_items, sequence_size=sequence_size, seed=seed, **kwargs)
            else:
                loader = loader_class(**kwargs)
            # Adapter order matters: item shuffle -> fixed sequence size ->
            # sequence shuffle -> resize.
            if shuffle_sequence_items:
                loader = ShuffledLoader(loader, seed, shuffle_sequence_items=True)
            if ((sequence_size is not None) and (not custom_sequence_size)):
                loader = FixedSequenceSizeLoader(loader, sequence_size)
            if shuffle_sequences:
                loader = ShuffledLoader(loader, seed, shuffle_sequences=True)
            if ((image_size is not None) and (not custom_resize)):
                loader = ChangedImageSizeLoader(loader, image_size)
            return loader

    _registry[name] = _Wrapped
    return _Wrapped
def _sorted_coco_class_ids():
    """Return base + novel COCO class ids, sorted, as a fresh list.

    A fresh list is built on every call because list.sort() mutates, and the
    module-level id lists must not be reordered as a side effect.
    """
    class_ids = (coco_base_class_ids + coco_novel_class_ids)
    class_ids.sort()
    return class_ids


def _coco_paths(split):
    """Image folder and annotation file for a COCO split ('train' or 'val')."""
    root = Path('data/coco')
    return (root / f'{split}2017'), ((root / 'annotations') / f'instances_{split}2017.json')


def _voc_paths(ann_name):
    """Image folder and the named annotation file for Pascal VOC."""
    root = Path('data/voc')
    return (root / 'images'), ((root / 'annotations') / ann_name)


def _build_train(args, image_set):
    """Training dataset for the configured dataset_file, or None if unknown."""
    ds = args.dataset_file
    if (ds == 'coco'):
        (img_folder, ann_file) = _coco_paths('train')
        return build(args, img_folder, ann_file, image_set, activated_class_ids=_sorted_coco_class_ids(), with_support=True)
    if (ds == 'coco_base'):
        (img_folder, ann_file) = _coco_paths('train')
        return build(args, img_folder, ann_file, image_set, activated_class_ids=coco_base_class_ids, with_support=True)
    # All VOC variants share paths and differ only in activated class ids.
    voc_ids = {'voc': list(range(1, (20 + 1))), 'voc_base1': voc_base1_class_ids, 'voc_base2': voc_base2_class_ids, 'voc_base3': voc_base3_class_ids}
    if (ds in voc_ids):
        (img_folder, ann_file) = _voc_paths('pascal_trainval0712.json')
        return build(args, img_folder, ann_file, image_set, activated_class_ids=voc_ids[ds], with_support=True)
    return None


def _build_val(args, image_set):
    """Validation dataset (no support set), or None if dataset_file unknown."""
    ds = args.dataset_file
    if (ds in ['coco', 'coco_base']):
        (img_folder, ann_file) = _coco_paths('val')
        return build(args, img_folder, ann_file, image_set, activated_class_ids=_sorted_coco_class_ids(), with_support=False)
    if (ds in ['voc', 'voc_base1', 'voc_base2', 'voc_base3']):
        (img_folder, ann_file) = _voc_paths('pascal_test2007.json')
        return build(args, img_folder, ann_file, image_set, activated_class_ids=list(range(1, (20 + 1))), with_support=False)
    return None


def _build_fewshot(args, image_set):
    """Few-shot dataset, or None if dataset_file unknown."""
    ds = args.dataset_file
    if (ds in ['coco', 'coco_base']):
        return build_fewshot(args, image_set, activated_class_ids=_sorted_coco_class_ids(), with_support=True)
    if (ds in ['voc', 'voc_base1', 'voc_base2', 'voc_base3']):
        return build_fewshot(args, image_set, activated_class_ids=list(range(1, (20 + 1))), with_support=True)
    return None


def build_dataset(image_set, args):
    """Construct the dataset for *image_set* ('train' | 'val' | 'fewshot').

    Dispatches to a per-split helper; raises ValueError when args.dataset_file
    is not supported for that split.
    """
    assert (image_set in ['train', 'val', 'fewshot']), "image_set must be 'train', 'val' or 'fewshot'."
    if (image_set == 'train'):
        result = _build_train(args, image_set)
    elif (image_set == 'val'):
        result = _build_val(args, image_set)
    else:
        result = _build_fewshot(args, image_set)
    if (result is None):
        raise ValueError(f'{image_set} of dataset {args.dataset_file} not supported.')
    return result
def _empty_box_results(): return OrderedDict({'box': OrderedDict([('AP', (- 1)), ('AP50', (- 1)), ('AP75', (- 1)), ('APs', (- 1)), ('APm', (- 1)), ('APl', (- 1)), ('CorLoc', (- 1))])})
def evaluate_metrics(prediction_file: Union[(str, Path, List[Dict[(str, str)]])], reference_file: Union[(str, Path, List[Dict[(str, str)]])], nb_reference_captions: int=5) -> Dict[(str, Dict[(str, Union[(float, Dict[(str, float)])])])]:
    """Score predicted captions against multi-reference ground truth.

    Both inputs may be CSV paths or already-loaded lists of row dicts; rows
    are keyed by 'file_name'. Every predicted file must have a reference row.
    Returns the combined metrics dict with lower-cased metric names.
    """
    prediction_file = check_and_read_csv(prediction_file)
    reference_file = check_and_read_csv(reference_file)
    # Sort both sides by file name so rows line up deterministically.
    prediction_file.sort(key=(lambda the_row: the_row['file_name']))
    reference_file.sort(key=(lambda the_row: the_row['file_name']))
    reference_dict = {}
    for row in reference_file:
        reference_dict[row['file_name']] = row
    file_names = [row['file_name'] for row in prediction_file]
    # Every prediction must have a matching reference row.
    assert all(((file_name in reference_dict) for file_name in file_names))
    predictions = []
    ground_truths = []
    for row in prediction_file:
        file_name = row['file_name']
        predictions.append(row['caption_predicted'])
        # References live in columns caption_1 .. caption_N.
        cap_names = ['caption_{:1d}'.format(i) for i in range(1, (nb_reference_captions + 1))]
        ground_truths.append([reference_dict[file_name][cap] for cap in cap_names])
    (metrics, per_file_metrics) = evaluate_metrics_from_lists(predictions, ground_truths)
    total_metrics = combine_single_and_per_file_metrics(metrics, per_file_metrics, file_names)
    return {key.lower(): value for (key, value) in total_metrics.items()}
class Dataset_ETT_minute(Dataset):
    """ETT 15-minute-resolution dataset for sequence forecasting.

    Splits the CSV into fixed train/val/test windows (12/4/4 months at 4
    samples per hour) and yields (seq_x, seq_y, seq_x_mark, seq_y_mark)
    windows of lengths seq_len / label_len + pred_len.
    """

    def __init__(self, root_path, flag='train', size=None, features='S', data_path='ETTm1.csv', target='OT', scale=True, timeenc=0, freq='t'):
        # Default window sizes: seq_len = 4 days, label/pred = 1 day
        # (24 hours * 4 samples/hour).
        if (size == None):
            self.seq_len = ((24 * 4) * 4)
            self.label_len = (24 * 4)
            self.pred_len = (24 * 4)
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        assert (flag in ['train', 'test', 'val'])
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]
        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        """Load the CSV, select the split window, scale, and build time marks."""
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
        # Split boundaries: 12 months train, 4 months val, 4 months test,
        # all at 24*4 samples per day; each split starts seq_len early so the
        # first window has full history.
        border1s = [0, ((((12 * 30) * 24) * 4) - self.seq_len), (((((12 * 30) * 24) * 4) + (((4 * 30) * 24) * 4)) - self.seq_len)]
        border2s = [(((12 * 30) * 24) * 4), ((((12 * 30) * 24) * 4) + (((4 * 30) * 24) * 4)), ((((12 * 30) * 24) * 4) + (((8 * 30) * 24) * 4))]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        # 'M'/'MS': all value columns; 'S': only the target column.
        if ((self.features == 'M') or (self.features == 'MS')):
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif (self.features == 'S'):
            df_data = df_raw[[self.target]]
        if self.scale:
            # Fit the scaler on the training window only to avoid leakage.
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values
        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if (self.timeenc == 0):
            # Manual calendar features; minute is bucketed into 15-min slots.
            df_stamp['month'] = df_stamp.date.apply((lambda row: row.month), 1)
            df_stamp['day'] = df_stamp.date.apply((lambda row: row.day), 1)
            df_stamp['weekday'] = df_stamp.date.apply((lambda row: row.weekday()), 1)
            df_stamp['hour'] = df_stamp.date.apply((lambda row: row.hour), 1)
            df_stamp['minute'] = df_stamp.date.apply((lambda row: row.minute), 1)
            df_stamp['minute'] = df_stamp.minute.map((lambda x: (x // 15)))
            # NOTE(review): positional axis argument to DataFrame.drop is
            # deprecated/removed in newer pandas; use axis=1 or columns=[...].
            data_stamp = df_stamp.drop(['date'], 1).values
        elif (self.timeenc == 1):
            # Library-provided continuous time features, transposed to (T, F).
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)
        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        """Return one (input, target, input-marks, target-marks) window.

        The target window overlaps the input by label_len (decoder warm-up)
        and extends pred_len steps past it.
        """
        s_begin = index
        s_end = (s_begin + self.seq_len)
        r_begin = (s_end - self.label_len)
        r_end = ((r_begin + self.label_len) + self.pred_len)
        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return (seq_x, seq_y, seq_x_mark, seq_y_mark)

    def __len__(self):
        # Number of full windows that fit inside the split.
        return (((len(self.data_x) - self.seq_len) - self.pred_len) + 1)

    def inverse_transform(self, data):
        """Undo the standard scaling applied in __read_data__."""
        return self.scaler.inverse_transform(data)
class DictCIFAR100(DictDataset):
    """CIFAR-100 exposed through the DictDataset interface (dict samples)."""

    def __init__(self, root: str, train: bool=True, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, download: bool=False) -> None:
        # Build the plain torchvision dataset first, then wrap it.
        base = CIFAR100(root, train, transform, target_transform, download)
        super().__init__(base)
def trim_rule(qrule, dataset):
    """Tighten each interval literal in *qrule*'s antecedent to the smallest
    closed interval covering the rows the rule classifies correctly.

    Mutates and returns *qrule*.

    Raises:
        TypeError: if the arguments are not of the exact expected types.
            (Previously a bare Exception; TypeError is still caught by any
            `except Exception` caller.)
    """
    # Exact type checks kept deliberately (subclasses were rejected before).
    if (type(qrule) != QuantClassAssociationRule):
        raise TypeError('type of qrule must be QuantClassAssociationRule')
    if (type(dataset) != pandas.DataFrame):
        raise TypeError('type of dataset must be: pandas.DataFrame')
    (correctly_covered_by_r, _, _) = find_correctly_covered(qrule, dataset)
    antecedent = qrule.new_antecedent
    for (idx, (attribute, value)) in enumerate(antecedent):
        current_column = correctly_covered_by_r[[attribute]].values
        # Skip columns that are empty / all-falsy -- nothing to tighten.
        # NOTE(review): .any() also skips all-zero numeric columns; confirm
        # that is intended rather than only skipping empty columns.
        if (not current_column.any()):
            continue
        # np.asscalar was removed in NumPy 1.23; .item() is the supported way
        # to extract a Python scalar.
        minv = current_column.min().item()
        maxv = current_column.max().item()
        new_interval = Interval.from_scalars(minv, maxv, True, True)
        antecedent[idx] = (attribute, new_interval)
    return qrule
# NOTE(review): "_model" below is almost certainly a mangled registration
# decorator (e.g. timm's @register_model) lost during extraction -- confirm
# against the upstream source.
_model
def dla60_res2net(pretrained=False, **kwargs):
    """DLA-60 variant using Res2Net bottleneck blocks (base_width=28)."""
    model_kwargs = dict(levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs)
    return _create_dla('dla60_res2net', pretrained, **model_kwargs)
class DefaultWorker(Worker):
    """Worker that runs an agent in an environment and collects rollouts.

    Per-step data is buffered in lists and flushed into a TrajectoryBatch by
    collect_rollout().
    """

    def __init__(self, *, seed, max_path_length, worker_number):
        # Base class is assumed to store _seed, _max_path_length and
        # _worker_number (read below) -- TODO confirm against Worker.
        super().__init__(seed=seed, max_path_length=max_path_length, worker_number=worker_number)
        self.agent = None
        self.env = None
        self._observations = []
        self._last_observations = []
        self._actions = []
        self._rewards = []
        self._terminals = []
        self._lengths = []
        self._agent_infos = defaultdict(list)
        self._env_infos = defaultdict(list)
        self._prev_obs = None
        self._path_length = 0
        self.worker_init()

    def worker_init(self):
        """Seed this worker deterministically, offset by its worker number."""
        if (self._seed is not None):
            deterministic.set_seed((self._seed + self._worker_number))

    def update_agent(self, agent_update):
        """Apply an agent update: raw parameters or a replacement agent."""
        if isinstance(agent_update, (dict, tuple, np.ndarray)):
            self.agent.set_param_values(agent_update)
        elif (agent_update is not None):
            self.agent = agent_update

    def update_env(self, env_update):
        """Apply an env update: an EnvUpdate callable or a replacement env.

        Raises:
            TypeError: for unrecognized update types.
        """
        if (env_update is not None):
            if isinstance(env_update, EnvUpdate):
                self.env = env_update(self.env)
            elif isinstance(env_update, gym.Env):
                # Close the old environment before swapping it out.
                if (self.env is not None):
                    self.env.close()
                self.env = env_update
            else:
                raise TypeError('Uknown environment update type.')

    def start_rollout(self):
        """Reset env and agent to begin a new episode."""
        self._path_length = 0
        self._prev_obs = self.env.reset()
        self.agent.reset()

    def step_rollout(self):
        """Take one step; return True when the rollout is complete."""
        if (self._path_length < self._max_path_length):
            (a, agent_info) = self.agent.get_action(self._prev_obs)
            (next_o, r, d, env_info) = self.env.step(a)
            self._observations.append(self._prev_obs)
            self._rewards.append(r)
            self._actions.append(a)
            for (k, v) in agent_info.items():
                self._agent_infos[k].append(v)
            for (k, v) in env_info.items():
                self._env_infos[k].append(v)
            self._path_length += 1
            self._terminals.append(d)
            if (not d):
                self._prev_obs = next_o
                return False
        # Episode ended (done or max length): record length and final obs.
        self._lengths.append(self._path_length)
        self._last_observations.append(self._prev_obs)
        return True

    def collect_rollout(self):
        """Drain all buffers into a TrajectoryBatch and reset them."""
        observations = self._observations
        self._observations = []
        last_observations = self._last_observations
        self._last_observations = []
        actions = self._actions
        self._actions = []
        rewards = self._rewards
        self._rewards = []
        terminals = self._terminals
        self._terminals = []
        env_infos = self._env_infos
        self._env_infos = defaultdict(list)
        agent_infos = self._agent_infos
        self._agent_infos = defaultdict(list)
        # Convert per-key info lists to arrays for batching.
        for (k, v) in agent_infos.items():
            agent_infos[k] = np.asarray(v)
        for (k, v) in env_infos.items():
            env_infos[k] = np.asarray(v)
        lengths = self._lengths
        self._lengths = []
        return TrajectoryBatch(self.env.spec, np.asarray(observations), np.asarray(last_observations), np.asarray(actions), np.asarray(rewards), np.asarray(terminals), dict(env_infos), dict(agent_infos), np.asarray(lengths, dtype='i'))

    def rollout(self):
        """Run one full episode and return its TrajectoryBatch."""
        self.start_rollout()
        while (not self.step_rollout()):
            pass
        return self.collect_rollout()

    def shutdown(self):
        """Release the environment's resources."""
        self.env.close()
class MultiProcessingHandler(logging.Handler):
    """Logging handler that funnels records through a queue to a background
    thread, which forwards them to a wrapped sub-handler.

    This serializes emission so multiple processes/threads can log safely to
    one underlying handler.
    """

    def __init__(self, name, sub_handler=None):
        super(MultiProcessingHandler, self).__init__()
        if (sub_handler is None):
            sub_handler = logging.StreamHandler()
        self.sub_handler = sub_handler
        # Mirror the sub-handler's level/formatter/filters on this facade.
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        self.filters = self.sub_handler.filters
        # Unbounded queue; emit() never blocks the producer.
        self.queue = queue.Queue((- 1))
        self._is_closed = False
        # Daemon thread: will not keep the interpreter alive on exit.
        self._receive_thread = threading.Thread(target=self._receive, name=name)
        self._receive_thread.daemon = True
        self._receive_thread.start()

    def setFormatter(self, fmt):
        """Keep the wrapped handler's formatter in sync with this one."""
        super(MultiProcessingHandler, self).setFormatter(fmt)
        self.sub_handler.setFormatter(fmt)

    def _receive(self):
        """Consumer loop: drain the queue into the sub-handler until closed."""
        while True:
            try:
                # Exit only once close() has been called AND the queue is
                # fully drained.
                if (self._is_closed and self.queue.empty()):
                    break
                record = self.queue.get(timeout=0.2)
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except (BrokenPipeError, EOFError):
                break
            except queue.Empty:
                # Timeout: loop again to re-check the closed flag.
                pass
            except:
                # Last-resort: report the failure on stderr, then propagate.
                from sys import stderr
                from traceback import print_exc
                print_exc(file=stderr)
                raise

    def _send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        """Render args/exc_info eagerly so the record is safe to pass across
        thread/process boundaries (its payload may not be picklable later)."""
        if record.args:
            record.msg = (record.msg % record.args)
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self._send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        if (not self._is_closed):
            self._is_closed = True
            # Give the consumer up to 5s to drain remaining records.
            self._receive_thread.join(5.0)
            self.sub_handler.close()
            super().close()
def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    """Train a tiny language model, then quantize it in two stages
    (scalar quant-noise training, then product quantization from the last
    checkpoint) via fairseq's CLI entry points.

    NOTE(review): run_validation is accepted but never used here -- confirm
    whether validation was meant to run between stages.
    NOTE(review): the arg lists mix the int 0 into otherwise-string argv
    entries ('--num-workers', 0); argparse normally expects strings -- confirm
    fairseq's parser tolerates this.
    """
    # Stage 1: baseline training for one epoch.
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(train_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', 0] + (extra_flags or [])))
    train.main(train_args)
    # Stage 2: brief training with scalar quantization noise enabled.
    scalar_quant_train_parser = options.get_training_parser()
    scalar_quant_train_args = options.parse_args_and_arch(scalar_quant_train_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-update', '3', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', 0, '--quant-noise-scalar', '0.5'] + (extra_flags or [])))
    train.main(scalar_quant_train_args)
    # Stage 3: resume from the last checkpoint and apply the quantization
    # config (iterative product quantization).
    quantize_parser = options.get_training_parser()
    quantize_args = options.parse_args_and_arch(quantize_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '50', '--tokens-per-sample', '50', '--max-update', '6', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', 0, '--restore-file', os.path.join(data_dir, 'checkpoint_last.pt'), '--reset-optimizer', '--quantization-config-path', os.path.join(os.path.dirname(__file__), 'transformer_quantization_config.yaml')] + (extra_flags or [])))
    train.main(quantize_args)
def _create_losses(input_queue, create_model_fn, train_config):
    """Build the detection model's loss graph from a batch of queued inputs
    and register each loss tensor with tf.losses (TF1 object-detection API).

    Side effect only: losses are added to the global collection; nothing is
    returned.
    """
    detection_model = create_model_fn()
    (images, _, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list) = get_inputs(input_queue, detection_model.num_classes, train_config.merge_multiple_label_boxes)
    images = [detection_model.preprocess(image) for image in images]
    images = tf.concat(images, 0)
    # Masks/keypoints are all-or-nothing: drop the whole list if any example
    # lacks them, since the model expects a uniform batch.
    if any(((mask is None) for mask in groundtruth_masks_list)):
        groundtruth_masks_list = None
    if any(((keypoints is None) for keypoints in groundtruth_keypoints_list)):
        groundtruth_keypoints_list = None
    detection_model.provide_groundtruth(groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list)
    prediction_dict = detection_model.predict(images)
    losses_dict = detection_model.loss(prediction_dict)
    for loss_tensor in losses_dict.values():
        tf.losses.add_loss(loss_tensor)
class DropoutContext(object):
    """Mutable holder for dropout state shared between forward passes.

    Attributes:
        dropout: drop probability (0 disables dropout).
        mask: cached dropout mask, generated lazily by the consumer.
        scale: multiplier applied to the output.
        reuse_mask: whether the cached mask may be reused across calls.
    """

    def __init__(self):
        # Defaults: dropout disabled, no mask yet, identity scaling,
        # mask reuse allowed.
        self.reuse_mask = True
        self.scale = 1
        self.mask = None
        self.dropout = 0
def load(model_class, dir_path, opt, reset_params=False):
    """Load a pretrained model plus its optimizer/scheduler checkpoint.

    Args:
        model_class: class providing from_pretrained().
        dir_path: checkpoint directory (contains gnn_config.json and
            optimizer.pth.tar).
        opt: runtime options; opt.device selects the target device.
        reset_params: if True, build a fresh optimizer/scheduler from opt
            instead of restoring the checkpointed ones.

    Returns:
        (model, optimizer, scheduler, opt_checkpoint, step, best_eval_metric)
    """
    epoch_path = os.path.realpath(dir_path)
    optimizer_path = os.path.join(epoch_path, 'optimizer.pth.tar')
    logger.info(('Loading %s' % epoch_path))
    # NOTE(review): json.load(open(...)) leaks the file handle; a `with`
    # block would be cleaner -- left unchanged here.
    gnn_config = json.load(open((epoch_path + '/gnn_config.json')))
    model = model_class.from_pretrained(epoch_path, **gnn_config)
    model = model.to(opt.device)
    logger.info(('loading checkpoint %s' % optimizer_path))
    checkpoint = torch.load(optimizer_path, map_location=opt.device)
    opt_checkpoint = checkpoint['opt']
    step = checkpoint['step']
    # Older checkpoints stored the metric under 'best_dev_em'.
    if ('best_eval_metric' in checkpoint):
        best_eval_metric = checkpoint['best_eval_metric']
    else:
        best_eval_metric = checkpoint['best_dev_em']
    if (not reset_params):
        (optimizer, scheduler) = set_optim(opt_checkpoint, model)
        scheduler.load_state_dict(checkpoint['scheduler'])
        optimizer.load_state_dict(checkpoint['optimizer'])
    else:
        (optimizer, scheduler) = set_optim(opt, model)
    return (model, optimizer, scheduler, opt_checkpoint, step, best_eval_metric)
class AttentionBuilder(BaseAttentionBuilder):
    """Attention builder backed by the global AttentionRegistry."""

    def __init__(self):
        # Zero-argument super() is equivalent to
        # super(AttentionBuilder, self) in Python 3.
        super().__init__(AttentionRegistry)
def bitstr2float(bitstr):
    """Interpret *bitstr* (a string of '0'/'1' characters, length a multiple
    of 8) as a big-endian IEEE-754 single-precision float."""
    # Convert each 8-character slice into one byte.
    packed = bytes(int(bitstr[pos:pos + 8], 2) for pos in range(0, len(bitstr), 8))
    (value,) = struct.unpack('>f', packed)
    return value
class RoIAlignFunction(Function):
    """Autograd Function for RoI Align backed by CUDA kernels.

    Dispatches between the legacy v1 kernels (aligned=False) and v2 kernels
    (aligned=True). CPU inputs are not supported.

    NOTE(review): forward/backward lack the usual @staticmethod decorator and
    "_differentiable" below looks like a mangled decorator (likely
    @once_differentiable) lost during extraction -- confirm upstream.
    """

    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0, aligned=True):
        """Pool each RoI to (out_h, out_w); saves state on ctx for backward."""
        (out_h, out_w) = _pair(out_size)
        assert (isinstance(out_h, int) and isinstance(out_w, int))
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        ctx.aligned = aligned
        if features.is_cuda:
            if (not aligned):
                # v1 path fills a preallocated output tensor in place.
                (batch_size, num_channels, data_height, data_width) = features.size()
                num_rois = rois.size(0)
                output = features.new_zeros(num_rois, num_channels, out_h, out_w)
                roi_align_cuda.forward_v1(features, rois, out_h, out_w, spatial_scale, sample_num, output)
            else:
                # v2 path allocates and returns the output itself.
                output = roi_align_cuda.forward_v2(features, rois, spatial_scale, out_h, out_w, sample_num, aligned)
        else:
            raise NotImplementedError
        return output

    _differentiable
    def backward(ctx, grad_output):
        """Gradient w.r.t. features only (rois get no gradient)."""
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        aligned = ctx.aligned
        assert ((feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if (not aligned):
            if ctx.needs_input_grad[0]:
                grad_input = rois.new_zeros(batch_size, num_channels, data_height, data_width)
                roi_align_cuda.backward_v1(grad_output.contiguous(), rois, out_h, out_w, spatial_scale, sample_num, grad_input)
        else:
            grad_input = roi_align_cuda.backward_v2(grad_output, rois, spatial_scale, out_h, out_w, batch_size, num_channels, data_height, data_width, sample_num, aligned)
        # One None per non-tensor forward argument (out_size, spatial_scale,
        # sample_num, aligned).
        return (grad_input, grad_rois, None, None, None, None)
def _test_mean_and_cov(approx, var_param):
    """Statistically check approx.mean_and_cov against Monte Carlo samples.

    Draws MC_SAMPLES samples and runs one-sample t-tests on the first and
    second moments; all p-values must exceed the module-level `test_size`
    threshold (i.e. we must fail to reject that moments match).
    """
    (mean, cov) = approx.mean_and_cov(var_param)
    # E[x x^T] = mu mu^T + Sigma.
    second_moments = (np.outer(mean, mean) + cov)
    samples = approx.sample(var_param, MC_SAMPLES)
    # Per-sample outer products for the second-moment test.
    samples_outer = np.einsum('ij,ik->ijk', samples, samples)
    mean_p_values = stats.ttest_1samp(samples, mean, axis=0)[1]
    np.testing.assert_array_less(test_size, mean_p_values)
    second_moments_p_values = stats.ttest_1samp(samples_outer, second_moments, axis=0)[1]
    np.testing.assert_array_less(test_size, second_moments_p_values)
def unpad_input(hidden_states, attention_mask):
    """Drop padding tokens from a (batch, seq, dim) tensor.

    Returns a tuple of:
      - the unpadded hidden states, flattened to (total_tokens, dim),
      - the flat indices of the kept (non-masked) positions,
      - cumulative sequence lengths (prefixed with 0) for varlen kernels,
      - the longest sequence length in the batch.
    """
    # Per-example token counts from the (batch, seq) mask.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    # Flat positions of all non-masked tokens.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    # Prepend a zero so cu_seqlens[i]:cu_seqlens[i+1] spans example i.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    flat_states = rearrange(hidden_states, 'b s d -> (b s) d')
    return (index_first_axis(flat_states, indices), indices, cu_seqlens, max_seqlen_in_batch)
class DecodeLayer(nn.Module):
    """Iterative graph decoder: alternates arc and concept generation for a
    fixed number of inference layers, then predicts relations."""

    def __init__(self, vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, conc_size, rel_size, dropout):
        super(DecodeLayer, self).__init__()
        self.inference_layers = inference_layers
        self.arc_generator = ArcGenerator(vocabs, embed_dim, ff_embed_dim, num_heads, dropout)
        self.concept_generator = ConceptGenerator(vocabs, embed_dim, ff_embed_dim, conc_size, dropout)
        self.relation_generator = RelationGenerator(vocabs, embed_dim, rel_size, dropout)
        self.dropout = dropout
        self.vocabs = vocabs

    def forward(self, probe, snt_state, graph_state, snt_padding_mask, graph_padding_mask, attn_mask, copy_seq, target=None, target_rel=None, work=False):
        """Run the decode stack.

        With work=True (inference): returns (concept_ll, arc_ll, rel_ll)
        log-likelihoods. Otherwise (training): returns
        (concept_loss, arc_loss, rel_loss), keeping only the losses from the
        final inference layer for arcs/concepts.
        """
        outs = F.dropout(probe, p=self.dropout, training=self.training)
        if work:
            for i in range(self.inference_layers):
                (arc_ll, outs) = self.arc_generator(outs, graph_state, graph_padding_mask, attn_mask, work=True)
                (concept_ll, outs) = self.concept_generator(outs, snt_state, snt_padding_mask, copy_seq, work=True)
            rel_ll = self.relation_generator(outs, graph_state, work=True)
            return (concept_ll, arc_ll, rel_ll)
        (arc_losses, concept_losses, rel_losses) = ([], [], [])
        for i in range(self.inference_layers):
            (arc_loss, outs) = self.arc_generator(outs, graph_state, graph_padding_mask, attn_mask, target_rel=target_rel, work=False)
            (concept_loss, outs) = self.concept_generator(outs, snt_state, snt_padding_mask, copy_seq, target=target, work=False)
            arc_losses.append(arc_loss)
            concept_losses.append(concept_loss)
        rel_loss = self.relation_generator(outs, graph_state, target_rel=target_rel, work=False)
        # Only the last layer's arc/concept losses are used.
        arc_loss = arc_losses[(- 1)]
        concept_loss = concept_losses[(- 1)]
        return (concept_loss, arc_loss, rel_loss)
def main(argv=sys.argv):
    """CLI entry point for the Google Mock class generator.

    Usage: prog <header-file> [class names...]. Prints generated mock
    classes to stdout; returns 1 on usage/read errors.
    """
    if (len(argv) < 2):
        sys.stderr.write(('Google Mock Class Generator v%s\n\n' % '.'.join(map(str, _VERSION))))
        sys.stderr.write(__doc__)
        return 1
    global _INDENT
    # Optional INDENT env var overrides the default indent width.
    try:
        _INDENT = int(os.environ['INDENT'])
    except KeyError:
        pass
    except:
        sys.stderr.write(('Unable to use indent of %s\n' % os.environ.get('INDENT')))
    filename = argv[1]
    desired_class_names = None
    if (len(argv) >= 3):
        # Restrict generation to the explicitly requested classes.
        desired_class_names = set(argv[2:])
    source = utils.ReadFile(filename)
    if (source is None):
        return 1
    builder = ast.BuilderFromSource(source, filename)
    try:
        # Drop any None entries the AST builder may yield.
        entire_ast = filter(None, builder.Generate())
    except KeyboardInterrupt:
        return
    except:
        # NOTE(review): this bare except hides the parse error entirely and
        # exits 1 with no message -- consider logging before exiting.
        sys.exit(1)
    else:
        lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
        sys.stdout.write('\n'.join(lines))
class SingleDataset(BaseDataset):
    """Dataset of single-domain images ('A' side only), e.g. for one-direction
    inference in image-to-image translation."""

    # NOTE(review): this takes no self/cls -- in the CycleGAN codebase this
    # method is conventionally a @staticmethod; confirm the decorator was not
    # lost during extraction.
    def modify_commandline_options(parser, is_train):
        """Add dataset-specific CLI options (meta file path)."""
        parser = BaseDataset.modify_commandline_options(parser, is_train)
        parser.add_argument('--meta_path', type=str, default=None, help='the path to the meta file')
        return parser

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        # The meta file only applies to the training phase.
        meta_path = (opt.meta_path if (opt.phase == 'train') else None)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size, meta_path=meta_path))
        # Direction BtoA swaps which channel count applies to the input.
        input_nc = (self.opt.output_nc if (self.opt.direction == 'BtoA') else self.opt.input_nc)
        self.transform = get_transform(opt, grayscale=(input_nc == 1))

    def __getitem__(self, index):
        """Return one transformed image and its path, keyed 'A'/'A_paths'."""
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert('RGB')
        A = self.transform(A_img)
        return {'A': A, 'A_paths': A_path}

    def __len__(self):
        # max_dataset_size == -1 means "use everything".
        if (self.opt.max_dataset_size == (- 1)):
            return len(self.A_paths)
        else:
            return self.opt.max_dataset_size
def trace_torch(frame, event, arg):
    """sys.settrace callback recording line-to-line transitions inside files
    whose path contains 'torch'.

    Transitions are stored in the module-level `data` mapping as
    (prev_line, current_line) pairs; cross-file jumps are keyed by the
    concatenation of both filenames.

    NOTE(review): `prev_line`, `prev_filename` and `data` must be initialized
    elsewhere in the module before tracing starts -- confirm; a first call
    would otherwise raise NameError.
    """
    if (event != 'line'):
        return trace_torch
    global prev_line
    global prev_filename
    func_filename = frame.f_code.co_filename
    func_line_no = frame.f_lineno
    if ('torch' not in func_filename):
        return trace_torch
    if (func_filename != prev_filename):
        # Jump across files: key by destination+source filename pair.
        data[(func_filename + prev_filename)].add((prev_line, func_line_no))
    else:
        data[func_filename].add((prev_line, func_line_no))
    prev_line = func_line_no
    prev_filename = func_filename
    return trace_torch
class ConvBnRelu(nn.Module):
    """Conv2d -> BatchNorm2d -> optional ReLU -> optional 2x bilinear upsample."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, padding: int=0, dilation: int=1, groups: int=1, bias: bool=True, add_relu: bool=True, interpolate: bool=False):
        super(ConvBnRelu, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU(inplace=True)
        # Flags controlling the optional stages in forward().
        self.add_relu = add_relu
        self.interpolate = interpolate

    def forward(self, x):
        out = self.bn(self.conv(x))
        if self.add_relu:
            out = self.activation(out)
        if self.interpolate:
            # Double spatial resolution.
            out = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True)
        return out
class NTXent(nn.Module):
    """Symmetric NT-Xent (InfoNCE-style) contrastive loss over audio/text
    embedding pairs, with same-label off-diagonal positives masked out."""

    def __init__(self, temperature=0.07):
        super(NTXent, self).__init__()
        self.loss = nn.LogSoftmax(dim=1)
        # Temperature scaling applied to cosine similarities.
        self.tau = temperature

    def forward(self, audio_embeds, text_embeds, labels):
        """Return the averaged audio->text and text->audio losses.

        Assumes audio_embeds and text_embeds are (n, d) with matching rows
        and labels is length n -- TODO confirm against callers.
        """
        n = audio_embeds.shape[0]
        a2t = (util.cos_sim(audio_embeds, text_embeds) / self.tau)
        t2a = (util.cos_sim(text_embeds, audio_embeds) / self.tau)
        # mask[i][j] is True where labels match; XOR with its own diagonal
        # leaves only off-diagonal same-label pairs, which are zeroed so they
        # are not treated as negatives.
        mask = labels.expand(n, n).eq(labels.expand(n, n).t()).to(a2t.device)
        mask_diag = mask.diag()
        mask_diag = torch.diag_embed(mask_diag)
        mask = (mask ^ mask_diag)
        # Diagonal entries are the positive pairs.
        a2t_loss = (- self.loss(a2t).masked_fill(mask, 0).diag().mean())
        t2a_loss = (- self.loss(t2a).masked_fill(mask, 0).diag().mean())
        loss = ((0.5 * a2t_loss) + (0.5 * t2a_loss))
        return loss
class CachedProcessPoolExecutor():
    """Lazily-created, cached ProcessPoolExecutor.

    Calling the instance returns a pool.  The pool is reused across calls
    until a different worker count is requested, at which point the old
    pool is shut down and a fresh one is created.
    """

    def __init__(self):
        self._pool = None
        # -1 is a sentinel meaning "no pool created yet"; the first call
        # always differs from it and therefore creates a pool.
        self._n_workers = -1

    def __call__(self, n_workers=None):
        """Return a pool with the requested size, recreating it if needed."""
        if n_workers != self._n_workers:
            from concurrent.futures import ProcessPoolExecutor
            self.shutdown()
            self._pool = ProcessPoolExecutor(n_workers)
            self._n_workers = n_workers
        return self._pool

    def is_initialized(self):
        """True while a live pool is cached."""
        return self._pool is not None

    def shutdown(self):
        """Dispose of the cached pool, if any, and reset the cache."""
        if self._pool is not None:
            self._pool.shutdown()
            self._pool = None

    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage-collected.
        self.shutdown()
class CiderMetric(Metric):
    """CIDEr metric wrapper around the CiderScorer implementation.

    Optionally tokenizes inputs (PTB-style tokenizer) before scoring.
    Accepts a single reference string or a list of references per summary.
    """

    def __init__(self, n_gram=4, sigma=6.0, tokenize=True):
        self.n_gram = n_gram
        self.sigma = sigma
        self.tokenize = tokenize

    def evaluate_example(self, summary, reference):
        """Score one summary against one or more reference strings."""
        if self.tokenize:
            if isinstance(reference, str):
                reference = ' '.join(tokenizer.tokenize(reference))
            else:
                reference = [' '.join(tokenizer.tokenize(r)) for r in reference]
            summary = ' '.join(tokenizer.tokenize(summary))
        scorer = CiderScorer(n=self.n_gram, sigma=self.sigma)
        refs = reference if isinstance(reference, list) else [reference]
        scorer += (summary, refs)
        score, _ = scorer.compute_score()
        return {'cider': score}

    def evaluate_batch(self, summaries, references, aggregate=True):
        """Score a batch of summaries.

        Returns the corpus-level score dict, or a list of per-example
        score dicts when ``aggregate`` is False.
        """
        if self.tokenize:
            if isinstance(references[0], str):
                references = [' '.join(tokenizer.tokenize(r)) for r in references]
            else:
                references = [[' '.join(tokenizer.tokenize(r)) for r in ref]
                              for ref in references]
            summaries = [' '.join(tokenizer.tokenize(s)) for s in summaries]
        scorer = CiderScorer(n=self.n_gram, sigma=self.sigma)
        for summ, ref in zip(summaries, references):
            scorer += (summ, ref if isinstance(ref, list) else [ref])
        score, scores = scorer.compute_score()
        if not aggregate:
            return [{'cider': s} for s in scores]
        return {'cider': score}

    def supports_multi_ref(self):
        """This metric accepts multiple references per summary."""
        return True
def savgol_smooth(y, box_pts):
    """Smooth a 1-D signal with a Savitzky-Golay filter (polynomial order 2).

    Args:
        y: array_like input signal.
        box_pts: requested window length; coerced to a valid value
            (odd, and at least 3 so it exceeds the polynomial order).

    Returns:
        numpy.ndarray of the smoothed signal, same length as ``y``.
    """
    if box_pts % 2 == 0:
        # savgol_filter expects an odd window length.
        box_pts += 1
    # Bug fix: the window must exceed the polynomial order (2), otherwise
    # savgol_filter raises; box_pts=1 previously crashed here.
    box_pts = max(box_pts, 3)
    return scipy.signal.savgol_filter(y, box_pts, 2)
def _load_dataset(frames_dataset_class, features_dataset_class, dataset_path: str, selected_video_names):
    """Instantiate frame and feature datasets for the selected videos.

    Args:
        frames_dataset_class: callable building the frames dataset from a path.
        features_dataset_class: callable building the raw features dataset
            from (path, video_names).
        dataset_path: root directory of the dataset.
        selected_video_names: requested subset of videos (resolved against
            the frames dataset).

    Returns:
        (frames_dataset, features_by_video, video_names) where
        features_by_video maps each video name to a Features wrapper.
    """
    frames_dataset = frames_dataset_class(dataset_path)
    video_names = _resolve_video_names(frames_dataset, selected_video_names)
    raw_features = features_dataset_class(dataset_path, video_names)
    features_by_video = {
        name: Features(raw_features.get_features(name)) for name in video_names
    }
    return frames_dataset, features_by_video, video_names
class TomWorld(CostarWorld):
    """CostarWorld specialization for the TOM robot (orange-sorting demos).

    Registers demonstration trajectory sets (move/pickup/test/box/trash),
    fits trajectory models (or loads previously fit ones), and exposes
    LfD-based features and reward.
    """

    def __init__(self, data_root='', fake=True, load_dataset=False, lfd=None, *args, **kwargs):
        # Only the simulated ("fake") setup is implemented.
        if (not fake):
            raise NotImplementedError('Not quite set up yet')
        else:
            observe = None
        super(TomWorld, self).__init__(None, *args, namespace='/tom', observe=observe, lfd=lfd, robot_config=[TOM_RIGHT_CONFIG, TOM_LEFT_CONFIG], **kwargs)
        # NOTE(review): (- .0) is negative zero — presumably a placeholder
        # weight/priority; confirm intended value.
        self.addCondition(ValidStateCondition(), (- .0), 'valid_state')
        self.oranges = []
        if (lfd is None):
            if load_dataset:
                # Load raw demonstrations and register each trajectory set
                # together with the world features it depends on.
                self.dataset = TomDataset()
                self.dataset.load(root_filepath=data_root)
                self.addTrajectories('move', self.dataset.move_trajs, self.dataset.move_data, ['time', 'squeeze_area'])
                self.addTrajectories('pickup', self.dataset.pickup_trajs, self.dataset.pickup_data, ['time', 'orange'])
                self.addTrajectories('test', self.dataset.test_trajs, self.dataset.test_data, ['time', 'squeeze_area'])
                self.addTrajectories('box', self.dataset.box, self.dataset.box_data, ['time', 'box'])
                self.addTrajectories('trash', self.dataset.trash, self.dataset.trash_data, ['time', 'trash'])
                # Aggregate all demonstration data for model fitting.
                self.ref_data = ((((self.dataset.move_data + self.dataset.pickup_data) + self.dataset.test_data) + self.dataset.box_data) + self.dataset.trash_data)
                self.fitTrajectories()
            else:
                # No dataset requested: restore previously fit models.
                self.loadModels('tom')
        self.features = DemoFeatures(self.lfd.kdl_kin, TOM_RIGHT_CONFIG)
        self.reward = DemoReward(self.lfd.skill_models)

    def _preprocessData(self, data):
        # Propagate the first observed orange pose backward/forward over the
        # whole trajectory so every frame has a (constant) orange pose.
        for traj in data:
            orange_pose = None
            for world in traj:
                if (world['orange'] is not None):
                    orange_pose = world['orange']
                    break
            for world in traj:
                world['orange'] = orange_pose

    def _dataToPose(self, data):
        """Flatten all per-frame object poses into one PoseArray message."""
        msg = PoseArray()
        for traj in data:
            for world in traj:
                if (world['orange'] is not None):
                    msg.poses.append(pm.toMsg(world['orange']))
                msg.poses.append(pm.toMsg(world['box']))
                msg.poses.append(pm.toMsg(world['trash']))
                msg.poses.append(pm.toMsg(world['squeeze_area']))
        return msg

    def make_task_plan(self):
        # NOTE(review): this method body appears truncated in this chunk —
        # nothing follows the getArgs() call; confirm against the full file.
        args = self.getArgs()
class RandomWindowSoccerNetClipSampler(SoccerNetClipSampler):
    """Samples fixed-duration clip windows from each half of each SoccerNet video.

    Yields (video_idx, half_idx, clip_start, clip_end) tuples with times as
    Fractions.  With ``sample_edges`` set and a half long enough to avoid
    overlap, the first and last windows are pinned to the exact start/end of
    the half and the remaining windows are jittered in between.
    """

    def __init__(self, data_source: SoccerNet, windows_per_video: int=50, window_duration: float=32.0, sample_edges: bool=False, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        # Windows are split evenly between the two halves of a match.
        assert ((windows_per_video % 2) == 0), 'Windows per video should be an even number.'
        self.windows_per_video = windows_per_video
        self.windows_per_half = (windows_per_video // 2)
        self.window_duration = window_duration
        self.sample_edges = sample_edges
        self._shuffle = shuffle

    def __iter__(self) -> List[Any]:
        # Seeded per epoch so jitter/shuffling are reproducible across workers.
        g = torch.Generator()
        g.manual_seed((self.seed + self.epoch))
        # Pre-allocated result list, filled positionally via global_idx.
        indices = [None for i in range((len(self.data_source) * self.windows_per_video))]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                # has_overlap: the half is too short to fit windows_per_half
                # non-overlapping windows, so windows must overlap.
                max_different_windows = (video_metadata['duration'][half_idx] / self.window_duration)
                has_overlap = (max_different_windows < self.windows_per_half)
                max_possible_clip_start = Fraction(max((video_metadata['duration'][half_idx] - self.window_duration), 0))
                if (self.sample_edges and (not has_overlap)):
                    # Pin the first window to the very start of the half.
                    indices[global_idx] = (idx, half_idx, Fraction(0), Fraction(self.window_duration))
                    global_idx += 1
                    # Reserve room at both edges for the two pinned windows.
                    max_possible_clip_start -= (self.window_duration * 2)
                    uniform_clip = (Fraction(max_possible_clip_start, max((self.windows_per_half - 3), 1)) if has_overlap else Fraction((video_metadata['duration'][half_idx] - (self.window_duration * 2)), (self.windows_per_half - 2)))
                else:
                    uniform_clip = (Fraction(max_possible_clip_start, max((self.windows_per_half - 1), 1)) if has_overlap else Fraction(video_metadata['duration'][half_idx], self.windows_per_half))
                # Two windows are emitted separately when edges are pinned.
                windows_per_half = ((self.windows_per_half - 2) if (self.sample_edges and (not has_overlap)) else self.windows_per_half)
                for i in range(windows_per_half):
                    if has_overlap:
                        # Overlapping case: deterministic, evenly spaced starts.
                        clip_start_sec = (uniform_clip * i)
                    else:
                        # Non-overlapping: jitter each start uniformly within
                        # its stride so consecutive windows never collide.
                        clip_start_sec = (((uniform_clip - self.window_duration) * (1 - torch.rand(1, generator=g).item())) + (i * uniform_clip))
                    if self.sample_edges:
                        # Shift past the pinned first window.
                        # NOTE(review): this shift also applies when
                        # has_overlap is True even though no edge window was
                        # emitted in that case — confirm intent.
                        clip_start_sec += self.window_duration
                    clip_end_sec = (clip_start_sec + self.window_duration)
                    indices[global_idx] = (idx, half_idx, Fraction(clip_start_sec), Fraction(clip_end_sec))
                    global_idx += 1
                if (self.sample_edges and (not has_overlap)):
                    # Pin the last window to the very end of the half.
                    indices[global_idx] = (idx, half_idx, Fraction((video_metadata['duration'][half_idx] - self.window_duration)), Fraction(video_metadata['duration'][half_idx]))
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) * self.windows_per_video)

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_duration={self.window_duration}, sample_edges={self.sample_edges}, shuffle={self._shuffle}, seed={self.seed})'
def h36m_numbers(coords3d_true, coords3d_pred, activity_name, procrustes=False, joint_validity_mask=None):
    """Per-activity and overall mean per-joint position error on Human3.6M.

    Args:
        coords3d_true: ground-truth 3D joint coordinates (..., J, 3).
        coords3d_pred: predicted 3D joint coordinates, same shape.
        activity_name: per-example activity labels (compared as bytes).
        procrustes: False, 'rigid' or 'rigid+scale' — optional Procrustes
            alignment of predictions to ground truth before measuring.
        joint_validity_mask: optional boolean joint mask; defaults to all-valid.

    Returns:
        List of 15 per-activity mean errors (fixed H3.6M action order)
        followed by the overall mean error.
    """
    if joint_validity_mask is None:
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the correct dtype.
        joint_validity_mask = np.full_like(coords3d_true[..., 0], fill_value=True, dtype=bool)
    # Evaluate root-relative poses (translation removed).
    coords3d_true = tfu3d.root_relative(coords3d_true)
    coords3d_pred = tfu3d.root_relative(coords3d_pred)
    if procrustes in ('rigid', 'rigid+scale'):
        coords3d_pred = util3d.rigid_align_many(
            coords3d_pred, coords3d_true, joint_validity_mask=joint_validity_mask,
            scale_align=(procrustes == 'rigid+scale'))
    # Euclidean per-joint error.
    dist = np.linalg.norm(coords3d_true - coords3d_pred, axis=-1)
    overall_mean_error = np.mean(dist[joint_validity_mask])
    result = []
    ordered_actions = 'Directions,Discussion,Eating,Greeting,Phoning,Posing,Purchases,Sitting,SittingDown,Smoking,Photo,Waiting,Walking,WalkDog,WalkTogether'.split(',')
    for activity in ordered_actions:
        # activity_name holds bytes, so compare against the encoded name.
        activity = activity.encode('utf8')
        mask = np.logical_and(np.expand_dims(activity_name == activity, -1), joint_validity_mask)
        act_mean_error = np.mean(dist[mask])
        result.append(act_mean_error)
    result.append(overall_mean_error)
    return result
def clean(input_file, output_file):
    """Convert raw 'TAG:... text' lines into 'class_num<TAB>cleaned sentence'.

    Each input line's leading token (before ':') is mapped to a numeric
    class via class_name_to_num and the remaining text is normalized with
    get_only_chars.

    Args:
        input_file: path of the raw input text file.
        output_file: path the cleaned TSV-style lines are written to.
    """
    # Bug fixes: use context managers (the input file handle was leaked),
    # and strip only the newline — line[:-1] silently dropped a real
    # character from a final line lacking a trailing newline.
    with open(input_file, 'r') as reader, open(output_file, 'w') as writer:
        for line in reader:
            parts = line.rstrip('\n').split(' ')
            tag = parts[0].split(':')[0]
            class_num = class_name_to_num[tag]
            sentence = get_only_chars(' '.join(parts[1:]))
            print(tag, class_num, sentence)
            writer.write(str(class_num) + '\t' + sentence + '\n')
def notears_standard(data, loss, loss_grad, c=0.25, r=10.0, e=1e-08, rnd_W_init=False, output_all_progress=False, verbose=False):
    """NOTEARS: learn a DAG adjacency matrix W by augmented-Lagrangian optimization.

    Minimizes ``loss(W) + (p/2)*h(W)^2 + a*h(W)`` where
    ``h(W) = tr(exp(W∘W)) - d`` is the smooth acyclicity penalty
    (h(W) == 0 iff the weighted graph W is acyclic).

    Args:
        data: (n, d) sample matrix.
        loss, loss_grad: callables with signature ``f(W, data, cov, d, n)``
            giving the data-fit term and its gradient w.r.t. W.
        c: sufficient-decrease ratio for deciding when to stop raising p.
        r: multiplicative factor by which the penalty weight p is increased.
        e: convergence tolerance on h(W).
        rnd_W_init: start from random-normal W instead of zeros.
        output_all_progress: return all intermediate states, not just the last.
        verbose: print progress to stdout.

    Returns:
        dict with keys 'h', 'loss', 'W' — or, when output_all_progress is
        True, a list of per-iteration dicts (which also carry 'a').
    """
    n = np.shape(data)[0]
    d = np.shape(data)[1]
    data = np.array(data).astype(dtype=np.float64)
    cov = np.cov(data.T)
    if rnd_W_init:
        W = np.random.randn(d, d)
    else:
        W = np.zeros([d, d])
    W = W.astype(dtype=np.float64)
    # a: Lagrange multiplier; p: quadratic penalty weight.
    a = 0.0
    p = 1.0
    if output_all_progress:
        ret = []

    def h(W):
        # Acyclicity penalty: tr(exp(W∘W)) - d.
        return (np.trace(scipy.linalg.expm(np.multiply(W, W))) - d)

    def h_grad(W):
        # Gradient of h: [exp(W∘W)]^T ∘ 2W.
        return np.multiply(np.transpose(scipy.linalg.expm(np.multiply(W, W))), (2.0 * W))

    def L(W, p, a):
        # Augmented Lagrangian (W arrives flattened from the optimizer).
        W = np.reshape(W, [d, d]).astype(dtype=np.float64)
        return ((loss(W, data, cov, d, n) + ((p / 2.0) * (h(W) ** 2))) + (a * h(W)))

    def L_grad(W, p, a):
        # Gradient of the augmented Lagrangian, flattened for L-BFGS-B.
        W = np.reshape(W, [d, d]).astype(dtype=np.float64)
        return np.reshape((loss_grad(W, data, cov, d, n) + (h_grad(W) * (a + (p * h(W))))), [(d ** 2)]).astype(dtype=np.float64)

    def get_W_star(p, W, a):
        # Inner unconstrained minimization at fixed (p, a).
        return scipy.optimize.minimize(L, W, args=(p, a), jac=L_grad, method='L-BFGS-B', options={'disp': False})

    while True:
        W_star = np.reshape(get_W_star(p, W, a)['x'], [d, d]).astype(dtype=np.float64)
        h_W_star = h(W_star)
        if (h(W) != 0.0):
            # Increase the penalty until h decreases sufficiently (ratio c)
            # or drops below the tolerance e.
            while (h_W_star >= max((c * h(W)), e)):
                p = (r * p)
                W_star = np.reshape(get_W_star(p, W, a)['x'], [d, d]).astype(dtype=np.float64)
                h_W_star = h(W_star)
                if verbose:
                    print('Increasing p:\t p = {: .2e}\n\t\t h_W_star = {}'.format(p, h_W_star))
        if output_all_progress:
            ret.append({'h': h_W_star, 'loss': loss(W_star, data, cov, d, n), 'a': a, 'W': W_star})
        if (h_W_star < e):
            if verbose:
                # NOTE(review): '\nt' in this format string looks like a typo
                # for '\n\t' — output-only, left unchanged here.
                print('Done:\t\t h = {}\n\t\t loss = {}\nt\t\t a = {}'.format(h_W_star, loss(W_star, data, cov, d, n), a))
            if output_all_progress:
                return ret
            return {'h': h_W_star, 'loss': loss(W_star, data, cov, d, n), 'W': W_star}
        if verbose:
            print('Progress:\t h = {}\n\t\t loss = {}\n\t\t a = {}'.format(h_W_star, loss(W_star, data, cov, d, n), a))
        # Dual update of the multiplier, then continue from W_star.
        a = (a + (p * h_W_star))
        W = W_star
class Experts(nn.Module):
    """A bank of per-source linear classifier heads with softmax outputs."""

    def __init__(self, n_source, fdim, num_classes):
        super().__init__()
        # One independent linear head per source domain.
        self.linears = nn.ModuleList(
            [nn.Linear(fdim, num_classes) for _ in range(n_source)]
        )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, i, x):
        """Apply the i-th expert head and normalize to class probabilities."""
        return self.softmax(self.linears[i](x))
def get_root():
    """Locate the project root: the directory holding setup.py/versioneer.py.

    The current working directory is tried first, then the directory of the
    executing script (sys.argv[0]).  Raises VersioneerBadRootError when
    neither location contains the expected files.  Also warns when the
    imported versioneer module does not live next to the root's
    versioneer.py.
    """
    def _has_project_files(candidate):
        # Root is recognized by the presence of either marker file.
        return (os.path.exists(os.path.join(candidate, 'setup.py'))
                or os.path.exists(os.path.join(candidate, 'versioneer.py')))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _has_project_files(root):
        # Fall back to the directory containing the invoked script.
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        if not _has_project_files(root):
            err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
            raise VersioneerBadRootError(err)
    versioneer_py = os.path.join(root, 'versioneer.py')
    try:
        # __file__ is undefined in some embedding scenarios, hence the guard.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print('Warning: build in %s is using versioneer.py from %s'
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        pass
    return root
def gen_learner_wide(data: ImageDataBunch, gen_loss, arch=models.resnet101, nf_factor: int=2) -> Learner:
    """Build a wide U-Net generator learner with the project's standard
    training configuration (spectral norm, self-attention, blur upsampling,
    clamped output range)."""
    return unet_learner_wide(
        data,
        arch=arch,
        wd=0.001,
        blur=True,
        norm_type=NormType.Spectral,
        self_attention=True,
        y_range=(-3.0, 3.0),
        loss_func=gen_loss,
        nf_factor=nf_factor,
    )
class _CounterfactualExpV2SchemaConstants():
    """Key names for the v2 counterfactual-explanation JSON schema.

    Several keys are shared with the common schema and aliased from
    _CommonSchemaConstants; the rest are specific to the v2 payload.
    """
    # v2-specific payload keys.
    TEST_DATA = 'test_data'
    CFS_LIST = 'cfs_list'
    # Shared keys, aliased from the common schema constants.
    LOCAL_IMPORTANCE = _CommonSchemaConstants.LOCAL_IMPORTANCE
    SUMMARY_IMPORTANCE = _CommonSchemaConstants.SUMMARY_IMPORTANCE
    METADATA = _CommonSchemaConstants.METADATA
    # Model / data description keys.
    MODEL_TYPE = 'model_type'
    DATA_INTERFACE = 'data_interface'
    FEATURE_NAMES = 'feature_names'
    # Counterfactual target specification keys.
    DESIRED_CLASS = 'desired_class'
    DESIRED_RANGE = 'desired_range'
    FEATURE_NAMES_INCLUDING_TARGET = 'feature_names_including_target'
class Server():
    """Live matplotlib visualizer for points published on the
    /map_collector/points ROS topic."""

    def __init__(self, *args, **kwargs):
        rospy.Subscriber('/map_collector/points', Float32MultiArray, callback=self.read_points)
        # Interactive mode so the window updates from the redraw loop.
        plt.ion()
        plt.show()
        self.points = None

    def read_points(self, msg):
        """Unpack a flat [xs..., ys..., rs...] array into three lists."""
        flat = list(msg.data)
        count = len(flat) // 3
        self.points = (flat[:count], flat[count:2 * count], flat[2 * count:])

    def start(self):
        """Redraw loop: replot the most recent points once per second."""
        while True:
            if self.points is None:
                # Busy-wait until the first message arrives.
                continue
            xs, ys, rs = self.points
            plt.cla()
            plt.axis('equal')
            plt.scatter(*find_missing(xs, ys, rs), c='r')
            plt.scatter(xs, ys, c='b', edgecolors='none', s=8)
            plt.draw()
            plt.pause(1)
def add_flops_counter_hook_function(module):
    """Attach a FLOPs-counting forward hook to supported module types.

    Idempotent: a module already carrying __flops_handle__ is left alone.
    Only Conv2d and Linear layers are instrumented; anything else is ignored.
    """
    hook_by_type = (
        (torch.nn.Conv2d, conv_flops_counter_hook),
        (torch.nn.Linear, linear_flops_counter_hook),
    )
    for module_type, hook in hook_by_type:
        if isinstance(module, module_type):
            if not hasattr(module, '__flops_handle__'):
                module.__flops_handle__ = module.register_forward_hook(hook)
            return
class SPADEDistillerModules(BaseSPADEDistillerModules):
    """Training-only SPADE distiller: adds a feature-distillation loss
    between teacher and (adapter-projected) student activations."""

    def __init__(self, opt):
        super(SPADEDistillerModules, self).__init__(opt)

    def profile(self, input_semantics, config=None):
        # Profiling is intentionally unsupported on the distiller.
        raise NotImplementedError('The distiller is only for training!!!')

    def calc_distill_loss(self, Tacts, Sacts):
        """MSE between teacher activations and adapted student activations.

        Returns:
            (total weighted distillation loss, dict of per-layer losses).
        """
        losses = {}
        for i, adapter in enumerate(self.netAs):
            # Adapters are 1x1 convs projecting student channels to teacher's.
            assert isinstance(adapter, nn.Conv2d)
            layer = self.mapping_layers[i]
            teacher_act = Tacts[layer]
            student_act = adapter(Sacts[layer])
            losses['G_distill%d' % i] = F.mse_loss(student_act, teacher_act)
        total = sum(losses.values()) * self.opt.lambda_distill
        return total, losses

    def load_networks(self, verbose=True):
        """Optionally restore a pretrained generator, transfer its weights
        into the student, then run the regular network loading."""
        if self.opt.restore_pretrained_G_path is not None:
            util.load_network(self.netG_pretrained, self.opt.restore_pretrained_G_path, verbose)
            load_pretrained_weight(self.opt.pretrained_netG, self.opt.student_netG,
                                   self.netG_pretrained, self.netG_student,
                                   self.opt.pretrained_ngf, self.opt.student_ngf)
            # The pretrained copy is only needed for the weight transfer.
            del self.netG_pretrained
        super(SPADEDistillerModules, self).load_networks(verbose)
def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build a MobileNetV3 model from a timm-style block specification.

    Args:
        variant: model name; the substrings 'small' and 'minimal' select the
            small vs. large topology and the minimal (no SE, ReLU-only)
            variant respectively.
        channel_multiplier: width multiplier applied when rounding channels.
        pretrained: load pretrained weights via _create_mnv3.
        **kwargs: forwarded to the model constructor (act/bn overrides are
            resolved out of it first).

    Returns:
        The constructed model.

    The arch_def strings follow the decode_arch_def mini-language
    (ds=depthwise-separable, ir=inverted residual, cn=conv; r=repeats,
    k=kernel, s=stride, e=expansion, c=channels, se=squeeze-excite ratio;
    the _nre suffix presumably pins the block's activation — confirm
    against decode_arch_def).
    """
    if ('small' in variant):
        num_features = 1024
        if ('minimal' in variant):
            act_layer = resolve_act_layer(kwargs, 'relu')
            arch_def = [['ds_r1_k3_s2_e1_c16'], ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], ['ir_r2_k3_s1_e3_c48'], ['ir_r3_k3_s2_e6_c96'], ['cn_r1_k1_s1_c576']]
        else:
            act_layer = resolve_act_layer(kwargs, 'hard_swish')
            arch_def = [['ds_r1_k3_s2_e1_c16_se0.25_nre'], ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], ['ir_r2_k5_s1_e3_c48_se0.25'], ['ir_r3_k5_s2_e6_c96_se0.25'], ['cn_r1_k1_s1_c576']]
    else:
        num_features = 1280
        if ('minimal' in variant):
            act_layer = resolve_act_layer(kwargs, 'relu')
            arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], ['ir_r3_k3_s2_e3_c40'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112'], ['ir_r3_k3_s2_e6_c160'], ['cn_r1_k1_s1_c960']]
        else:
            act_layer = resolve_act_layer(kwargs, 'hard_swish')
            arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']]
    # MobileNetV3 SE blocks use hard-sigmoid gating with ReLU inside.
    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
    model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs)
    model = _create_mnv3(variant, pretrained, **model_kwargs)
    return model
# NOTE(review): the stray '_loss' token below looks like the tail of a
# decorator (e.g. ``@weighted_loss``) split across chunk boundaries —
# confirm against the full file before reformatting.
_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
    """Quality Focal Loss (QFL), probability-input variant.

    Unlike the logit-based QFL, ``pred`` is assumed to already be
    sigmoid-activated probabilities; no sigmoid is applied here.

    Args:
        pred: (N, C) tensor of predicted class probabilities.
        target: tuple (label, score) — integer category labels and float
            quality (e.g. IoU) targets for positive samples.
        beta: focusing exponent on the |quality - prediction| gap.

    Returns:
        (N,) per-sample loss tensor, summed over classes.
    """
    assert (len(target) == 2), 'target for QFL must be a tuple of two elements,\n including category label and quality label, respectively'
    (label, score) = target
    # pred is already a probability; name kept for parity with the
    # logit-based implementation.
    pred_sigmoid = pred
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    # Negative part: push every class probability toward 0, modulated by
    # pred^beta so confident negatives dominate.
    loss = (F.binary_cross_entropy(pred, zerolabel, reduction='none') * scale_factor.pow(beta))
    # Background class index equals the number of foreground classes.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # Positive part: regress the predicted probability toward the quality
    # score, weighted by |score - pred|^beta.
    scale_factor = (score[pos] - pred_sigmoid[(pos, pos_label)])
    loss[(pos, pos_label)] = (F.binary_cross_entropy(pred[(pos, pos_label)], score[pos], reduction='none') * scale_factor.abs().pow(beta))
    loss = loss.sum(dim=1, keepdim=False)
    return loss
def test_grasp_dataset():
    """Smoke-test the grasp dataset pipeline.

    Builds the training ops for dataset '102', then runs a handful of
    batches through a TF1 session, asserting each run yields non-empty
    feature dicts.
    """
    dataset = '102'
    batch_size = 10
    num_batches_to_traverse = 10
    grasp_dataset_object = GraspDataset(dataset=dataset)
    (feature_op_dicts, features_complete_list, time_ordered_feature_name_dict,
     num_samples_in_dataset) = grasp_dataset_object.get_training_dictionaries(batch_size=batch_size)
    # Generous thread counts; allow_growth avoids grabbing all GPU memory.
    config = tf.ConfigProto()
    config.inter_op_parallelism_threads = 40
    config.intra_op_parallelism_threads = 40
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as tf_session:
        tf_session.run(tf.global_variables_initializer())
        for batch_num in tqdm(range(num_batches_to_traverse), desc='dataset'):
            attempt_num_string = 'batch_' + str(batch_num).zfill(4) + '_'
            print('dataset_' + dataset + '_' + attempt_num_string + 'starting')
            output_features_dicts = tf_session.run(feature_op_dicts)
            assert len(output_features_dicts) > 0
def package_shortname(long_name, family):
    """Map a long Lattice package name (e.g. 'CABGA256') to its short
    ordering-code form (prefix + ball/pin count).

    Several prefixes depend on the device family (MachXO / MachXO3D use
    different codes than other families).  Unknown package names print an
    error and terminate the program.
    """
    if long_name.startswith('CABGA'):
        prefix = 'B' if family == 'MachXO' else 'BG'
        return prefix + long_name[5:]
    if long_name.startswith('CSBGA'):
        prefix = 'M' if family == 'MachXO' else 'MG'
        return prefix + long_name[5:]
    if long_name.startswith('CSFBGA'):
        return 'MG' + long_name[6:]
    if long_name.startswith('UCBGA'):
        return 'UMG' + long_name[5:]
    if long_name.startswith('FPBGA'):
        return 'FG' + long_name[5:]
    if long_name.startswith('FTBGA'):
        prefix = 'FT' if family == 'MachXO' else 'FTG'
        return prefix + long_name[5:]
    if long_name.startswith('WLCSP'):
        prefix = 'UTG' if family == 'MachXO3D' else 'UWG'
        return prefix + long_name[5:]
    if long_name.startswith('TQFP'):
        prefix = 'T' if family == 'MachXO' else 'TG'
        return prefix + long_name[4:]
    if long_name.startswith('QFN'):
        if family == 'MachXO3D':
            return 'SG' + long_name[3:]
        # Pin counts starting with '8' (e.g. QFN84) use the QN code.
        if long_name[3] == '8':
            return 'QN' + long_name[3:]
        return 'SG' + long_name[3:]
    print('unknown package name ' + long_name)
    sys.exit(-1)
def main(args):
    """Load the base BLIP-Diffusion model on CPU in eval mode and export its
    state dict via save_blip_diffusion_model."""
    loaded = load_model_and_preprocess('blip_diffusion', 'base', device='cpu', is_eval=True)
    # load_model_and_preprocess returns (model, vis_processors, txt_processors);
    # only the model is needed here.
    model = loaded[0]
    save_blip_diffusion_model(model.state_dict(), args)
class DeprecatedMID():
    """Deprecated MT-protocol message identifiers (MID codes).

    Values are the raw wire-format message IDs; presumably for an Xsens-style
    MT device (verify against the device protocol documentation).  Kept only
    for backward compatibility with older firmware/documentation.
    """
    # Initialization and data-length queries.
    InitMT = 2
    InitMTResults = 3
    ReqDataLength = 10
    DataLength = 11
    # GPS status request/response.
    ReqGPSStatus = 166
    GPSStatus = 167
    # Synchronization and output configuration.
    SetSyncInSettings = 214
    SetSyncOutSettings = 216
    SetOutputSkipFactor = 212
    # Alignment, heading, lever arm, declination, processing flags.
    SetObjectAlignment = 224
    SetHeading = 130
    SetLeverArmGPS = 104
    SetMagneticDeclination = 106
    SetProcessingFlags = 32
# NOTE(review): the stray '_model' token below looks like the tail of a
# decorator (e.g. ``@register_model``) split across chunk boundaries —
# confirm against the full file before reformatting.
_model
def regnetx_320(pretrained=False, **kwargs):
    """Construct the RegNetX-320 ('regnetx_320') variant via _create_regnet.

    Args:
        pretrained: load pretrained weights if available.
        **kwargs: forwarded to the model builder.
    """
    return _create_regnet('regnetx_320', pretrained, **kwargs)
def run_pdfminer(pdf_path, output_xml_path):
    """Convert a PDF to pdfminer's XML layout format via the pdf2txt.py CLI.

    Args:
        pdf_path: path of the input PDF file.
        output_xml_path: path the XML output is written to.

    Returns:
        The subprocess.CompletedProcess, so callers can inspect
        ``returncode`` (previously the result was discarded and failures
        were silent).  Backward-compatible: no exception is raised on a
        non-zero exit.
    """
    # List-form argv (shell=False) avoids shell injection via file paths.
    return subprocess.run(['pdf2txt.py', '-t', 'xml', pdf_path, '-o', output_xml_path])