code
stringlengths
101
5.91M
def test_orbit_method_inputAsQuantity():
    """Check that Orbit.Jacobi accepts OmegaP given as an astropy Quantity.

    The Jacobi integral computed with OmegaP as a Quantity (km/s/kpc) must
    match the one computed with OmegaP as a plain float in internal units
    (scaled by ro/vo).
    """
    from galpy import potential
    from galpy.orbit import Orbit

    ro, vo = 7.0, 210.0
    orb = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    jacobi_from_quantity = orb.Jacobi(
        pot=potential.MWPotential,
        OmegaP=41 * units.km / units.s / units.kpc,
        use_physical=False,
    )
    jacobi_from_float = orb.Jacobi(
        pot=potential.MWPotential,
        OmegaP=41.0 * ro / vo,
        use_physical=False,
    )
    assert numpy.fabs(jacobi_from_quantity - jacobi_from_float) < 10.0 ** -8.0, (
        'Orbit method Jacobi does not return the correct value when input OmegaP is Quantity'
    )
    return None
# Fix: the registry call was a bare statement (`_REGISTRY.register() def ...`),
# i.e. a decorator whose '@' was lost — as written it registered nothing and
# is not even valid syntax. Restored as a decorator on the factory.
@_REGISTRY.register()
def resnet101_ms_l123(pretrained=True, **kwargs):
    """Build a ResNet-101 backbone with MixStyle applied after layers 1-3.

    Args:
        pretrained: when True, load ImageNet weights for resnet101.
        **kwargs: accepted for registry-interface compatibility; unused.

    Returns:
        The constructed ResNet model.
    """
    from dassl.modeling.ops import MixStyle

    model = ResNet(
        block=Bottleneck,
        layers=[3, 4, 23, 3],
        ms_class=MixStyle,
        ms_layers=['layer1', 'layer2', 'layer3'],
    )
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet101'])
    return model
# Fix 1: the decorator was truncated to a bare `.parametrize(...)` (a syntax
# error) — restored the conventional `@pytest.mark.parametrize` prefix.
# Fix 2: the two `open()` handles were never closed; use `with` blocks.
@pytest.mark.parametrize('loader_parameters', [{'path_data': [__data_testing_dir__], 'target_suffix': ['_lesion-manual'], 'extensions': [], 'roi_params': {'suffix': '_seg-manual', 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': ['T1w', 'T2w']}}])
def test_bids_df_anat(download_data_testing_test_files, loader_parameters):
    """Build a BidsDataframe for anatomical data and compare it to the
    reference CSV shipped with the test data; the diff must be empty."""
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    # 'path' is machine-specific, so it is excluded from the comparison.
    df_test = bids_df.df.drop(columns=['path'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = Path(loader_parameters[LoaderParamsKW.PATH_DATA][0], 'df_ref.csv')
    csv_test = Path(loader_parameters[LoaderParamsKW.PATH_DATA][0], 'df_test.csv')
    df_test.to_csv(csv_test, index=False)
    with open(csv_ref) as ref_fd, open(csv_test) as test_fd:
        diff = csv_diff.compare(csv_diff.load_csv(ref_fd), csv_diff.load_csv(test_fd))
    assert (diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []})
class CatBoostEvalMetricMSE(object):
    """Custom CatBoost eval metric computing (optionally weighted) MSE."""

    def get_final_error(self, error, weight):
        # The accumulated error is already the final metric value.
        return error

    def is_max_optimal(self):
        # Lower MSE is better, so this metric is minimized.
        return False

    def evaluate(self, approxes, target, weight):
        """Compute MSE for a single-dimensional approximation.

        Returns a (error, weight_sum) pair as required by CatBoost.
        """
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])
        predictions = np.array(approxes[0])
        targets = np.array(target)
        sample_weights = np.array(weight) if weight is not None else weight
        return (mean_squared_error(targets, predictions, sample_weight=sample_weights), 0)
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
    """Apply an optional element-wise weight to ``loss`` and reduce it.

    Args:
        loss: element-wise loss tensor.
        weight: optional weight tensor with the same number of dims as
            ``loss``; its dim-1 size must be 1 or match ``loss``.
        reduction: 'none' | 'mean' | 'sum'.
        sample_wise: for 'mean', normalize each sample by its own weight
            sum before averaging over the batch.

    Returns:
        The (weighted and) reduced loss.
    """
    if weight is not None:
        assert weight.dim() == loss.dim()
        assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    # Unweighted losses and 'sum' go through the plain reduction helper.
    if weight is None or reduction == 'sum':
        return reduce_loss(loss, reduction)

    if reduction == 'mean':
        eps = 1e-12  # guards against division by an all-zero mask
        if weight.size(1) == 1:
            weight = weight.expand_as(loss)
        if sample_wise:
            # Normalize per sample, then average over the batch dimension.
            per_sample = weight.sum(dim=[1, 2, 3], keepdim=True)
            return (loss / (per_sample + eps)).sum() / per_sample.size(0)
        # Global weighted mean.
        return loss.sum() / (weight.sum() + eps)

    # reduction == 'none': return the weighted, unreduced loss.
    return loss
def run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0, num_gpus=8):
    """Run one tracker configuration over a dataset (or a single sequence).

    Args:
        tracker_name: name of the tracker implementation.
        tracker_param: parameter-file name for the tracker.
        run_id: optional run identifier.
        dataset_name: dataset key understood by ``get_dataset``.
        sequence: optional sequence name; restricts the run to one sequence.
        debug: debug level forwarded to ``run_dataset``.
        threads: number of worker threads.
        num_gpus: number of GPUs available to the run.
    """
    selected = get_dataset(dataset_name)
    if sequence is not None:
        selected = [selected[sequence]]
    tracker_list = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]
    run_dataset(selected, tracker_list, debug, threads, num_gpus=num_gpus)
class Pool():
    """Fixed-capacity ring buffer of episodes with replacement sampling.

    Each stored item ``x`` is a tuple whose first element has a length
    (the episode's step sequence); ``sum_len`` tracks the total step
    count currently stored.

    Fixes over the original:
      * ``sample``/``sample_steps`` previously drew from the whole backing
        list, returning ``None`` for slots not yet filled; they now sample
        only from the filled portion.
      * ``sample_steps`` divided ``sum_len`` by the full capacity instead of
        the number of filled slots, underestimating the average episode
        length until the pool was full.
      * The stray bare-string "docstrings" placed after methods were moved
        into proper docstrings.
    """

    def __init__(self, size):
        self.size = size              # maximum number of stored episodes
        self.data = [None] * size     # ring buffer backing store
        self.idx = 0                  # next slot to overwrite
        self.sum_len = 0              # total steps over stored episodes
        self.total = 0                # number of put() calls ever made

    def put(self, x):
        """Insert episode ``x``, evicting the oldest when full."""
        if self.total >= self.size:
            evicted = self.data[self.idx]
            self.sum_len -= len(evicted[0])
        self.sum_len += len(x[0])
        self.data[self.idx] = x
        self.idx = (self.idx + 1) % self.size
        self.total += 1

    def _filled(self):
        # Number of slots actually holding an episode.
        return min(self.total, self.size)

    def sample(self, size):
        """Sample a batch of ``size`` episodes (with replacement)."""
        return random.choices(self.data[:self._filled()], k=size)

    def sample_steps(self, size):
        """Sample episodes whose total step count is close to ``size``."""
        filled = self._filled()
        avg_len = self.sum_len / filled
        eps_to_fetch = int(size / avg_len)
        return random.choices(self.data[:filled], k=eps_to_fetch)
def fcn_resnet50(pretrained=False, progress=True, num_classes=21, aux_loss=None, **kwargs):
    """Construct an FCN segmentation model on a ResNet-50 backbone.

    Args:
        pretrained: load pretrained weights when True.
        progress: show a download progress bar.
        num_classes: number of output segmentation classes.
        aux_loss: enable the auxiliary classifier loss.
        **kwargs: forwarded to the model loader.
    """
    arch, backbone = 'fcn', 'resnet50'
    return _load_model(arch, backbone, pretrained, progress, num_classes, aux_loss, **kwargs)
class LaTextControl(bc.BaseTextControl):
    """Styled text control for I-heart-LA source using a container lexer.

    Highlights keywords, numbers and back-quoted strings, and live-replaces
    ``\\name`` escape sequences (terminated by a space) with characters from
    ``unicode_dict``.

    NOTE(review): many ``unicode_dict`` values are empty strings — they look
    like unicode characters lost in text extraction; confirm against the
    original source. The control flow below was reconstructed from a
    collapsed one-line source; the nesting of the ':' branch in
    ``OnStyleNeeded`` is the most plausible reading — verify upstream.
    """

    # Style ids used with wx.stc.STC_LEX_CONTAINER.
    (STC_STYLE_LA_DEFAULT, STC_STYLE_LA_KW, STC_STYLE_LA_IDENTIFIER,
     STC_STYLE_LA_STRING, STC_STYLE_LA_OPERATOR, STC_STYLE_LA_NUMBER,
     STC_STYLE_LA_ESCAPE_CHAR, STC_STYLE_LA_ESCAPE_STR,
     STC_STYLE_LA_ESCAPE_PARAMETER, STC_STYLE_LA_ESCAPE_DESCRIPTION) = range(10)
    # A substitution is typed as "\name" and committed by a trailing space.
    SUBSTITUTION_START = '\\'
    SUBSTITUTION_END = ' '

    def __init__(self, parent):
        super().__init__(parent)
        self.SetEditable(True)
        # Language keywords highlighted with STC_STYLE_LA_KW.
        self.keywords = ['where', 'sqrt', 'exp', 'log', 'sum', 'symmetric', 'diagonal', 'sparse', 'if', 'otherwise', 'in', 'index', 'given']
        # Escape-name -> replacement-character table (see class NOTE about
        # the empty-string values).
        self.unicode_dict = {'R': 'R', 'Z': 'Z', 'x': '', 'times': '', 'inf': '', 'in': '', 'sum': '', 'had': '', 'kro': '', 'dot': '', 'T': 'T', '^T': 'T', 'par': '', 'emp': '', 'arr': '', 'int': '', 'dbl': '', 'pi': '', 'sig': '', 'rho': '', 'phi': '', 'theta': '', 'alpha': '', 'beta': '', 'gamma': '', 'delta': '', 'epsilon': '', 'zeta': '', 'eta': '', 'iota': '', 'kappa': '', 'lambda': '', 'mu': '', 'nu': '', 'xi': '', 'omicron': '', 'sigma': '', 'tau': '', 'upsilon': '', 'chi': '', 'psi': '', 'omega': '', 'u0': '0', 'u1': '1', 'u2': '2', 'u3': '3', 'u4': '4', 'u5': '5', 'u6': '6', 'u7': '7', 'u8': '8', 'u9': '9', '_0': '0', '_1': '1', '_2': '2', '_3': '3', '_4': '4', '_5': '5', '_6': '6', '_7': '7', '_8': '8', '_9': '9', 's0': '0', 's1': '1', 's2': '2', 's3': '3', 's4': '4', 's5': '5', 's6': '6', 's7': '7', 's8': '8', 's9': '9', 's-1': '1', '^-1': '1', '^0': '0', '^1': '1', '^2': '2', '^3': '3', '^4': '4', '^5': '5', '^6': '6', '^7': '7', '^8': '8', '^9': '9', '_a': 'a', '_e': 'e', '_h': 'h', '_i': 'i', '_j': 'j', '_k': 'k', '_l': 'l', '_m': 'm', '_n': 'n', '_o': 'o', '_p': 'p', '_s': 's', '_t': 't', '_u': 'u', '_v': 'v', '_x': 'x', '1': '1', 'cdot': '', 'nabla': '', 'sqrt': '', '+-': '', '<=': '', '<=>': '', '>=': '', '1/2': '12', '1/3': '13', '1/4': '14', '1/5': '15', '1/6': '16', '1/7': '17', '1/8': '18', '1/9': '19', '1/10': '110', '2/3': '23', '2/5': '25', '3/4': '34', '3/5': '35', '3/8': '38', '4/5': '45', '5/6': '56', '5/8': '58', '7/8': '78', 'heart': '', 'iheartla': 'ILA', 'le': '', 'ge': '', 'ne': '=', 'notin': '', 'div': '', 'nplus': '', 'linner': '', 'rinner': '', 'num1': '1', 'hat': '', 'bar': '', 'dag': '', '^+': '+', 's+': '+'}
        # Visual specs for the styles actually emitted by OnStyleNeeded.
        self.StyleSetSpec(self.STC_STYLE_LA_DEFAULT, 'fore:#A9B7C6,back:{}'.format(bc.BACKGROUND_COLOR))
        self.StyleSetSpec(self.STC_STYLE_LA_KW, 'fore:#94558D,bold,back:{}'.format(bc.BACKGROUND_COLOR))
        self.StyleSetSpec(self.STC_STYLE_LA_ESCAPE_STR, 'fore:#6A8759,bold,back:{}'.format(bc.BACKGROUND_COLOR))
        self.StyleSetSpec(self.STC_STYLE_LA_NUMBER, 'fore:#9686F5,bold,back:{}'.format(bc.BACKGROUND_COLOR))
        self.StyleSetSpec(self.STC_STYLE_LA_ESCAPE_PARAMETER, 'fore:#CC7832,bold,back:{}'.format(bc.BACKGROUND_COLOR))
        self.StyleSetSpec(self.STC_STYLE_LA_ESCAPE_DESCRIPTION, 'fore:#6C7986,bold,back:{}'.format(bc.BACKGROUND_COLOR))
        # Container lexing: styling is produced by OnStyleNeeded below.
        self.SetLexer(wx.stc.STC_LEX_CONTAINER)
        self.Bind(wx.stc.EVT_STC_MARGINCLICK, self.OnMarginClick)
        self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyleNeeded)

    def OnStyleNeeded(self, event):
        """Style the document from position 0 up to the event position.

        Walks the text character by character, emitting style runs and
        performing in-place substitution of matched escape sequences.
        """
        where_block = False  # True once a 'where' keyword has been styled
        last_styled_pos = self.GetEndStyled()
        line = self.LineFromPosition(last_styled_pos)
        start_pos = self.PositionFromLine(line)
        end_pos = event.GetPosition()
        # Restyle from the beginning every time (overrides the incremental
        # start computed above).
        start_pos = 0
        while (start_pos < end_pos):
            self.StartStyling(start_pos)
            char = self.GetTextRange(start_pos, (start_pos + 1))
            style = self.STC_STYLE_LA_DEFAULT
            if (char == '`'):
                # Back-quoted string: style through the closing backtick.
                self.SetStyling(1, self.STC_STYLE_LA_ESCAPE_STR)
                start_pos += 1
                while ((start_pos < end_pos) and (self.GetTextRange(start_pos, (start_pos + 1)) != '`')):
                    self.StartStyling(start_pos)
                    self.SetStyling(1, self.STC_STYLE_LA_ESCAPE_STR)
                    start_pos += 1
                self.SetStyling(1, self.STC_STYLE_LA_ESCAPE_STR)
                start_pos += 1
                continue
            elif ((char == ':') and where_block):
                # Inside a where block, 'name : description' lines get the
                # parameter / description styles around the first ':'.
                cur_line = self.LineFromPosition(start_pos)
                line_pos = self.PositionFromLine(cur_line)
                if (':' not in self.GetTextRange(line_pos, start_pos)):
                    # First ':' on the line: style what precedes it as a
                    # parameter, then fall through to style the ':' itself.
                    self.StartStyling(line_pos)
                    self.SetStyling((start_pos - line_pos), self.STC_STYLE_LA_ESCAPE_PARAMETER)
                    self.StartStyling(start_pos)
                else:
                    line_end = self.GetLineEndPosition(cur_line)
                    if (self.GetTextRange(line_pos, start_pos).count(':') == 1):
                        # Second ':' on the line: the rest is a description.
                        self.StartStyling((start_pos + 1))
                        self.SetStyling(((line_end - start_pos) + 1), self.STC_STYLE_LA_ESCAPE_DESCRIPTION)
                        start_pos = line_end
                        continue
            elif (char in (self.SUBSTITUTION_START, self.SUBSTITUTION_END)):
                # Possible "\name " escape: grow the prefix until it either
                # matches a full substitution or stops being a valid prefix.
                match = False
                index = 1
                prefix = self.GetTextRange(start_pos, (start_pos + index))
                unicode_str = ''
                while (self.is_unicode_prefix(prefix) and ((start_pos + index) < end_pos)):
                    if self.is_unicode(prefix):
                        unicode_str = self.get_unicode(prefix)
                        match = True
                        break
                    index += 1
                    prefix = self.GetTextRange(start_pos, (start_pos + index))
                if match:
                    # Replace the escape text with its unicode character.
                    self.DeleteRange(start_pos, index)
                    self.AddText(unicode_str)
                    start_pos += index
                    continue
            elif char.isnumeric():
                style = self.STC_STYLE_LA_NUMBER
            else:
                # Try to match a keyword starting at a word boundary.
                index = 1
                prefix = self.GetTextRange(start_pos, (start_pos + index))
                if ((start_pos > 1) and (not self.GetTextRange((start_pos - 1), start_pos).isalnum())):
                    while (self.is_keyword_prefix(prefix) and ((start_pos + index) < end_pos)):
                        index += 1
                        prefix = self.GetTextRange(start_pos, (start_pos + index))
                    if ((index > 2) and (((start_pos + index) + 1) < end_pos)):
                        if (not self.GetTextRange(((start_pos + index) - 1), (start_pos + index)).isalnum()):
                            prefix = self.GetTextRange(start_pos, ((start_pos + index) - 1))
                            if self.is_keyword(prefix):
                                self.SetStyling(index, self.STC_STYLE_LA_KW)
                                start_pos += (index - 1)
                                if (prefix == 'where'):
                                    # Enables ':' parameter/description styling.
                                    where_block = True
                                continue
            # Default path: style the single character and advance.
            self.SetStyling(1, style)
            start_pos += 1

    def OnMarginClick(self, event):
        # Margin clicks are currently ignored.
        pass

    def is_keyword_prefix(self, prefix):
        """True if ``prefix`` starts at least one language keyword."""
        for keyword in self.keywords:
            if keyword.startswith(prefix):
                return True
        return False

    def is_unicode_prefix(self, prefix):
        """True if ``prefix`` starts some full "\\name " escape sequence."""
        for unicode in self.unicode_dict:
            target = ((self.SUBSTITUTION_START + unicode) + self.SUBSTITUTION_END)
            if target.startswith(prefix):
                return True
            # fall through to try the next escape name
        return False

    def get_unicode(self, unicode):
        """Return the replacement for a complete escape sequence.

        Strips the leading SUBSTITUTION_START and trailing SUBSTITUTION_END
        before the dictionary lookup.
        """
        return self.unicode_dict[unicode[len(self.SUBSTITUTION_START):(- len(self.SUBSTITUTION_END))]]

    def is_keyword(self, key):
        """Exact keyword membership test."""
        return (key in self.keywords)

    def is_unicode(self, prefix):
        """True if ``prefix`` is exactly a full "\\name " escape sequence."""
        for unicode in self.unicode_dict:
            target = ((self.SUBSTITUTION_START + unicode) + self.SUBSTITUTION_END)
            if (target == prefix):
                return True
        return False
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Projects CLIP image and prompt embeddings into the conditioning
    inputs of an unCLIP decoder: extra cross-attention context tokens plus
    an additive time embedding.
    """

    # NOTE(review): bare expression — almost certainly the tail of a
    # decorator on __init__ (presumably `@register_to_config`) whose prefix
    # was lost in extraction; confirm against the upstream source.
    _to_config

    def __init__(self, *, clip_extra_context_tokens: int=4, clip_embeddings_dim: int=768, time_embed_dim: int, cross_attention_dim):
        super().__init__()
        # Learned stand-in for the image embedding in the unconditional half
        # of classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # Projections into the time-embedding space.
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_extra_context_tokens = clip_extra_context_tokens
        # Emits `clip_extra_context_tokens` tokens of width cross_attention_dim.
        self.clip_extra_context_tokens_proj = nn.Linear(clip_embeddings_dim, (self.clip_extra_context_tokens * cross_attention_dim))
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return (text_encoder_hidden_states, additive_clip_time_embeddings).

        When ``do_classifier_free_guidance`` is True, the learned
        unconditional embedding is prepended along the batch dimension, so
        ``prompt_embeds`` is expected to already be doubled accordingly.
        """
        if do_classifier_free_guidance:
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(image_embeddings_batch_size, (- 1))
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # Image and prompt batches must line up after the optional doubling.
        assert (image_embeddings.shape[0] == prompt_embeds.shape[0])
        batch_size = prompt_embeds.shape[0]
        # Combine image and prompt projections into one additive time embedding.
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = (time_projected_image_embeddings + time_projected_prompt_embeds)
        # (batch, tokens*dim) -> (batch, tokens, dim) via reshape + permute.
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, (- 1), self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        # Project + normalize the text hidden states, then prepend the extra tokens.
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return (text_encoder_hidden_states, additive_clip_time_embeddings)
class BaseDeep(pl.LightningModule, _BaseModel):
    """Base LightningModule for deep CCA-style multi-view models.

    Holds one encoder per view and provides shared train/val/test loops;
    subclasses are expected to provide ``objective``.
    """

    def __init__(self, latent_dimensions: int, encoders=None, optimizer: str='adam', scheduler: Optional[str]=None, lr: float=0.01, extra_optimizer_kwargs: Optional[Dict[(str, Any)]]=None, max_epochs: int=1000, eps=1e-06, *args, **kwargs):
        super().__init__()
        if (extra_optimizer_kwargs is None):
            extra_optimizer_kwargs = {}
        self.latent_dimensions = latent_dimensions
        self.optimizer = optimizer
        # NOTE(review): `scheduler` is stored but not consumed anywhere in
        # this class — possibly used by subclasses; confirm.
        self.scheduler = scheduler
        self.lr = lr
        self.extra_optimizer_kwargs = extra_optimizer_kwargs
        self.max_epochs = max_epochs
        self.eps = eps
        if (encoders is None):
            raise ValueError('Encoders must be a list of torch.nn.Module with length equal to the number of representations.')
        self.encoders = torch.nn.ModuleList(encoders)

    def forward(self, views, **kwargs):
        """Encode each view with its corresponding encoder."""
        # Cache the number of views on first call.
        if (not hasattr(self, 'n_views_')):
            self.n_views_ = len(views)
        z = [encoder(view) for (encoder, view) in zip(self.encoders, views)]
        return z

    def loss(self, batch, **kwargs):
        """Compute the training objective for one batch."""
        representations = self(batch['views'])
        return {'objective': self.objective(representations)}

    def pairwise_correlations(self, loader: torch.utils.data.DataLoader):
        # Delegates to the _BaseModel implementation.
        return super().pairwise_correlations(loader)

    def correlation_captured(self, z):
        """Total correlation captured, scored via MCCA on centered z."""
        z = [(zi - zi.mean(0)) for zi in z]
        return MCCA(latent_dimensions=self.latent_dimensions).fit(z).score(z).sum()

    def score(self, loader: torch.utils.data.DataLoader, **kwargs):
        """Transform the loader's data and return the captured correlation."""
        z = self.transform(loader)
        corr = self.correlation_captured(z)
        return corr

    def training_step(self, batch: Dict[(str, Any)], batch_idx: int) -> torch.Tensor:
        loss = self.loss(batch)
        for (k, v) in loss.items():
            self.log(f'train/{k}', v, on_step=False, on_epoch=True, batch_size=batch['views'][0].shape[0])
        return loss['objective']

    def validation_step(self, batch: Dict[(str, Any)], batch_idx: int) -> torch.Tensor:
        loss = self.loss(batch)
        for (k, v) in loss.items():
            self.log(f'val/{k}', v, on_step=False, on_epoch=True, batch_size=batch['views'][0].shape[0])
        return loss['objective']

    def test_step(self, batch: Dict[(str, Any)], batch_idx: int) -> torch.Tensor:
        loss = self.loss(batch)
        for (k, v) in loss.items():
            self.log(f'test/{k}', v, on_step=False, on_epoch=True, batch_size=batch['views'][0].shape[0])
        return loss['objective']

    # NOTE(review): bare call — almost certainly a mangled `@torch.no_grad()`
    # decorator lost in extraction (as written it would raise NameError at
    # class creation); confirm against the original source.
    _grad()
    def get_representations(self, loader: torch.utils.data.DataLoader):
        """Encode every batch in ``loader`` and stack per-view on CPU."""
        self.eval()
        all_z = []
        for batch in loader:
            views_device = [view.to(self.device) for view in batch['views']]
            z = self(views_device)
            all_z.append([z_.cpu() for z_ in z])
        stacked_z = [torch.vstack(z_) for z_ in zip(*all_z)]
        return stacked_z

    # NOTE(review): same mangled-decorator situation as above.
    _grad()
    def transform(self, loader: torch.utils.data.DataLoader) -> List[np.ndarray]:
        """Like get_representations but returned as numpy arrays."""
        return [z.numpy() for z in self.get_representations(loader)]

    def configure_optimizers(self) -> Union[(torch.optim.Optimizer, Tuple[(List[torch.optim.Optimizer], List[torch.optim.lr_scheduler._LRScheduler])])]:
        """Build the optimizer named by ``self.optimizer``."""
        if (self.optimizer == 'sgd'):
            optimizer = torch.optim.SGD
        elif (self.optimizer == 'adam'):
            optimizer = torch.optim.Adam
        elif (self.optimizer == 'adamw'):
            optimizer = torch.optim.AdamW
        elif (self.optimizer == 'lbfgs'):
            optimizer = torch.optim.LBFGS
        else:
            # NOTE(review): message omits 'lbfgs' even though it is accepted.
            raise ValueError(f'{self.optimizer} not in (sgd, adam, adamw)')
        optimizer = optimizer(self.parameters(), lr=self.lr, **self.extra_optimizer_kwargs)
        return optimizer

    def configure_callbacks(self) -> None:
        # No callbacks configured by default.
        pass

    # NOTE(review): no `self` parameter — likely a module-level helper or a
    # @staticmethod in the original; kept exactly as found.
    def detach_all(z: List[torch.Tensor]) -> List[torch.Tensor]:
        """Detach every tensor in the list from the autograd graph."""
        return [z_.detach() for z_ in z]
def values_from_const(node_def):
    """Extract the ndarray held by a ``Const`` NodeDef.

    Args:
        node_def: a TensorFlow NodeDef expected to have op == 'Const'.

    Returns:
        The node's constant value as a numpy ndarray.

    Raises:
        ValueError: if ``node_def`` is not a Const node.
    """
    if node_def.op != 'Const':
        raise ValueError(f'''Can not extract constant value from a node that is not Const. Got: {node_def}''')
    const_tensor = node_def.attr['value'].tensor
    return tensor_util.MakeNdarray(const_tensor)
def parse_args():
    """Parse command-line options for HazeWorld symlink generation.

    Returns:
        argparse.Namespace with dataset, root_dir, gt_dir, hazy_dir and
        trans_dir attributes.
    """
    parser = argparse.ArgumentParser(description='Generate symlinks for train and test')
    parser.add_argument('-d', '--dataset', default='all', help='which dataset to process')
    parser.add_argument('--root-dir', default='./data/HazeWorld', help='the dir to store train and test symlinks')
    parser.add_argument('--gt-dir', default='./data/HazeWorld/gt', help='the dir that contains the ground truth images')
    parser.add_argument('--hazy-dir', default='./data/HazeWorld/hazy', help='the dir that contains synthetic hazy images')
    parser.add_argument('--trans-dir', default='./data/HazeWorld/transmission', help='the dir that contains transmission maps')
    return parser.parse_args()
def main():
    """Train an anti-spoofing model with early stopping, then evaluate.

    Relies on module-level globals: ``model``, ``run_name``, ``batch_size``,
    ``test_batch_size``, ``rnn`` and ``select_best`` — confirm they are
    defined in this module.
    """
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--train-scp', required=True, help='kaldi train scp file')
    parser.add_argument('--train-utt2label', required=True, help='train utt2label')
    parser.add_argument('--validation-scp', required=True, help='kaldi dev scp file')
    parser.add_argument('--validation-utt2label', required=True, help='dev utt2label')
    parser.add_argument('--eval-scp', help='kaldi eval scp file')
    parser.add_argument('--eval-utt2label', help='train utt2label')
    parser.add_argument('--logging-dir', required=True, help='model save directory')
    parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status')
    parser.add_argument('--hidden-dim', type=int, default=100, help='number of neurones in the hidden dimension')
    parser.add_argument('--plot-wd', help='training plot directory')
    args = parser.parse_args()
    use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
    print('use_cuda is', use_cuda)
    global_timer = timer()
    logger = setup_logs(args.logging_dir, run_name)
    # Seed every RNG source for reproducibility.
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    device = torch.device(('cuda' if use_cuda else 'cpu'))
    model.to(device)
    # NOTE(review): worker_init_fn receives the *result* of np.random.seed
    # (None), not a callable — likely a latent bug; confirm intent.
    params = ({'num_workers': 0, 'pin_memory': False, 'worker_init_fn': np.random.seed(args.seed)} if use_cuda else {})
    logger.info('===> loading train and dev dataset')
    training_set = SpoofDataset(args.train_scp, args.train_utt2label)
    validation_set = SpoofDataset(args.validation_scp, args.validation_utt2label)
    train_loader = data.DataLoader(training_set, batch_size=batch_size, shuffle=True, **params)
    validation_loader = data.DataLoader(validation_set, batch_size=test_batch_size, shuffle=False, **params)
    logger.info('===> loading eval dataset')
    eval_set = SpoofDataset(args.eval_scp, args.eval_utt2label)
    eval_loader = data.DataLoader(eval_set, batch_size=test_batch_size, shuffle=False, **params)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, amsgrad=True)
    # Halve the learning rate when validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=1)
    model_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
    logger.info('### Model summary below###\n {}\n'.format(str(model)))
    logger.info('===> Model total parameter: {}\n'.format(model_params))
    (best_eer, best_loss) = (np.inf, np.inf)
    (early_stopping, max_patience) = (0, 5)
    print(run_name)
    for epoch in range(1, (args.epochs + 1)):
        epoch_timer = timer()
        train(args, model, device, train_loader, optimizer, epoch, rnn)
        (val_loss, eer) = validation(args, model, device, validation_loader, args.validation_scp, args.validation_utt2label, rnn)
        scheduler.step(val_loss)
        # Model selection: by equal-error-rate or by validation loss,
        # controlled by the module-level `select_best`.
        if (select_best == 'eer'):
            is_best = (eer < best_eer)
            best_eer = min(eer, best_eer)
        elif (select_best == 'val'):
            is_best = (val_loss < best_loss)
            best_loss = min(val_loss, best_loss)
        snapshot(args.logging_dir, run_name, is_best, {'epoch': (epoch + 1), 'best_eer': best_eer, 'state_dict': model.state_dict(), 'validation_loss': val_loss, 'optimizer': optimizer.state_dict()})
        # Early stopping counts consecutive non-improving epochs.
        if (is_best == 1):
            early_stopping = 0
        else:
            early_stopping += 1
        end_epoch_timer = timer()
        logger.info('#### End epoch {}/{}, elapsed time: {}'.format(epoch, args.epochs, (end_epoch_timer - epoch_timer)))
        if (early_stopping == max_patience):
            break
    # Reload the best checkpoint and run the final evaluation.
    logger.info('===> loading best model for prediction')
    checkpoint = torch.load(os.path.join(args.logging_dir, (run_name + '-model_best.pth')))
    model.load_state_dict(checkpoint['state_dict'])
    (eval_loss, eval_eer) = prediction(args, model, device, eval_loader, args.eval_scp, args.eval_utt2label, rnn)
    end_global_timer = timer()
    logger.info(' Success ')
    logger.info(('Total elapsed time: %s' % (end_global_timer - global_timer)))
def build_model(cfg, num_classes):
    """Build the ReID model described by ``cfg``.

    Only the 'video' MODEL.SETTING is implemented; any other value raises
    NotImplementedError.
    """
    if cfg.MODEL.SETTING != 'video':
        raise NotImplementedError()
    return VNetwork(
        num_classes,
        cfg.MODEL.LAST_STRIDE,
        cfg.MODEL.PRETRAIN_PATH,
        cfg.MODEL.NECK,
        cfg.TEST.NECK_FEAT,
        cfg.MODEL.NAME,
        cfg.MODEL.PRETRAIN_CHOICE,
        cfg.MODEL.TEMP,
        cfg.MODEL.NON_LAYERS,
        cfg.INPUT.SEQ_LEN,
    )
def test_caller(path, step_ind, on_val):
    """Restore a KPConv-FCNN snapshot and generate 3DMatch descriptors.

    Args:
        path: training log directory containing a 'snapshots' subfolder.
        step_ind: index into the sorted list of available snapshot steps.
        on_val: unused here; kept for interface compatibility.
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
    config = Config()
    config.load(path)
    print()
    print('Dataset Preparation')
    print('')
    dataset = ThreeDMatchDataset(1, load_test=True)
    dataset.init_test_input_pipeline(config)
    print('Creating Model')
    print('\n')
    build_start = time.time()
    model = KernelPointFCNN(dataset.flat_inputs, config)
    # Collect the training steps of all saved snapshots from *.meta files.
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = []
    for fname in os.listdir(snap_path):
        if fname[-5:] == '.meta':
            snap_steps.append(int(fname[:-5].split('-')[-1]))
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))
    tester = ModelTester(model, restore_snap=chosen_snap)
    build_end = time.time()
    print('\n')
    print('Done in {:.1f} s'.format(build_end - build_start))
    print('\n')
    print('Start Test')
    print('\n')
    tester.generate_descriptor(model, dataset)
class SimCLRCifarTransform(transforms.Compose):
    """SimCLR augmentation pipeline for 32x32 CIFAR images.

    Training uses crop + flip + color distortion; finetuning uses crop +
    flip only; evaluation uses neither. ToTensor/normalization is always
    appended via ``_add_totensor_normalize``.
    """

    def __init__(self, train, finetune=False, normalize_stats=None):
        pipeline = []
        if train or finetune:
            pipeline = [
                transforms.RandomResizedCrop(32),
                transforms.RandomHorizontalFlip(p=0.5),
            ]
        if train:
            pipeline.append(ColorDistortion(s=0.5))
        # Compose reads self.transforms in __call__, so assigning it directly
        # stands in for calling super().__init__.
        self.transforms = pipeline
        _add_totensor_normalize(self.transforms, normalize_stats)
def load_pipeline(config, dataset):
    """Instantiate the pipeline class named by ``config.pipeline``.

    Matching is case-insensitive against the class names in ``all_models``.
    Returns the constructed pipeline, or None (after printing the valid
    options) when the name is unknown.
    """
    requested = config.pipeline.lower()
    registry = {cls.__name__.lower(): cls for cls in all_models}
    if requested in registry:
        return registry[requested](config, dataset)
    print('Invalid pipeline. Options are:')
    for cls in all_models:
        print('\t* {}'.format(cls.__name__))
    return None
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (distributed) MoCo-style pre-training.

    Builds the MoCo model with a resnet50 teacher, optionally resumes /
    loads teacher weights, then runs the training loop, checkpointing from
    the rank-0 process.
    """
    args.gpu = gpu
    # Silence printing on every process except the first.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    teacher_model = models.__dict__['resnet50']
    print("=> creating model '{}'".format(args.arch))
    if (args.arch == 'efficientb0'):
        model = efficientnet_b0
    else:
        model = models.__dict__[args.arch]
    model = moco.builder.MoCo(model, teacher_model, args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp, args.arch)
    print(model)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split batch size / workers across the processes on this node.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif (args.gpu is not None):
        # NOTE(review): this branch configures the GPU and then raises
        # unconditionally — single-GPU (non-DDP) mode is deliberately
        # unsupported; the setup lines before the raise are dead in effect.
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    criterion_mse = nn.MSELoss(reduction='sum').cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume full training state from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Optionally load teacher weights, remapping 'encoder' keys onto the
    # model's 'teacher_encoder' parameters.
    if args.teacher:
        if os.path.isfile(args.teacher):
            print("=> loading teacher checkpoint '{}'".format(args.teacher))
            if (args.gpu is None):
                teacher_checkpoint = torch.load(args.teacher)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                teacher_checkpoint = torch.load(args.teacher, map_location=loc)
            param_dict = teacher_checkpoint['state_dict']
            new_param_dict = {}
            for (k, v) in param_dict.items():
                new_param_dict[k.replace('encoder', 'teacher_encoder')] = v
            model.load_state_dict(new_param_dict, strict=False)
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # MoCo v2 augmentation (aug_plus) vs the simpler v1 recipe.
    if args.aug_plus:
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    else:
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    train_dataset = datasets.ImageFolder(traindir, moco.loader.TwoCropsTransform(transforms.Compose(augmentation)))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, criterion_mse, optimizer, epoch, args)
        # Checkpoint only from the first process per node (or when not
        # using multiprocessing-distributed at all).
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename='ckpt/checkpoint_{:04d}.pth.tar'.format(epoch))
def test_repr():
    """repr() of the bound class and of its metaclass must carry the
    pybind11 marker and the class name respectively."""
    metaclass_repr = repr(type(UserType))
    assert 'pybind11_type' in metaclass_repr
    class_repr = repr(UserType)
    assert 'UserType' in class_repr
def rla_mobilenetv2_k6():
    """Build an RLA-MobileNetV2 with 6 recurrent-layer-aggregation channels."""
    print('Constructing rla_mobilenetv2_k6......')
    return RLA_MobileNetV2(rla_channel=6)
def tanh_1(x, mu, sd):
    """Tanh transform of a standardized input, with its derivative.

    Computes z = (x - mu) / sd and returns (tanh(z), d tanh(z)/dx),
    where the derivative is sech^2(z) / sd.
    """
    z = (x - mu) / sd
    out = torch.tanh(z)
    sech_sq = 1 - out ** 2
    jac = (1 / sd) * sech_sq
    return (out, jac)
def strip_iterator(graph_def):
    """Replace IteratorGetNext feeds with explicit int32 placeholders.

    Rewires a BERT-style GraphDef so it is fed by ``input_ids``,
    ``input_mask`` and ``segment_ids`` placeholders of shape (None, 384),
    then strips nodes unreachable from 'loss/Softmax'.
    """
    from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes
    input_node_names = ['input_ids', 'input_mask', 'segment_ids']
    output_node_names = ['loss/Softmax']
    # Import the graph into a scratch Graph that already contains the new
    # placeholders, so both coexist in the exported GraphDef.
    with tf.compat.v1.Graph().as_default() as g:
        input_ids = tf.compat.v1.placeholder(tf.int32, shape=(None, 384), name='input_ids')
        input_mask = tf.compat.v1.placeholder(tf.int32, shape=(None, 384), name='input_mask')
        segment_ids = tf.compat.v1.placeholder(tf.int32, shape=(None, 384), name='segment_ids')
        tf.import_graph_def(graph_def, name='')
    graph_def = g.as_graph_def()
    # Point every consumer of the iterator outputs at the matching
    # placeholder: output :0 -> input_ids, :1 -> input_mask, :2 -> segment_ids.
    for node in graph_def.node:
        for (idx, in_tensor) in enumerate(node.input):
            if (('IteratorGetNext:0' == in_tensor) or ('IteratorGetNext' == in_tensor)):
                node.input[idx] = 'input_ids'
            if ('IteratorGetNext:1' in in_tensor):
                node.input[idx] = 'input_mask'
            if ('IteratorGetNext:2' in in_tensor):
                node.input[idx] = 'segment_ids'
    # Drop everything no longer reachable from the declared outputs.
    graph_def = strip_unused_nodes(graph_def, input_node_names, output_node_names)
    return graph_def
def __scale_shortside(img, target_width, method=Image.BICUBIC):
    """Resize ``img`` so its shorter side equals ``target_width``,
    preserving the aspect ratio.

    Args:
        img: a PIL image.
        target_width: desired length of the shorter side, in pixels.
        method: PIL resampling filter.

    Returns:
        The resized image, or ``img`` unchanged when the short side already
        matches.

    Fix: the original built the new size from ``(ss, ls)``, leaving the
    short side at its *old* length while the long side was scaled — the
    output neither hit the target size nor kept the aspect ratio. The new
    short side must be ``target_width``.
    """
    (ow, oh) = img.size
    (ss, ls) = (min(ow, oh), max(ow, oh))
    width_is_shorter = (ow == ss)
    if (ss == target_width):
        return img
    ls = int((target_width * ls) / ss)
    (nw, nh) = ((target_width, ls) if width_is_shorter else (ls, target_width))
    return img.resize((nw, nh), method)
def eval(args, val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader``.

    Returns (average loss, top-1 accuracy %, top-5 accuracy %).
    Note: shadows the builtin ``eval``; name kept for caller compatibility.
    """
    model.eval()
    # Unwrap DDP so the bare module runs the forward pass.
    if is_distributed(args.rank):
        model = model.module
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    device = args.device
    if args.cuda:
        torch.cuda.empty_cache()
    for (inputs, labels) in val_loader:
        inputs = inputs.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)
        outputs = model(inputs)
        batch_loss = criterion(outputs, labels)
        (acc1, acc5) = accuracy(outputs.data, labels.data, topk=(1, 5))
        batch_n = inputs.size(0)
        loss_meter.update(batch_loss.item(), batch_n)
        acc1_meter.update(acc1.item(), batch_n)
        acc5_meter.update(acc5.item(), batch_n)
    logging('Avg loss: {:.4f}, top1: {:.2f}%, top5: {:.2f}%'.format(loss_meter.avg, acc1_meter.avg, acc5_meter.avg), args.log)
    return (loss_meter.avg, acc1_meter.avg, acc5_meter.avg)
def default_hp_search_backend():
    """Pick the default hyper-parameter search backend.

    Prefers optuna over ray; returns None (implicitly) when neither
    library is installed.
    """
    if is_optuna_available():
        return 'optuna'
    if is_ray_available():
        return 'ray'
def getUserStatistics():
    """Collect per-day registration counts for the last 30 days plus the
    all-time user total.

    Returns:
        dict with keys 'users' (counts, oldest first), 'dates' (matching
        'YYYY-MM-DD' strings, oldest first) and 'total' (int).

    Fix: the original indexed ``usersByDate[i]`` without bounds checking,
    raising IndexError whenever fewer than 30 distinct registration days
    existed; the index is now guarded. Also removed the pointless
    ``(list.reverse(),)`` tuple wrappers.
    """
    cur = getDb().cursor()
    sql = 'select count(*), DATE(registered) from users group by DATE(registered) order by registered desc limit 30'
    cur.execute(sql)
    usersByDate = cur.fetchall()
    cur.execute('SELECT count(*) from users')
    total = cur.fetchall()[0][0]
    today = datetime.datetime.today()
    # Last 30 calendar days, newest first (matching the query's ordering).
    dateList = [(today - datetime.timedelta(days=x)).strftime('%Y-%m-%d') for x in range(0, 30)]
    users = []
    i = 0
    for day in dateList:
        # Both lists run newest-to-oldest, so a single cursor walk aligns
        # query rows with calendar days; days with no rows count as 0.
        if i < len(usersByDate) and str(usersByDate[i][1]) == day:
            users.append(usersByDate[i][0])
            i += 1
        else:
            users.append(0)
    # Present oldest-to-newest for charting.
    users.reverse()
    dateList.reverse()
    result = {'users': users, 'dates': dateList, 'total': total}
    cur.close()
    return result
class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """LR scheduler with a linear warmup followed by cosine annealing,
    supporting an optional per-parameter-group 'layer_lr_decay' multiplier.

    Warmup goes from ``warmup_start_lr`` to the base LR over ``warmup_epochs``,
    then the LR follows a cosine decay from the base LR to ``eta_min`` over the
    remaining ``max_epochs - warmup_epochs`` epochs.
    """

    def __init__(self, optimizer: Optimizer, warmup_epochs: int, max_epochs: int, warmup_start_lr: float=0.0, eta_min: float=0.0, last_epoch: int=(- 1)) -> None:
        """
        Args:
            optimizer: wrapped optimizer; groups may carry a 'layer_lr_decay' key.
            warmup_epochs: number of linear-warmup epochs.
            max_epochs: total number of epochs (warmup + cosine phase).
            warmup_start_lr: LR at epoch 0 of the warmup.
            eta_min: floor LR of the cosine phase.
            last_epoch: index of the last epoch (-1 starts fresh).
        """
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min
        # Per-group LR multiplier; defaults to 1.0 when the optimizer's groups
        # do not declare 'layer_lr_decay'.
        has_layer_lr_decay = (optimizer.param_groups[0].get('layer_lr_decay', None) is not None)
        if has_layer_lr_decay:
            self.layer_lr_decay_values = [group['layer_lr_decay'] for group in optimizer.param_groups]
        else:
            self.layer_lr_decay_values = [1.0 for _ in optimizer.param_groups]
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """Compute the per-group LR for the current step (chained/recursive form).

        NOTE(review): the warmup step divides by ``warmup_epochs - 1`` — this
        would divide by zero for ``warmup_epochs == 1``; confirm callers never
        pass 1.
        """
        if (not self._get_lr_called_within_step):
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        # First epoch of a non-trivial warmup: start at warmup_start_lr.
        if ((self.last_epoch == 0) and (self.warmup_epochs != 0)):
            return [(self.warmup_start_lr * decay_value) for decay_value in self.layer_lr_decay_values]
        # No warmup at all: start directly at the (decayed) base LR.
        elif (self.last_epoch == 0):
            return [(base_lr * layer_lr_decay) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values)]
        # Linear warmup: add one warmup increment per epoch.
        elif (self.last_epoch < self.warmup_epochs):
            return [(group['lr'] + (((base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)) * layer_lr_decay)) for (base_lr, group, layer_lr_decay) in zip(self.base_lrs, self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]
        # End of warmup: land exactly on the base LR.
        elif (self.last_epoch == self.warmup_epochs):
            return [(base_lr * layer_lr_decay) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
        # Cosine-cycle boundary special case (mirrors torch's CosineAnnealingLR).
        elif ((((self.last_epoch - 1) - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs))) == 0):
            return [(group['lr'] + ((((base_lr - self.eta_min) * (1 - math.cos((math.pi / (self.max_epochs - self.warmup_epochs))))) / 2) * layer_lr_decay)) for (base_lr, group, layer_lr_decay) in zip(self.base_lrs, self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]
        # Generic cosine step, expressed as a ratio to the previous epoch's LR.
        return [((((1 + math.cos(((math.pi * (self.last_epoch - self.warmup_epochs)) / (self.max_epochs - self.warmup_epochs)))) / (1 + math.cos(((math.pi * ((self.last_epoch - self.warmup_epochs) - 1)) / (self.max_epochs - self.warmup_epochs))))) * (group['lr'] - (self.eta_min * layer_lr_decay))) + (self.eta_min * layer_lr_decay)) for (group, layer_lr_decay) in zip(self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]

    def _get_closed_form_lr(self) -> List[float]:
        """Closed-form LR at ``self.last_epoch`` (used when the epoch is set directly)."""
        # Linear interpolation during warmup ...
        if (self.last_epoch < self.warmup_epochs):
            return [(layer_lr_decay * (self.warmup_start_lr + ((self.last_epoch * (base_lr - self.warmup_start_lr)) / (self.warmup_epochs - 1)))) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
        # ... then the standard cosine-annealing closed form.
        return [(layer_lr_decay * (self.eta_min + ((0.5 * (base_lr - self.eta_min)) * (1 + math.cos(((math.pi * (self.last_epoch - self.warmup_epochs)) / (self.max_epochs - self.warmup_epochs))))))) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
def load_results(cfg, stage):
    """Load cached metric results for *stage* from the results CSV.

    Args:
        cfg: config object providing ``cfg.paths.results``.
        stage: stage name substituted into ``RESULTS_FILE``.

    Returns:
        Flat ``{'<mode>/<metric>': value}`` dict, or an empty dict when the
        results file is missing or unreadable (best-effort by design).
    """
    try:
        filename = RESULTS_FILE.format(stage=stage)
        path = Path(cfg.paths.results) / filename
        results = pd.read_csv(path, index_col=0).to_dict()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; a missing/corrupt file still yields {}.
        return dict()
    # Flatten {mode: {metric: value}} into {'mode/metric': value}.
    return {f'{mode}/{k}': v for mode, sub_dict in results.items() for k, v in sub_dict.items()}
def Backbone_ResNet50_in3():
    """Split an ImageNet-pretrained ResNet-50 into five feature stages.

    Returns:
        Tuple of five modules producing features at strides 2, 4, 8, 16 and 32.
    """
    backbone = resnet50(pretrained=True)
    children = list(backbone.children())
    # conv1 + bn1 + relu -> stride 2; maxpool + layer1 -> stride 4.
    stage_div2 = nn.Sequential(*children[:3])
    stage_div4 = nn.Sequential(*children[3:5])
    return (stage_div2, stage_div4, backbone.layer2, backbone.layer3, backbone.layer4)
def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    """Build a DeepLab segmentation model on a MobileNetV2 backbone.

    Args:
        name: 'deeplabv3' or 'deeplabv3plus'.
        backbone_name: unused here (kept for interface parity with siblings).
        num_classes: number of output classes.
        output_stride: 8 or 16; selects the ASPP dilation rates.
        pretrained_backbone: load ImageNet weights for the backbone.

    Returns:
        A DeepLabV3 model.

    Raises:
        ValueError: for an unsupported *name*.
    """
    # Larger dilations compensate for the reduced stride.
    if output_stride == 8:
        aspp_dilate = [12, 24, 36]
    else:
        aspp_dilate = [6, 12, 18]
    backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
    # Expose the intermediate feature maps the heads need, and drop the
    # original containers so IntermediateLayerGetter sees the new attributes.
    backbone.low_level_features = backbone.features[0:4]
    backbone.high_level_features = backbone.features[4:-1]
    backbone.features = None
    backbone.classifier = None
    inplanes = 320       # channels of the high-level feature map
    low_level_planes = 24  # channels of the low-level feature map
    if name == 'deeplabv3plus':
        return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
        classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
    elif name == 'deeplabv3':
        return_layers = {'high_level_features': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
    else:
        # Previously fell through to a NameError on `return_layers`.
        raise ValueError(f"Unsupported model name: {name!r} (expected 'deeplabv3' or 'deeplabv3plus')")
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    model = DeepLabV3(backbone, classifier)
    return model
def cfg_base():
    # NOTE(review): `cfg = {}` / bare `uuid` locals with no return is the shape
    # of a Sacred `@ex.config` function whose decorator was lost in extraction —
    # the framework captures the local variables, so the "unused" `uuid` and the
    # missing `return` are likely intentional. Confirm against the original file
    # before changing anything here.
    cfg = {}
    uuid = ''
    # Habitat PointNav task config (Gibson, val split), resolved relative to
    # the current working directory.
    config_file = os.path.join(os.getcwd(), 'habitat-api/configs/tasks/pointnav_gibson_val.yaml')
    # Evaluation settings: fixed experiment log dir, challenge mode, and a
    # 10-episode benchmark driven by the config file above.
    cfg['eval_kwargs'] = {'exp_path': '/mnt/logdir/keypoints3d_encoding_restart1', 'weights_only_path': None, 'challenge': True, 'debug': False, 'overwrite_configs': True, 'benchmark_episodes': 10, 'benchmark_config': config_file}
def comp_num_seg_out_of_p_sent_beam(_filtered_doc_list, _num_edu, _absas_read_str, abs_as_read_list, map_from_new_to_ori_idx, beam_sz=8):
    """Beam-search for the combination of ``_num_edu`` sentences/EDUs from
    ``_filtered_doc_list`` that maximizes an approximate ROUGE (4-gram) score
    against the abstract string ``_absas_read_str``.

    Args:
        _filtered_doc_list: candidate sentence list (filtered indices).
        _num_edu: number of sentences to select (beam depth).
        _absas_read_str: reference abstract as a single string.
        abs_as_read_list: unused here; kept for interface compatibility.
        map_from_new_to_ori_idx: maps filtered indices back to original ones.
        beam_sz: beam width.

    Returns:
        dict with 'nlabel' (=_num_edu), 'data' (score -> candidate dict) and
        'best' (highest-scoring candidate, or None when nothing was selected).
    """
    beam = []
    # Trivial case: not enough candidates to choose from.
    if (len(_filtered_doc_list) <= _num_edu):
        return {'nlabel': _num_edu, 'data': {}, 'best': None}
    # NOTE(review): index 0 is never a candidate (range starts at 1) — confirm
    # this offset is intentional in the original data layout.
    combs = list(range(1, len(_filtered_doc_list)))
    cur_beam = {'in': [], 'todo': combs, 'val': 0}
    beam.append(cur_beam)
    # Grow each beam entry by one sentence per round, keeping the beam_sz
    # best-scoring, deduplicated combinations.
    for t in range(_num_edu):
        dict_pattern = {}   # dedup of candidate combinations within this round
        global_board = []   # all expansions produced this round
        for b in beam:
            already_in_beam = b['in']
            todo = b['todo']
            leaderboard = {}
            # Score every one-sentence extension of this beam entry.
            for to_add in todo:
                after_add = (already_in_beam + [to_add])
                _tmp = assemble_doc_list_from_idx(_filtered_doc_list, after_add)
                _tmp = '\n'.join(_tmp)
                average_f_score = get_rouge_est_str_4gram(_absas_read_str, _tmp)
                leaderboard[to_add] = average_f_score
            sorted_beam = [(k, leaderboard[k]) for k in sorted(leaderboard, key=leaderboard.get, reverse=True)]
            for it in sorted_beam:
                new_in = (already_in_beam + [it[0]])
                new_in.sort()
                str_new_in = [str(x) for x in new_in]
                # Skip combinations already proposed by another beam entry.
                if ('_'.join(str_new_in) in dict_pattern):
                    continue
                else:
                    dict_pattern['_'.join(str_new_in)] = True
                new_list = todo.copy()
                new_list.remove(it[0])
                _beam = {'in': new_in, 'todo': new_list, 'val': it[1]}
                global_board.append(_beam)
        # Keep the beam_sz best unique combinations for the next round.
        sorted_global_board = sorted(global_board, key=(lambda x: x['val']), reverse=True)
        _cnt = 0
        check_dict = []
        beam_waitlist = []
        for it in sorted_global_board:
            str_in = sorted(it['in'])
            str_in = [str(x) for x in str_in]
            _tmp_key = '_'.join(str_in)
            if (_tmp_key in check_dict):
                continue
            else:
                beam_waitlist.append(it)
                check_dict.append(_tmp_key)
            _cnt += 1
            if (_cnt >= beam_sz):
                break
        beam = beam_waitlist
    # Re-score the surviving combinations and map indices back to the
    # original sentence numbering.
    _comb_bag = {}
    for it in beam:
        n_comb = it['in']
        n_comb.sort()
        n_comb_original = [map_from_new_to_ori_idx[a] for a in n_comb]
        n_comb_original.sort()
        n_comb_original = [int(x) for x in n_comb_original]
        _tmp = assemble_doc_list_from_idx(_filtered_doc_list, n_comb)
        _tmp = '\n'.join(_tmp)
        f1 = get_rouge_est_str_4gram(_absas_read_str, _tmp)
        # NOTE(review): keyed by score, so combinations with identical f1
        # overwrite each other — presumably acceptable; confirm.
        _comb_bag[f1] = {'label': n_comb_original, 'R1': f1, 'nlabel': _num_edu}
    if (len(_comb_bag) == 0):
        return {'nlabel': _num_edu, 'data': {}, 'best': None}
    else:
        best_key = sorted(_comb_bag.keys(), reverse=True)[0]
        rt_dict = {'nlabel': _num_edu, 'data': _comb_bag, 'best': _comb_bag[best_key]}
        return rt_dict
def set_seed(seed=3):
    """Seed every RNG in use (hash, numpy, random, torch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seed_fn in (np.random.seed, random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    # Trade cuDNN autotuning speed for bitwise reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def chatglm_tokenize(ctx: ChatGLMContext, prompt: str) -> List[int]:
    """Encode *prompt* into token ids with the context's pipeline tokenizer."""
    tokenizer = ctx.pipeline.tokenizer
    return tokenizer.encode(prompt)
class TransfEncoder(nn.Module):
    """Transformer encoder that can either run its own embedding + layer stack
    or delegate to a pretrained BERT/GPT-2 backbone.

    Returns ``(enc_output, hidden)`` from :meth:`forward`, where ``hidden`` is a
    per-layer summary vector.
    """

    def __init__(self, n_vocab, pretrained=None, model_name='default', d_word_vec=512, d_model=512, len_max_seq=512, n_layer=6, d_inner=2048, slf_attn='multi-head', n_head=8, d_k=64, d_v=64, feat_vocab=None, d_feat_vec=32, layer_attn=False, slf_attn_mask='', dropout=0.1, attn_dropout=0.1):
        """
        Args:
            n_vocab: source vocabulary size.
            pretrained: optional pretrained backbone (BERT/GPT-2 module).
            model_name: 'default', 'bert' or 'gpt2'.
            d_word_vec / d_model / len_max_seq / n_layer / d_inner: transformer sizes.
            slf_attn / n_head / d_k / d_v: self-attention configuration.
            feat_vocab: optional list of auxiliary-feature vocab sizes.
            d_feat_vec: embedding size per auxiliary feature.
            layer_attn: if True, forward returns all layer outputs instead of the last.
            slf_attn_mask: non-empty string enables a caller-supplied attention mask.
            dropout / attn_dropout: dropout rates.
        """
        self.name = 'transf'
        self.model_type = model_name
        n_position = (len_max_seq + 5)  # headroom for special positions
        super(TransfEncoder, self).__init__()
        self.feature = (False if (not feat_vocab) else True)
        self.layer_attn = layer_attn
        self.pretrained = pretrained
        self.defined_slf_attn_mask = (True if slf_attn_mask else False)
        # With a pretrained backbone none of the home-grown embeddings/layers
        # below are needed.
        if pretrained:
            return
        self.word_emb = nn.Embedding(n_vocab, d_word_vec, padding_idx=Constants.PAD)
        # Fixed (non-trainable) sinusoidal position table.
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(n_position, d_word_vec, padding_idx=Constants.PAD), freeze=True)
        if self.feature:
            # NOTE(review): stored as `feat_embs`, but `forward` reads
            # `self.feat_word_embs` — one of the two names looks wrong; confirm
            # against the original repository.
            self.feat_embs = nn.ModuleList([nn.Embedding(n_f_vocab, d_feat_vec, padding_idx=Constants.PAD) for n_f_vocab in feat_vocab])
        self.layer_stack = nn.ModuleList([EncoderLayer(d_model, slf_attn, d_inner, n_head, d_k, d_v, dropout=dropout, attn_dropout=attn_dropout) for _ in range(n_layer)])

    # NOTE(review): takes `cls` and builds instances — almost certainly had a
    # stripped @classmethod decorator in the original file; confirm.
    def from_opt(cls, opt):
        """Construct a TransfEncoder from an option dict (plain, BERT or GPT-2)."""
        if ('pretrained' not in opt):
            return cls(opt['n_vocab'], d_word_vec=opt['d_word_vec'], d_model=opt['d_model'], len_max_seq=opt['len_max_seq'], n_layer=opt['n_layer'], d_inner=opt['d_inner'], n_head=opt['n_head'], slf_attn=opt['slf_attn'], d_k=opt['d_k'], d_v=opt['d_v'], feat_vocab=opt['feat_vocab'], d_feat_vec=opt['d_feat_vec'], layer_attn=opt['layer_attn'], slf_attn_mask=opt['mask_slf_attn'], dropout=opt['dropout'], attn_dropout=opt['attn_dropout'])
        elif opt['pretrained'].count('bert'):
            pretrained = BertModel.from_pretrained(opt['pretrained'])
            return cls(opt['n_vocab'], pretrained=pretrained, layer_attn=opt['layer_attn'], model_name='bert')
        elif opt['pretrained'].count('gpt2'):
            pretrained = GPT2Model.from_pretrained(opt['pretrained'])
            return cls(opt['n_vocab'], pretrained=pretrained, model_name='gpt2')
        else:
            raise ValueError("Other pretrained models haven't been supported yet")

    def forward(self, inputs, return_attns=False):
        """Encode ``inputs`` and return ``(enc_output, hidden)``.

        ``inputs`` is a dict with 'src_seq' (and, for the non-BERT paths,
        'src_pos', 'feat_seqs' and optionally 'slf_attn_mask').
        """
        if (self.pretrained and (self.model_type == 'bert')):
            src_seq = inputs['src_seq']
        else:
            (src_seq, src_pos, feat_seqs) = (inputs['src_seq'], inputs['src_pos'], inputs['feat_seqs'])
        if self.pretrained:
            if (self.model_type == 'bert'):
                src_sep = None
                (enc_outputs, *_) = self.pretrained(src_seq, token_type_ids=src_sep, output_all_encoded_layers=True)
                enc_output = enc_outputs[(- 1)]
            elif (self.model_type == 'gpt2'):
                (enc_output, *_) = self.pretrained(src_seq)
                # Repeat the last position's state once per (assumed) 12 layers.
                hidden = [enc_output.transpose(0, 1)[(- 1)] for _ in range(12)]
            # NOTE(review): `hidden` is only assigned on the gpt2 path — the
            # BERT path would raise NameError here. The collapsed source makes
            # the original indentation ambiguous; confirm before relying on it.
            return (enc_output, hidden)
        else:
            slf_attn_mask = get_attn_key_pad_mask(seq_k=src_seq, seq_q=src_seq)
            non_pad_mask = get_non_pad_mask(src_seq)
            # Merge an externally supplied mask with the padding mask.
            if self.defined_slf_attn_mask:
                def_slf_attn_mask = inputs['slf_attn_mask']
                slf_attn_mask = (slf_attn_mask + def_slf_attn_mask).gt(0)
            enc_output = (self.word_emb(src_seq) + self.pos_emb(src_pos))
            if self.feature:
                assert (feat_seqs is not None), 'feature seq(s) must be given'
                # NOTE(review): reads `feat_word_embs` while __init__ defines
                # `feat_embs` — see the note in __init__.
                feat_output = [feat_emb(feat_seq) for (feat_seq, feat_emb) in zip(feat_seqs, self.feat_word_embs)]
                feat_output = torch.cat(feat_output, dim=2)
                enc_output = torch.cat((enc_output, feat_output), dim=2)
            enc_outputs = []
            for (layer_idx, enc_layer) in enumerate(self.layer_stack):
                (enc_output, _) = enc_layer(enc_output, src_seq, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask, layer_id=layer_idx)
                enc_outputs.append(enc_output)
            # First-position state of every layer as the per-layer summary.
            hidden = [layer_output.transpose(0, 1)[0] for layer_output in enc_outputs]
            if self.layer_attn:
                enc_output = enc_outputs
            return (enc_output, hidden)
def resampling_dataset_present(ds):
    """Return True when *ds* is, contains, or wraps a ResamplingDataset.

    Recurses through ConcatDataset members and any `.dataset` wrapper chain.
    """
    if isinstance(ds, ResamplingDataset):
        return True
    if isinstance(ds, ConcatDataset):
        return any(resampling_dataset_present(child) for child in ds.datasets)
    if hasattr(ds, 'dataset'):
        return resampling_dataset_present(ds.dataset)
    return False
def trades_loss(model, x_natural, y, optimizer, step_size=0.003, epsilon=0.031, perturb_steps=10, beta=1.0, attack='linf-pgd', label_smoothing=0.1):
    """TRADES adversarial training loss (Zhang et al., 2019).

    Crafts an adversarial example that maximizes KL(model(x_adv) || model(x_natural)),
    then returns ``CE(x_natural) + beta * KL`` for the outer minimization.

    Args:
        model: classifier being trained (left in train mode on return).
        x_natural: clean batch, values in [0, 1].
        y: ground-truth labels.
        optimizer: model optimizer (its gradients are zeroed before the loss).
        step_size: inner-maximization step size.
        epsilon: perturbation budget (L-inf or L2 radius).
        perturb_steps: number of inner attack steps.
        beta: weight of the robust KL term.
        attack: 'linf-pgd' or 'l2-pgd'.
        label_smoothing: smoothing for the natural cross-entropy term.

    Returns:
        (loss, batch_metrics) with 'loss', 'clean_acc' and 'adversarial_acc'.

    Raises:
        ValueError: for an unsupported *attack*.
    """
    criterion_ce = SmoothCrossEntropyLoss(reduction='mean', smoothing=label_smoothing)
    criterion_kl = nn.KLDivLoss(reduction='sum')
    model.train()
    # Freeze BN running statistics while crafting the adversarial example.
    track_bn_stats(model, False)
    batch_size = len(x_natural)
    x_adv = x_natural.detach() + torch.FloatTensor(x_natural.shape).uniform_(-epsilon, epsilon).cuda().detach()
    x_adv = torch.clamp(x_adv, 0.0, 1.0)
    p_natural = F.softmax(model(x_natural), dim=1).detach()
    if attack == 'linf-pgd':
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1), p_natural)
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
            # Project back into the L-inf ball and the valid pixel range.
            x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif attack == 'l2-pgd':
        # BUG FIX: the original used `x.shape`, but no `x` exists in this
        # scope (NameError at runtime); the clean batch is `x_natural`.
        delta = torch.FloatTensor(x_natural.shape).normal_(mean=0, std=1.0).cuda().detach()
        delta.data = delta.data * np.random.uniform(0.0, epsilon) / ((delta.data ** 2).sum([1, 2, 3], keepdim=True) ** 0.5)
        delta = Variable(delta.data, requires_grad=True)
        optimizer_delta = optim.SGD([delta], lr=step_size)
        for _ in range(perturb_steps):
            adv = x_natural + delta
            optimizer_delta.zero_grad()
            with torch.enable_grad():
                loss = (-1) * criterion_kl(F.log_softmax(model(adv), dim=1), p_natural)
            loss.backward(retain_graph=True)
            # Normalize per-sample gradients to unit L2 norm; re-randomize
            # samples whose gradient vanished.
            grad_norms = delta.grad.view(batch_size, -1).norm(p=2, dim=1)
            delta.grad.div_(grad_norms.view(-1, 1, 1, 1))
            if (grad_norms == 0).any():
                delta.grad[grad_norms == 0] = torch.randn_like(delta.grad[grad_norms == 0])
            optimizer_delta.step()
            # Project onto the valid pixel range and the L2 ball of radius epsilon.
            delta.data.add_(x_natural)
            delta.data.clamp_(0, 1).sub_(x_natural)
            delta.data.renorm_(p=2, dim=0, maxnorm=epsilon)
        x_adv = Variable(x_natural + delta, requires_grad=False)
    else:
        raise ValueError(f'Attack={attack} not supported for TRADES training!')
    model.train()
    track_bn_stats(model, True)
    x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
    optimizer.zero_grad()
    logits_natural = model(x_natural)
    logits_adv = model(x_adv)
    loss_natural = criterion_ce(logits_natural, y)
    loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(logits_adv, dim=1), F.softmax(logits_natural, dim=1))
    loss = loss_natural + beta * loss_robust
    batch_metrics = {'loss': loss.item()}
    batch_metrics.update({'clean_acc': accuracy(y, logits_natural.detach()), 'adversarial_acc': accuracy(y, logits_adv.detach())})
    return (loss, batch_metrics)
def quantize_grad(x, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD, reduce_dim=0, dequantize=True, signed=False, stochastic=True):
    """Attach gradient quantization to *x*: the forward pass is unchanged while
    the backward pass quantizes gradients to *num_bits* via UniformQuantizeGrad."""
    grad_quantizer = UniformQuantizeGrad()
    return grad_quantizer.apply(x, num_bits, qparams, flatten_dims,
                                reduce_dim, dequantize, signed, stochastic)
class _TorchNanoModule(_LiteModule):
    """Lite-module wrapper that unwraps DDP for state-dict access, forwards
    unknown attributes to the wrapped module, and optionally converts 4-D
    inputs to channels-last memory format before forward."""

    def __init__(self, module, precision_plugin, channels_last) -> None:
        # channels_last: when True, 4-D input tensors are converted to
        # torch.channels_last before being passed to the wrapped module.
        super().__init__(module, precision_plugin)
        self.channels_last = channels_last

    def state_dict(self, *args, **kwargs):
        """Return the state dict of the *inner* model (DDP wrapper stripped)."""
        if isinstance(self.module, DistributedDataParallel):
            return self.module.module.state_dict(*args, **kwargs)
        else:
            return self.module.state_dict(*args, **kwargs)

    def load_state_dict(self, state_dict: Mapping[(str, Any)], strict: bool=True):
        """Load *state_dict* into the inner model (DDP wrapper stripped).

        NOTE(review): `invalidInputError(TORCH_VERSION_LESS_1_13, ...)` — the
        message says loading is unsupported BELOW 1.13, so the condition looks
        inverted relative to the message; confirm `invalidInputError` semantics
        (raises when the condition is False) against its definition.
        """
        invalidInputError(TORCH_VERSION_LESS_1_13, "TorchNano doesn't support loading state dict with PyTorch<1.13, please load it using original pytorch model")
        if isinstance(self.module, DistributedDataParallel):
            return self.module.module.load_state_dict(state_dict=state_dict, strict=strict)
        else:
            return self.module.load_state_dict(state_dict=state_dict, strict=strict)

    def __getattr__(self, name: str):
        # Resolution order: this wrapper (via the parent), then the (possibly
        # DDP-wrapped) module, then the innermost model.
        try:
            return super().__getattr__(name)
        except AttributeError:
            pass
        if isinstance(self.module, DistributedDataParallel):
            try:
                return getattr(self.module, name)
            except AttributeError:
                pass
            return getattr(self.module.module, name)
        else:
            return getattr(self.module, name)

    def forward(self, *args: Any, **kwargs: Any) -> Any:
        if self.channels_last:
            def _convert_to_channels_last(t: torch.Tensor) -> torch.Tensor:
                # Only 4-D (NCHW) tensors have a channels-last layout.
                if (t.dim() == 4):
                    return t.to(memory_format=torch.channels_last)
                return t
            (args, kwargs) = apply_to_collection([args, kwargs], function=_convert_to_channels_last, dtype=torch.Tensor)
        return super().forward(*args, **kwargs)
class RoFormerTokenizer(PreTrainedTokenizer):
    """RoFormer tokenizer: BERT-style WordPiece preceded by a rjieba
    pre-segmentation pass (whole words found in the vocab are kept intact)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """
        Args:
            vocab_file: path to the WordPiece vocabulary file.
            do_lower_case / do_basic_tokenize / never_split /
            tokenize_chinese_chars / strip_accents: BasicTokenizer options.
            unk_token..mask_token: special-token strings.
        """
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse map id -> token for decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
        # rjieba is an optional dependency used for Chinese word segmentation.
        try:
            import rjieba
        except ImportError:
            raise ImportError('You need to install rjieba to use RoFormerTokenizer. See for installation.')
        self.jieba = rjieba

    # NOTE(review): reads like a stripped @property decorator — confirm.
    def do_lower_case(self):
        """Whether the basic tokenizer lower-cases input."""
        return self.basic_tokenizer.do_lower_case

    # NOTE(review): reads like a stripped @property decorator — confirm.
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def __getstate__(self):
        # The rjieba module object is not picklable; drop it and re-import on load.
        state = self.__dict__.copy()
        state['jieba'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        import rjieba
        self.jieba = rjieba

    def get_vocab(self):
        """Base vocabulary merged with any added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, use_jieba=True):
        """Segment with rjieba first; whole words present in the vocab become
        single tokens, everything else is re-tokenized via WordPiece."""
        split_tokens = []
        if use_jieba:
            for wholword in self.jieba.cut(text, False):
                if (wholword in self.vocab):
                    split_tokens.append(wholword)
                else:
                    # Recurse without jieba to fall back on basic/WordPiece.
                    char_list = self._tokenize(wholword, use_jieba=False)
                    split_tokens.extend(char_list)
        elif self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> id, falling back to the UNK id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token string, falling back to the UNK token."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens, fusing WordPiece continuations ('##...') back together."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """BERT-style: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order; warns when
        ids are non-consecutive. Returns a 1-tuple with the written path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            # save_directory is treated as the target file name itself.
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
class ConResidualBlock(nn.Module):
    """Residual block whose normalization layers are conditioned on a code
    tensor: out = x + n2(c2(nl(n1(c1(x), code))), code)."""

    def __init__(self, h_dim, c_norm_layer=None, nl_layer=None, use_dropout=False, return_con=False):
        """
        Args:
            h_dim: channel count kept throughout the block.
            c_norm_layer: factory for a code-conditioned norm layer.
            nl_layer: factory for the non-linearity.
            use_dropout: insert Dropout(0.5) after the second norm.
            return_con: if True, forward returns [out, code] instead of out.
        """
        super(ConResidualBlock, self).__init__()
        self.return_con = return_con
        self.c1 = Conv2dBlock(h_dim, h_dim, kernel_size=3, stride=1, padding=1, pad_type='reflect', bias=False)
        self.n1 = c_norm_layer(h_dim)
        self.l1 = nl_layer()
        self.c2 = Conv2dBlock(h_dim, h_dim, kernel_size=3, stride=1, padding=1, pad_type='reflect', bias=False)
        self.n2 = c_norm_layer(h_dim)
        # Identity stand-in keeps forward() branch-free when dropout is off.
        self.dropout = nn.Dropout(0.5) if use_dropout else (lambda x: x)

    def forward(self, input):
        x, code = input
        residual = self.l1(self.n1(self.c1(x), code))
        residual = self.dropout(self.n2(self.c2(residual), code))
        out = x + residual
        return [out, code] if self.return_con else out
def test_digits_cosine_greedi_ln_object():
    """A GreeDi optimizer object (lazy outer / naive inner) must reproduce the
    stored ranking, gains and subset on the digits data."""
    optimizer = GreeDi(optimizer1='lazy', optimizer2='naive', random_state=0)
    selector = SumRedundancySelection(100, 'cosine', optimizer=optimizer)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_greedi_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_greedi_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax Linear Multi-Step (LMS) discrete scheduler for diffusion sampling.

    State is held in an immutable ``LMSDiscreteSchedulerState``; every method
    takes and returns state rather than mutating ``self``.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype

    def has_state(self):
        # Signals the pipeline that this scheduler carries an explicit state object.
        return True

    # NOTE(review): bare name below looks like a stripped decorator (probably
    # @register_to_config) — confirm against the original diffusers source.
    _to_config
    def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32):
        # All hyperparameters are presumably recorded on self.config by the
        # (stripped) register_to_config decorator; only dtype is kept directly.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState]=None) -> LMSDiscreteSchedulerState:
        """Build the initial scheduler state (timesteps in reverse order and
        per-timestep sigmas derived from the cumulative alphas)."""
        if (common is None):
            common = CommonSchedulerState.create(self)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::(- 1)]
        sigmas = (((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5)
        # Initial noise is scaled by the largest sigma.
        init_noise_sigma = sigmas.max()
        return LMSDiscreteSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas)

    def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray:
        """Scale *sample* by 1/sqrt(sigma^2 + 1) for the given timestep."""
        (step_index,) = jnp.where((state.timesteps == timestep), size=1)
        step_index = step_index[0]
        sigma = state.sigmas[step_index]
        sample = (sample / (((sigma ** 2) + 1) ** 0.5))
        return sample

    def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order):
        """Integrate the Lagrange basis polynomial over [sigma_t, sigma_{t+1}]
        to obtain the LMS coefficient for derivative *current_order*."""
        def lms_derivative(tau):
            prod = 1.0
            for k in range(order):
                if (current_order == k):
                    continue
                prod *= ((tau - state.sigmas[(t - k)]) / (state.sigmas[(t - current_order)] - state.sigmas[(t - k)]))
            return prod
        integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[(t + 1)], epsrel=0.0001)[0]
        return integrated_coeff

    def set_timesteps(self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple=()) -> LMSDiscreteSchedulerState:
        """Return a new state with *num_inference_steps* timesteps and sigmas
        linearly interpolated between the training sigmas."""
        timesteps = jnp.linspace((self.config.num_train_timesteps - 1), 0, num_inference_steps, dtype=self.dtype)
        # Fractional timesteps are interpolated between neighboring sigmas.
        low_idx = jnp.floor(timesteps).astype(jnp.int32)
        high_idx = jnp.ceil(timesteps).astype(jnp.int32)
        frac = jnp.mod(timesteps, 1.0)
        sigmas = (((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5)
        sigmas = (((1 - frac) * sigmas[low_idx]) + (frac * sigmas[high_idx]))
        # Trailing zero sigma terminates the sampling trajectory.
        sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)])
        timesteps = timesteps.astype(jnp.int32)
        # Rolling buffer of past derivatives for the multi-step update.
        derivatives = jnp.zeros(((0,) + shape), dtype=self.dtype)
        return state.replace(timesteps=timesteps, sigmas=sigmas, num_inference_steps=num_inference_steps, derivatives=derivatives)

    def step(self, state: LMSDiscreteSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, order: int=4, return_dict: bool=True) -> Union[(FlaxLMSSchedulerOutput, Tuple)]:
        """One LMS denoising step: predict x0, push the new derivative into the
        history, and combine up to *order* past derivatives."""
        if (state.num_inference_steps is None):
            raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        sigma = state.sigmas[timestep]
        # Recover the predicted original sample according to prediction_type.
        if (self.config.prediction_type == 'epsilon'):
            pred_original_sample = (sample - (sigma * model_output))
        elif (self.config.prediction_type == 'v_prediction'):
            pred_original_sample = ((model_output * ((- sigma) / (((sigma ** 2) + 1) ** 0.5))) + (sample / ((sigma ** 2) + 1)))
        else:
            raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')
        derivative = ((sample - pred_original_sample) / sigma)
        state = state.replace(derivatives=jnp.append(state.derivatives, derivative))
        # Keep only the most recent `order` derivatives.
        if (len(state.derivatives) > order):
            state = state.replace(derivatives=jnp.delete(state.derivatives, 0))
        order = min((timestep + 1), order)
        lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)]
        # Newest derivative pairs with the lowest-order coefficient.
        prev_sample = (sample + sum(((coeff * derivative) for (coeff, derivative) in zip(lms_coeffs, reversed(state.derivatives)))))
        if (not return_dict):
            return (prev_sample, state)
        return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state)

    def add_noise(self, state: LMSDiscreteSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        """Forward-diffuse *original_samples* by the sigmas of *timesteps*."""
        sigma = state.sigmas[timesteps].flatten()
        sigma = broadcast_to_shape_from_left(sigma, noise.shape)
        noisy_samples = (original_samples + (noise * sigma))
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
def require_torch(test_case):
    """Test decorator: run *test_case* as-is when PyTorch is installed,
    otherwise mark it skipped."""
    if is_torch_available():
        return test_case
    return unittest.skip('test requires PyTorch')(test_case)
def generate_json(docid, vector):
    """Serialize a document id and its dense vector as a JSON record with an
    empty 'contents' field (non-ASCII characters are kept unescaped)."""
    record = {'id': docid, 'contents': '', 'vector': vector}
    return json.dumps(record, ensure_ascii=False)
_model  # NOTE(review): bare name looks like a stripped decorator (probably @register_model) — confirm against the original timm source
def ese_vovnet39b_evos(pretrained=False, **kwargs):
    """VoVNet-39b with eSE attention, using EvoNorm-S0 as the norm/act layer.

    Args:
        pretrained: load pretrained weights if available.
        **kwargs: forwarded to the VoVNet builder.
    """
    def norm_act_fn(num_features, **nkwargs):
        # jit=False: EvoNormSample is created without TorchScript compilation.
        return create_norm_act('EvoNormSample', num_features, jit=False, **nkwargs)
    return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
def generate_model(opt):
    """Instantiate a 3D ResNet/ResNeXt from `opt.model` / `opt.model_depth`.

    Returns:
        (model, model.parameters()); the model is moved to CUDA unless
        opt.no_cuda is set.
    """
    assert opt.model in ['resnet', 'resnext']
    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        model = getattr(resnet, 'resnet{}'.format(opt.model_depth))(opt=opt)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]
        from models.resnext import get_fine_tuning_parameters
        model = getattr(resnext, 'resnet{}'.format(opt.model_depth))(opt=opt)
    if not opt.no_cuda:
        model = model.cuda()
    return (model, model.parameters())
_metaclass(ABCMeta)  # NOTE(review): looks like a stripped six @add_metaclass(ABCMeta) decorator — confirm against the original file
class SimulatorProcessBase(mp.Process):
    """Abstract base for simulator worker processes; subclasses implement
    _build_player to construct the actual simulator client."""

    def __init__(self, idx):
        # idx: worker index, also used to derive the process name/identity.
        super(SimulatorProcessBase, self).__init__()
        self.idx = int(idx)
        self.name = u'simulator-{}'.format(self.idx)
        # Byte identity, e.g. for ZeroMQ socket identification.
        self.identity = self.name.encode('utf-8')

    def _build_player(self):
        # Abstract hook: subclasses create and return the simulator player here.
        pass
class _GateMoudle(nn.Module): def __init__(self): super(_GateMoudle, self).__init__() self.conv1 = nn.Conv2d(131, 64, (3, 3), 1, 1) self.relu = nn.LeakyReLU(0.2, inplace=True) self.conv2 = nn.Conv2d(64, 64, (1, 1), 1, padding=0) for i in self.modules(): if isinstance(i, nn.Conv2d): j = ((i.kernel_size[0] * i.kernel_size[1]) * i.out_channels) i.weight.data.normal_(0, math.sqrt((2 / j))) if (i.bias is not None): i.bias.data.zero_() def forward(self, x): con1 = self.relu(self.conv1(x)) scoremap = self.conv2(con1) return scoremap
def test(dataset, epoch):
    """Score every graph pair in *dataset* with the global `model`.

    Writes the per-pair cosine similarities (space-separated) to
    'recordFATTOld<epoch>.txt' and returns them as a list of floats.

    Args:
        dataset: iterable of (data, label) pairs where data unpacks into
            (x1, x2, edge_index1, edge_index2, edge_attr1, edge_attr2).
        epoch: epoch number used in the record-file name.
    """
    results = []
    # `with` guarantees the record file is closed even if inference raises;
    # the original `open`/`close` pair leaked the handle on error. Dead
    # bookkeeping (count/correct/tp/tn/fp/fn and the unused sign) removed.
    with open('recordFATTOld' + str(epoch) + '.txt', 'w') as record_file:
        for data, label in dataset:
            x1, x2, edge_index1, edge_index2, edge_attr1, edge_attr2 = data
            x1 = torch.tensor(x1, dtype=torch.long, device=device)
            x2 = torch.tensor(x2, dtype=torch.long, device=device)
            edge_index1 = torch.tensor(edge_index1, dtype=torch.long, device=device)
            edge_index2 = torch.tensor(edge_index2, dtype=torch.long, device=device)
            if edge_attr1 is not None:
                edge_attr1 = torch.tensor(edge_attr1, dtype=torch.long, device=device)
                edge_attr2 = torch.tensor(edge_attr2, dtype=torch.long, device=device)
            prediction = model([x1, x2, edge_index1, edge_index2, edge_attr1, edge_attr2])
            output = F.cosine_similarity(prediction[0], prediction[1])
            results.append(output.item())
            record_file.write(str(output.item()))
            record_file.write(' ')
    return results
def training_loss_2nd_user_task(data, batch_index, model, sess, train_data, is_training):
    """Accumulate the 2nd-order user-task loss over the given batches and
    return it averaged by the total number of user batches."""
    total_loss = 0.0
    num_batch = data.oracle_num_users // setting.batch_size_user
    for index in batch_index:
        (b_target_user, b_k_shot_item, b_second_order_users, b_third_order_items,
         b_oracle_user_ebd, b_mask_num_second_order_user,
         b_mask_num_third_order_item) = data.batch_gen_3rd_user_task(train_data, index)
        feed_dict = {
            model.target_user: b_oracle_user_ebd,
            model.support_item_1st: b_k_shot_item,
            model.training_phrase_user_task: is_training,
            model.support_user_2nd: b_second_order_users,
            model.training_phrase_item_task: is_training,
        }
        total_loss += sess.run(model.loss_2nd_user, feed_dict)
    return total_loss / num_batch
def test(model, dataloader):
    """Evaluate *model* on *dataloader* with timing.

    Returns:
        (avg batch time in seconds, total wall time, dataset accuracy).
    """
    model.eval()
    device = model.device
    start = time.time()
    cumulative_batch_time = 0.0
    num_correct = 0.0
    with torch.no_grad():
        for batch in dataloader:
            batch_start = time.time()
            premises = batch['premise'].to(device)
            premises_lengths = batch['premise_length'].to(device)
            hypotheses = batch['hypothesis'].to(device)
            hypotheses_lengths = batch['hypothesis_length'].to(device)
            labels = batch['label'].to(device)
            _, probs = model(premises, premises_lengths, hypotheses, hypotheses_lengths)
            num_correct += correct_predictions(probs, labels)
            cumulative_batch_time += time.time() - batch_start
    avg_batch_time = cumulative_batch_time / len(dataloader)
    total_time = time.time() - start
    accuracy = num_correct / len(dataloader.dataset)
    return (avg_batch_time, total_time, accuracy)
class Bert4WSFunction(BaseFunction):
    """BERT + CRF word-segmentation head.

    A batch unpacks as (input_ids, attention_mask, segment_ids, label_ids,
    label_masks); emissions come from `forward`, decoding from the CRF.
    """

    def __init__(self):
        super().__init__()

    def forward(self, batch=None):
        """Return per-token emission scores (batch, seq_len, num_labels)."""
        input_ids, attention_mask, segment_ids, label_ids, label_masks = batch
        sequence_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)[0]
        sequence_output = self.dropout(sequence_output)
        output = self.classifier(sequence_output)
        return output

    def _valid_lengths(self, label_masks, batch_size, max_len):
        """Number of leading unmasked positions per sequence.

        Extracted from the identical loops previously duplicated in `predict`
        and `evaluate`. NOTE(review): preserves the original quirk that a
        fully-unmasked row (no 0 entry) keeps length 0 — confirm intended.
        """
        valid_len = torch.zeros(batch_size, dtype=torch.int, device=self.device)
        for i in range(batch_size):
            seq_len = 0
            for j in range(max_len):
                if label_masks[i][j].item() == 1:
                    seq_len += 1
                else:
                    valid_len[i] = seq_len
                    break
        return valid_len

    def predict(self, batch=None):
        """Viterbi-decode label sequences; returns (prediction, valid lengths)."""
        input_ids, attention_mask, segment_ids, label_ids, label_masks = batch
        output = self.forward(batch)
        prediction, ans_score = self.crf.viterbi_decode(output, label_masks)
        batch_size, max_len, feat_dim = output.shape
        valid_len = self._valid_lengths(label_masks, batch_size, max_len)
        return (prediction, valid_len)

    def loss(self, batch=None, loss_function=None):
        """Mean CRF negative log-likelihood over the batch."""
        input_ids, attention_mask, segment_ids, label_ids, label_masks = batch
        output = self.forward(batch)
        loss = self.crf(output, label_ids, label_masks).mean()
        return loss

    def evaluate(self, batch=None, metrics=None):
        """Decode the batch and feed predictions/labels into *metrics*."""
        input_ids, attention_mask, segment_ids, label_ids, label_masks = batch
        output = self.forward(batch)
        prediction, ans_score = self.crf.viterbi_decode(output, label_masks)
        batch_size, max_len, feat_dim = output.shape
        valid_len = self._valid_lengths(label_masks, batch_size, max_len)
        metrics.evaluate(prediction, label_ids, valid_len)
class CosineSimilarityAnalysis():
    """Compute max cosine-similarity scores between text datasets on the GPU
    and plot the score distributions for three instruction datasets.
    """

    def __init__(self, file_paths):
        # Paths kept on the instance for callers; methods below take explicit paths.
        self.file_paths = file_paths

    def load_scores(self, file_path):
        """Load a JSON list of scores from `file_path`."""
        with open(file_path, 'r') as file:
            scores = json.load(file)
        return scores

    def calculate_cosine_scores_gpu(self, data1, data2_tensors, vectorizer, batch_size=100, save_path_1=None, save_path_2=None):
        """For each text in `data1`, find the maximum cosine similarity against
        every tensor in `data2_tensors`, processing `data1` in batches.

        Returns (max scores, indices). NOTE(review): `indices_batch` (row
        indices within each tensor) is collected but never used; the returned
        `most_similar_indices` identify which tensor *group* gave the max, not
        the row inside it — confirm that is the intent.
        """
        max_cosine_scores = []
        most_similar_indices = []
        for i in range(0, len(data1), batch_size):
            batch_text = data1[i:(i + batch_size)]
            batch_vector = self.prepare_data_for_gpu(vectorizer, batch_text)
            max_scores_batch = []
            indices_batch = []
            for data2_tensor in data2_tensors:
                scores = self.cosine_similarity_gpu(batch_vector, data2_tensor)
                (max_scores, indices) = torch.max(scores, dim=1)
                max_scores_batch.append(max_scores)
                indices_batch.append(indices)
            # Reduce over tensor groups: best score per batch row, and the
            # index of the group that produced it.
            max_scores_batch_tensor = torch.stack(max_scores_batch)
            (max_scores, max_indices) = torch.max(max_scores_batch_tensor, dim=0)
            max_cosine_scores.extend(max_scores.cpu().numpy())
            most_similar_indices.extend(max_indices.cpu().numpy())
        # Optionally persist results as plain-Python JSON lists.
        if save_path_1:
            most_similar_indices_native = [int(index) for index in most_similar_indices]
            with open(save_path_1, 'w') as file:
                json.dump(most_similar_indices_native, file)
        if save_path_2:
            most_similar_scores_native = [float(score) for score in max_cosine_scores]
            with open(save_path_2, 'w') as file:
                json.dump(most_similar_scores_native, file)
        return (max_cosine_scores, most_similar_indices)

    def cosine_similarity_gpu(self, batch_tensor1, batch_tensor2):
        """Pairwise cosine similarity via L2-normalize + matmul."""
        batch_tensor1_norm = F.normalize(batch_tensor1, p=2, dim=(- 1))
        batch_tensor2_norm = F.normalize(batch_tensor2, p=2, dim=(- 1))
        return torch.mm(batch_tensor1_norm, batch_tensor2_norm.transpose(0, 1))

    def prepare_data_for_gpu(self, vectorizer, texts):
        """Vectorize `texts` (sklearn-style transform) and move to CUDA."""
        vectors = vectorizer.transform(texts).toarray()
        return torch.tensor(vectors, device='cuda')

    def plot_scores(self, oss_scores, evol_scores, codealpaca_scores):
        """Plot the similarity-score distributions of the three datasets and
        save the figure as 'HE_similarity_comparison.png'.
        """
        num_samples_oss = len(oss_scores)
        num_samples_evol = len(evol_scores)
        num_samples_codealpaca = len(codealpaca_scores)
        # Bucket scores at 2-decimal resolution for the distribution curves.
        score_counts_oss = Counter([round(score, 2) for score in oss_scores])
        score_counts_evol = Counter([round(score, 2) for score in evol_scores])
        score_counts_codealpaca = Counter([round(score, 2) for score in codealpaca_scores])
        avg_score_oss = np.mean(oss_scores)
        avg_score_evol = np.mean(evol_scores)
        avg_score_codealpaca = np.mean(codealpaca_scores)
        # Convert counts to fractions of each dataset.
        percentage_counts_oss = {score: (count / num_samples_oss) for (score, count) in score_counts_oss.items()}
        percentage_counts_evol = {score: (count / num_samples_evol) for (score, count) in score_counts_evol.items()}
        percentage_counts_codealpaca = {score: (count / num_samples_codealpaca) for (score, count) in score_counts_codealpaca.items()}
        plt.figure(figsize=(4.8, 3))
        sorted_keys_oss = sorted(percentage_counts_oss.keys())
        sorted_keys_evol = sorted(percentage_counts_evol.keys())
        sorted_keys_codealpaca = sorted(percentage_counts_codealpaca.keys())
        # RGB triples in 0-1 range; lines use color1-3, fills use color4-6.
        color1 = ((59 / 256), (117 / 256), (175 / 256))
        color2 = ((82 / 256), (159 / 256), (64 / 256))
        color3 = ((239 / 256), (139 / 256), (54 / 256))
        color4 = ((0 / 256), (60 / 256), (146 / 256))
        color5 = ((0 / 256), (146 / 256), (10 / 256))
        # NOTE(review): '0 / 37' looks like a typo for '0 / 256'; both are
        # 0.0 so the rendered color is unaffected.
        color6 = ((230 / 256), (120 / 256), (0 / 37))
        alpha = 0.33
        plt.plot(sorted_keys_codealpaca, [percentage_counts_codealpaca[k] for k in sorted_keys_codealpaca], color=color2, alpha=1, label=('Self-Instruct; Avg Score: ' + f'{avg_score_codealpaca:.3f}'), zorder=3)
        plt.fill_between(sorted_keys_codealpaca, [percentage_counts_codealpaca[k] for k in sorted_keys_codealpaca], color=color5, alpha=alpha, zorder=2)
        plt.plot(sorted_keys_evol, [percentage_counts_evol[k] for k in sorted_keys_evol], color=color1, alpha=1, label=('Evol-Instruct; Avg Score: ' + f'{avg_score_evol:.3f}'), zorder=3)
        plt.fill_between(sorted_keys_evol, [percentage_counts_evol[k] for k in sorted_keys_evol], color=color4, alpha=alpha, zorder=2)
        plt.plot(sorted_keys_oss, [percentage_counts_oss[k] for k in sorted_keys_oss], color=color3, alpha=1, label=('OSS-Instruct; Avg Score: ' + f'{avg_score_oss:.3f}'), zorder=3)
        plt.fill_between(sorted_keys_oss, [percentage_counts_oss[k] for k in sorted_keys_oss], color=color6, alpha=alpha, zorder=2)
        plt.xlabel('Cosine Similarity Score', fontsize=15)
        plt.ylabel('Percentage', fontsize=15)
        plt.xlim(0, 0.5)
        plt.ylim(bottom=0)
        plt.xticks(np.arange(0, 0.55, 0.1))
        plt.yticks(np.arange(0, 0.16, 0.02), [f'{i:.2f}' for i in np.arange(0, 0.16, 0.02)])
        plt.tick_params(axis='both', labelsize=14)
        # Dotted verticals mark each dataset's mean score.
        plt.axvline(x=avg_score_codealpaca, color='Forestgreen', linestyle='dotted')
        plt.axvline(x=avg_score_evol, color='royalblue', linestyle='dotted')
        plt.axvline(x=avg_score_oss, color='tomato', linestyle='dotted')
        plt.legend(prop={'size': 10})
        plt.tight_layout()
        plt.savefig('HE_similarity_comparison.png')
class DeResNetBlockBatchNorm(nn.Module):
    """Residual block of two 3x3 transposed convolutions with batch norm.

    A 1x1 transposed-conv + BN downsample branch is added whenever the
    stride or channel count changes, so the skip connection matches the
    main path's output shape.
    """

    def __init__(self, inplanes, planes, stride=1, output_padding=0, activation='relu'):
        super(DeResNetBlockBatchNorm, self).__init__()
        assert (activation in ['relu', 'elu', 'leaky_relu'])
        self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
        self.bn1 = nn.BatchNorm2d(planes)
        # Shared activation module reused after both BN layers.
        if activation == 'relu':
            act = nn.ReLU(inplace=True)
        elif activation == 'elu':
            act = nn.ELU(inplace=True)
        else:
            act = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.activation = act
        self.deconv2 = deconv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(
                nn.ConvTranspose2d(inplanes, planes, kernel_size=1, stride=stride,
                                   output_padding=output_padding, bias=False),
                nn.BatchNorm2d(planes))
        else:
            self.downsample = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize every BatchNorm to identity scale and zero shift."""
        for bn in (self.bn1, self.bn2):
            nn.init.constant_(bn.weight, 1.0)
            nn.init.constant_(bn.bias, 0.0)
        if self.downsample is not None:
            assert isinstance(self.downsample[1], nn.BatchNorm2d)
            nn.init.constant_(self.downsample[1].weight, 1.0)
            nn.init.constant_(self.downsample[1].bias, 0.0)

    def init(self, x, init_scale=1.0):
        """Data-dependent init hook; just a forward pass without gradients
        (`init_scale` is accepted for interface compatibility and unused)."""
        with torch.no_grad():
            return self(x)

    def forward(self, x):
        residual = self.downsample(x) if self.downsample is not None else x
        out = self.activation(self.bn1(self.deconv1(x)))
        out = self.bn2(self.deconv2(out))
        out += residual
        return self.activation(out)
def _upgrade_state_dict(state):
    """Upgrade an old-format checkpoint `state` dict in place to the current
    layout, and return it.

    Each block below handles one historical format change; the order of the
    blocks matters because later ones read keys earlier ones create.
    """
    # Very old checkpoints had a flat 'optimizer'/'best_loss' layout.
    if ('optimizer_history' not in state):
        state['optimizer_history'] = [{'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss']}]
        state['last_optimizer_state'] = state['optimizer']
        del state['optimizer']
        del state['best_loss']
    # Move epoch/offset/val_loss under 'extra_state'.
    if (('epoch' in state) and ('extra_state' not in state)):
        state['extra_state'] = {'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss']}
        del state['epoch']
        del state['batch_offset']
        del state['val_loss']
    # Optimizer state used to live inside each history entry; keep only the last.
    if ('optimizer' in state['optimizer_history'][(- 1)]):
        state['last_optimizer_state'] = state['optimizer_history'][(- 1)]['optimizer']
        for optim_hist in state['optimizer_history']:
            del optim_hist['optimizer']
    if ('optimizer_name' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['optimizer_name'] = 'FairseqNAG'
    # 'best_loss' became the LR scheduler's 'best' value.
    if ('lr_scheduler_state' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['lr_scheduler_state'] = {'best': state['optimizer_history'][(- 1)]['best_loss']}
        del state['optimizer_history'][(- 1)]['best_loss']
    if ('num_updates' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['num_updates'] = 0
    # Synthesize iterator progress from the legacy epoch/batch_offset fields.
    if ('train_iterator' not in state['extra_state']):
        state['extra_state']['train_iterator'] = {'epoch': state['extra_state'].get('epoch', 0), 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0)}
    # Legacy argparse-Namespace checkpoints: patch renamed/retyped args, then
    # convert the namespace into the structured 'cfg'.
    if (('args' in state) and (state['args'] is not None)):
        if (hasattr(state['args'], 'max_positions') and (not hasattr(state['args'], 'max_source_positions'))):
            state['args'].max_source_positions = state['args'].max_positions
            state['args'].max_target_positions = state['args'].max_positions
        if (not hasattr(state['args'], 'task')):
            state['args'].task = 'translation'
        if getattr(state['args'], 'raw_text', False):
            state['args'].dataset_impl = 'raw'
        elif getattr(state['args'], 'lazy_load', False):
            state['args'].dataset_impl = 'lazy'
        # Epochs are 1-based; clamp anything lower.
        if (state['extra_state']['train_iterator'] is not None):
            state['extra_state']['train_iterator']['epoch'] = max(state['extra_state']['train_iterator'].get('epoch', 1), 1)
        if hasattr(state['args'], 'remove_bpe'):
            state['args'].post_process = state['args'].remove_bpe
        if hasattr(state['args'], 'min_lr'):
            state['args'].stop_min_lr = state['args'].min_lr
            del state['args'].min_lr
        if (hasattr(state['args'], 'criterion') and (state['args'].criterion in ['binary_cross_entropy', 'kd_binary_cross_entropy'])):
            state['args'].criterion = 'wav2vec'
        if (hasattr(state['args'], 'log_keys') and (state['args'].log_keys is None)):
            delattr(state['args'], 'log_keys')
        if (hasattr(state['args'], 'task') and (state['args'].task == 'speech_pretraining')):
            state['args'].task = 'audio_pretraining'
        if (hasattr(state['args'], 'arch') and (state['args'].arch == 'audio_cpc')):
            state['args'].arch = 'wav2vec'
        # Scalar LR became a per-group list.
        if (hasattr(state['args'], 'lr') and isinstance(state['args'].lr, float)):
            state['args'].lr = [state['args'].lr]
        # Multi-path 'data' collapsed to a single path.
        if (hasattr(state['args'], 'data') and isinstance(state['args'].data, list) and (len(state['args'].data) > 0)):
            state['args'].data = state['args'].data[0]
        state['cfg'] = convert_namespace_to_omegaconf(state['args'])
    # Structured-config fix-ups: boolean print_alignment flags became strings.
    if (('cfg' in state) and (state['cfg'] is not None)):
        cfg = state['cfg']
        with open_dict(cfg):
            if (('task' in cfg) and ('eval_wer_config' in cfg.task) and isinstance(cfg.task.eval_wer_config.print_alignment, bool)):
                cfg.task.eval_wer_config.print_alignment = 'hard'
            if (('generation' in cfg) and isinstance(cfg.generation.print_alignment, bool)):
                cfg.generation.print_alignment = ('hard' if cfg.generation.print_alignment else None)
            if (('model' in cfg) and ('w2v_args' in cfg.model) and (cfg.model.w2v_args is not None) and (hasattr(cfg.model.w2v_args, 'task') or ('task' in cfg.model.w2v_args)) and hasattr(cfg.model.w2v_args.task, 'eval_wer_config') and (cfg.model.w2v_args.task.eval_wer_config is not None) and isinstance(cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool)):
                cfg.model.w2v_args.task.eval_wer_config.print_alignment = 'hard'
    return state
class EntityLabel():
    """Value object describing one entity label.

    Equality and hashing are based solely on the identifier, so two labels
    with the same identifier compare equal regardless of their other fields.
    """

    def __init__(self, identifier, index, short_name, verbose_name):
        self._identifier = identifier
        self._index = index
        self._short_name = short_name
        self._verbose_name = verbose_name

    def identifier(self):
        """Unique identifier of this label."""
        return self._identifier

    def index(self):
        """Integer position of this label."""
        return self._index

    def short_name(self):
        """Compact display name."""
        return self._short_name

    def verbose_name(self):
        """Human-readable display name."""
        return self._verbose_name

    def __int__(self):
        # int(label) yields the label's index.
        return self._index

    def __eq__(self, other):
        return isinstance(other, EntityLabel) and self._identifier == other._identifier

    def __hash__(self):
        return hash(self._identifier)
class TestBasicSwap(QiskitTestCase):
    """Unit tests for the BasicSwap transpiler pass.

    Each test builds a circuit, runs the pass against a coupling map, and
    compares the resulting DAG with an expected circuit in which SWAP gates
    have been inserted so every CX acts on coupled qubits.
    """

    def test_trivial_case(self):
        """No swaps needed: all CXs already respect the coupling map."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        qr = QuantumRegister(3, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.h(qr[0])
        circuit.cx(qr[0], qr[2])
        dag = circuit_to_dag(circuit)
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(dag, after)

    def test_trivial_in_same_layer(self):
        """Two compatible CXs in the same layer require no swaps."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[2], qr[3])
        circuit.cx(qr[0], qr[1])
        dag = circuit_to_dag(circuit)
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(dag, after)

    def test_a_single_swap(self):
        """A CX on uncoupled qubits gets exactly one SWAP inserted."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        qr = QuantumRegister(3, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[2])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.swap(qr[1], qr[0])
        expected.cx(qr[0], qr[2])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_keep_layout(self):
        """After a swap, subsequent gates follow the permuted layout."""
        coupling = CouplingMap([[1, 0], [1, 2]])
        qr = QuantumRegister(3, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[2])
        circuit.h(qr[0])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.swap(qr[0], qr[1])
        expected.cx(qr[1], qr[2])
        expected.h(qr[1])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_far_swap(self):
        """Qubits three hops apart need two SWAPs along the line."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[3])
        circuit.cx(qr[3], qr[0])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.swap(qr[0], qr[1])
        expected.swap(qr[1], qr[2])
        expected.cx(qr[2], qr[3])
        expected.cx(qr[3], qr[2])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_far_swap_with_gate_the_front(self):
        """A single-qubit gate before the far CX stays on its original wire."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr[3])
        circuit.cx(qr[3], qr[0])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.h(qr[3])
        expected.swap(qr[3], qr[2])
        expected.swap(qr[2], qr[1])
        expected.cx(qr[1], qr[0])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_far_swap_with_gate_the_back(self):
        """A gate after the far CX lands on the post-swap wire."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[3], qr[0])
        circuit.h(qr[3])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.swap(qr[3], qr[2])
        expected.swap(qr[2], qr[1])
        expected.cx(qr[1], qr[0])
        expected.h(qr[1])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_far_swap_with_gate_the_middle(self):
        """Gates between two far CXs track the permuted layout."""
        coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
        qr = QuantumRegister(4, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[3], qr[0])
        circuit.h(qr[3])
        circuit.cx(qr[0], qr[3])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr)
        expected.swap(qr[3], qr[2])
        expected.swap(qr[2], qr[1])
        expected.cx(qr[1], qr[0])
        expected.h(qr[1])
        expected.cx(qr[0], qr[1])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_swap_between_qregs(self):
        """Swaps may cross quantum-register boundaries."""
        coupling = CouplingMap([[0, 1], [0, 2]])
        qr0 = QuantumRegister(1, 'qr0')
        qr1 = QuantumRegister(2, 'qr1')
        circuit = QuantumCircuit(qr0, qr1)
        circuit.cx(qr1[0], qr1[1])
        dag = circuit_to_dag(circuit)
        expected = QuantumCircuit(qr0, qr1)
        expected.swap(qr1[0], qr0[0])
        expected.cx(qr0[0], qr1[1])
        pass_ = BasicSwap(coupling)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_initial_layout(self):
        """An explicit initial layout is respected by the pass."""
        coupling = CouplingMap([[0, 1], [1, 2]])
        qr = QuantumRegister(3, 'q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[1], qr[2])
        dag = circuit_to_dag(circuit)
        layout = Layout({qr[1]: 0, qr[0]: 1, qr[2]: 2})
        expected = QuantumCircuit(qr)
        expected.swap(qr[1], qr[0])
        expected.cx(qr[0], qr[2])
        pass_ = BasicSwap(coupling, initial_layout=layout)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)

    def test_initial_layout_in_different_qregs(self):
        """Initial layout mapping qubits from several registers."""
        coupling = CouplingMap([[0, 1], [1, 2]])
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        qr2 = QuantumRegister(1, 'q2')
        circuit = QuantumCircuit(qr0, qr1, qr2)
        circuit.cx(qr1[0], qr2[0])
        dag = circuit_to_dag(circuit)
        layout = Layout({qr1[0]: 0, qr0[0]: 1, qr2[0]: 2})
        expected = QuantumCircuit(qr0, qr1, qr2)
        expected.swap(qr1[0], qr0[0])
        expected.cx(qr0[0], qr2[0])
        pass_ = BasicSwap(coupling, initial_layout=layout)
        after = pass_.run(dag)
        self.assertEqual(circuit_to_dag(expected), after)
def predict_inputs(model: nn.Module, inputs: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
    """Run `model` on `inputs` and return (logits, class probabilities,
    argmax predictions), with softmax/argmax taken over dimension 1."""
    raw_scores = model(inputs)
    class_probs = torch.softmax(raw_scores, 1)
    top_classes = raw_scores.argmax(1)
    return (raw_scores, class_probs, top_classes)
# NOTE(review): the bare tuple below is almost certainly the argument of a
# stripped registration decorator (e.g. @TokenEmbedder.register('pretrained_mlm'));
# confirm against the original file.
('pretrained_mlm')
class PretrainedMLM(TokenEmbedder):
    """TokenEmbedder wrapping a HuggingFace masked-LM, with an ArpInjector
    spliced in as the word-embedding layer (soft-prompt injection).

    NOTE(review): `lm_head`, `transformer_model`, `num_hidden_layers` and
    `embeddings_layer` are accessed as *attributes* throughout (e.g.
    `self.embeddings_layer.word_embeddings`), while `_number_of_token_type_embeddings`
    is *called* — the former four look like they lost @property decorators
    during extraction; confirm against the original file.
    """

    # Keys allowed to be absent when loading a saved state dict.
    authorized_missing_keys = ['position_ids$']

    def __init__(self, model_name: str, *, max_length: int=None, train_parameters: Union[(bool, str)]=True, arp_injector: Union[(Lazy[ArpInjector], ArpInjector)], on_logits: Union[(bool, str)]=False, eval_mode: bool=False, tokenizer_kwargs: Optional[Dict[(str, Any)]]=None, transformer_kwargs: Optional[Dict[(str, Any)]]=None) -> None:
        super().__init__()
        if (transformer_kwargs is None):
            transformer_kwargs = {}
        self.transformer_model_for_mlm = AutoModelForMaskedLM.from_pretrained(model_name, **transformer_kwargs)
        self.config = self.transformer_model_for_mlm.config
        self._max_length = max_length
        # Some architectures expose a distinct embedding size; fall back to hidden size.
        self.output_dim = getattr(self.config, 'embedding_size', self.config.hidden_size)
        tokenizer = PretrainedTransformerTokenizer(model_name, tokenizer_kwargs=tokenizer_kwargs)
        self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)
        self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)
        self._num_added_tokens = (self._num_added_start_tokens + self._num_added_end_tokens)
        # 'no_prompts' means: train as 'only_prompts' but keep the prompt
        # parameters themselves frozen.
        frozen_prompts = False
        if (train_parameters == 'no_prompts'):
            train_parameters = 'only_prompts'
            frozen_prompts = True
        self.train_parameters = train_parameters
        # Freeze everything except what the training mode allows
        # ('last_layer_only' keeps the final attention block and the LM head).
        if (train_parameters is not True):
            for (key, param) in self.named_parameters():
                last_layer_name = f'layer.{(self.num_hidden_layers - 1)}.attention'
                if (train_parameters != 'only_prompts'):
                    if (((train_parameters == 'last_layer_only') and (last_layer_name in key)) or ('.lm_head.' in key)):
                        continue
                param.requires_grad = False
        # Replace the word-embedding module with the (possibly lazily built)
        # ArpInjector so prompts are injected at embedding time.
        transformer_embeddings = self.embeddings_layer
        if isinstance(arp_injector, Lazy):
            arp_injector_ = arp_injector.construct(embedder=transformer_embeddings.word_embeddings, tokenizer=tokenizer.tokenizer)
        else:
            arp_injector_ = arp_injector
        if frozen_prompts:
            arp_injector_.freeze_prompts()
        transformer_embeddings.word_embeddings = arp_injector_
        self.config.output_hidden_states = True
        self.on_logits = on_logits
        self.eval_mode = eval_mode
        # For 'pre_decoder*' modes, neuter parts of the LM head with Identity
        # so forward() can return pre-decoder representations as "logits".
        if (on_logits in ['pre_decoder', 'pre_decoder_layer_norm']):
            lm_head = self.lm_head
            lm_head._decoder = lm_head.decoder
            lm_head.decoder = Identity()
            if (on_logits != 'pre_decoder_layer_norm'):
                lm_head._layer_norm = lm_head.layer_norm
                lm_head.layer_norm = Identity()

    def state_dict(self, destination, prefix, keep_vars):
        """Copy this module's state into `destination`; in 'only_prompts'
        mode only prompt parameters are saved.

        NOTE(review): this overrides nn.Module.state_dict with a narrower
        signature (no defaults) — confirm callers always pass all three args.
        """
        states = super().state_dict(prefix=prefix, keep_vars=keep_vars)
        logger.warning(f'saving {self.train_parameters}')
        if (self.train_parameters == 'only_prompts'):
            keys_to_remove = [key for key in states if ('.prompt_params' not in key)]
            for key in keys_to_remove:
                del states[key]
        for (key, value) in states.items():
            destination[key] = value
        return destination

    def lm_head(self):
        # Locate the LM head across architectures (roberta/bert/electra/...).
        for key in ['lm_head', 'predictions', 'cls', 'generator_lm_head']:
            model = getattr(self.transformer_model_for_mlm, key, None)
            if (model is not None):
                return model
        else:
            raise NotImplementedError

    def transformer_model(self):
        # Locate the base (headless) transformer across architectures.
        for key in ['roberta', 'albert', 'bert', 'electra']:
            model = getattr(self.transformer_model_for_mlm, key, None)
            if (model is not None):
                return model
        else:
            raise NotImplementedError

    def num_hidden_layers(self):
        return self.config.num_hidden_layers

    def embeddings_layer(self):
        return self.transformer_model.embeddings

    def get_output_dim(self):
        # With on_logits=True the embedder's output is vocabulary-sized.
        if (self.on_logits is True):
            return self.config.vocab_size
        return self.output_dim

    def _number_of_token_type_embeddings(self):
        if isinstance(self.config, XLNetConfig):
            return 3
        elif hasattr(self.config, 'type_vocab_size'):
            return self.config.type_vocab_size
        else:
            return 0

    def forward(self, token_ids: torch.LongTensor, mask: torch.BoolTensor, type_ids: Optional[torch.LongTensor]=None, segment_concat_mask: Optional[torch.BoolTensor]=None, **kwargs) -> torch.Tensor:
        """Run the masked LM; return logits (if `on_logits`) or the last
        hidden states."""
        if self.eval_mode:
            self.eval()
        # Drop all-zero type ids (no-op for the model); validate otherwise.
        if (type_ids is not None):
            max_type_id = type_ids.max()
            if (max_type_id == 0):
                type_ids = None
            else:
                if (max_type_id >= self._number_of_token_type_embeddings()):
                    raise ValueError('Found type ids too large for the chosen transformer model.')
                assert (token_ids.shape == type_ids.shape)
        # When folding long sequences, the concatenated segment mask applies.
        transformer_mask = (segment_concat_mask if (self._max_length is not None) else mask)
        assert (transformer_mask is not None)
        parameters = {'input_ids': token_ids, 'attention_mask': transformer_mask.float(), **kwargs}
        if (type_ids is not None):
            parameters['token_type_ids'] = type_ids
        parameters['return_dict'] = True
        transformer_output = self.transformer_model_for_mlm(**parameters)
        if self.on_logits:
            return transformer_output.logits
        return transformer_output.hidden_states[(- 1)]
def parse_node_str(node_str):
    """Parse one flattened graph-node line into a per-name description dict.

    The line has the shape
    ``%a : T, %b : T = op(...), %c : T = op2(...)`` with an optional trailing
    ``#``-comment. Returns ``{node_name: {'node_class', 'node_op',
    'output_id'[, 'shape']}}``.

    Change from the previous version: a large block of dead, commented-out
    code kept as a no-op string literal has been removed; logic is unchanged.
    """
    # Strip the trailing comment, if any.
    node_str = node_str.split(' #')[0]
    # Split into one group per '... = op(...)' assignment; the split eats the
    # closing paren and leading '%', so restore them.
    op_groups = node_str.split('), %')
    for i in range(len(op_groups) - 1):
        op_groups[i] += ')'
    for i in range(1, len(op_groups)):
        op_groups[i] = '%' + op_groups[i]
    node_dict = {}
    for op_group in op_groups:
        node_group, node_op = op_group.split(' = ')
        node_op = parse_node_op(node_op)
        # Each group may define several outputs: '%x : T, %y : T'.
        node_defs = node_group.split(', %')
        for i in range(1, len(node_defs)):
            node_defs[i] = '%' + node_defs[i]
        for i, node_def in enumerate(node_defs):
            node_name, node_type = node_def.split(' : ')
            node_class = classify_node_type(node_type)
            # First definition wins; duplicates are ignored.
            if node_name not in node_dict:
                node_dict[node_name] = {'node_class': node_class, 'node_op': node_op, 'output_id': i}
                if node_class == 'Tensor' and '(' in node_type and ')' in node_type:
                    shape_str = node_type.split('(')[-1].split(')')[0]
                    # NOTE(review): a one-dimensional shape like '(5)' has no
                    # ', ' and therefore records [] — confirm that is intended.
                    if ', ' in shape_str:
                        node_dict[node_name]['shape'] = [int(s) for s in shape_str.split(', ')]
                    else:
                        node_dict[node_name]['shape'] = []
    return node_dict
class VGG19FeatureExtractor(nn.Module):
    """Frozen VGG-19 that returns intermediate activations keyed by layer name.

    The forward pass stops as soon as the last requested layer has been
    computed, so later layers are never evaluated.
    """

    def __init__(self, layer_names: List[str]=VGG19LAYERS):
        super().__init__()
        self.backbone = tv_models.vgg19(pretrained=True)
        _set_requires_grad_false(self.backbone)
        # Conv/pool stack followed by the adaptive average pool, as one Sequential.
        modules = list(self.backbone.features.children())
        modules.append(self.backbone.avgpool)
        self.features = nn.Sequential(*modules)
        self.layer_name_mapping = VGG19MAPPING
        self.layer_names = layer_names
        self.pad = nn.ReflectionPad2d(padding=2)

    def forward(self, inp: Tensor) -> Dict[(str, Tensor)]:
        # Expand single-channel inputs to the three channels VGG expects.
        if inp.shape[1] == 1:
            inp = inp.repeat(1, 3, 1, 1)
        out = {}
        for name, module in self.features._modules.items():
            inp = module(inp)
            mapped = self.layer_name_mapping.get(name)
            if mapped is None:
                continue
            if mapped in self.layer_names:
                out[mapped] = inp
            if mapped == self.layer_names[-1]:
                break
        return out
class RandomRotate90(DualTransform):
    """Rotate the input by a random multiple of 90 degrees in the `axes` plane."""

    def __init__(self, axes=(0, 1), always_apply=False, p=0.5):
        super().__init__(always_apply, p)
        self.axes = axes

    def apply(self, img, factor):
        """Apply `factor` quarter-turns to `img`."""
        return np.rot90(img, factor, axes=self.axes)

    def get_params(self, **data):
        # factor in {0, 1, 2, 3}: identity, 90, 180 or 270 degrees.
        return dict(factor=random.randint(0, 3))
def get_launcher_path(root: Path = Path('/root/eclipse.jdt.ls/org.eclipse.jdt.ls.product/target/repository/plugins/')) -> Path:
    """Locate the Equinox launcher jar.

    Generalized: the plugins directory is now a parameter, defaulting to the
    previous hard-coded JDT-LS install location, so callers keep working
    unchanged while tests/alternate installs can pass their own root.

    Args:
        root: plugins directory to search.

    Returns:
        Path of the first ``org.eclipse.equinox.launcher_*.jar`` found.

    Raises:
        StopIteration: if no launcher jar exists under ``root`` (same
        behavior as before).
    """
    return next(root.glob('org.eclipse.equinox.launcher_*.jar'))
def build_frame_selector(cfg: CfgNode):
    """Instantiate the frame selector described by `cfg`.

    `cfg.STRATEGY` picks the selection strategy; the K-based strategies take
    `cfg.NUM_IMAGES` frames. The ALL strategy uses no selector (None).
    The enum constructor validates `cfg.STRATEGY`, so every reachable case
    is covered below.
    """
    strategy = FrameSelectionStrategy(cfg.STRATEGY)
    if strategy == FrameSelectionStrategy.ALL:
        return None
    if strategy == FrameSelectionStrategy.RANDOM_K:
        return RandomKFramesSelector(cfg.NUM_IMAGES)
    if strategy == FrameSelectionStrategy.FIRST_K:
        return FirstKFramesSelector(cfg.NUM_IMAGES)
    if strategy == FrameSelectionStrategy.LAST_K:
        return LastKFramesSelector(cfg.NUM_IMAGES)
def custom_vgg(custom_cfg, dataset_history=None, dataset2num_classes=None, network_width_multiplier=1.0, groups=1, shared_layer_info=None, **kwargs):
    """Build a custom VGG with batch norm from `custom_cfg`.

    Fix: the previous signature used mutable default arguments
    (``dataset_history=[]``, ``dataset2num_classes={}``,
    ``shared_layer_info={}``), which are shared across calls and can leak
    state between model builds. They now default to ``None`` and a fresh
    container is created per call; explicit arguments behave exactly as
    before.
    """
    if dataset_history is None:
        dataset_history = []
    if dataset2num_classes is None:
        dataset2num_classes = {}
    if shared_layer_info is None:
        shared_layer_info = {}
    return VGG(make_layers(custom_cfg, network_width_multiplier, batch_norm=True, groups=groups), dataset_history, dataset2num_classes, network_width_multiplier, shared_layer_info, **kwargs)
# NOTE(review): '_grad()' below is likely the tail of a stripped
# '@torch.no_grad()' decorator — confirm against the original file.
_grad()
def evaluate(model, criterion, valid_loader):
    """Run one validation pass; return diagnostics averaged over batches."""
    model.eval()
    acc_losses = {}
    for (i, (x, _)) in enumerate(valid_loader):
        x = x.to(args.device)
        output = model(x)
        (_, diagnostics) = criterion(x, output, model)
        # NOTE(review): Counter addition drops keys whose accumulated value is
        # not positive — a zero or negative diagnostic would silently vanish
        # from the totals; confirm all diagnostics are strictly positive.
        acc_losses = (Counter(acc_losses) + Counter(diagnostics))
        log_interval((i + 1), len(valid_loader), acc_losses)
    # Average each accumulated diagnostic over the number of batches.
    avg_losses = {k: (acc_losses[k] / len(valid_loader)) for k in acc_losses}
    return avg_losses
def parseTask(task: list) -> Tuple[(str, str, str)]:
    """Split a raw task record into its (api, label, src) fields."""
    # Field positions: 0 = API name, 1 = label, 2 = source.
    return (task[0], task[1], task[2])
# NOTE(review): '_registry(...)' below is likely the tail of a stripped
# registration decorator — confirm against the original file.
_registry('SelfKnowledgeDistillationLoss', 'pytorch')
class PyTorchSelfKnowledgeDistillationLossWrapper(object):
    """Validates a self-knowledge-distillation parameter dict and pairs it
    with the PyTorch loss class it configures."""

    def __init__(self, param_dict):
        self.param_dict = param_dict

    def _param_check(self):
        """Fill in defaults, validate, and return only the recognized keys.

        Note: mutates `self.param_dict` in place when inserting defaults.
        """
        param_dict = self.param_dict
        _params = ['temperature', 'layer_mappings', 'loss_types', 'loss_weights', 'add_origin_loss']
        layer_mappings = param_dict['layer_mappings']
        # Defaults: cross-entropy per mapping, uniform weights summing to 1.
        if ('loss_types' not in param_dict):
            param_dict['loss_types'] = (['CE'] * len(layer_mappings))
        if ('loss_weights' not in param_dict):
            param_dict['loss_weights'] = ([(1.0 / len(layer_mappings))] * len(layer_mappings))
        if ('add_origin_loss' not in param_dict):
            param_dict['add_origin_loss'] = False
        if ('temperature' not in param_dict):
            param_dict['temperature'] = 1.0
        assert ('layer_mappings' in param_dict), 'Key layer_mappings must be in input parameters.'
        assert all(((type(param_dict[k]) in [list, tuple]) for k in ['layer_mappings', 'loss_types', 'loss_weights'])), 'Type of loss_types and loss_weights must be list or tuple.'
        assert isinstance(param_dict['add_origin_loss'], bool), 'Type of add_origin_loss should be bool.'
        assert (len(param_dict['layer_mappings']) == len(param_dict['loss_types']) == len(param_dict['loss_weights'])), 'Length of layer_mappings, loss_types and loss_weights must be the same.'
        assert (param_dict['temperature'] > 0.0), 'Value of temperature must be positive.'
        # Each mapping group must pair a student layer with a teacher layer.
        for items in param_dict['layer_mappings']:
            assert all((((type(it) in [list, tuple]) and (len(it) == 2)) for it in items)), ((((('Elements of layer_mappings must be list or tuple and with length of 2.' + "element looks like ['resblock.1.feature.output,") + "'resblock.deepst.feature.output'], where ") + "'resblock.1.feature.output' and 'resblock.deepst.feature.output' ") + 'represent resblock feature output of the student model and feature output of the') + 'teacher model respectively.')
        assert all((any((isinstance(e, t) for t in [str])) for e in param_dict['loss_types'])), 'Type of loss_types element must be str.'
        assert all(((0.0 <= e <= 1.0) for e in param_dict['loss_weights'])), 'Element of loss_weights must be in interval [0, 1].'
        # Return only the whitelisted keys.
        new_dict = {}
        for k in _params:
            new_dict[k] = param_dict[k]
        return new_dict

    def __call__(self, **kwargs):
        # Returns (loss class, validated parameter dict) for the framework.
        return (PyTorchSelfKnowledgeDistillationLoss, self._param_check())
def exitTensorMol():
    """Log timer dump, total runtime and a goodbye message at shutdown."""
    PrintTMTIMER()
    elapsed = time.time() - TMSTARTTIME
    LOGGER.info('Total Time : %0.5f s', elapsed)
    LOGGER.info('~ Adios Homeshake ~')
class Time_usage_testing():
    """Accumulates wall-clock and CPU prediction times and reports averages.

    Fixes over the previous version:
      * ``time.clock()`` was removed in Python 3.8; wall time now uses
        ``time.time()`` and CPU time uses ``time.process_time()``.
      * ``stop_predict`` computed the wall total as ``time.clock() -
        self.start_time``, subtracting a CPU-clock reading from a wall-clock
        start (mismatched time bases); it now subtracts matching clocks.
      * The totals were overwritten (``=``) instead of accumulated, although
        ``result`` divides by ``time_count``; they now accumulate with ``+=``.
    """

    def __init__(self):
        pass

    def init(self, train):
        """Reset all counters (`train` is accepted for interface compatibility)."""
        self.start_time = 0
        self.start_time_cpu = 0
        self.time_sum_cpu = 0
        self.time_sum = 0
        self.time_count = 0

    def start_predict(self, algorithm):
        """Record wall and CPU clocks at prediction start."""
        self.start_time = time.time()
        self.start_time_cpu = time.process_time()

    def stop_predict(self, algorithm):
        """Accumulate elapsed wall/CPU time for one prediction."""
        self.time_count += 1
        self.time_sum_cpu += time.process_time() - self.start_time_cpu
        self.time_sum += time.time() - self.start_time

    def result(self):
        """Return labels and mean (wall, CPU) prediction times.

        The 'Predcition' typo is preserved intentionally — downstream code
        may match on the exact label strings.
        """
        return (('Predcition time:', 'Predcition time CPU:'),
                ((self.time_sum / self.time_count), (self.time_sum_cpu / self.time_count)))

    def result_second(self, second_time):
        """Label a wall-clock testing duration."""
        return ('Testing time seconds:', second_time)

    def result_cpu(self, cpu_time):
        """Label a CPU testing duration."""
        return ('Testing time cpu:', cpu_time)

    def reset(self):
        pass
# NOTE(review): '_grad()' below is likely the tail of a stripped
# '@torch.no_grad()' decorator — confirm against the original file.
_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a DALL-E dVAE encoder checkpoint into a FLAVA image codebook.

    Loads the original encoder (from disk or URL), renames its state dict to
    the HF layout, verifies the parameter counts match, then either saves the
    converted model or returns its state dict.
    """
    from dall_e import Encoder
    encoder = Encoder()
    # Accept either a local file or a downloadable URL.
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    # Some checkpoints pickle the whole module rather than a state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if (config_path is not None):
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    # Rename keys from the dall_e layout to the HF layout.
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    # Sanity check: converted model carries the same number of parameters.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=0.001)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
class DataHandler():
    """Configuration holder wiring a dataset class to its transforms.

    NOTE(review): the class-level list attributes are mutable and shared by
    every instance/subclass that does not override them — appending to them
    mutates global state; presumably subclasses replace them, confirm.
    """
    # Dataset class to instantiate (presumably set by subclasses).
    base_dataset = None
    # Transforms applied in addition to `common_transforms` (training only,
    # judging by the name — TODO confirm against callers).
    train_transforms = []
    # Transforms applied to every sample.
    common_transforms = [transforms.ToTensor()]
    # Optional explicit ordering of class labels.
    class_order = None
def _build_viz_err_obj(err_msg):
    """Wrap `err_msg` in the standard visualization-object structure, with an
    HTML error frame as the overall figure and empty per-item sections."""
    return {
        'name': 'Error',
        'overall': {'type': 'html', 'figure': _build_error_frame(err_msg)},
        'specific': [],
        'selector': {'columns': [], 'data': []},
    }
class RandomCrop(object):
    """Randomly crop a PIL-style image, or a list of images cropped at the
    same offset (useful for aligned frame stacks).

    Fixes over the previous version:
      * ``type(x) == list`` checks replaced with ``isinstance`` (also accepts
        list subclasses).
      * An oversized crop now raises a descriptive ``ValueError`` instead of
        ``random.randint``'s cryptic empty-range error (the exception type is
        unchanged).
    """

    def __init__(self, output_size):
        # Normalize a bare int into a (width, height) pair.
        if not isinstance(output_size, (tuple, list)):
            output_size = (output_size, output_size)
        self.output_size = output_size

    def __call__(self, input):
        """Crop `input` at one random offset; lists share the same box."""
        img = input[0] if isinstance(input, list) else input
        width, height = img.size[0], img.size[1]
        new_width, new_height = self.output_size
        if new_width > width or new_height > height:
            raise ValueError(
                f'crop size {self.output_size!r} exceeds image size {(width, height)!r}')
        left = random.randint(0, width - new_width)
        top = random.randint(0, height - new_height)
        box = (left, top, left + new_width, top + new_height)
        if isinstance(input, list):
            return [im.crop(box) for im in input]
        return input.crop(box)
def create_transform(input_size, is_training=False, use_prefetcher=False, no_aug=False, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, re_prob=0.0, re_mode='const', re_count=1, re_num_splits=0, crop_pct=None, tf_preprocessing=False, separate=False):
    """Build the input transform pipeline, dispatching on the mode flags.

    Dispatch order: TF preprocessing (with prefetcher) > training without
    augmentation > training with augmentation > evaluation. `separate=True`
    (split augmentation stages) is only supported by the augmented-training
    branch; the other branches assert it is off.
    """
    # A (C, H, W)-style size keeps only the spatial dims.
    if isinstance(input_size, (tuple, list)):
        img_size = input_size[(- 2):]
    else:
        img_size = input_size
    if (tf_preprocessing and use_prefetcher):
        assert (not separate), 'Separate transforms not supported for TF preprocessing'
        # Imported lazily so the TF dependency is only needed on this path.
        from timm.data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(is_training=is_training, size=img_size, interpolation=interpolation)
    elif (is_training and no_aug):
        assert (not separate), 'Cannot perform split augmentation with no_aug'
        transform = transforms_noaug_train(img_size, interpolation=interpolation, use_prefetcher=use_prefetcher, mean=mean, std=std)
    elif is_training:
        transform = transforms_imagenet_train(img_size, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, auto_augment=auto_augment, interpolation=interpolation, use_prefetcher=use_prefetcher, mean=mean, std=std, re_prob=re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits, separate=separate)
    else:
        assert (not separate), 'Separate transforms not supported for validation preprocessing'
        transform = transforms_imagenet_eval(img_size, interpolation=interpolation, use_prefetcher=use_prefetcher, mean=mean, std=std, crop_pct=crop_pct)
    return transform
def test_can_access_globals_from_original_scope():
    """A config scope imported from another module still sees that module's
    globals when evaluated."""
    from .enclosed_config_scope import cfg as conf_scope
    config = conf_scope()
    assert set(config.keys()) == {'answer'}
    assert config['answer'] == 42
class PrefixTransformer(pl.LightningModule):
    """Lightning module wrapping a T5 seq2seq model plus a trainable
    ControlPrefixes prefix model for prefix-tuning.

    The seq2seq model/config/tokenizer may be injected (e.g. for tests);
    otherwise everything is loaded from ``hparams.model_name_or_path``.
    Only the prefix model (``self.model``) is saved in checkpoints.
    """

    def __init__(self, hparams: argparse.Namespace, num_labels=None, config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
        rank_zero_info('the cache dir is {}'.format(cache_dir))
        if (config is None):
            self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
        else:
            self.config: PretrainedConfig = config
        # Mirror selected dropout/layerdrop hparams onto the model config
        # (only when the hparam is present and truthy).
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if (tokenizer is None):
            self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
            if self.hparams.new_tokens:
                # Demarcation tokens for (head, relation, tail) triples.
                new_tokens = ['<H>', '<R>', '<T>']
                if self.hparams.control_token_DART:
                    # One control token per DART sub-dataset.
                    new_tokens.extend(['<e2e>', '<webnlg_old>', '<WikiTableQuestions_lily>', '<WikiSQL_decl_sents>', '<WikiTableQuestions_mturk>', '<WikiSQL_lily>'])
                self.new_token_len = len(new_tokens)
                new_tokens_vocab = {}
                new_tokens_vocab['additional_special_tokens'] = []
                for (idx, t) in enumerate(new_tokens):
                    new_tokens_vocab['additional_special_tokens'].append(t)
                num_added_toks = self.tokenizer.add_special_tokens(new_tokens_vocab)
                rank_zero_info('We have added %s tokens', num_added_toks)
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        # NOTE(review): self.new_token_len is only assigned when
        # hparams.new_tokens is true, yet it is read unconditionally below --
        # confirm new_tokens is always set when this path runs.
        self.config.preseqlen = self.hparams.preseqlen
        if self.hparams.control_prefixes:
            # DART uses one control prefix; other setups use two.
            if self.hparams.DART:
                self.config.preseqlen += self.hparams.m_prefix_len
            else:
                self.config.preseqlen += (self.hparams.m_prefix_len * 2)
        self.config.use_prefix = True
        self.seq2seq_model_type = AutoModel
        if (seq2seq_model is None):
            self.seq2seq_model = T5ForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
        else:
            self.seq2seq_model = seq2seq_model
        # Resize embeddings to cover any newly added special tokens.
        self.seq2seq_model.resize_token_embeddings(len(self.tokenizer))
        # A second, independent config instance is customised for the prefix
        # model only (private `_my_arg_*` fields are read by ControlPrefixes).
        config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
        self.model_type = config_prefix.model_type
        if (self.hparams.optim_prefix == 'yes'):
            optim_prefix_bool = True
        elif (self.hparams.optim_prefix == 'no'):
            optim_prefix_bool = False
        else:
            assert False, 'model_args.optim_prefix should be either yes or no'
        rank_zero_info(self.model_type)
        config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
        config_prefix._my_arg_task_mode = self.hparams.task_mode
        config_prefix._my_arg_control = True
        config_prefix.train_weights = False
        config_prefix.optim_prefix = optim_prefix_bool
        config_prefix.preseqlen = self.hparams.preseqlen
        config_prefix.use_infix = (self.hparams.format_mode == 'infix')
        config_prefix.format_mode = self.hparams.format_mode
        config_prefix.prefix_dropout = self.hparams.prefix_dropout
        config_prefix.vocab_size = len(self.tokenizer)
        config_prefix.DART = self.hparams.DART
        # "Low data" mode is inferred from the output directory name.
        config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
        if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
            config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
            rank_zero_info(self.hparams.lowdata_token)
            rank_zero_info(config_prefix.lowdata_token)
            rank_zero_info(self.tokenizer.pad_token_id)
        config_prefix.mid_dim = self.hparams.mid_dim
        config_prefix.new_token_len = self.new_token_len
        if self.hparams.control_prefixes:
            config_prefix.m_prefix_mid_dim = self.hparams.m_prefix_mid_dim
            config_prefix.m_prefix_len = self.hparams.m_prefix_len
        if self.hparams.unseen:
            config_prefix.unseen = True
        if (self.hparams.prefixModel_name_or_path is not None):
            # NOTE(review): uses the raw `hparams` argument here instead of
            # self.hparams -- works, but inconsistent with the rest.
            rank_zero_info('loading from {}'.format(hparams.prefixModel_name_or_path))
            self.model = ControlPrefixes.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
        else:
            self.model = ControlPrefixes(config_prefix)

    def load_hf_checkpoint(self, *args, **kwargs):
        """Deliberately disabled: prefix-tuning never reloads the base model here."""
        assert False, 'why need to load model here?'
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the step-wise LR scheduler dict consumed by Lightning."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
        rank_zero_info(f'warm up {self.hparams.warmup_steps}')
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler

    def configure_optimizers(self):
        """Create the optimizer (Adafactor or AdamW) plus step-wise scheduler."""
        if self.hparams.different_scheduler:
            # Split the CEFR control-prefix embedding table from everything
            # else so the two groups can be scheduled separately.
            cefr_params = [p for (n, p) in self.named_parameters() if any(((nd in n) for nd in ['CEFR_matrices.wte']))]
            no_cefr_params = [p for (n, p) in self.named_parameters() if (not any(((nd in n) for nd in ['CEFR_matrices.wte'])))]
            optimizer_grouped_parameters = [{'params': no_cefr_params, 'weight_decay': self.hparams.weight_decay}, {'params': cefr_params, 'weight_decay': self.hparams.weight_decay}]
        # NOTE(review): optimizer_grouped_parameters is unbound when
        # different_scheduler is false -- a fallback grouping appears to have
        # been lost in extraction; confirm against the original source.
        if self.hparams.adafactor:
            optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer
        scheduler = bespoke_scheduler(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
        rank_zero_info(f'warm up {self.hparams.warmup_steps}')
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return ([optimizer], [scheduler])

    def test_step(self, batch, batch_nb):
        """Testing reuses the validation logic unchanged."""
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        # Delegates to validation_end -- presumably defined in a subclass.
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """Total number of optimizer steps over the whole training run.

        NOTE(review): referenced as ``self.total_steps`` (no call) in
        get_lr_scheduler/configure_optimizers, so this was presumably
        decorated ``@property`` before extraction stripped it. The division
        also makes the return a float despite the ``int`` annotation.
        """
        num_devices = max(1, self.hparams.gpus)
        if (self.hparams.original_batch_size is not None):
            effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
        else:
            effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
        dataset_size = len(self.train_loader.dataset)
        return ((dataset_size / effective_batch_size) * self.hparams.max_epochs)

    def setup(self, mode):
        """Lightning hook: eagerly build the train loader so total_steps works."""
        if (mode == 'fit'):
            self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        """Subclass hook: build a DataLoader for 'train'/'dev'/'test'."""
        raise NotImplementedError('You must implement this for your task')

    def train_dataloader(self):
        # Built once in setup('fit') and reused every epoch.
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Cache-file path for pre-tokenized features of the given split."""
        return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))

    @rank_zero_only  # decorator reconstructed; the '@' was lost in extraction
    def save_checkpoint(self, trainer) -> None:
        """No-op override; the real export happens in on_save_checkpoint."""
        rank_zero_info('Saving the the checkpoint.')
        return

    @rank_zero_only  # decorator reconstructed; the '@' was lost in extraction
    def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
        """Export the prefix model + tokenizer as a standalone HF checkpoint."""
        rank_zero_info('SEQ', self.seq2seq_model.shared.trainable_weight)
        # Copy the (possibly resized) shared embedding into the prefix model
        # so the saved checkpoint is self-contained.
        self.model.es.trainable_weight = self.seq2seq_model.shared.trainable_weight
        rank_zero_info('Prefix_stored_weight', self.model.es.trainable_weight)
        save_path = self.output_dir.joinpath('checkpoint-curr_best')
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
        rank_zero_info('SAVING TO checkpoint {}'.format(save_path))

    def add_model_specific_args(parser, root_dir):
        """Register all model/optimization CLI flags on ``parser``.

        NOTE(review): no self/cls parameter -- presumably a stripped
        @staticmethod. Also note that ``type=bool`` flags below are the
        well-known argparse pitfall (any non-empty string parses as True).
        """
        # --- model / prefix configuration ---
        parser.add_argument('--model_name_or_path', default='t5-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
        parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
        parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
        parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
        parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
        parser.add_argument('--different_scheduler', default=False, type=bool, help='use a different lr scheduler for control prefixes and main prefix')
        parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
        parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
        parser.add_argument('--use_dropout', default='no', type=str, help='whether to dropout the main model during training. ')
        parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer of themain prefix reparameterization')
        parser.add_argument('--m_prefix_mid_dim', default=512, type=int, help='the dimension of the intermediate layer of the control prefix reparameterizations')
        parser.add_argument('--m_prefix_len', default=1, type=int, help='the control prefix length')
        parser.add_argument('--unseen', default=False, type=bool, help='Initializing a control prefix for unseen categories to zero')
        parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
        parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token, ')
        parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use. ')
        # --- HF loading / caching ---
        parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
        parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
        parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
        # --- dropout overrides copied onto model.config in __init__ ---
        parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
        parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
        parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
        parser.add_argument('--control_prefixes', type=bool, default=False, help='if using control prefixes')
        parser.add_argument('--new_tokens', type=bool, default=False, help='if using demarcation tokens <H>, <R>, <T> that need to be learnable')
        parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
        # --- optimization ---
        parser.add_argument('--learning_rate', default=5e-05, type=float, help='The target learning rate.')
        parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
        parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
        parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
        parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
        parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
        parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
        parser.add_argument('--original_batch_size', default=None, type=int)
        parser.add_argument('--hf_checkpoint', default=False, type=bool, help='if want to save a hf model checkpoint from a lightning ckpt')
        parser.add_argument('--test_checkpoint', default=None, type=str)
        parser.add_argument('--train_batch_size', default=8, type=int)
        parser.add_argument('--eval_batch_size', default=6, type=int)
        parser.add_argument('--adafactor', action='store_true')
class FairseqLRScheduler(object):
    """Base class for fairseq learning-rate schedulers.

    Keeps track of the best validation loss seen so far; subclasses adjust
    the wrapped optimizer's learning rate in ``step``/``step_update``.
    """

    def __init__(self, args, optimizer):
        super().__init__()
        if not isinstance(optimizer, FairseqOptimizer):
            raise ValueError('optimizer must be an instance of FairseqOptimizer')
        self.args = args
        self.optimizer = optimizer
        self.best = None

    def add_args(parser):
        """Add scheduler-specific arguments to the parser (none by default)."""
        # NOTE(review): defined without self/cls -- presumably a stripped
        # @staticmethod in the original; verify against callers.
        pass

    def state_dict(self):
        """Return the scheduler state for checkpointing."""
        return {'best': self.best}

    def load_state_dict(self, state_dict):
        """Restore the scheduler state from a checkpoint."""
        self.best = state_dict['best']

    def step(self, epoch, val_loss=None):
        """Update at the end of an epoch, remembering the best val loss."""
        if val_loss is None:
            return
        self.best = val_loss if self.best is None else min(self.best, val_loss)

    def step_update(self, num_updates):
        """Update after each optimizer step; returns the current LR."""
        return self.optimizer.get_lr()
def set_output_path(context):
    """Ensure the configured output directory exists and return its path.

    Reads ``context[ConfigKW.PATH_OUTPUT]``, creates the directory
    (including parents) when missing, and returns the path unchanged.
    """
    # The previous implementation deep-copied the value; the path is used as
    # a plain string here, so the copy was unnecessary -- TODO confirm no
    # caller relied on receiving a distinct object.
    path_output = context[ConfigKW.PATH_OUTPUT]
    output_dir = Path(path_output)
    if not output_dir.is_dir():
        logger.info(f'Creating output path: {path_output}')
        # exist_ok guards against another process creating the directory
        # between the check above and this call.
        output_dir.mkdir(parents=True, exist_ok=True)
    else:
        logger.info(f'Output path already exists: {path_output}')
    return path_output
def _write_data_by_character(examples, output_directory):
    """Write one ``<character>.txt`` file per character.

    Parameters:
        examples: mapping of character name -> iterable of sound-bite strings.
        output_directory: directory receiving the .txt files; created
            (with parents) if it does not exist.

    Each sound bite is written on its own newline-terminated line.
    """
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(output_directory, exist_ok=True)
    for character_name, sound_bites in examples.items():
        filename = os.path.join(output_directory, character_name + '.txt')
        # Explicit encoding keeps output stable across platforms.
        with open(filename, 'w', encoding='utf-8') as output:
            output.writelines(sound_bite + '\n' for sound_bite in sound_bites)
def extract_citationIDs(application_identifier, line):
    """Collect citation IDs from column 7 of a tab-separated line.

    Every whitespace-separated token containing ``'sr-cit'`` contributes
    ``<application_identifier>_<four characters following 'sr-cit'>`` to
    the returned list, in order of appearance.
    """
    tokens = line.split('\t')[6].split(' ')
    citation_ids = []
    for token in tokens:
        marker = token.find('sr-cit')
        if marker == -1:
            continue
        citation_ids.append(application_identifier + '_' + token[marker + 6:marker + 10])
    return citation_ids
class BaseNStepReturnBuffer(BaseReplayBuffer):
    """Circular (T, B) replay buffer that pre-computes n-step discounted
    returns as samples arrive.

    T is the time capacity per environment and B the number of parallel
    environments; writes wrap around at T.
    """

    def __init__(self, example, size, B, discount=1, n_step_return=1):
        # Round capacity up to a whole number of (B-wide) time steps.
        self.T = T = math.ceil((size / B))
        self.B = B
        self.size = (T * B)
        self.discount = discount
        self.n_step_return = n_step_return
        # Next time index to write at.
        self.t = 0
        # NOTE(review): self.async_ is not set here -- presumably provided by
        # BaseReplayBuffer or an async mixin; confirm.
        self.samples = buffer_from_example(example, (T, B), share_memory=self.async_)
        if (n_step_return > 1):
            # Separate arrays hold the n-step return and its done flag.
            self.samples_return_ = buffer_from_example(example.reward, (T, B), share_memory=self.async_)
            self.samples_done_n = buffer_from_example(example.done, (T, B), share_memory=self.async_)
        else:
            # 1-step return is just the stored reward/done; alias them.
            self.samples_return_ = self.samples.reward
            self.samples_done_n = self.samples.done
        self._buffer_full = False
        # Valid-sample margins at the ends of the circular buffer.
        self.off_backward = n_step_return
        self.off_forward = 1

    def append_samples(self, samples):
        """Write a (T, B) batch at the cursor, wrapping around if needed.

        Returns the number of time steps written and the indices used.
        """
        (T, B) = get_leading_dims(samples, n_dim=2)
        assert (B == self.B)
        t = self.t
        if ((t + T) > self.T):
            # Wrap: scatter via an index array modulo capacity.
            idxs = (np.arange(t, (t + T)) % self.T)
        else:
            # Fast path: contiguous slice assignment.
            idxs = slice(t, (t + T))
        self.samples[idxs] = samples
        self.compute_returns(T)
        if ((not self._buffer_full) and ((t + T) >= self.T)):
            self._buffer_full = True
        self.t = ((t + T) % self.T)
        return (T, idxs)

    def compute_returns(self, T):
        """Update n-step returns made computable by the last T new steps."""
        if (self.n_step_return == 1):
            return  # aliases in __init__ already hold the 1-step values
        (t, s) = (self.t, self.samples)
        nm1 = (self.n_step_return - 1)
        if (((t - nm1) >= 0) and ((t + T) <= self.T)):
            # Contiguous case: write results in place via *_dest buffers.
            reward = s.reward[(t - nm1):(t + T)]
            done = s.done[(t - nm1):(t + T)]
            return_dest = self.samples_return_[(t - nm1):((t - nm1) + T)]
            done_n_dest = self.samples_done_n[(t - nm1):((t - nm1) + T)]
            discount_return_n_step(reward, done, n_step=self.n_step_return, discount=self.discount, return_dest=return_dest, done_n_dest=done_n_dest)
        else:
            # Wrap-around case: gather, compute, then scatter back.
            # NOTE(review): the modulus here is the local T (batch length),
            # not self.T (buffer capacity) -- looks like a latent bug; the
            # analogous upstream code uses the capacity. Confirm.
            idxs = (np.arange((t - nm1), (t + T)) % T)
            reward = s.reward[idxs]
            done = s.done[idxs]
            dest_idxs = idxs[:(- nm1)]
            (return_, done_n) = discount_return_n_step(reward, done, n_step=self.n_step_return, discount=self.discount)
            self.samples_return_[dest_idxs] = return_
            self.samples_done_n[dest_idxs] = done_n
def build_failed_report(results, include_warning=True):
    """Filter a results tree down to configurations that failed.

    Keeps top-level 'error' (and optionally 'warnings') entries per config,
    plus per-framework, per-architecture 'error' entries. Configs with no
    failures are omitted entirely.
    """
    report = {}
    for config_name, config_result in results.items():
        if 'error' in config_result:
            report[config_name] = {'error': config_result['error']}
        if include_warning and 'warnings' in config_result:
            report.setdefault(config_name, {})['warnings'] = config_result['warnings']
        for framework in FRAMEWORKS:
            if framework not in config_result:
                continue
            for arch_name, arch_result in config_result[framework].items():
                if 'error' not in arch_result:
                    continue
                # Create the nested config/framework/arch dicts on demand.
                dest = report.setdefault(config_name, {}).setdefault(framework, {}).setdefault(arch_name, {})
                dest['error'] = arch_result['error']
    return report
def generate_latency_model(agent_count, latency_type='deterministic'):
    """Build a pairwise LatencyModel for ``agent_count`` agents.

    'deterministic' spreads agents uniformly on a NYC-to-Seattle line and
    converts distances to light-travel nanoseconds; 'no_latency' makes all
    pairwise latencies zero.
    """
    assert latency_type in ['deterministic', 'no_latency'], 'Please select a correct latency_type'
    seed = np.random.randint(low=0, high=(2 ** 32))
    latency_rstate = np.random.RandomState(seed=seed)
    shape = (agent_count, agent_count)
    if latency_type == 'deterministic':
        # Physical span used to scale the random placements (meters).
        nyc_to_seattle_meters = 3866660
        distances = generate_uniform_random_pairwise_dist_on_line(0.0, nyc_to_seattle_meters, agent_count, random_state=latency_rstate)
        pairwise_latencies = meters_to_light_ns(distances)
    else:
        pairwise_latencies = np.zeros(shape, dtype=int)
    # Both variants feed a 'deterministic' LatencyModel; 'no_latency' simply
    # supplies an all-zero latency matrix.
    return LatencyModel(latency_model='deterministic', random_state=latency_rstate, connected=True, min_latency=pairwise_latencies)
class BaseAssigner(metaclass=ABCMeta): def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData]=None, **kwargs):
class FlaxBertPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when the ``flax`` backend is not installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises an informative error pointing at the missing flax backend.
        requires_backends(self, ['flax'])
class FileBinarizer():
    """Binarize a text file into an indexed dataset, optionally in parallel.

    The file is split into byte ranges; each worker binarizes its range into
    a temporary prefix, after which the chunks are merged into the final
    dataset and the temporaries removed.
    """

    def multiprocess_dataset(cls, input_file: str, dataset_impl: str, binarizer: Binarizer, output_prefix: str, vocab_size=None, num_workers=1) -> BinarizeSummary:
        # NOTE(review): takes `cls` with no visible decorator -- presumably a
        # stripped @classmethod; confirm against callers.
        final_summary = BinarizeSummary()
        # Byte offsets splitting the file into num_workers chunks.
        offsets = find_offsets(input_file, num_workers)
        (first_chunk, *more_chunks) = zip(offsets, offsets[1:])
        pool = None
        if (num_workers > 1):
            # Workers 1..n-1 handle the later chunks in subprocesses; the
            # first chunk is processed in this process below.
            pool = Pool(processes=(num_workers - 1))
            worker_results = [pool.apply_async(cls._binarize_chunk_and_finalize, args=(binarizer, input_file, start_offset, end_offset, _worker_prefix(output_prefix, worker_id), dataset_impl), kwds=({'vocab_size': vocab_size} if (vocab_size is not None) else {})) for (worker_id, (start_offset, end_offset)) in enumerate(more_chunks, start=1)]
            pool.close()
            pool.join()
            for r in worker_results:
                summ = r.get()
                final_summary.merge(summ)
        # Chunk 0 is binarized here and becomes the merge target.
        (final_ds, summ) = cls._binarize_file_chunk(binarizer, input_file, offset_start=first_chunk[0], offset_end=first_chunk[1], output_prefix=output_prefix, dataset_impl=dataset_impl, vocab_size=(vocab_size if (vocab_size is not None) else None))
        final_summary.merge(summ)
        if (num_workers > 1):
            for worker_id in range(1, num_workers):
                # Append each worker's output, then clean up its temp files.
                worker_output_prefix = _worker_prefix(output_prefix, worker_id)
                final_ds.merge_file_(worker_output_prefix)
                try:
                    os.remove(indexed_dataset.data_file_path(worker_output_prefix))
                    os.remove(indexed_dataset.index_file_path(worker_output_prefix))
                except Exception as e:
                    # Best effort: a leftover temp file is not fatal.
                    logger.error(f"couldn't remove {worker_output_prefix}.*", exc_info=e)
        idx_file = indexed_dataset.index_file_path(output_prefix)
        final_ds.finalize(idx_file)
        return final_summary

    def _binarize_file_chunk(binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None) -> tp.Tuple[(tp.Any, BinarizeSummary)]:
        """Binarize one byte range into a dataset builder (not finalized).

        NOTE(review): no self/cls parameter -- presumably a stripped
        @staticmethod.
        """
        bin_file = indexed_dataset.data_file_path(output_prefix)
        ds = indexed_dataset.make_builder(bin_file, impl=dataset_impl, vocab_size=vocab_size)
        summary = BinarizeSummary()
        with Chunker(PathManager.get_local_path(filename), offset_start, offset_end) as line_iterator:
            for line in line_iterator:
                ds.add_item(binarizer.binarize_line(line, summary))
        # The builder is returned unfinalized so chunk 0 can absorb merges.
        return (ds, summary)

    def _binarize_chunk_and_finalize(cls, binarizer: Binarizer, filename: str, offset_start: int, offset_end: int, output_prefix: str, dataset_impl: str, vocab_size=None):
        """Worker entry point: binarize a chunk and write its index file."""
        (ds, summ) = cls._binarize_file_chunk(binarizer, filename, offset_start, offset_end, output_prefix, dataset_impl, vocab_size=vocab_size)
        idx_file = indexed_dataset.index_file_path(output_prefix)
        ds.finalize(idx_file)
        # Only the summary crosses the process boundary.
        return summ
class _ConvBN(nn.Module):
    """Conv2d followed by a normalization layer (no activation).

    The convolution is bias-free: the norm layer's affine shift makes a
    conv bias redundant. ``**kwargs`` is accepted (and ignored) for
    signature compatibility with sibling blocks.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs):
        # Python-3 zero-argument super() replaces the legacy
        # super(_ConvBN, self).__init__() form.
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)

    def forward(self, x):
        """Apply convolution, then normalization."""
        return self.bn(self.conv(x))
def _get_graphs():
    """Return two small example GraphsTuples differing only in edge_idx."""
    def build(edge_idx):
        # Fresh arrays for every graph so the fixtures never share storage.
        return nn.GraphsTuple(nodes=np.array([[1.0], [2.0]]), edges=np.array([[[1.0], [2.0]], [[3.0], [4.0]]]), globals=np.array([1.0]), edge_idx=np.array(edge_idx))

    return [build([[0, 1], [0, 1]]), build([[0, 1], [2, 1]])]
class FlaxBertForMaskedLM():
    """Import-guard stub: any use reports that flax is required."""

    def __init__(self, *args, **kwargs):
        requires_flax(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): written like an instance method; upstream dummy
        # objects usually mark this as a classmethod -- confirm against the
        # stub generator.
        requires_flax(self)
class MvpModel(metaclass=DummyObject):
    """Placeholder emitted when the ``torch`` backend is not installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error pointing at the missing torch backend.
        requires_backends(self, ['torch'])
def main():
    """CLI entry point for (optionally distributed) test-set inference.

    Runs the detector over the test split, then saves / formats / evaluates
    / visualizes results depending on the CLI flags.
    """
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show), 'Please specify at least one operation (save/eval/format/show the results) with the argument "--out", "--eval", "--format_only" or "--show"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # cudnn benchmark speeds up inference for fixed input sizes.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Weights come from the checkpoint, not from the pretrained URL.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Prefer class names stored in the checkpoint; fall back to the dataset's.
    if ('CLASSES' in checkpoint['meta']):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # Only rank 0 writes / formats / evaluates results.
    if (rank == 0):
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.options is None) else args.options)
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
def conv(data, name, filters, kernel=3, stride=1, dilate=1, pad=(- 1), groups=1, no_bias=False, workspace=(- 1)):
    """Build an mx.sym.Convolution whose weight/bias variables carry
    lr-type attributes from the global ``cfg``.

    ``pad < 0`` requests "same"-style padding (odd kernels only);
    ``workspace < 0`` falls back to the configured default.
    """
    if kernel == 1:
        # Dilation is meaningless for a 1x1 kernel.
        dilate = 1
    if pad < 0:
        assert ((kernel % 2) == 1), 'Specify pad for an even kernel size'
        pad = ((kernel - 1) * dilate + 1) // 2
    if workspace < 0:
        workspace = cfg.get('workspace', 512)
    lr_type = cfg.get('lr_type', 'torch')
    with _attr_scope_lr(lr_type, 'weight'):
        weight = mx.sym.Variable('{}_weight'.format(name))
    # Arguments shared by both the biased and bias-free variants.
    common = dict(data=data, weight=weight, name=name, kernel=(kernel, kernel), stride=(stride, stride), dilate=(dilate, dilate), pad=(pad, pad), num_filter=filters, num_group=groups, workspace=workspace)
    if no_bias:
        return mx.sym.Convolution(no_bias=True, **common)
    with _attr_scope_lr(lr_type, 'bias'):
        bias = mx.sym.Variable('{}_bias'.format(name))
    return mx.sym.Convolution(bias=bias, no_bias=False, **common)
def main():
    """Entry point: build the UGATIT graph and run the requested phase."""
    args = parse_args()
    if args is None:
        exit()
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as sess:
        gan = UGATIT(sess, args)
        gan.build_model()
        show_all_variables()
        if args.phase == 'train':
            gan.train()
            print(' [*] Training finished!')
        if args.phase == 'test':
            gan.test(epoch=0)
            print(' [*] Test finished!')
def get_dataset(batch_size, dataset, is_training=True, inception_style=False, use_randaug=True):
    """Build the train/eval tf.data pipeline with mixup.

    NOTE(review): reads the module-level ``args`` (epochs, data_set, mixup),
    so behaviour depends on CLI flags parsed elsewhere -- confirm.
    """
    if is_training:
        if inception_style:
            # Repeat so the dataset never runs dry across all epochs.
            dataset = dataset.repeat((args.epochs + 1))

            def _pp(im, y):
                # Inception-style preprocessing: random crop via a distorted
                # bounding box, resize, random h-flip, scale to [-1, 1].
                channels = im.shape[(- 1)]  # NOTE(review): unused
                (begin, size, _) = tf.image.sample_distorted_bounding_box(tf.shape(im), tf.zeros([0, 0, 4], tf.float32), area_range=(0.05, 1.0), min_object_covered=0, use_image_if_no_bounding_boxes=True)
                im = tf.slice(im, begin, size)
                im.set_shape([None, None, 3])
                if (args.data_set == 'Imagenet'):
                    im = tf.image.resize(im, [224, 224])
                else:
                    im = tf.image.resize(im, [32, 32])
                if (tf.random.uniform(shape=[]) > 0.5):
                    im = tf.image.flip_left_right(im)
                return (tf.cast(((tf.cast(im, tf.float32) - 127.5) / 127.5), tf.float32), y)
            if use_randaug:
                # RandAugment runs before the inception crop/resize.
                randaug = RandAugment(num_layers=2, magnitude=15)
                dataset = dataset.map((lambda x, y: (randaug.distort(x), y)), num_parallel_calls=tf.data.AUTOTUNE)
                dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
            dataset = dataset.map((lambda im, y: _pp(im, y)), num_parallel_calls=tf.data.AUTOTUNE)
            # Two independently shuffled streams are zipped for mixup.
            train_ds_one = dataset.shuffle(50000).batch(batch_size)
            train_ds_two = dataset.shuffle(50000).batch(batch_size)
            train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
            train_ds_mu = train_ds.map((lambda ds_one, ds_two: mix_up(ds_one, ds_two, alpha=args.mixup)), num_parallel_calls=tf.data.AUTOTUNE)
            train_ds_out = train_ds_mu.prefetch(buffer_size=tf.data.AUTOTUNE)
            return train_ds_out
        else:
            dataset = dataset.repeat((args.epochs + 1))
            # Two independently shuffled streams are zipped for mixup.
            train_ds_one = dataset.shuffle(50000).batch(batch_size)
            train_ds_two = dataset.shuffle(50000).batch(batch_size)
            train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
            train_ds_mu = train_ds.map((lambda ds_one, ds_two: mix_up(ds_one, ds_two, alpha=args.mixup)), num_parallel_calls=tf.data.AUTOTUNE)
            train_ds_mu = train_ds_mu.prefetch(buffer_size=tf.data.AUTOTUNE)
            # Keras-layer augmentation: resize, random crop/flip/rotate and
            # ImageNet-style normalization, applied after mixup.
            random_augmentation = keras.Sequential([layers.experimental.preprocessing.Resizing(256, 256), layers.experimental.preprocessing.RandomCrop(224, 224), layers.experimental.preprocessing.RandomFlip('horizontal'), layers.experimental.preprocessing.RandomRotation(factor=0.2), layers.experimental.preprocessing.Normalization(mean=[0.485, 0.456, 0.406], variance=[0.052, 0.05, 0.05])], name='random_augmentation')
            train_ds_mu = train_ds_mu.map((lambda x, y: (random_augmentation(x, training=True), y)), num_parallel_calls=tf.data.AUTOTUNE)
            train_ds_out = train_ds_mu.prefetch(buffer_size=tf.data.AUTOTUNE)
            return train_ds_out
    elif inception_style:
        def _pp_val(im, y):
            # Eval preprocessing: resize only, scale to [-1, 1].
            if (args.data_set == 'Imagenet'):
                im = tf.image.resize(im, [224, 224])
            else:
                im = tf.image.resize(im, [32, 32])
            return (tf.cast(((tf.cast(im, tf.float32) - 127.5) / 127.5), tf.float32), y)
        dataset = dataset.map((lambda im, y: _pp_val(im, y)), num_parallel_calls=tf.data.AUTOTUNE)
        dataset = dataset.batch(batch_size)
        val_ds_mu = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
        return val_ds_mu
    else:
        dataset = dataset.batch(batch_size)
        # Deterministic-ish eval pipeline: resize, center crop, normalize.
        random_augmentation_val = keras.Sequential([layers.experimental.preprocessing.Resizing(256, 256), layers.experimental.preprocessing.CenterCrop(224, 224), layers.experimental.preprocessing.Normalization(mean=[0.485, 0.456, 0.406], variance=[0.052, 0.05, 0.05])], name='random_augmentation_val')
        dataset = dataset.map((lambda x_val, y_val: (random_augmentation_val(x_val, training=True), y_val)), num_parallel_calls=tf.data.AUTOTUNE)
        val_ds_mu = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
        return val_ds_mu
def concat_tensor_list(tensor_list, recurrent=False):
    """Combine a list of arrays into one array.

    Recurrent data is stacked (keeping the sequence as a leading axis);
    feed-forward data is concatenated along axis 0.
    """
    if recurrent:
        return np.array(tensor_list)
    return np.concatenate(tensor_list, axis=0)
def main():
    """Roll out the saved CartPole DQN policy forever, rendering each step."""
    env = gym.make('CartPole-v0')
    act = deepq.load('cartpole_model.pkl')
    while True:
        obs, done = env.reset(), False
        total_reward = 0
        while not done:
            env.render()
            # act expects a batch dimension; take the single action back out.
            action = act(obs[None])[0]
            obs, rew, done, _ = env.step(action)
            total_reward += rew
        print('Episode reward', total_reward)
class Transpose(Module):
    """Module wrapper around ``Tensor.permute`` for a fixed permutation."""

    def __init__(self, perm):
        super().__init__()
        self.perm = perm

    def forward(self, input):
        # The permutation must name every dimension exactly once.
        assert input.dim() == len(self.perm)
        return input.permute(self.perm)

    def from_onnx(parameters=None, attributes=None):
        # NOTE(review): no self/cls parameter -- presumably a stripped
        # @staticmethod, called as Transpose.from_onnx(...) by the converter.
        attributes = {} if attributes is None else attributes
        return Transpose(attributes['perm'])