code
stringlengths
101
5.91M
class MixtureGroupNorm(nn.Module):
    """Group normalization whose affine (scale/shift) parameters are a learned
    mixture of ``k`` per-channel parameter sets.

    ``F.group_norm`` runs without affine parameters (``weight``/``bias`` are
    registered as ``None``); an attention module then produces per-sample
    mixture coefficients that blend the ``k`` candidate (weight, bias) rows.
    """

    __constants__ = ['num_groups', 'num_channels', 'k', 'eps', 'weight', 'bias']

    def __init__(self, k, num_groups, num_channels, eps=1e-05):
        super(MixtureGroupNorm, self).__init__()
        self.k = k
        self.num_groups = num_groups
        self.num_channels = num_channels
        self.eps = eps
        self.affine = True
        # One (weight, bias) candidate row per mixture component: (k, C).
        self.weight_ = nn.Parameter(torch.Tensor(k, num_channels))
        self.bias_ = nn.Parameter(torch.Tensor(k, num_channels))
        # The plain group_norm call is unaffine; the mixture supplies scale/shift.
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)
        self.attention_weights = AttentionWeights(k, num_channels, norm='GN', group=1)
        self._init_params()

    def _init_params(self):
        # Scale near 1, shift near 0 so the module starts close to identity affine.
        nn.init.normal_(self.weight_, 1, 0.1)
        nn.init.normal_(self.bias_, 0, 0.1)

    def forward(self, x):
        output = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        size = output.size()
        # y: per-sample mixture coefficients over the k components — assumes
        # shape (N, k) from AttentionWeights; TODO confirm against its impl.
        y = self.attention_weights(x)
        # BUG FIX: original read `(y self.weight_)` — the operator was lost.
        # Blend the k candidate rows with the coefficients via matrix product:
        # (N, k) @ (k, C) -> (N, C).
        weight = torch.mm(y, self.weight_)
        bias = torch.mm(y, self.bias_)
        # Broadcast (N, C) -> (N, C, H, W) to match the normalized output.
        weight = weight.unsqueeze(-1).unsqueeze(-1).expand(size)
        bias = bias.unsqueeze(-1).unsqueeze(-1).expand(size)
        return (weight * output) + bias

    def extra_repr(self):
        return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
from dataclasses import dataclass, field  # local import: restores the stripped decorator's dependency


@dataclass
class ScriptArguments:
    """Command-line arguments for DPO training: model/tokenizer selection,
    dataset locations, PEFT/LoRA knobs and trainer hyper-parameters.

    FIX: the ``@dataclass`` decorator was evidently stripped (the class uses
    ``field()`` defaults and ``__post_init__``, which are inert without it).
    FIX: the ``device_map`` help string contained a raw embedded newline,
    which is a syntax error in a single-quoted literal.
    """

    # --- model / tokenizer ---
    model_type: str = field(default=None, metadata={'help': ('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))})
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The model checkpoint for weights initialization.'})
    tokenizer_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The tokenizer for weights initialization.'})
    load_in_8bit: bool = field(default=False, metadata={'help': 'Whether to load the model in 8bit mode or not.'})
    load_in_4bit: bool = field(default=False, metadata={'help': 'Whether to load the model in 4bit mode or not.'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=False, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    torch_dtype: Optional[str] = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
    device_map: Optional[str] = field(default='auto', metadata={'help': 'Device to map model to. If `auto` is passed, the device will be selected automatically.'})
    trust_remote_code: bool = field(default=True, metadata={'help': 'Whether to trust remote code when loading a model from a remote checkpoint.'})
    # --- data ---
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file_dir: Optional[str] = field(default=None, metadata={'help': 'The input jsonl data file folder.'})
    validation_file_dir: Optional[str] = field(default=None, metadata={'help': 'The evaluation jsonl file folder.'})
    template_name: Optional[str] = field(default='vicuna', metadata={'help': 'The prompt template name.'})
    per_device_train_batch_size: Optional[int] = field(default=4, metadata={'help': 'Train batch size per device'})
    per_device_eval_batch_size: Optional[int] = field(default=1, metadata={'help': 'Eval batch size per device'})
    max_source_length: Optional[int] = field(default=256, metadata={'help': 'Max length of prompt input text'})
    max_target_length: Optional[int] = field(default=256, metadata={'help': 'Max length of output text'})
    min_target_length: Optional[int] = field(default=4, metadata={'help': 'Min length of output text'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    validation_split_percentage: Optional[int] = field(default=1, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
    preprocessing_num_workers: Optional[int] = field(default=4, metadata={'help': 'The number of processes to use for the preprocessing.'})
    # --- PEFT / LoRA ---
    use_peft: bool = field(default=True, metadata={'help': 'Whether to use peft'})
    qlora: bool = field(default=False, metadata={'help': 'Whether to use qlora'})
    target_modules: Optional[str] = field(default=None)
    lora_rank: Optional[int] = field(default=8)
    lora_dropout: Optional[float] = field(default=0.05)
    lora_alpha: Optional[float] = field(default=16.0)
    peft_path: Optional[str] = field(default=None)
    # --- training loop ---
    do_train: bool = field(default=False, metadata={'help': 'Whether to run training.'})
    do_eval: bool = field(default=False, metadata={'help': 'Whether to run eval on the validation set.'})
    beta: Optional[float] = field(default=0.1, metadata={'help': 'The beta parameter for DPO loss'})
    learning_rate: Optional[float] = field(default=0.0005, metadata={'help': 'Learning rate'})
    lr_scheduler_type: Optional[str] = field(default='cosine', metadata={'help': 'The lr scheduler type'})
    warmup_steps: Optional[int] = field(default=100, metadata={'help': 'The number of warmup steps'})
    weight_decay: Optional[float] = field(default=0.05, metadata={'help': 'The weight decay'})
    optim: Optional[str] = field(default='adamw_hf', metadata={'help': 'The optimizer type'})
    fp16: Optional[bool] = field(default=True, metadata={'help': 'Whether to use fp16'})
    bf16: Optional[bool] = field(default=False, metadata={'help': 'Whether to use bf16'})
    gradient_checkpointing: Optional[bool] = field(default=True, metadata={'help': 'Whether to use gradient checkpointing'})
    gradient_accumulation_steps: Optional[int] = field(default=4, metadata={'help': 'The number of gradient accumulation steps'})
    save_steps: Optional[int] = field(default=50, metadata={'help': 'X steps to save the model'})
    eval_steps: Optional[int] = field(default=50, metadata={'help': 'X steps to evaluate the model'})
    logging_steps: Optional[int] = field(default=1, metadata={'help': 'X steps to log the model'})
    output_dir: Optional[str] = field(default='outputs-dpo', metadata={'help': 'The output directory'})
    max_steps: Optional[int] = field(default=200, metadata={'help': 'Number of steps to train'})
    eval_strategy: Optional[str] = field(default='steps', metadata={'help': 'Evaluation strategy'})
    remove_unused_columns: Optional[bool] = field(default=False, metadata={'help': 'Remove unused columns from the dataset if `datasets.Dataset` is used'})
    report_to: Optional[str] = field(default='tensorboard', metadata={'help': 'Report to wandb or tensorboard'})

    def __post_init__(self):
        """Validate the two mandatory arguments."""
        if self.model_type is None:
            raise ValueError('You must specify a valid model_type to run training.')
        if self.model_name_or_path is None:
            raise ValueError('You must specify a valid model_name_or_path to run training.')
def axiom(axiom):
    """Build an instance method that forwards to ``self._with_axiom(axiom)``.

    The returned function's ``__name__`` is set to the axiom string so it
    reads naturally when attached to a class.
    """
    def _applier(self):
        return self._with_axiom(axiom)

    _applier.__name__ = axiom
    return _applier
def main():
    """Entry point: parse CLI args, seed RNGs, then launch single- or
    multi-process training via ``main_worker``."""
    args = parser.parse_args()
    if (not os.path.exists(args.save_path)):
        os.makedirs(args.save_path, exist_ok=True)
    if (args.seed is not None):
        # Deterministic mode: reproducible runs at the cost of cuDNN autotuning.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if (args.gpu is not None):
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if ((args.dist_url == 'env://') and (args.world_size == (- 1))):
        # World size supplied by the distributed launcher's environment.
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = ((args.world_size > 1) or args.multiprocessing_distributed)
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # One worker process per GPU: total world size scales by local GPU count.
        args.world_size = (ngpus_per_node * args.world_size)
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Single process; args.gpu may be None (use default device selection).
        main_worker(args.gpu, ngpus_per_node, args)
def register_Ns3ArpL3Protocol_methods(root_module, cls):
    """Register the ns-3 ``ArpL3Protocol`` API on a pybindgen class wrapper.

    Each ``add_method``/``add_constructor`` call declares one C++ member for
    binding generation; no behavior runs here beyond registration.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
    cls.add_constructor([])
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_method('CreateCache', 'ns3::Ptr< ns3::ArpCache >', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Ipv4Interface >', 'interface')])
    # Packet receive hook invoked by the NetDevice layer.
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
    cls.add_method('Lookup', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'ipHeader'), param('ns3::Ipv4Address', 'destination'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::ArpCache >', 'cache'), param('ns3::Address *', 'hardwareDestination')])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    # Protected virtuals from ns3::Object's lifecycle.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def trim_collate(batch):
    """Collate a batch, zero-padding variable-length 2-D+ tensors (e.g. box
    features) along dim 0 to the batch maximum before stacking.

    Dispatches on the type of ``batch[0]``: tensors, numpy values, ints,
    floats, strings, mappings and sequences; raises TypeError otherwise.

    FIX: ``collections.Mapping``/``collections.Sequence`` were removed in
    Python 3.10 — the ABCs now live in ``collections.abc``.
    """
    _use_shared_memory = True
    error_msg = 'batch must contain tensors, numbers, dicts or lists; found {}'
    elem_type = type(batch[0])
    if torch.is_tensor(batch[0]):
        out = None
        if (1 < batch[0].dim()):
            # Variable-length case: pad every item to the longest along dim 0.
            max_num_boxes = max([x.size(0) for x in batch])
            if _use_shared_memory:
                # Stack directly into shared memory so DataLoader workers
                # avoid an extra copy when sending to the main process.
                numel = ((len(batch) * max_num_boxes) * batch[0].size((- 1)))
                storage = batch[0].storage()._new_shared(numel)
                out = batch[0].new(storage)
            return torch.stack([F.pad(x, (0, 0, 0, (max_num_boxes - x.size(0)))).data for x in batch], 0, out=out)
        else:
            if _use_shared_memory:
                numel = sum([x.numel() for x in batch])
                storage = batch[0].storage()._new_shared(numel)
                out = batch[0].new(storage)
            return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        elem = batch[0]
        if (elem_type.__name__ == 'ndarray'):
            # Reject string/object dtypes, which torch.from_numpy cannot take.
            if (re.search('[SaUO]', elem.dtype.str) is not None):
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if (elem.shape == ()):
            # numpy scalars: promote through the matching python type.
            py_type = (float if elem.dtype.name.startswith('float') else int)
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.abc.Mapping):
        # NOTE(review): dict values go through default_collate (no trimming) —
        # presumably intentional; confirm against callers.
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [trim_collate(samples) for samples in transposed]
    raise TypeError(error_msg.format(type(batch[0])))
def softmax(g, input, dim=None):
    """ONNX symbolic for softmax.

    ONNX Softmax (with ``axis_i``) only matches PyTorch when the softmax runs
    over the last dimension; any other dim is reported as unimplemented.

    BUG FIX: the default ``dim=None`` reached ``dim < 0`` and raised
    TypeError. None is now treated as the last dimension — presumably the
    intended implicit-dim behavior; confirm against the exporter's callers.
    """
    rank = len(input.type().sizes())
    if dim is None:
        dim = rank - 1
    if dim < 0:
        dim = rank + dim
    if rank != (dim + 1):
        return _unimplemented('dim', 'ONNX and PyTorch use different strategies to split the input.')
    return g.op('Softmax', input, axis_i=dim)
def find_zero_result(fn, l):
    """Return the unique element of ``l`` whose interval ``fn(v, prec)``
    contains zero, refining precision until the answer is unambiguous.

    Raises ValueError as soon as a precision level yields no zero at all.
    """
    for prec in prec_seq():
        candidate = None
        ambiguous = False
        for value in l:
            if not fn(value, prec).contains_zero():
                continue
            if candidate is not None:
                # Two candidates at this precision — refine and retry.
                ambiguous = True
                break
            candidate = value
        if ambiguous:
            continue
        if candidate is None:
            raise ValueError('find_zero_result could not find any zeroes')
        return candidate
def cantilever_problem(config_top):
    """Set up a cashocs topology-optimization problem for a 2-D cantilever
    in linear elasticity with a point load at (2.0, 0.5).

    Returns the assembled ``cashocs.TopologyOptimizationProblem``.
    """
    gamma = 100.0  # volume-penalty weight in the cost functional
    # Material parameters (Lame coefficients from E, nu).
    E = 1.0
    nu = 0.3
    plane_stress = True
    mu = (E / (2.0 * (1.0 + nu)))
    lambd = ((E * nu) / ((1.0 + nu) * (1.0 - (2.0 * nu))))
    if plane_stress:
        # Plane-stress correction of lambda.
        lambd = (((2 * mu) * lambd) / (lambd + (2.0 * mu)))
    # Ersatz-material stiffness inside/outside the structure.
    alpha_in = 1.0
    alpha_out = 0.001
    (mesh, subdomains, boundaries, dx, ds, dS) = cashocs.regular_mesh(16, length_x=2.0, diagonal='crossed')
    V = VectorFunctionSpace(mesh, 'CG', 1)
    CG1 = FunctionSpace(mesh, 'CG', 1)
    DG0 = FunctionSpace(mesh, 'DG', 0)
    alpha = Function(DG0)            # cellwise stiffness from the level set
    indicator_omega = Function(DG0)  # cellwise 1/0 indicator of the structure
    psi = Function(CG1)              # level-set function
    psi.vector()[:] = (- 1.0)        # start with everything inside the domain

    def eps(u):
        # Symmetric gradient (linearized strain).
        return (Constant(0.5) * (grad(u) + grad(u).T))

    def sigma(u):
        # Linear-elastic stress tensor.
        return ((Constant((2.0 * mu)) * eps(u)) + ((Constant(lambd) * tr(eps(u))) * Identity(2)))

    class Delta(UserExpression):
        """Approximate point load: nonzero only at the corner node (2.0, 0.5)."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def eval(self, values, x):
            if (near(x[0], 2.0) and near(x[1], 0.5)):
                values[0] = (3.0 / mesh.hmax())
            else:
                values[0] = 0.0

        def value_shape(self):
            return ()

    delta = Delta(degree=2)
    g = (delta * Constant((0.0, (- 1.0))))  # downward point traction
    u = Function(V)
    v = Function(V)
    # State equation: elasticity weak form with ersatz stiffness alpha.
    F = (((alpha * inner(sigma(u), eps(v))) * dx) - (dot(g, v) * ds(2)))
    bcs = cashocs.create_dirichlet_bcs(V, Constant((0.0, 0.0)), boundaries, 1)
    # Compliance plus volume penalty.
    J = cashocs.IntegralFunctional((((alpha * inner(sigma(u), eps(u))) * dx) + ((Constant(gamma) * indicator_omega) * dx)))
    # Topological-derivative expressions inside/outside the structure.
    kappa = ((lambd + (3.0 * mu)) / (lambd + mu))
    r_in = (alpha_out / alpha_in)
    r_out = (alpha_in / alpha_out)
    dJ_in = ((Constant(((((alpha_in * (r_in - 1.0)) / ((kappa * r_in) + 1.0)) * (kappa + 1.0)) / 2.0)) * ((Constant(2.0) * inner(sigma(u), eps(u))) + ((Constant((((r_in - 1.0) * (kappa - 2.0)) / ((kappa + (2 * r_in)) - 1.0))) * tr(sigma(u))) * tr(eps(u))))) + Constant(gamma))
    dJ_out = ((Constant((((((- alpha_out) * (r_out - 1.0)) / ((kappa * r_out) + 1.0)) * (kappa + 1.0)) / 2.0)) * ((Constant(2.0) * inner(sigma(u), eps(u))) + ((Constant((((r_out - 1.0) * (kappa - 2.0)) / ((kappa + (2 * r_out)) - 1.0))) * tr(sigma(u))) * tr(eps(u))))) + Constant(gamma))

    def update_level_set():
        # Project the level set to the cellwise stiffness and indicator fields.
        cashocs.interpolate_levelset_function_to_cells(psi, alpha_in, alpha_out, alpha)
        cashocs.interpolate_levelset_function_to_cells(psi, 1.0, 0.0, indicator_omega)

    psi.vector()[:] = (- 1.0)
    top = cashocs.TopologyOptimizationProblem(F, bcs, J, u, v, psi, dJ_in, dJ_out, update_level_set, config=config_top)
    return top
def find_package(import_name):
    """Locate the package for *import_name* and return ``(prefix, package_path)``.

    ``prefix`` is the installation root the package lives under (the Python
    prefix, a virtualenv/site root, or None for a plain source checkout).
    """
    root_mod_name = import_name.partition('.')[0]
    package_path = _find_package_path(root_mod_name)
    py_prefix = os.path.abspath(sys.prefix)

    # Installed straight under the interpreter prefix.
    if package_path.startswith(py_prefix):
        return (py_prefix, package_path)

    site_parent, site_folder = os.path.split(package_path)
    if site_folder.lower() != 'site-packages':
        # Not installed — likely running from a source checkout.
        return (None, package_path)

    # Walk up from .../lib/site-packages (or .../lib/pythonX.Y/site-packages)
    # to the environment root.
    parent, folder = os.path.split(site_parent)
    if folder.lower() == 'lib':
        base_dir = parent
    elif os.path.basename(parent).lower() == 'lib':
        base_dir = os.path.dirname(parent)
    else:
        base_dir = site_parent
    return (base_dir, package_path)
class Norm():
    """Abstract interface for a perturbation norm.

    Subclasses must provide all three operations; calling any of them on the
    base class raises NotImplementedError.
    """

    def __call__(self, perturbations):
        """Measure the norm of *perturbations*."""
        raise NotImplementedError()

    def normalize(self, gradients):
        """Return *gradients* rescaled to unit norm."""
        raise NotImplementedError()

    def scale(self, gradients):
        """Return *gradients* scaled according to this norm."""
        raise NotImplementedError()
class LitTrainer():
    """Two-phase trainer for LIT-style block-wise knowledge distillation.

    Phase 1 (``_train_lit``) trains each learner section against the matching
    teacher's intermediate features, one thread per section. Phase 2
    (``_fine_tune``) unfreezes the whole learner and fine-tunes it end to end
    (optionally in fp16 with a master-weight copy). Configuration comes from a
    single object ``f`` whose attributes are copied in ``__init__``.
    """

    def __init__(self, f):
        cudnn.benchmark = True
        # ``beta`` weights the full-model distillation loss during LIT training;
        # kept as a module-level global because section threads read it too.
        global beta
        beta = f.beta
        self.distributed = (torch.cuda.device_count() > 1)
        if (not os.path.exists(f.save_dir)):
            os.makedirs(f.save_dir)
        if (not os.path.exists(f.log_dir)):
            os.makedirs(f.log_dir)
        self.writer = SummaryWriter(f.log_dir)
        self.save_dir = f.save_dir
        self.model_name = f.model_name
        self.save_every = f.save_every
        self.start_epoch = f.start_epoch
        self.start_segment = f.start_segment  # 0 = LIT, 2 = fine-tune resume point
        self.half = f.half
        self.lit_sections = f.lit_sections  # {section_id: section wrapper}
        self.sequence = f.sequence          # per-phase hyperparameters
        self.momentum = f.momentum
        self.weight_decay = f.weight_decay
        self.loss_scaling = f.loss_scaling
        self._make_optimizers(self.lit_sections)
        # Learner runs model-parallel across the configured sections.
        self.trainable_model = LearnerModelParallel(f.trainable_model, self.lit_sections)
        for section in self.lit_sections.values():
            section.build(half=self.half)
        self.base_model = f.base_model
        if self.half:
            self.base_model = fp16.FP16(self.base_model)
        # Teacher is frozen and data-parallel.
        self.base_model = nn.DataParallel(self.base_model).cuda()
        for param in self.base_model.parameters():
            param.requires_grad = False
        self.lit_train_loader = f.lit_training_data_loader
        self.fine_tuning_loader = f.fine_tuning_data_loader
        self.val_loader = f.val_data_loader
        self.lit_criterion = f.lit_criterion()
        self.fine_tuning_criterion = f.fine_tuning_criterion()
        self.best_accuracy1 = 0

    def _make_optimizers(self, lit_sections):
        """Attach an SGD optimizer and MultiStepLR scheduler to every section."""
        # Only honor start_epoch when resuming inside the LIT phase.
        lit_start_epoch = (self.start_epoch if (self.start_segment == 0) else 0)
        for (k, v) in lit_sections.items():
            v.loss_scaling = self.loss_scaling
            v.set_optimizer((lambda params: torch.optim.SGD(params, lr=self.sequence['lit']['lr'], momentum=self.momentum, weight_decay=self.weight_decay)), train_params=v.network.parameters())
            if (lit_start_epoch > 0):
                v.set_initial_lr = True
            v.set_lr_scheduler((lambda optimizer: torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.sequence['lit']['milestones'], last_epoch=(lit_start_epoch - 1), gamma=0.5)))

    def train(self):
        """Run LIT section training followed by full-model fine-tuning."""
        self._train_lit()
        torch.cuda.empty_cache()
        self.trainable_model.cpu()
        # Drop the model-parallel wrapper before fine-tuning re-wraps it.
        self.trainable_model = self.trainable_model.module
        self._fine_tune()

    def _train_lit(self):
        """Phase 1: train each learner section against teacher features."""
        if (self.start_segment > 0):
            return  # resuming past the LIT phase
        # Freeze everything, then unfreeze only the LIT sections.
        for param in self.trainable_model.parameters():
            param.requires_grad = False
        self._unfreeze_training_model_sections(*list(self.lit_sections.keys()))
        for epoch in tqdm(range(self.start_epoch, self.sequence['lit']['epochs']), desc='LIT Training', dynamic_ncols=True):
            (t_losses, t_data_time, t_batch_time) = self._lit_train_one_epoch(epoch)
            (v_losses, v_data_time, v_batch_time) = self._lit_eval_one_epoch(epoch)
            for section in self.lit_sections.values():
                section.lr_scheduler.step()
            for (i, (t_loss, v_loss)) in enumerate(zip(t_losses, v_losses)):
                self.writer.add_scalar('loss/section{t}/train'.format(t=(i + 1)), t_loss, epoch)
                self.writer.add_scalar('loss/section{t}/validation'.format(t=(i + 1)), v_loss, epoch)
            # NOTE(review): `epoch % self.save_every` is truthy when epoch is
            # NOT a multiple of save_every — this looks inverted; confirm.
            if ((epoch > 0) and (epoch % self.save_every)):
                torch.save(self.trainable_model.module.state_dict(), os.path.join(self.save_dir, str(('checkpoint_' + self.model_name))))

    def _fine_tune(self):
        """Phase 2: unfreeze the full learner and fine-tune end to end."""
        torch.cuda.empty_cache()
        if (self.start_segment <= 2):
            for param in self.trainable_model.parameters():
                param.requires_grad = True
            if self.half:
                self.trainable_model = nn.DataParallel(fp16.FP16(self.trainable_model)).cuda()
                # fp32 master copy of the weights; SGD updates these, then
                # they are copied back into the fp16 model each step.
                param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in self.trainable_model.parameters()]
                for param in param_copy:
                    param.requires_grad = True
                optimizer = torch.optim.SGD(param_copy, self.sequence['full_model']['lr'], momentum=self.momentum, weight_decay=self.weight_decay)
            else:
                param_copy = None
                self.trainable_model = nn.DataParallel(self.trainable_model).cuda()
                optimizer = torch.optim.SGD(self.trainable_model.parameters(), self.sequence['full_model']['lr'], momentum=self.momentum, weight_decay=self.weight_decay)
            start_epoch = (self.start_epoch if (self.start_segment == 2) else 0)
            if (start_epoch > 0):
                # MultiStepLR with last_epoch > -1 requires 'initial_lr'.
                for group in optimizer.param_groups:
                    group.setdefault('initial_lr', group['lr'])
            lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.sequence['full_model']['milestones'], last_epoch=(start_epoch - 1))
            self._run_fine_tuning_epochs(optimizer, lr_scheduler, 'full model', self.sequence['full_model']['epochs'], start_epoch=start_epoch, params=param_copy)

    def _run_fine_tuning_epochs(self, optimizer, lr_scheduler, tag, num_epochs, start_epoch=0, params=None):
        """Loop fine-tuning train/eval epochs, logging and checkpointing."""
        for epoch in tqdm(range(start_epoch, num_epochs), desc='Fine Tuning {}'.format(tag), dynamic_ncols=True):
            (t_loss, t_data_time, t_batch_time, t_accuracy1) = self._fine_tune_train_one_epoch(tag=tag, optimizer=optimizer, current_epoch=epoch)
            (v_loss, v_data_time, v_batch_time, v_accuracy1) = self._fine_tune_evaluate_one_epoch(tag=tag, current_epoch=epoch)
            self.writer.add_scalar('loss/{t}/train'.format(t=tag), t_loss, epoch)
            self.writer.add_scalar('loss/{t}/validation'.format(t=tag), v_loss, epoch)
            self.writer.add_scalar('prec1/{t}/train'.format(t=tag), t_accuracy1, epoch)
            self.writer.add_scalar('prec1/{t}/validation'.format(t=tag), v_accuracy1, epoch)
            best = (v_accuracy1 > self.best_accuracy1)
            self.best_accuracy1 = max(v_accuracy1, self.best_accuracy1)
            if best:
                tqdm.write('Saving the best model with {a}'.format(a=v_accuracy1))
                torch.save(self.trainable_model.module.state_dict(), os.path.join(self.save_dir, self.model_name))
            # NOTE(review): same possibly-inverted modulo condition as in
            # _train_lit — confirm the intended checkpoint cadence.
            if ((epoch > 0) and (epoch % self.save_every)):
                torch.save(self.trainable_model.module.state_dict(), os.path.join(self.save_dir, str(('checkpoint_' + self.model_name))))
            lr_scheduler.step()

    def _lit_train_one_epoch(self, current_epoch):
        """One LIT training epoch; returns (per-section avg losses, data time, batch time)."""
        global beta
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = [AverageMeter() for i in range(len(self.lit_sections.keys()))]
        self.trainable_model.train()
        self.base_model.eval()
        criterion = self.lit_criterion
        end = time.time()
        data_loader = self.lit_train_loader
        # Shared lock for the per-section worker threads' logging updates.
        lock = threading.Lock()
        for (i, (inp, target)) in tqdm(enumerate(data_loader), desc='LIT Training: Epoch {epoch}'.format(epoch=current_epoch), dynamic_ncols=True, total=len(data_loader), leave=True):
            batch_size = target.size(0)
            assert (batch_size < (2 ** 32)), 'Size is too large! correct will overflow'
            data_time.update((time.time() - end))
            input_var = inp.cuda().detach()
            with torch.no_grad():
                (teacher_features, soft_targets) = self.base_model(input_var, get_features=True)
            # Sections consume the teacher's intermediate features.
            learner_features = self.trainable_model(teacher_features)
            # Full forward through the (unwrapped) learner for the end-to-end loss.
            learner_out = self.trainable_model.module(input_var)
            full_loss = (self.fine_tuning_criterion(learner_out, soft_targets, target) * beta)
            full_loss.backward(retain_graph=True)
            # One thread per section: each runs its own optimizer step.
            jobs = []
            for id in self.lit_sections.keys():
                learner_output = learner_features[id]
                target_feature = teacher_features[id].cuda(self.lit_sections[id].device)
                losses_log = losses[(id - 1)]
                p = threading.Thread(target=self.lit_sections[id].step, args=(learner_output, target_feature, losses_log, criterion, lock))
                jobs.append(p)
            for job in jobs:
                job.start()
            for job in jobs:
                job.join()
            batch_time.update((time.time() - end))
            end = time.time()
        # Free the large per-batch tensors before returning.
        del inp, teacher_features, input_var, learner_output
        return ([loss.avg for loss in losses], data_time.avg, batch_time.avg)

    def _lit_eval_one_epoch(self, current_epoch):
        """One LIT validation epoch (no gradient updates)."""
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = [AverageMeter() for i in range(len(self.lit_sections.keys()))]
        self.trainable_model.eval()
        self.base_model.eval()
        criterion = self.lit_criterion
        end = time.time()
        lock = threading.Lock()
        with torch.no_grad():
            for (i, (inp, target)) in tqdm(enumerate(self.val_loader), desc='LIT Validation: Epoch {epoch}'.format(epoch=current_epoch), dynamic_ncols=True, total=len(self.val_loader), leave=True):
                batch_size = target.size(0)
                assert (batch_size < (2 ** 32)), 'Size is too large! correct will overflow'
                data_time.update((time.time() - end))
                input_var = inp.cuda().detach()
                (teacher_features, _) = self.base_model(input_var, get_features=True)
                learner_features = self.trainable_model(teacher_features)
                jobs = []
                for id in self.lit_sections.keys():
                    learner_output = learner_features[id]
                    target_feature = teacher_features[id].cuda(self.lit_sections[id].device)
                    losses_log = losses[(id - 1)]
                    p = threading.Thread(target=self.lit_sections[id].eval, args=(learner_output, target_feature, losses_log, criterion, lock))
                    jobs.append(p)
                for job in jobs:
                    job.start()
                for job in jobs:
                    job.join()
                batch_time.update((time.time() - end))
                end = time.time()
            del inp, target, teacher_features, input_var, learner_output
        return ([loss.avg for loss in losses], data_time.avg, batch_time.avg)

    def _fine_tune_train_one_epoch(self, tag, optimizer, current_epoch, params=None):
        """One fine-tuning training epoch; returns (loss, data time, batch time, top-1)."""
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        accuracies = AverageMeter()
        self.trainable_model.train()
        self.base_model.eval()
        criterion = self.fine_tuning_criterion
        end = time.time()
        data_loader = self.fine_tuning_loader
        for (i, (inp, target)) in tqdm(enumerate(data_loader), desc='Fine Tuning {tag}: Epoch {epoch}'.format(tag=tag, epoch=current_epoch), dynamic_ncols=True, total=len(data_loader), leave=True):
            batch_size = target.size(0)
            assert (batch_size < (2 ** 32)), 'Size is too large! correct will overflow'
            data_time.update((time.time() - end))
            target_var = target.cuda(non_blocking=True)
            # DataParallel scatters inputs itself when distributed.
            input_var = (inp.cuda() if (not self.distributed) else inp)
            with torch.no_grad():
                teacher_outputs = self.base_model(input_var).detach().cuda(non_blocking=True)
            learner_output = self.trainable_model(input_var).cuda()
            loss = (criterion(learner_output, teacher_outputs, target_var) * self.loss_scaling)
            if self.half:
                # fp16 path: gradients flow into the fp32 master copy (params),
                # get unscaled, stepped, then copied back into the fp16 model.
                self.trainable_model.zero_grad()
                loss.backward()
                fp16.set_grad(params, list(self.trainable_model.parameters()))
                if (self.loss_scaling != 1):
                    for param in params:
                        param.grad.data = (param.grad.data / self.loss_scaling)
                optimizer.step()
                fp16.copy_in_params(self.trainable_model, params)
            else:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            losses.update(loss.item(), batch_size)
            top_correct = correct(learner_output, target, top=(1,))[0]
            accuracies.update((top_correct.item() * (100.0 / batch_size)), batch_size)
            batch_time.update((time.time() - end))
            end = time.time()
        del inp, target, loss, input_var, target_var, learner_output
        return (losses.avg, data_time.avg, batch_time.avg, accuracies.avg)

    def _fine_tune_evaluate_one_epoch(self, tag, current_epoch):
        """One fine-tuning validation epoch; returns (loss, data time, batch time, top-1)."""
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        accuracies = AverageMeter()
        self.trainable_model.eval()
        self.base_model.eval()
        criterion = self.fine_tuning_criterion
        end = time.time()
        data_loader = self.val_loader
        with torch.no_grad():
            for (i, (inp, target)) in tqdm(enumerate(data_loader), desc='Evaluating {tag}: Epoch {epoch}'.format(tag=tag, epoch=current_epoch), dynamic_ncols=True, total=len(data_loader), leave=True):
                batch_size = target.size(0)
                assert (batch_size < (2 ** 32)), 'Size is too large! correct will overflow'
                data_time.update((time.time() - end))
                target_var = target.cuda(non_blocking=True)
                input_var = (inp.cuda() if (not self.distributed) else inp)
                teacher_outputs = self.base_model(input_var).detach().cuda(non_blocking=True)
                learner_output = self.trainable_model(input_var).cuda()
                loss = (criterion(learner_output, teacher_outputs, target_var) * self.loss_scaling)
                losses.update(loss.item(), batch_size)
                top_correct = correct(learner_output, target, top=(1,))[0]
                accuracies.update((top_correct.item() * (100.0 / batch_size)), batch_size)
                batch_time.update((time.time() - end))
                end = time.time()
            del input_var, target_var, inp, target, learner_output
        return (losses.avg, data_time.avg, batch_time.avg, accuracies.avg)

    def close_log_writer(self):
        """Flush and close the TensorBoard writer."""
        self.writer.close()

    def validate_model(self):
        """Load the best checkpoint and report (avg CE loss, top-1 accuracy)."""
        best_model = torch.load(os.path.join(self.save_dir, self.model_name))
        try:
            self.trainable_model.module.load_state_dict(best_model)
        except:
            # Model may not be wrapped (no .module) at this point.
            self.trainable_model.load_state_dict(best_model)
        self.trainable_model.cuda()
        if isinstance(self.trainable_model, LearnerModelParallel):
            self.trainable_model = self.trainable_model.module
        t_losses = AverageMeter()
        t_accuracy = AverageMeter()
        criterion = nn.CrossEntropyLoss()
        self.trainable_model.eval()
        with torch.no_grad():
            for (i, (input, target)) in tqdm(enumerate(self.val_loader), desc='Validating Model for Benchmark', dynamic_ncols=True, total=len(self.val_loader)):
                batch_size = target.size(0)
                assert (batch_size < (2 ** 32)), 'Size is too large! correct will overflow'
                input = input.cuda()
                target_var = target.cuda(non_blocking=True)
                t_output = self.trainable_model(input)
                # The +1e-16 nudges logits away from exact zeros before CE.
                t_loss = criterion((t_output.cuda() + 1e-16), target_var)
                t_losses.update(t_loss.item(), input.size(0))
                top_correct = correct(t_output, target, top=(1,))[0]
                t_accuracy.update((top_correct.item() * (100.0 / batch_size)), batch_size)
        return (t_losses.avg, t_accuracy.avg)

    def _unfreeze_training_model_sections(self, *sections):
        """Enable gradients for the networks of the given section ids."""
        for i in sections:
            for param in self.lit_sections[i].network.parameters():
                param.requires_grad = True

    def _freeze_training_model_sections(self, *sections):
        """Disable gradients for the networks of the given section ids."""
        for i in sections:
            for param in self.lit_sections[i].network.parameters():
                param.requires_grad = False
def test_case108():
    """POST a batch entity upsert to the NGSI-LD broker and expect 204 (No Content)."""
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    # NOTE(review): the Link header value below looks garbled (unbalanced
    # quotes; the rel= URI appears lost, and `{{link}}` looks like an
    # unexpanded template placeholder) — confirm against the original suite.
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata108), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 204)
def _impl(array, pattern, ignore_case, highlevel, behavior, attrs):
    """Implementation of ``ak.str.match_substring``: apply pyarrow's
    ``match_substring`` kernel to every string in the array.

    Returns the result wrapped back to the caller's requested level
    (high-level ak.Array when *highlevel* is true).
    """
    from awkward._connect.pyarrow import import_pyarrow_compute
    # Raises a helpful error naming the operation if pyarrow is missing.
    pc = import_pyarrow_compute('ak.str.match_substring')
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=False, allow_unknown=False, primitive_policy='error', string_policy='as-characters')
        # Same kernel for str and bytes; pattern/ignore_case are bound here.
        apply = ak.operations.str._get_ufunc_action(pc.match_substring, pc.match_substring, bytestring_to_string=False, ignore_case=ignore_case, pattern=pattern)
        out = ak._do.recursively_apply(layout, apply)
        return ctx.wrap(out, highlevel=highlevel)
class RNNLayer(nn.Module):
    """Single (optionally bidirectional) RNN/GRU/LSTM layer with optional
    layer norm, dropout, time-downsampling and a tanh projection.

    Works on padded batches with explicit lengths (pack/pad internally).
    """

    def __init__(self, input_dim, module, bidirection, dim, dropout, layer_norm, sample_rate, proj):
        super(RNNLayer, self).__init__()
        # Bidirectional RNNs concatenate both directions' hidden states.
        rnn_out_dim = ((2 * dim) if bidirection else dim)
        self.out_dim = rnn_out_dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.sample_rate = sample_rate  # >1 enables time-axis downsampling
        self.proj = proj
        # ``module`` selects the recurrent cell by name: 'rnn'/'gru'/'lstm'.
        self.layer = getattr(nn, module.upper())(input_dim, dim, bidirectional=bidirection, num_layers=1, batch_first=True)
        if self.layer_norm:
            self.ln = nn.LayerNorm(rnn_out_dim)
        if (self.dropout > 0):
            self.dp = nn.Dropout(p=dropout)
        if self.proj:
            self.pj = nn.Linear(rnn_out_dim, rnn_out_dim)

    def forward(self, input_x, x_len):
        """Run the layer on a padded batch; returns (output, updated lengths)."""
        if (not self.training):
            # Compact RNN weights for faster inference on CUDA.
            self.layer.flatten_parameters()
        input_x = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False)
        (output, _) = self.layer(input_x)
        (output, x_len) = pad_packed_sequence(output, batch_first=True)
        if self.layer_norm:
            output = self.ln(output)
        if (self.dropout > 0):
            output = self.dp(output)
        if (self.sample_rate > 1):
            # 'drop' mode keeps every sample_rate-th frame.
            (output, x_len) = downsample(output, x_len, self.sample_rate, 'drop')
        if self.proj:
            output = torch.tanh(self.pj(output))
        return (output, x_len)
_module()  # NOTE(review): looks like a stripped registry decorator (e.g. `@HEADS.register_module()`) — confirm against the original source
class Shared2FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset: no conv layers, two shared fully-connected
    layers — the standard '2FC' R-CNN box head."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # All conv/fc branch counts are zero except the two shared FCs.
        super(Shared2FCBBoxHead, self).__init__(*args, num_shared_convs=0, num_shared_fcs=2, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, **kwargs)
def _save(im, fp, filename):
    """Save *im* to *fp* via the registered BUFR handler.

    Raises OSError when no usable handler has been installed.
    """
    # BUG FIX: the original tested hasattr('_handler', 'save') — probing the
    # string literal '_handler' (always False) instead of the handler object,
    # so a handler lacking .save slipped past this guard.
    if (_handler is None) or (not hasattr(_handler, 'save')):
        raise OSError('BUFR save handler not installed')
    _handler.save(im, fp, filename)
def test_suppress_warnings_type():
    """Check suppress_warnings filtering by warning type across its usage styles."""
    my_mod = _get_fresh_mod()
    # Fresh module: warning registry starts empty.
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    # Style 1: context manager with an in-context type filter.
    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # Style 2: pre-configured object reused as a context manager.
    sup = suppress_warnings()
    sup.filter(UserWarning)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # Module-scoped filter should also keep the registry clean.
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # Plain simplefilter('ignore') inside the manager still records an entry
    # (except on 3.7+, where the registry is not updated — hence py37=0).
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
def name_feature(name, toplevel=None):
    """Return the Feature providing the global *name*, or None.

    Looks *name* up on *toplevel* (defaulting to ``sage.all``), then maps the
    resolved object's defining module(s) to a feature via ``module_feature``.
    """
    if toplevel is None:
        try:
            import sage.all as toplevel
        except ImportError:
            return None  # no Sage available — nothing to resolve against
    try:
        obj = getattr(toplevel, name)
    except AttributeError:
        return None
    from sage.misc.dev_tools import find_object_modules
    modules = find_object_modules(obj)
    for module, names in modules.items():
        if name not in names:
            continue
        feature = module_feature(module)
        if feature:
            return feature
    return None
def make_parse():
    """Parse command-line options.

    Currently a single option: ``--base-loss`` (default 'CrossEntropyLoss').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-loss', default='CrossEntropyLoss', type=str)
    return parser.parse_args()
_builder('msrvtt_qa_instruct')  # NOTE(review): looks like a stripped registry decorator (e.g. `@registry.register_builder('msrvtt_qa_instruct')`) — confirm against the original source
class MSRVTTQAInstructBuilder(VideoQABuilder):
    """Dataset builder for the MSRVTT-QA instruction-tuning variant."""

    # Same dataset class for train and eval splits.
    train_dataset_cls = VideoQAInstructDataset
    eval_dataset_cls = VideoQAInstructDataset
    # Maps config-name -> YAML path used to configure the datasets.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/msrvtt/defaults_qa_instruct.yaml'}
def write_results(results_file, results):
    """Write a non-empty list of row dicts to ``results_file`` as CSV.

    The header comes from the keys of the first row, so every row should
    share the same keys.
    """
    with open(results_file, mode='w') as handle:
        writer = csv.DictWriter(handle, fieldnames=results[0].keys())
        writer.writeheader()
        writer.writerows(results)
        handle.flush()
class PSPModule(nn.Module):
    """Pyramid Scene Parsing module: pools features at several grid sizes,
    upsamples each pooled map back to the input resolution, concatenates
    with the input, and fuses everything with a 3x3 conv bottleneck.
    """

    def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
        super(PSPModule, self).__init__()
        # Fix: removed the dead `self.stages = []` assignment that was
        # immediately overwritten by the ModuleList below.
        self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes])
        # Bottleneck consumes the raw features plus one pooled map per size.
        self.bottleneck = nn.Sequential(
            nn.Conv2d((features + (len(sizes) * out_features)), out_features, kernel_size=3, padding=1, dilation=1, bias=False),
            InPlaceABNSync(out_features),
            nn.Dropout2d(0.1),
        )

    def _make_stage(self, features, out_features, size):
        """One pyramid branch: adaptive-avg-pool to (size, size) -> 1x1 conv -> BN."""
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
        bn = InPlaceABNSync(out_features)
        return nn.Sequential(prior, conv, bn)

    def forward(self, feats):
        (h, w) = (feats.size(2), feats.size(3))
        # Upsample every pyramid branch back to the input resolution.
        priors = ([F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats])
        return self.bottleneck(torch.cat(priors, 1))
class OneFormerModel(metaclass=DummyObject):
    """Auto-generated dummy placeholder used when torch is not installed;
    instantiating it raises a helpful error via ``requires_backends``."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_transform_float_int_2d_different_one_to_one():
    """one_to_one parameter broadcasting must keep each array's own
    parameters even when the leaf dtypes differ (float64 vs int64)."""
    # Two jagged arrays with identical offsets but distinct 'name' parameters.
    this = ak.contents.ListOffsetArray(
        ak.index.Index64(np.array([0, 3, 4], dtype='int64')),
        ak.contents.NumpyArray(np.array([1.0, 2.0, 3.0, 4.0], dtype='float64')),
        parameters={'name': 'this'},
    )
    that = ak.contents.ListOffsetArray(
        ak.index.Index64(np.array([0, 3, 4], dtype='int64')),
        ak.contents.NumpyArray(np.array([1, 2, 1, 9], dtype='int64')),
        parameters={'name': 'that'},
    )
    # A no-op transformation: we only care about parameter propagation.
    (this_next, that_next) = ak.operations.ak_transform.transform(
        (lambda *a, **k: None), this, that, highlevel=False, broadcast_parameters_rule='one_to_one'
    )
    assert (this_next.parameters == this.parameters)
    assert (that_next.parameters == that.parameters)
def result_region(finding):
    """Build a SARIF-style region dict from a finding.

    Prefers explicit line/column fields; when none are present, synthesizes
    a region from raw address fields (line 1, column 1 + 2*address).
    Returns an empty dict when the finding carries no location info.
    """
    region = {}
    positional = (
        ('line', 'startLine'),
        ('column', 'startColumn'),
        ('line_end', 'endLine'),
        ('column_end', 'endColumn'),
    )
    for src_key, dst_key in positional:
        if src_key in finding:
            region[dst_key] = int(finding[src_key])
    if region:
        return region
    # No line/column info: derive a pseudo-region from byte addresses.
    addressed = (
        ('address', 'startLine', 'startColumn'),
        ('address_end', 'endLine', 'endColumn'),
    )
    for src_key, line_key, col_key in addressed:
        if src_key in finding:
            region[line_key] = 1
            region[col_key] = 1 + 2 * int(finding[src_key])
    return region
# Fix: the decorator's '@' was stripped in a bad paste, turning it into a
# bare module-level call; restore scipy's nan-policy wrapper.
@_axis_nan_policy_factory((lambda x: x), n_outputs=1, result_to_tuple=(lambda x: (x,)))
def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
    """Coefficient of variation: std(a, ddof)/mean(a) along ``axis``.

    Degenerate cases: empty input or ddof > n yields NaN; ddof == n yields
    signed infinity where the (biased) std is positive, NaN elsewhere.
    """
    n = a.shape[axis]
    NaN = _get_nan(a)
    if (a.size == 0) or (ddof > n):
        # No data (or more dof removed than samples): NaN of the right shape.
        shp = np.asarray(a.shape)
        shp = np.delete(shp, axis)
        result = np.full(shp, fill_value=NaN)
        return result[()]
    mean_a = a.mean(axis)
    if ddof == n:
        # std with ddof == n would divide by zero; emit +/-inf where the
        # biased std is positive (sign taken from the mean), NaN elsewhere.
        std_a = a.std(axis=axis, ddof=0)
        result = np.full_like(std_a, fill_value=NaN)
        i = std_a > 0
        result[i] = np.inf
        result[i] = np.copysign(result[i], mean_a[i])
        return result[()]
    with np.errstate(divide='ignore', invalid='ignore'):
        std_a = a.std(axis, ddof=ddof)
        result = std_a / mean_a
    return result[()]
# Fix: restore the pytest parametrize decorator whose '@pytest.mark' prefix
# was stripped in a bad paste.
@pytest.mark.parametrize('pct_accuracy', [-1.0, -0.5, 0.0, 1.01])
def test_check_pct_accuracy_value(pct_accuracy, create_X_y):
    """DESMI must reject pct_accuracy values outside (0, 1]."""
    (X, y) = create_X_y
    with pytest.raises(ValueError):
        desmi = DESMI(pct_accuracy=pct_accuracy)
        desmi.fit(X, y)
class Block(chainer.Chain):
    """SNGAN-style generator residual block with optional upsampling and
    (categorical) conditional batch normalization when n_classes > 0.
    """

    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1, activation=F.relu, upsample=False, n_classes=0):
        super(Block, self).__init__()
        # He-style gain for the residual convs; plain Glorot for the shortcut.
        initializer = chainer.initializers.GlorotUniform(math.sqrt(2))
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.upsample = upsample
        # Shortcut needs a learnable 1x1 conv whenever shape changes.
        self.learnable_sc = ((in_channels != out_channels) or upsample)
        hidden_channels = (out_channels if (hidden_channels is None) else hidden_channels)
        self.n_classes = n_classes
        with self.init_scope():
            self.c1 = L.Convolution2D(in_channels, hidden_channels, ksize=ksize, pad=pad, initialW=initializer)
            self.c2 = L.Convolution2D(hidden_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer)
            if (n_classes > 0):
                # Class-conditional BN when labels are available.
                self.b1 = CategoricalConditionalBatchNormalization(in_channels, n_cat=n_classes)
                self.b2 = CategoricalConditionalBatchNormalization(hidden_channels, n_cat=n_classes)
            else:
                self.b1 = L.BatchNormalization(in_channels)
                self.b2 = L.BatchNormalization(hidden_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)

    def residual(self, x, y=None, z=None, **kwargs):
        """BN -> act -> (upsample+)conv -> BN -> act -> conv.
        `y` is the class label; `z` is accepted but unused here."""
        h = x
        h = (self.b1(h, y, **kwargs) if (y is not None) else self.b1(h, **kwargs))
        h = self.activation(h)
        h = (upsample_conv(h, self.c1) if self.upsample else self.c1(h))
        h = (self.b2(h, y, **kwargs) if (y is not None) else self.b2(h, **kwargs))
        h = self.activation(h)
        h = self.c2(h)
        return h

    def shortcut(self, x):
        # Identity unless the channel count or spatial size changes.
        if self.learnable_sc:
            x = (upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x))
            return x
        else:
            return x

    def __call__(self, x, y=None, z=None, **kwargs):
        return (self.residual(x, y, z, **kwargs) + self.shortcut(x))
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the attention heads flagged 0 in ``head_mask`` and
    log score/speed/parameter-count comparisons vs. mere masking."""
    # Score with heads merely masked (still computed, outputs zeroed).
    before_time = datetime.now()
    (_, _, loss) = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = (1 / loss)
    original_time = (datetime.now() - before_time)
    original_num_params = sum((p.numel() for p in model.parameters()))
    # layer index -> indices of heads whose mask entry is 0.
    heads_to_prune = dict(((layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))))
    for (k, v) in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single-head list to a scalar; re-wrap it.
            heads_to_prune[k] = [v]
    # Sanity check: total heads to prune must match zeros in the mask.
    assert (sum((len(h) for h in heads_to_prune.values())) == (1 - head_mask.long()).sum().item())
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum((p.numel() for p in model.parameters()))
    # Score again with the heads physically removed.
    before_time = datetime.now()
    (_, _, loss) = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True)
    score_pruning = (1 / loss)
    new_time = (datetime.now() - before_time)
    logger.info('Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', original_num_params, pruned_num_params, ((pruned_num_params / original_num_params) * 100))
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', ((original_time / new_time) * 100))
    save_model(model, args.output_dir)
class WeightCharacter(Element):
    """Abstract p-adic weight-space character.

    Concrete subclasses must implement ``Lvalue`` (and provide
    ``teichmuller_type``/``_param`` via the parent).
    """

    def __init__(self, parent):
        Element.__init__(self, parent)
        self._p = self.parent().prime()

    def base_extend(self, R):
        """Coerce this character into the base-extended parent space."""
        return self.parent().base_extend(R).coerce(self)

    def is_even(self) -> bool:
        # A character is odd exactly when it sends -1 to -1.
        return (self((- 1)) != (- 1))

    def pAdicEisensteinSeries(self, ring, prec=20):
        """q-expansion (in ``ring``) of the p-adic Eisenstein series attached
        to this even weight-character, to precision ``prec``."""
        if (not self.is_even()):
            raise ValueError('Eisenstein series not defined for odd weight-characters')
        q = ring.gen()
        s = (ring(1) + ((2 * self.one_over_Lvalue()) * sum(((sum(((self(d) / d) for d in divisors(n))) * (q ** n)) for n in range(1, prec)))))
        return s.add_bigoh(prec)

    def values_on_gens(self):
        # Identifying data: (value on the chosen generator, Teichmuller type);
        # used for equality/comparison below.
        return (self(self.parent()._param), self.teichmuller_type())

    def is_trivial(self) -> bool:
        return (self.values_on_gens() == (1, 0))

    def _richcmp_(self, other, op) -> bool:
        # Compare characters via their identifying data.
        return richcmp(self.values_on_gens(), other.values_on_gens(), op)

    def Lvalue(self):
        """The L-value attached to this character; subclass responsibility."""
        raise NotImplementedError

    def one_over_Lvalue(self):
        """1/L-value, with the convention 0 for the trivial character."""
        if self.is_trivial():
            return ZZ(0)
        else:
            return (1 / self.Lvalue())
class Median(CombinerBase):
    """Combiner taking the element-wise median across univariate series."""

    def _combine_univariates(self, univariates: List[UnivariateTimeSeries]) -> UnivariateTimeSeries:
        present = [series for series in univariates if series is not None]
        template = present[0]
        values = [series.np_values for series in present]
        if self.abs_score and sum(self.models_used) > 1:
            # Median of magnitudes, re-signed with the median sign
            # (sign ties resolve to -1).
            sign = np.median(np.sign(values), axis=0)
            sign[sign == 0] = -1
            combined = sign * np.median([np.abs(series.np_values) for series in present], axis=0)
        else:
            combined = np.median(values, axis=0)
        return UnivariateTimeSeries(template.time_stamps, combined, template.name)
class BaseNNIndexer():
    """Abstract nearest-neighbour indexer configured from a config dict.

    Subclasses implement prepare/index/search; this base only captures the
    common configuration flags.
    """

    def __init__(self, config):
        super(BaseNNIndexer, self).__init__()
        self.token_dim = config['token_dim']
        self.use_gpu = config['faiss_use_gpu']
        # fp16 storage halves memory; derived from the configured dtype.
        self.use_fp16 = config['token_dtype'] == 'float16'

    def prepare(self, data_chunks: List[numpy.ndarray], subsample=(- 1)):
        """Optional pre-processing hook (e.g. training quantizers)."""
        pass

    def index(self, ids: List[numpy.ndarray], data_chunks: List[numpy.ndarray]):
        """Add the given vectors (with their ids) to the index."""
        pass

    def search(self, query_vec: numpy.ndarray, top_n: int):
        """Return the top_n nearest neighbours of ``query_vec``."""
        pass
class WithForegroundSelection(SelectionStrategy):
    """Selects samples whose label array contains at least one nonzero entry."""

    def __call__(self, sample) -> bool:
        labels = sample[defs.KEY_LABELS]
        return labels.any()
class LReLU_MobileNet(nn.Module):
    """MobileNet-v1-style network whose stem uses leaky ReLU.

    Each ``cfg`` entry is either an int (output channels, stride 1) or a
    (channels, stride) tuple.
    """

    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(LReLU_MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        """Expand cfg into a sequential stack of depthwise-separable Blocks."""
        blocks = []
        for spec in self.cfg:
            if isinstance(spec, int):
                out_planes, stride = spec, 1
            else:
                out_planes, stride = spec
            blocks.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def _build_egg(egg, tarball, to_dir):
    """Unpack ``tarball`` into a temp dir, build a Distribute egg from it
    into ``to_dir``, and verify the expected ``egg`` file was produced.

    Raises:
        IOError: if the build completed but ``egg`` does not exist.
    """
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        # Unpack the source tarball.
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # The tarball is assumed to contain a single top-level directory.
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        # Always restore the working directory and remove the temp tree.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    log.warn(egg)
    if (not os.path.exists(egg)):
        raise IOError('Could not build the egg.')
class Choice(ParamType):
    """Click parameter type restricting a value to a fixed set of choices.

    Honors the context's ``token_normalize_func`` and, when
    ``case_sensitive=False``, matches case-insensitively while still
    returning the original spelling of the matched choice.
    """

    name = 'choice'

    def __init__(self, choices, case_sensitive=True):
        self.choices = choices
        self.case_sensitive = case_sensitive

    def get_metavar(self, param):
        return '[{}]'.format('|'.join(self.choices))

    def get_missing_message(self, param):
        return 'Choose from:\n\t{}.'.format(',\n\t'.join(self.choices))

    def convert(self, value, param, ctx):
        # Keep a normalized copy of the value and a normalized->original map
        # of the choices; matching happens on normalized forms, but the
        # original choice spelling is what gets returned.
        normed_value = value
        normed_choices = {choice: choice for choice in self.choices}
        if ((ctx is not None) and (ctx.token_normalize_func is not None)):
            normed_value = ctx.token_normalize_func(value)
            normed_choices = {ctx.token_normalize_func(normed_choice): original for (normed_choice, original) in normed_choices.items()}
        if (not self.case_sensitive):
            # casefold is the correct case-insensitive fold on Python 3;
            # str.lower is the best available fallback on Python 2.
            if PY2:
                lower = str.lower
            else:
                lower = str.casefold
            normed_value = lower(normed_value)
            normed_choices = {lower(normed_choice): original for (normed_choice, original) in normed_choices.items()}
        if (normed_value in normed_choices):
            return normed_choices[normed_value]
        self.fail('invalid choice: {}. (choose from {})'.format(value, ', '.join(self.choices)), param, ctx)

    def __repr__(self):
        return "Choice('{}')".format(list(self.choices))
# Fix: the click decorators lost their '@<object>' prefixes in a bad paste,
# leaving a bare `()` call and an argument tuple. This is a variadic-name
# CLI command — confirm whether the command is registered on a group
# (e.g. `@cli.command()`) elsewhere in this file.
@click.command()
@click.argument('names', nargs=-1)
def run(names):
    """Run the named benchmarks after validating the name list."""
    if (not names):
        raise click.BadParameter('Empty names!')
    if (len(names) != len(set(names))):
        raise click.BadParameter('Duplicate names!')
    options = _get_all_options()
    # Validate every name before executing anything.
    for name in names:
        if (name not in options):
            raise click.BadParameter('Invalid run name! Make sure every name can be found in `garage_benchmark list`!')
    for name in names:
        options[name]()
def whiten(values: Tensor, shift_mean=True, epsilon=1e-08) -> Tensor:
    """Normalize ``values`` to unit variance; zero-mean unless
    ``shift_mean`` is False, in which case the original mean is kept."""
    assert (values.size(0) >= 8), f'Internal error: Minibatch size {values.size(0)} is insufficient for whitening.'
    mean = values.mean()
    std = values.std(unbiased=False)
    normalized = (values - mean) / (std + epsilon)
    if not shift_mean:
        normalized = normalized + mean
    return normalized
def get_node_csv(node_file):
    """Read a CSV with 'name' and 'group' columns into a name -> group dict."""
    frame = pd.read_csv(node_file)
    return dict(zip(frame['name'], frame['group']))
class BayesianLinReg(ConjPrior):
    """Bayesian linear regression x ~ N(w0*t + w1, sigma^2) with the
    standard conjugate normal-inverse-gamma prior on (w, sigma^2).

    Fix: every `@` matrix-multiplication operator in this block had been
    stripped by a bad paste (e.g. ``t_full.T t_full``), leaving it
    syntactically invalid; the operators are restored below.
    """

    def __init__(self, sample=None):
        # Weakly-informative prior: zero-mean weights, near-singular weight
        # precision, and an (almost) flat inverse-gamma on the noise variance.
        self.w_0 = np.zeros(2)
        self.Lambda_0 = np.array([[0, 0], [0, 1]]) + _epsilon
        self.alpha = 1 + _epsilon
        self.beta = _epsilon
        super().__init__(sample=sample)

    def n_params(self) -> int:
        # 2 weight means + 4 precision entries + alpha + beta.
        return 8

    def update(self, x):
        """Condition the prior on new observations ``x`` (conjugate update)."""
        (t, x) = self.process_time_series(x)
        # Design matrix with an intercept column.
        t_full = np.stack((t, np.ones_like(t)), axis=-1)
        self.w_0 = self.w_0.reshape((2, 1))
        pred0 = self.w_0.T @ self.Lambda_0 @ self.w_0
        design = t_full.T @ t_full
        ols = pinv(t_full) @ x
        self.w_0 = pinvh(self.Lambda_0 + design) @ (self.Lambda_0 @ self.w_0 + design @ ols)
        self.Lambda_0 = self.Lambda_0 + design
        pred = self.w_0.T @ self.Lambda_0 @ self.w_0
        self.w_0 = self.w_0.flatten()
        self.n = self.n + len(x)
        self.alpha = self.alpha + (len(x) / 2)
        self.beta = self.beta + ((x.T @ x + pred0 - pred).item() / 2)

    def posterior_explicit(self, x, return_rv=False, log=True, return_updated=False):
        """Exact log marginal likelihood of ``x`` under the current prior."""
        if (x is None) or return_rv:
            raise ValueError("Bayesian linear regression doesn't have a scipy.stats random variable posterior. Please specify a non-``None`` value of ``x`` and set ``return_rv = False``.")
        updated = copy.deepcopy(self)
        updated.update(x)
        (t, x_np) = self.process_time_series(x)
        # Closed-form evidence from prior/posterior normalizing constants.
        a = ((-len(x_np)) / 2) * np.log(2 * np.pi)
        b = (np.linalg.slogdet(self.Lambda_0)[1] - np.linalg.slogdet(updated.Lambda_0)[1]) / 2
        c = (self.alpha * np.log(self.beta)) - (updated.alpha * np.log(updated.beta))
        d = gammaln(updated.alpha) - gammaln(self.alpha)
        ret = ((a + b + c + d) if log else np.exp(a + b + c + d)).reshape(1)
        return (ret, updated) if return_updated else ret

    def posterior(self, x, return_rv=False, log=True, return_updated=False):
        """Plug-in (MAP-style) approximation of the posterior log density of ``x``."""
        if (x is None) or return_rv:
            raise ValueError("Bayesian linear regression doesn't have a scipy.stats random variable posterior. Please specify a non-``None`` value of ``x`` and set ``return_rv = False``.")
        (t, x_np) = self.process_time_series(x)
        prior_sigma2 = invgamma(a=self.alpha, scale=self.beta)
        sigma2_hat = prior_sigma2.mean()
        prior_w = mvnorm(self.w_0, sigma2_hat * pinvh(self.Lambda_0), allow_singular=True)
        w_hat = self.w_0
        xhat = np.stack((t, np.ones_like(t)), axis=-1) @ w_hat
        updated = copy.deepcopy(self)
        updated.update(x)
        post_sigma2 = invgamma(a=updated.alpha, scale=updated.beta)
        post_w = mvnorm(updated.w_0, sigma2_hat * pinvh(updated.Lambda_0), allow_singular=True)
        # Evidence at the plug-in parameters, corrected by prior/posterior mass.
        evidence = norm(xhat, np.sqrt(sigma2_hat)).logpdf(x_np.flatten()).reshape(len(x_np))
        prior = prior_sigma2.logpdf(sigma2_hat) + prior_w.logpdf(w_hat)
        post = post_sigma2.logpdf(sigma2_hat) + post_w.logpdf(w_hat)
        logp = evidence + prior.item() - post.item()
        ret = logp if log else np.exp(logp)
        return (ret, updated) if return_updated else ret

    def forecast(self, time_stamps) -> Tuple[TimeSeries, TimeSeries]:
        """Posterior-predictive mean and stderr series at ``time_stamps``."""
        name = self.names[0]
        t = to_timestamp(time_stamps)
        if self.t0 is None:
            self.t0 = t[0]
        if self.dt is None:
            self.dt = (t[-1] - t[0]) if (len(t) > 1) else 1
        # Normalize timestamps to the scale used during training.
        t = (t - self.t0) / self.dt
        t_full = np.stack((t, np.ones_like(t)), axis=-1)
        sigma2_hat = invgamma(a=self.alpha, scale=self.beta).mean()
        w_cov = sigma2_hat * pinvh(self.Lambda_0)
        xhat = t_full @ self.w_0
        xhat = UnivariateTimeSeries(time_stamps=time_stamps, values=xhat, name=name)
        # Predictive variance = weight-uncertainty term + noise variance.
        sigma2 = np.sum((t_full @ w_cov) * t_full, axis=-1)
        sigma = np.sqrt(sigma2 + sigma2_hat)
        sigma = UnivariateTimeSeries(time_stamps=time_stamps, values=sigma, name=f'{name}_stderr')
        return (xhat.to_ts(), sigma.to_ts())
def clean_py_ruc(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean and standardize Paraguayan RUC numbers in ``df[column]``.

    ``output_format`` selects 'compact' vs 'standard' formatting,
    ``errors`` controls invalid-value handling (passed to ``_format``),
    and ``inplace`` replaces the original column with the cleaned values.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work on a dask frame so partitions can be cleaned in parallel.
    df = to_dask(df)
    # _format returns a tuple per value; kept temporarily in one column.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Replace the source column with the cleaned values while keeping the
        # '<column>_clean' name (matches the other dataprep cleaners).
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
def check_optimization_criteria(nnp, batch_size):
    """Decide which graph optimizations apply to the first executor of ``nnp``.

    Currently only evaluates 'NCHW_TO_NHWC': enabled when every executor
    input is 4-D and no layout-sensitive function consumes a network input
    with base_axis != 1.
    """

    def find_network(nnp, exe):
        # Networks and executors are stored separately in the protobuf;
        # match them by name.
        net = None
        for network in nnp.protobuf.network:
            if (network.name == exe.network_name):
                net = network
        return net

    def get_input_info(exec_info, network):
        # Map each executor data variable to its shape, substituting the
        # batch size for negative (placeholder) dimensions.
        input_dict = collections.OrderedDict()
        for v in exec_info.data_variable:
            input_dict[v.variable_name] = []
        for v in network.variable:
            if (v.name in input_dict):
                shape = v.shape.dim
                input_dict[v.name] = [(x if (x > 0) else batch_size) for x in shape]
        return input_dict

    state = {'NCHW_TO_NHWC': {'doc': 'Convert the NCHW format to NHWC, and remove the extra nodes', 'status': True}}
    # Functions whose behavior depends on the spatial data layout.
    func_list = ['Convolution', 'Deconvolution', 'MaxPooling', 'AveragePooling', 'SumPooling', 'Unpooling', 'Interpolate', 'RandomErase', 'MaxPoolingBackward']
    func_cnt = collections.Counter()
    exec_info = nnp.protobuf.executor[0]
    network = find_network(nnp, exec_info)
    input_dict = get_input_info(exec_info, network)
    for (k, shape) in input_dict.items():
        # The NCHW->NHWC transpose is only defined for 4-D inputs.
        if (len(shape) != 4):
            state['NCHW_TO_NHWC']['status'] = False
            break
    for func in network.function:
        if (func.type in func_list):
            func_cnt[func.type] += 1
        for inp in func.input:
            # A network input consumed with base_axis != 1 breaks the
            # assumption that axis 1 is the channel axis.
            if ((inp in input_dict) and (len(func.ListFields()) > 4) and hasattr(func.ListFields()[(- 1)][1], 'base_axis') and (func.ListFields()[(- 1)][1].base_axis != 1)):
                state['NCHW_TO_NHWC']['status'] = False
                break
    if (len(func_cnt) == 0):
        # No layout-sensitive functions at all: conversion would be pointless.
        state['NCHW_TO_NHWC']['status'] = False
    return state
class SGDMixin(StepwiseMixin, abc.ABC):
    """Mixin implementing the common shape of one gradient-descent step;
    subclasses define the update rule and the learning-rate schedule."""

    def step(self, **kwargs):
        """Apply one update to ``x`` using ``grad``.

        Raises:
            TypeError: when 'x' or 'grad' is missing from kwargs.
        """
        if 'x' not in kwargs:
            raise TypeError('x argument is missing in step function.')
        if 'grad' not in kwargs:
            raise TypeError('grad argument is missing in step function.')
        x = kwargs['x']
        grad = kwargs['grad']
        new_x = x + self._get_update(grad)
        self.iteration += 1
        self._update_lr()
        return new_x

    def reset(self):
        """No optimizer state to reset by default."""
        pass

    def _get_update(self, grad: np.ndarray) -> np.ndarray:
        """Return the additive update for this gradient; subclass hook."""
        raise NotImplementedError()

    def _update_lr(self) -> None:
        """Advance the learning-rate schedule; subclass hook."""
        raise NotImplementedError()
class Timer():
    """Wall-clock timer, usable directly or as a context manager.

    On context exit, prints the time since the last check using
    ``print_tmpl`` (default '{:.3f}').
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
        if start:
            self.start()

    def is_running(self):
        """Whether the timer is currently running."""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start the timer (idempotent) and reset the checkpoint."""
        if not self._is_running:
            self._t_start = time()
            self._is_running = True
        # The checkpoint is refreshed even when already running.
        self._t_last = time()

    def since_start(self):
        """Seconds since start(); also refreshes the checkpoint."""
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Seconds since the previous checkpoint; refreshes the checkpoint."""
        if not self._is_running:
            raise TimerError('timer is not running')
        elapsed = time() - self._t_last
        self._t_last = time()
        return elapsed
def _lookfor_generate_cache(module, import_modules, regenerate):
    """Build (or fetch) the docstring cache used by ``lookfor``.

    Returns a dict mapping fully-qualified name -> (docstring, kind, index),
    where kind is one of 'module', 'class', 'func', 'object'. Caches per
    module id unless ``regenerate`` is true.
    """
    global _lookfor_caches
    import inspect
    if (sys.version_info[0] >= 3):
        from io import StringIO
    else:
        from StringIO import StringIO
    if (module is None):
        module = 'numpy'
    if isinstance(module, str):
        try:
            __import__(module)
        except ImportError:
            return {}
        module = sys.modules[module]
    elif (isinstance(module, list) or isinstance(module, tuple)):
        # Multiple roots: merge their individual caches.
        cache = {}
        for mod in module:
            cache.update(_lookfor_generate_cache(mod, import_modules, regenerate))
        return cache
    if ((id(module) in _lookfor_caches) and (not regenerate)):
        return _lookfor_caches[id(module)]
    cache = {}
    _lookfor_caches[id(module)] = cache
    seen = {}
    index = 0
    # Breadth-first walk over the module/class attribute graph.
    stack = [(module.__name__, module)]
    while stack:
        (name, item) = stack.pop(0)
        if (id(item) in seen):
            continue
        seen[id(item)] = True
        index += 1
        kind = 'object'
        if inspect.ismodule(item):
            kind = 'module'
            try:
                _all = item.__all__
            except AttributeError:
                _all = None
            if (import_modules and hasattr(item, '__path__')):
                # Import every submodule so its members become visible,
                # silencing stdout/stderr during the imports.
                for pth in item.__path__:
                    for mod_path in os.listdir(pth):
                        this_py = os.path.join(pth, mod_path)
                        init_py = os.path.join(pth, mod_path, '__init__.py')
                        if (os.path.isfile(this_py) and mod_path.endswith('.py')):
                            to_import = mod_path[:(- 3)]
                        elif os.path.isfile(init_py):
                            to_import = mod_path
                        else:
                            continue
                        if (to_import == '__init__'):
                            continue
                        try:
                            old_stdout = sys.stdout
                            old_stderr = sys.stderr
                            try:
                                sys.stdout = StringIO()
                                sys.stderr = StringIO()
                                __import__(('%s.%s' % (name, to_import)))
                            finally:
                                sys.stdout = old_stdout
                                sys.stderr = old_stderr
                        except BaseException:
                            # A failing submodule import must not abort the scan.
                            continue
            for (n, v) in _getmembers(item):
                try:
                    item_name = getattr(v, '__name__', ('%s.%s' % (name, n)))
                    mod_name = getattr(v, '__module__', None)
                except NameError:
                    # Attribute access on some extension objects can raise
                    # NameError here; fall back to the composed name.
                    item_name = ('%s.%s' % (name, n))
                    mod_name = None
                if (('.' not in item_name) and mod_name):
                    item_name = ('%s.%s' % (mod_name, item_name))
                if (not item_name.startswith((name + '.'))):
                    # Only descend into members living under this namespace,
                    # with an exception for ufuncs (foreign __module__).
                    if isinstance(v, ufunc):
                        pass
                    else:
                        continue
                elif (not (inspect.ismodule(v) or (_all is None) or (n in _all))):
                    # Respect __all__ for non-module members.
                    continue
                stack.append((('%s.%s' % (name, n)), v))
        elif inspect.isclass(item):
            kind = 'class'
            for (n, v) in _getmembers(item):
                stack.append((('%s.%s' % (name, n)), v))
        elif hasattr(item, '__call__'):
            kind = 'func'
        try:
            doc = inspect.getdoc(item)
        except NameError:
            # Docstring access can raise NameError for some objects.
            doc = None
        if (doc is not None):
            cache[name] = (doc, kind, index)
    return cache
def process(model, clip, path_indata, dname, frame_no, args, img_size):
    """Run the saliency model on one clip and score the predicted map
    against the DIEM ground-truth map and fixations for that frame.

    Returns:
        (SIM, CC, NSS, AUC-Judd) metrics for the frame.
    """
    with torch.no_grad():
        # Predicted saliency map for the first batch element.
        smap = model(clip.to(device)).cpu().data[0]
    smap = smap.numpy()
    # Frame id is the trailing '_<id>' of the (extension-stripped) name.
    _id = frame_no.split('.')[0].split('_')[(- 1)]
    # Ground-truth continuous saliency map, read as grayscale.
    gt = cv2.imread(join(path_indata, 'annotations/DIEM', dname, 'maps', 'eyeMap_{}.jpg'.format(_id)), 0)
    smap = cv2.resize(smap, (gt.shape[1], gt.shape[0]))
    fix = get_fixation(path_indata, dname, _id)
    # blur() presumably returns a tensor, since unsqueeze is applied below
    # — confirm against its definition.
    smap = blur(smap)
    gt = torch.FloatTensor(gt).unsqueeze(0)
    fix = torch.FloatTensor(fix).unsqueeze(0)
    smap = smap.unsqueeze(0)
    sim_loss = similarity(smap, gt)
    cc_loss = cc(smap, gt)
    nss_loss = nss(smap, fix)
    aucj_loss = auc_judd(smap, fix)
    # NaN metrics are only expected when the GT map is entirely empty.
    if (np.isnan(sim_loss) or np.isnan(cc_loss) or np.isnan(nss_loss)):
        assert (gt.numpy().max() == 0), gt.numpy().max()
    return (sim_loss, cc_loss, nss_loss, aucj_loss)
class AbstractPlayer():
    """Base class for GVGAI learning agents; subclasses override the hooks."""

    def __init__(self):
        # Default observation format requested from the server.
        self.lastSsoType = LEARNING_SSO_TYPE.JSON

    def init(self, sso, timer):
        """Called once at the start of an episode."""
        pass

    def act(self, sso, timer):
        """Return the action to take for state ``sso``."""
        pass

    def result(self, sso, timer):
        """Called with the final state at the end of an episode."""
        pass
def flatten_to_tuple(outputs):
    """Recursively flatten tensors out of nested containers (and BoxLists)
    into a flat tuple; unknown types are logged and skipped."""
    collected = []
    if isinstance(outputs, torch.Tensor):
        collected.append(outputs)
    elif isinstance(outputs, (list, tuple)):
        for element in outputs:
            collected.extend(flatten_to_tuple(element))
    elif isinstance(outputs, dict):
        for value in outputs.values():
            collected.extend(flatten_to_tuple(value))
    elif isinstance(outputs, BoxList):
        collected.extend(flatten_to_tuple(outputs.bbox))
        # Optional per-field tensors; score tensors ride along with their field.
        if outputs.has_field('grid'):
            collected.extend(flatten_to_tuple(outputs.get_field('grid')))
        if outputs.has_field('mask'):
            collected.extend(flatten_to_tuple(outputs.get_field('mask')))
            collected.extend(flatten_to_tuple(outputs.get_field('mask_scores')))
        if outputs.has_field('keypoints'):
            collected.extend(flatten_to_tuple(outputs.get_field('keypoints')))
        if outputs.has_field('parsing'):
            collected.extend(flatten_to_tuple(outputs.get_field('parsing')))
            collected.extend(flatten_to_tuple(outputs.get_field('parsing_scores')))
        if outputs.has_field('uv'):
            collected.extend(flatten_to_tuple(outputs.get_field('uv')))
        if outputs.has_field('hier'):
            collected.extend(flatten_to_tuple(outputs.get_field('hier')))
    else:
        logging_rank('Output of type {} not included in flops/activations count.'.format(type(outputs)))
    return tuple(collected)
class TrainerDeviceMixin(ABC):
    """Mixin handling seeding, device selection (CPU/CUDA/XLA), and model
    parallelization (DataParallel / DDP / FairScale sharded DDP)."""

    def configure_seed(self):
        seed = self.config.training.seed
        if (seed is None):
            return
        # Deterministic cuDNN trades speed for reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    def configure_device(self):
        """Resolve self.device / self.local_rank / self.distributed from config."""
        if (self.config.training.get('device', 'cuda') == 'xla'):
            import torch_xla.core.xla_model as xm
            self.device = xm.xla_device()
            self.distributed = True
            self.local_rank = xm.get_local_ordinal()
            is_xla = True
        else:
            is_xla = False
            if ('device_id' not in self.config):
                warnings.warn("No 'device_id' in 'config', setting to -1. This can cause issues later in training. Ensure that distributed setup is properly initialized.")
                self.local_rank = (- 1)
            else:
                self.local_rank = self.config.device_id
            self.device = self.local_rank
            self.distributed = False
        registry.register('global_device', self.device)
        # An explicit init_method means torch.distributed is in use.
        if (self.config.distributed.init_method is not None):
            self.distributed = True
            self.device = torch.device('cuda', self.local_rank)
            torch.cuda.set_device(self.local_rank)
        elif torch.cuda.is_available():
            self.device = torch.device('cuda')
            torch.cuda.set_device(0)
        elif (not is_xla):
            self.device = torch.device('cpu')
        if ('rank' not in self.config.distributed):
            # Fill in the global rank (or -1 when not in a process group).
            if (torch.distributed.is_available() and torch.distributed.is_initialized()):
                global_rank = torch.distributed.get_rank()
            else:
                global_rank = (- 1)
            with open_dict(self.config.distributed):
                self.config.distributed.rank = global_rank
        registry.register('global_device', self.config.distributed.rank)

    def parallelize_model(self):
        """Wrap self.model for multi-GPU execution when applicable."""
        registry.register('data_parallel', False)
        registry.register('distributed', False)
        # Single-node multi-GPU without a process group: DataParallel.
        if (('cuda' in str(self.device)) and (torch.cuda.device_count() > 1) and (not self.distributed)):
            registry.register('data_parallel', True)
            self.model = torch.nn.DataParallel(self.model)
        if (('cuda' in str(self.device)) and self.distributed):
            registry.register('distributed', True)
            set_torch_ddp = True
            try:
                from fairscale.nn.data_parallel import ShardedDataParallel
                from fairscale.optim.oss import OSS
                if isinstance(self.optimizer, OSS):
                    # Sharded DDP only pairs with a fairscale OSS optimizer.
                    self.model = ShardedDataParallel(self.model, self.optimizer)
                    set_torch_ddp = False
                    logger.info('Using FairScale ShardedDataParallel')
            except ImportError:
                logger.info('Using PyTorch DistributedDataParallel')
                warnings.warn(('You can enable ZeRO and Sharded DDP, by installing fairscale ' + 'and setting optimizer.enable_state_sharding=True.'))
            if set_torch_ddp:
                self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.local_rank], output_device=self.local_rank, find_unused_parameters=self.config.training.find_unused_parameters)
        if (is_xla() and (get_world_size() > 1)):
            # XLA multi-core: sync initial weights from the master replica.
            broadcast_xla_master_model_param(self.model)
def test_array_highlevel_true():
    """typetracer_with_report(highlevel=True) must return an ak.Array whose
    report records touched buffers lazily (nothing until actually used)."""
    form = ak.forms.from_dict(form_dict)
    (array, report) = ak.typetracer.typetracer_with_report(form, highlevel=True)
    assert isinstance(array, ak.Array)
    # Field access alone must not touch any buffers.
    y = array.y
    assert (len(report.data_touched) == 0)
    assert (len(report.shape_touched) == 0)
    # Reducing 'y' touches its offsets and content buffers.
    ak.sum(y)
    assert (set(report.data_touched) == {'y.list.offsets', 'y.list.content'})
    assert (set(report.shape_touched) == {'y.list.offsets', 'y.list.content'})
class Generator(keras.Model):
    """CFGAN generator: maps a user's condition vector to dense item scores,
    trained adversarially against the discriminator (d_fake below)."""

    def __init__(self, data, learning_rate=0.001, l_w=0, l_b=0, l_gan=0, num_users=100, num_items=100, name='CFGAN-GEN', **kwargs):
        super().__init__(name=name, **kwargs)
        self._learning_rate = learning_rate
        # Regularization weights: l_w on G, l_b on the bias, l_gan on the
        # zero-reconstruction (ZR) term.
        self._l_w = l_w
        self._l_b = l_b
        self._l_gan = l_gan
        self._num_items = num_items
        self._num_users = num_users
        self.data = data
        self.initializer = tf.initializers.GlorotUniform()
        self.sampler = pws.Sampler(self.data.i_train_dict)
        # Single dense layer: scores = sigmoid(C_u . G + B).
        self.B = tf.Variable(tf.zeros(shape=[self._num_items]), name='B_gen', dtype=tf.float32)
        self.G = tf.Variable(self.initializer(shape=[self._num_items, self._num_items]), name='G_gen', dtype=tf.float32)
        self.optimizer = tf.optimizers.Adam(self._learning_rate)

    def generate_fake_data(self, mask, C_u):
        """Score all items for condition vector C_u, zeroed outside ``mask``."""
        r_hat = tf.nn.sigmoid((tf.matmul(tf.cast(C_u, tf.float32), self.G) + self.B))
        fake_data = tf.multiply(r_hat, mask)
        return fake_data

    def infer(self, C_u):
        """Unmasked item scores, used at inference time."""
        r_hat = tf.nn.sigmoid((tf.matmul(C_u, self.G) + self.B))
        return r_hat

    def train_step(self, batch):
        """One generator update: adversarial log(1 - D) term plus the
        ZR penalty on sampled negatives and L2 regularization."""
        (C_u, mask, N_zr, g_sample, d_fake) = batch
        with tf.GradientTape() as tape:
            # Small epsilon keeps the log finite when D saturates at 1.
            loss = tf.reduce_mean((tf.math.log(((1.0 - d_fake) + 0.0001)) + (self._l_gan * tf.nn.l2_loss(tf.multiply(tf.cast(N_zr, tf.float32), g_sample)))))
            reg_loss = ((self._l_w * tf.nn.l2_loss(self.G)) + (self._l_b * tf.nn.l2_loss(self.B)))
            loss += reg_loss
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss
# Fix: restore the pytest markers whose '@pytest.mark' prefixes were
# stripped in a bad paste (GPU-only test, parametrized over data layouts).
@pytest.mark.gpu
@pytest.mark.parametrize('dl', LAYOUTS)
def test_layouts(dl):
    """cuBLAS matmul must work for every supported data layout."""
    with change_default(blas, 'cuBLAS'):
        _test_matmul(('cuBLAS float ' + dl), dace.float32, 'cuBLAS', dace.StorageType.GPU_Global, data_layout=dl)
# Fix: restore the '@register_model' decorator (its '@' was stripped in a
# bad paste) so the constructor is registered with the model registry.
@register_model
def tv_resnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """ResNeXt50-32x4d with torchvision-compatible weights.

    Args:
        pretrained: load pretrained weights via ``load_pretrained``.
        num_classes: classifier output size.
        in_chans: number of input channels.
    """
    default_cfg = default_cfgs['tv_resnext50_32x4d']
    model = ResNet(Bottleneck, [3, 4, 6, 3], cardinality=32, base_width=4, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def _make_arg(kwargs: Dict[str, Any]):
    """Dispatch kernel-argument construction on the 'tag' entry of kwargs."""
    assert 'tag' in kwargs
    _deprecate_arg_args(kwargs)
    builders = {
        ArgKind.SCALAR: _make_arg_scalar,
        ArgKind.NDARRAY: _make_arg_ndarray,
        ArgKind.MATRIX: _make_arg_matrix,
        ArgKind.TEXTURE: _make_arg_texture,
        ArgKind.RWTEXTURE: _make_arg_rwtexture,
    }
    return builders[kwargs['tag']](kwargs)
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    """Register Python-binding metadata for ns3::ConstantRandomVariable.

    Generated pybindgen registration code — regenerate upstream rather
    than editing by hand.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetConstant', 'double', [], is_const=True)
    # Overloads taking an explicit constant argument...
    cls.add_method('GetValue', 'double', [param('double', 'constant')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')])
    # ...and the virtual no-argument forms inherited from the stream base.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def fnmatchcase(name, pat, case_sensitive=True):
    """Return True when ``name`` matches the shell pattern ``pat``."""
    matcher = _compile_pattern(pat, case_sensitive)
    return matcher(name) is not None
def _get_all_lines(graph_parse, is_variable):
    """Collect every line variable in the parse, keyed by endpoint pair.

    Fix: replaced Python-2-only ``dict.iteritems()`` with ``items()`` —
    the surrounding file already uses Python-3-only syntax (f-strings),
    so ``iteritems`` would raise AttributeError at runtime.
    """
    assert isinstance(graph_parse, GraphParse)
    items = []
    for (a_key, b_key) in graph_parse.line_graph.edges():
        items.extend(_get_lines(graph_parse, is_variable, a_key, b_key).items())
    return dict(items)
def tactics(ctx=None):
    """Return the names of all tactics registered with the Z3 context."""
    ctx = _get_ctx(ctx)
    count = Z3_get_num_tactics(ctx.ref())
    return [Z3_get_tactic_name(ctx.ref(), i) for i in range(count)]
def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps, data_dir, task, holdout_fraction, single_test_envs, hparams):
    """Enumerate one training-args dict per combination of trial seed,
    dataset, algorithm, test-environment set, and hparams seed."""
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                n_envs = datasets.num_environments(dataset)
                if single_test_envs:
                    # One run per single held-out environment.
                    all_test_envs = [[i] for i in range(n_envs)]
                else:
                    all_test_envs = all_test_env_combinations(n_envs)
                for test_envs in all_test_envs:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            # Deterministic per-combination seed.
                            'seed': misc.seed_hash(dataset, algorithm, test_envs, hparams_seed, trial_seed),
                        }
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
class TransformerAlgoConfig(Config):
    """Configuration for the transformer algorithm.

    NOTE(review): these are annotated class-level dict defaults —
    presumably the ``Config`` base treats them as field defaults
    (pydantic-style) and copies per instance; confirm, since otherwise
    instances would share and mutate the same dict objects.
    """

    # Tokenizer selection; 'auto' resolves a tokenizer for the named model.
    tokenizer_config: dict = {'name': 'auto', 'model': 'bert-base-cased'}
    # Extra trainer keyword arguments; empty by default.
    trainer_config: dict = {}
def configurator(forward_dict, mode='posterior', scale_data=12):
    """Prepare network inputs from a forward-model dict.

    ``mode`` selects posterior inputs, likelihood inputs, or both ('joint').
    Raises NotImplementedError for any other mode.
    """
    if mode == 'posterior':
        return _config_posterior(forward_dict, scale_data)
    if mode == 'likelihood':
        return _config_likelihood(forward_dict, scale_data)
    if mode == 'joint':
        return {
            'posterior_inputs': _config_posterior(forward_dict, scale_data),
            'likelihood_inputs': _config_likelihood(forward_dict, scale_data),
        }
    raise NotImplementedError('For now, only a choice between ["posterior", "likelihood", "joint"] is available!')
def conjugacy_test(jlist, verbose=False):
    """Given the j-invariants of an isogeny class over a number field K, look
    for a complete Galois conjugacy class of 2-power size; return the list of
    minimal polynomials found (or a single rational linear factor), else []."""
    from sage.sets.set import Set
    # Easy case: some isogenous curve already has rational j-invariant.
    jQ = next((j for j in jlist if (j in QQ)), None)
    if jQ:
        if verbose:
            print('Yes: an isogenous curve has rational j-invariant {}'.format(jQ))
        x = polygen(QQ)
        return [(x - jQ)]
    K = jlist[0].parent()
    # Odd-degree fields are ruled out immediately.
    if (K.degree() % 2):
        if verbose:
            print('Odd-degree case: no rational j-invariant in the class {}'.format(jlist))
        return []
    # K must contain a quadratic subfield for a 2-power class to exist.
    if (K(1).descend_mod_power(QQ, 2) == [1]):
        if verbose:
            print('No-quadratic-subfield case: no rational j-invariant in the class {}'.format(jlist))
        return []
    # Keep only j-invariants whose minimal-polynomial degree is a power of 2.
    pols = (j.minpoly() for j in jlist)
    pols = [f for f in pols if (f.degree().prime_to_m_part(2) == 1)]
    if (not pols):
        return []
    mindeg = min((f.degree() for f in pols))
    minpols = [f for f in pols if (f.degree() == mindeg)]
    # NOTE(review): a poly counts as "complete" when its degree equals its
    # multiplicity in `minpols` — confirm this vs counting in `pols`.
    centrepols = list(Set([f for f in pols if (f.degree() == minpols.count(f))]))
    if centrepols:
        if verbose:
            print('Yes: the isogeny class contains all j-invariants with min poly {}'.format(centrepols))
        return centrepols
    if verbose:
        print('No complete conjugacy class of 2-power size found in {}'.format(jlist))
    return []
def to_value(original_string, corenlp_value=None):
    """Parse a string into a Value: Number, Date, or String (in that order).

    ``corenlp_value`` (if given and non-empty) is the normalized form used
    for parsing; the returned Value keeps ``original_string`` for display.
    """
    if isinstance(original_string, Value):
        # Already parsed — pass through unchanged.
        return original_string
    normalized = corenlp_value or original_string
    amount = NumberValue.parse(normalized)
    if amount is not None:
        return NumberValue(amount, original_string)
    ymd = DateValue.parse(normalized)
    if ymd is not None:
        # A date with only a year collapses to a plain number.
        if ymd[1] == ymd[2] == -1:
            return NumberValue(ymd[0], original_string)
        return DateValue(ymd[0], ymd[1], ymd[2], original_string)
    return StringValue(original_string)
class FakeOwner:
    """Minimal stand-in for an owner object; holds a settable generator."""

    def __init__(self):
        # No generator attached until a test assigns one.
        self.generator = None

    def get_generator(self):
        """Return the currently attached generator (None until set)."""
        return self.generator
def find_files(folder, extension):
    """Return sorted Path objects for entries in `folder` whose names end
    with `extension` (e.g. '.wav').

    Uses pathlib.Path.iterdir instead of os.listdir + os.path.join; ordering
    (Path comparison) and filtering (str.endswith on the name) are unchanged.
    """
    root = Path(folder)
    return sorted(entry for entry in root.iterdir() if entry.name.endswith(extension))
def register_Ns3SystemThread_methods(root_module, cls):
    """Register Python-binding constructors/methods for ns3::SystemThread
    (generated pybindgen code — keep signatures byte-exact)."""
    # Copy constructor.
    cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
    # Construct from a zero-argument callback that becomes the thread body.
    cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('Equals', 'bool', [param('pthread_t', 'id')], is_static=True)
    cls.add_method('Join', 'void', [])
    cls.add_method('Self', 'ns3::SystemThread::ThreadId', [], is_static=True)
    cls.add_method('Start', 'void', [])
    return
def hdf_dump_from_dataset(dataset, hdf_dataset, parser_args):
    """Dump `dataset` into `hdf_dataset`, using the epoch/sequence range
    taken from the parsed CLI arguments; always shows a progress bar."""
    dump_options = dict(
        dataset=dataset,
        epoch=parser_args.epoch,
        start_seq=parser_args.start_seq,
        end_seq=parser_args.end_seq,
        use_progress_bar=True,
    )
    hdf_dataset.dump_from_dataset(**dump_options)
def extract_batch(args, model, batch, options, clusterings):
    """Move a batch to the configured device, run the model on its 'data'
    field, and delegate feature extraction to `_extract_batch`."""
    device_batch = to_device(batch, args.computation.device)
    features = model(device_batch['data'])
    return _extract_batch(args, device_batch, features, clusterings)
class DDPCommHookType(Enum):
    """Registry of DDP gradient-communication hooks, each pre-bound through
    `_ddp_comm_hook_wrapper` for uniform registration."""

    # Plain allreduce of gradients.
    ALLREDUCE = partial(_ddp_comm_hook_wrapper, comm_hook=default.allreduce_hook)
    # Compress gradients to fp16 before allreduce.
    FP16_COMPRESS = partial(_ddp_comm_hook_wrapper, comm_hook=default.fp16_compress_hook)
    # Quantized communication, per-tensor and per-channel variants.
    QUANTIZE_PER_TENSOR = partial(_ddp_comm_hook_wrapper, comm_hook=quantization.quantization_pertensor_hook)
    QUANTIZE_PER_CHANNEL = partial(_ddp_comm_hook_wrapper, comm_hook=quantization.quantization_perchannel_hook)
def process_files(args):
    """Shuffle all files under `args.data_bucket_path`, split them into
    `args.n_workers` partitions, and process the partitions in parallel."""
    print('listing files', flush=True)  # was an f-string with no placeholder
    files = list(glob(f'{args.data_bucket_path}/*'))
    random.seed(args.seed)
    random.shuffle(files)
    print(f'splitting {len(files)} files into {args.n_workers} partitions', flush=True)
    files_chunks = split(files, args.n_workers)
    print(f'processing {len(files_chunks)} partitions in parallel', flush=True)
    bind_process_files_chunk = partial(process_files_chunk, args)
    t0 = time()
    # BUG FIX: the pool was never closed/joined; the context manager
    # terminates worker processes deterministically.
    with Pool(processes=args.n_workers) as pool:
        pool.starmap(bind_process_files_chunk, list(enumerate(files_chunks)))
    t1 = time()
    print('time', (t1 - t0), len(files_chunks))
@misc.profiled_function
def modulated_conv2d(x, weight, styles, noise=None, up=1, down=1, padding=0, resample_filter=None, demodulate=True, flip_weight=True, fused_modconv=True):
    """StyleGAN2 modulated conv2d: scale conv weights per-sample by `styles`,
    optionally demodulate, then convolve (fused grouped-conv or two-scaling path).

    NOTE(review): the source was garbled — the leading `_function` token is
    restored as the `@misc.profiled_function` decorator, and the fused path
    (unreachable after an unconditional `return x` in the flattened source) is
    re-nested under `if not fused_modconv:` per the surrounding structure.
    """
    batch_size = x.shape[0]
    (out_channels, in_channels, kh, kw) = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw])
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    misc.assert_shape(styles, [batch_size, in_channels])
    # Pre-normalize to avoid fp16 overflow.
    if (x.dtype == torch.float16) and demodulate:
        weight = weight * ((1 / np.sqrt((in_channels * kh) * kw)) / weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True))
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True)
    # Per-sample modulated weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)
    if demodulate:
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-08).rsqrt()
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)
    # Non-fused path: scale activations before/after a shared-weight conv.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and (noise is not None):
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Fused path: one grouped convolution with per-sample weights.
    with misc.suppress_tracer_warnings():
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
def slide_split(train, test):
    """Assemble train/test DLBCL splits from the five predefined folds.

    `train` is an iterable of fold numbers (1-5); `test` is a single fold
    number. Returns (train_DLBCL, train_nonDLBCL, test_DLBCL, test_nonDLBCL).
    """
    fold_map = {
        'data1': [DLBCL_1, nonDLBCL_1],
        'data2': [DLBCL_2, nonDLBCL_2],
        'data3': [DLBCL_3, nonDLBCL_3],
        'data4': [DLBCL_4, nonDLBCL_4],
        'data5': [DLBCL_5, nonDLBCL_5],
    }
    train_DLBCL = []
    train_nonDLBCL = []
    for num in list(train):
        positives, negatives = fold_map[f'data{num}']
        train_DLBCL = train_DLBCL + positives
        train_nonDLBCL = train_nonDLBCL + negatives
    test_DLBCL, test_nonDLBCL = fold_map[f'data{test}']
    return (train_DLBCL, train_nonDLBCL, test_DLBCL, test_nonDLBCL)
def dgp_model(test_data):
    """Build an LVGPGP model sized to the (X, Y) pair in `test_data`,
    with a one-dimensional latent variable."""
    X, Y = test_data
    num_data, x_dim = X.shape
    y_dim = Y.shape[1]
    latent_dim = 1
    return build_LVGPGP_model(x_dim, latent_dim, y_dim, num_data)
class MyCorpus:
    """Iterable corpus over a text file: yields one token list per sentence.

    Tokenization (sentence and word level) is delegated to NLTKTokenizer.
    """

    def __init__(self, data_path):
        self.data_path = data_path
        self.bnltk = NLTKTokenizer()

    def __iter__(self):
        # BUG FIX: the file handle was opened without ever being closed;
        # a context manager closes it deterministically even on early exit.
        with open(self.data_path) as handle:
            for line in handle:
                for sentence in self.bnltk.sentence_tokenize(line):
                    yield self.bnltk.word_tokenize(sentence)
def test_getSubscription23():
    """Broker end-to-end test: update context, subscribe, update again to
    trigger a notification, then ask an external validator whether it arrived.

    NOTE(review): the validator endpoint URL was truncated in the source
    ("url = '") — restore the real address before running.
    """
    # Initial context update.
    url = (brokerIp + '/ngsi10/updateContext')
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata39), headers=headers)
    resp1 = json.loads(r.content.decode('utf8').replace("'", '"'))
    # Subscribe and remember the subscription id.
    url = (brokerIp + '/ngsi10/subscribeContext')
    headers = {'Content-Type': 'application/json', 'Destination': 'orion-broker', 'User-Agent': 'lightweight-iot-broker', 'Require-Reliability': 'true'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata40), headers=headers)
    resp = json.loads(r.content.decode('utf8').replace("'", '"'))
    sid = resp['subscribeResponse']['subscriptionId']
    # Second update triggers the notification.
    url = (brokerIp + '/ngsi10/updateContext')
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata41), headers=headers)
    resp1 = json.loads(r.content.decode('utf8').replace("'", '"'))
    url = ''  # TODO(review): original validation-service URL lost in the source.
    r = requests.post(url, json={'subscriptionId': sid})
    # BUG FIX: r.content is bytes, so comparing it to a str was always False
    # on Python 3; compare the decoded text instead.
    if (r.text == 'Not validated'):
        # NOTE(review): the printed message looks inverted vs the condition — confirm.
        print('\nValidated')
    else:
        print('\nNot Validated')
    assert (r.status_code == 200)
def _mean(tensor: FloatTensor, dim: Optional[int]=None, keepdim: bool=False) -> FloatTensor: if (dim is None): return torch.mean(tensor) else: if isinstance(dim, int): dim = [dim] dim = sorted(dim) for d in dim: tensor = tensor.mean(dim=d, keepdim=True) if (not keepdim): for (i, d) in enumerate(dim): tensor.squeeze_((d - i)) return tensor
def draw_bounding_boxes(image, boxes, **kwargs):
    """Coerce `image` (PIL image or tensor) and `boxes` to tensors, then
    delegate drawing to `_draw_bounding_boxes`; kwargs are forwarded."""
    if isinstance(image, Image.Image):
        # Convert PIL input to a tensor first.
        image = PILToTensor()(image)
    assert isinstance(image, torch.Tensor), ''
    if (not isinstance(boxes, torch.Tensor)):
        boxes = torch.as_tensor(boxes)
    assert isinstance(boxes, torch.Tensor)
    return _draw_bounding_boxes(image, boxes, **kwargs)
def is_partition_valid(prob, nodes_in_part):
    """Return True iff every demand pair with nonzero traffic inside the
    partition's induced subgraph remains connected within it."""
    sub = prob.G.subgraph(nodes_in_part)
    tm = prob.traffic_matrix.tm
    for src, target in permutations(sub.nodes, 2):
        if tm[(src, target)] == 0.0:
            # No demand between this pair — connectivity irrelevant.
            continue
        if not nx.has_path(sub, src, target):
            print(src, target)  # debug aid: first disconnected demand pair
            return False
    return True
def test_unet_skip_connection_block():
    """Smoke-test UnetSkipConnectionBlock in innermost, intermediate and
    outermost configurations (CPU and, when available, CUDA), and verify the
    invalid-config assertions."""
    _cfg = dict(outer_channels=1, inner_channels=1, in_channels=None, submodule=None, is_outermost=False, is_innermost=False, norm_cfg=dict(type='BN'), use_dropout=True)
    feature_shape = (1, 1, 8, 8)
    feature = _demo_inputs(feature_shape)
    input_shape = (1, 3, 8, 8)
    img = _demo_inputs(input_shape)
    # Innermost block: skip-concat doubles the channel count.
    cfg = copy.deepcopy(_cfg)
    cfg['is_innermost'] = True
    block = UnetSkipConnectionBlock(**cfg)
    output = block(feature)
    assert (output.shape == (1, 2, 8, 8))
    if torch.cuda.is_available():
        block.cuda()
        output = block(feature.cuda())
        assert (output.shape == (1, 2, 8, 8))
        block.cpu()
    # Intermediate block wrapping the innermost one as submodule.
    cfg = copy.deepcopy(_cfg)
    cfg['submodule'] = block
    block = UnetSkipConnectionBlock(**cfg)
    output = block(feature)
    assert (output.shape == (1, 2, 8, 8))
    if torch.cuda.is_available():
        block.cuda()
        output = block(feature.cuda())
        assert (output.shape == (1, 2, 8, 8))
        block.cpu()
    # Outermost block: takes the 3-channel image and preserves its shape.
    cfg = copy.deepcopy(_cfg)
    cfg['submodule'] = block
    cfg['is_outermost'] = True
    cfg['in_channels'] = 3
    cfg['outer_channels'] = 3
    block = UnetSkipConnectionBlock(**cfg)
    output = block(img)
    assert (output.shape == (1, 3, 8, 8))
    if torch.cuda.is_available():
        block.cuda()
        output = block(img.cuda())
        assert (output.shape == (1, 3, 8, 8))
        block.cpu()
    # A block cannot be both innermost and outermost.
    cfg = copy.deepcopy(_cfg)
    cfg['is_innermost'] = True
    cfg['is_outermost'] = True
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**cfg)
    # norm_cfg must be a dict containing a 'type' key.
    bad_cfg = copy.deepcopy(_cfg)
    bad_cfg['is_innermost'] = True
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='BN')
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**bad_cfg)
@_HEADS_REGISTRY.register()
class AttrHead(EmbeddingHead):
    """Attribute-classification head: linear attribute logits followed by a
    BatchNorm neck; returns sigmoid probabilities at inference time.

    BUG FIX: the registry decorator lost its '@' in the source (a bare
    `_HEADS_REGISTRY.register()` call would never register the class).
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        # BN over the per-attribute logits.
        self.bnneck = nn.BatchNorm1d(num_classes)
        self.bnneck.apply(weights_init_kaiming)

    def forward(self, features, targets=None):
        pool_feat = self.pool_layer(features)
        neck_feat = self.bottleneck(pool_feat)
        neck_feat = neck_feat.view(neck_feat.size(0), -1)
        # NOTE(review): self.weight presumably comes from EmbeddingHead — confirm.
        logits = F.linear(neck_feat, self.weight)
        logits = self.bnneck(logits)
        if not self.training:
            # Inference: independent per-attribute probabilities.
            cls_outputs = torch.sigmoid(logits)  # fixed local typo 'cls_outptus'
            return cls_outputs
        return {'cls_outputs': logits}
def sortkey(K):
    """Sort key for number fields: degree, |discriminant|, sign of the
    discriminant (negative first), then the defining polynomial."""
    disc = K.discriminant()
    return (K.degree(), abs(disc), disc > 0, K.polynomial())
def _get_logger_dict_helper(mod, target_dict, prefix=''):
    """Recursively collect the stats of Logger children into `target_dict`,
    keyed by the dotted module path plus '.stats'."""

    def get_prefix(prefix):
        # Append a dot separator unless we are at the root.
        return (prefix if (prefix == '') else (prefix + '.'))
    for (name, child) in mod.named_children():
        if isinstance(child, Logger):
            # Only the first Logger child of each module is recorded.
            target_dict[(get_prefix(prefix) + 'stats')] = child.stats
            break
    # Recurse into every child (including Loggers) with the extended prefix.
    for (name, child) in mod.named_children():
        module_prefix = ((get_prefix(prefix) + name) if prefix else name)
        _get_logger_dict_helper(child, target_dict, module_prefix)
def simplify_chain_generic(expr):
    """Run the generic simplification pipeline on `expr`.

    Atomic expressions (zero operands) are returned untouched; otherwise the
    steps run in order: factorial, rectform, trig, rational, expand_sum.
    """
    if expr.number_of_operands() == 0:
        return expr
    pipeline = ('simplify_factorial', 'simplify_rectform', 'simplify_trig', 'simplify_rational', 'expand_sum')
    for step in pipeline:
        expr = getattr(expr, step)()
    return expr
class _History(keras.callbacks.Callback):
    """Keras callback that records model predictions on fixed data (and
    optionally the weights of the second layer) over training."""

    def __init__(self, data, model, save_weights=False, *args, **kwargs):
        self.trace_data = data
        self.trace_model = model
        self.save_weights = save_weights
        if save_weights:
            self.weights = []
        self.trace = []
        super().__init__(*args, **kwargs)

    def _record_trace(self):
        """Append the current predictions (transposed; multi-output models are
        vstacked) and, if enabled, the layer-1 weights."""
        predictions = self.trace_model.predict(self.trace_data)
        if len(self.trace_model.outputs) > 1:
            self.trace.append(np.vstack([out.T for out in predictions]))
        else:
            self.trace.append(predictions.T)
        if self.save_weights:
            self.weights.append(self.trace_model.layers[1].get_weights()[0])
class NetworkWrapper(nn.Module):
    """Wrapper around the identity Embedder: prepares inputs (segmentation
    masking, stickman concatenation), runs the embedder, and stores the
    resulting embeddings into the shared data dict.

    NOTE(review): the source was garbled — `get_args` had no `self` (restored
    as @staticmethod), a stray `_grad()` before `visualize_outputs` is
    restored as `@torch.no_grad()`, and the truncated `__repr__` tail
    (`output = ` / `self.net.__repr__()`) is rejoined.
    """

    @staticmethod
    def get_args(parser):
        """Register embedder-specific command-line options."""
        parser.add('--emb_num_channels', default=64, type=int, help='minimum number of channels')
        parser.add('--emb_max_channels', default=512, type=int, help='maximum number of channels')
        parser.add('--emb_no_stickman', action='store_true', help='do not input stickman into the embedder')
        parser.add('--emb_output_tensor_size', default=8, type=int, help='spatial size of the last tensor')
        parser.add('--emb_norm_layer_type', default='none', type=str, help='norm layer inside the embedder')
        parser.add('--emb_activation_type', default='leakyrelu', type=str, help='activation layer inside the embedder')
        parser.add('--emb_downsampling_type', default='avgpool', type=str, help='downsampling layer inside the embedder')
        parser.add('--emb_apply_masks', default='True', type=rn_utils.str2bool, choices=[True, False], help='apply segmentation masks to source ground-truth images')

    def __init__(self, args):
        super(NetworkWrapper, self).__init__()
        self.args = args
        self.net = Embedder(args)

    def forward(self, data_dict: dict, networks_to_train: list, all_networks: dict) -> dict:
        # Disable gradients when the embedder itself is not being trained.
        if ('identity_embedder' not in networks_to_train):
            prev = torch.is_grad_enabled()
            torch.set_grad_enabled(False)
        inputs = data_dict['source_imgs']
        (b, n) = inputs.shape[:2]
        if self.args.emb_apply_masks:
            # Replace background with -1 using the segmentation masks.
            inputs = ((inputs * data_dict['source_segs']) + ((- 1) * (1 - data_dict['source_segs'])))
        if (not self.args.emb_no_stickman):
            inputs = torch.cat([inputs, data_dict['source_stickmen']], 2)
        source_embeds = self.net(inputs)
        if ('identity_embedder' not in networks_to_train):
            torch.set_grad_enabled(prev)
        data_dict['source_idt_embeds'] = source_embeds
        return data_dict

    @torch.no_grad()
    def visualize_outputs(self, data_dict):
        """Collect tensors to visualize: source images and, if present, stickmen."""
        visuals = [data_dict['source_imgs'].detach()]
        if ('source_stickmen' in data_dict.keys()):
            visuals += [data_dict['source_stickmen']]
        return visuals

    def __repr__(self):
        num_params = 0
        for p in self.net.parameters():
            num_params += p.numel()
        output = self.net.__repr__()
        output += '\n'
        output += ('Number of parameters: %d' % num_params)
        return output
def img2video(path, size, seq, frame_start, frame_end, marks, fps=10):
    """Concatenate per-mark PNG frames side by side and write an MJPG video
    to `<path>/<seq>.avi` at the given fps; `size` is the (w, h) video size."""
    file_path = join(path, '{}.avi'.format(seq))
    # NOTE(review): this creates the PARENT of `path`, not `path` itself —
    # confirm the video directory is guaranteed to exist by the caller.
    os.makedirs(dirname(path), exist_ok=True)
    path = join(path, '{}'.format(seq))
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    video = cv2.VideoWriter(file_path, fourcc, fps, size)
    for i in range(frame_start, frame_end):
        imgs = []
        for j in range(len(marks)):
            # Frames are named '<8-digit index>_<lowercased mark>.png'.
            img_path = join(path, '{:08d}_{}.png'.format(i, marks[j].lower()))
            img = cv2.imread(img_path)
            # Label each tile with its mark name in red.
            img = cv2.putText(img, marks[j], (60, 60), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)
            imgs.append(img)
        # One video frame = horizontal concatenation of all mark tiles.
        frame = np.concatenate(imgs, axis=1)
        video.write(frame)
    video.release()
def is_pythran_supported_node_or_none(node):
    # A node is acceptable when it is the None literal or its type is
    # Pythran-supported (returns the raw truthy value of the `or`).
    return (node.is_none or is_pythran_supported_type(node.type))
class TFAlbertModel(metaclass=DummyObject):
    """Placeholder class: raises a helpful error via `requires_backends`
    when instantiated without the TensorFlow backend installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_parameter_list():
    """rf.ParameterList: parameters iterate in order and can be applied to
    the input inside a module's __call__."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            # Three feature-dim parameters held in a ParameterList.
            self.param_list = rf.ParameterList([rf.Parameter([in_dim]) for _ in range(3)])

        def __call__(self, data: Tensor) -> Tensor:
            # Add every parameter to the input, in list order.
            for param in self.param_list:
                data += param
            return data

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register Python-binding constructors/methods for ns3::Mac48AddressValue
    (generated pybindgen code — keep signatures byte-exact)."""
    # Default, value, and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def reinit():
    """Re-install the wrapped stdout/stderr stream objects onto sys,
    skipping any stream that was never wrapped."""
    for attr, wrapped in (('stdout', wrapped_stdout), ('stderr', wrapped_stderr)):
        if wrapped is not None:
            setattr(sys, attr, wrapped)
class CallHLS():
    """Helper that writes a small shell script invoking Vivado/Vitis HLS on a
    tcl script and runs it synchronously."""

    def __init__(self, backend='vivado_hls'):
        assert (backend in ['vivado_hls', 'vitis_hls']), 'Unrecognized backend for CallHLS'
        self.backend = backend
        self.tcl_script = ''
        self.ipgen_path = ''
        self.code_gen_dir = ''
        self.ipgen_script = ''

    def append_tcl(self, tcl_script):
        """Remember the tcl script to pass to the HLS tool."""
        self.tcl_script = tcl_script

    def set_ipgen_path(self, path):
        self.ipgen_path = path

    def build(self, code_gen_dir):
        """Write ipgen.sh into `code_gen_dir` and execute it, blocking until done."""
        assert (which(self.backend) is not None), ('%s not found in PATH' % self.backend)
        self.code_gen_dir = code_gen_dir
        self.ipgen_script = (str(self.code_gen_dir) + '/ipgen.sh')
        working_dir = os.environ['PWD']
        with open(self.ipgen_script, 'w') as script:
            script.write('#!/bin/bash \n')
            script.write('cd {}\n'.format(code_gen_dir))
            script.write(('%s %s\n' % (self.backend, self.tcl_script)))
            script.write('cd {}\n'.format(working_dir))
        launcher = subprocess.Popen(['bash', self.ipgen_script], stdout=subprocess.PIPE)
        launcher.communicate()
class MixDataset(torch.utils.data.Dataset):
    """Dataset that mixes `n_mix` audio (and optionally video-frame) samples.

    Each item combines the requested sample with n_mix-1 randomly chosen
    partners; returns either mixed waveforms or mixed STFT magnitudes/phases,
    depending on `return_waveform`. Any loading failure falls back to an
    all-zero dummy sample.
    """

    def __init__(self, filename, split, n_mix=2, audio_len=80000, audio_rate=16000, n_fft=1024, hop_len=256, win_len=1024, n_frames=3, stride_frames=1, img_size=224, fps=1, preprocess_func=None, max_sample=None, return_waveform=True, repeat=None, frame_margin=None, audio_only=False):
        assert (split in ('train', 'valid')), "`split` must be one of 'train' or 'valid'."
        super().__init__()
        self.split = split
        self.n_mix = n_mix
        self.audio_len = audio_len
        self.audio_rate = audio_rate
        self.n_fft = n_fft
        self.hop_len = hop_len
        self.win_len = win_len
        self.n_frames = n_frames
        self.stride_frames = stride_frames
        self.img_size = img_size
        self.fps = fps
        self.preprocess_func = preprocess_func
        self.return_waveform = return_waveform
        self.frame_margin = frame_margin
        self.audio_only = audio_only
        # Clip duration in seconds and the resulting spectrogram size (HS x WS).
        self.audio_sec = ((1.0 * self.audio_len) / self.audio_rate)
        self.HS = ((self.n_fft // 2) + 1)
        self.WS = ((self.audio_len + 1) // self.hop_len)
        # Each CSV row: presumably (audio_path, frame_dir, total_frames, ...) —
        # see the unpacking in __getitem__.
        self.samples = []
        for row in csv.reader(open(filename, 'r'), delimiter=','):
            if (len(row) < 2):
                continue
            self.samples.append(row)
        assert (len(self.samples) > 0), 'No samples found!'
        if (repeat is not None):
            self.samples *= repeat
        if (max_sample is not None):
            self.samples = self.samples[:max_sample]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        N = self.n_mix
        frames = ([None] * N)
        audios = ([None] * N)
        infos = [[] for _ in range(N)]
        filenames_frame = [[] for _ in range(N)]
        filenames_audio = ([''] * N)
        center_frames = ([0] * N)
        # Slot 0 is the requested sample; the rest are random mixing partners.
        infos[0] = self.samples[idx]
        if (self.split != 'train'):
            # Deterministic mixing partners during validation.
            random.seed(idx)
        candidates = list(range(idx))
        candidates.extend(range((idx + 1), len(self.samples)))
        sampled = random.sample(candidates, (N - 1))
        for n in range(1, N):
            infos[n] = self.samples[sampled[(n - 1)]]
        # Keep the temporal crop this many frames away from the clip borders.
        if (self.frame_margin is None):
            idx_margin = int((self.fps * 3))
        else:
            idx_margin = self.frame_margin
        for (n, (filename_audio, filename_frame, total_frames, _)) in enumerate(infos):
            if (self.split == 'train'):
                # Random center frame for training; clip midpoint otherwise.
                center_frameN = random.randint((idx_margin + 1), (int(total_frames) - idx_margin))
            else:
                center_frameN = (int(total_frames) // 2)
            center_frames[n] = center_frameN
            if (not self.audio_only):
                # n_frames image paths strided around the center frame.
                for i in range(self.n_frames):
                    idx_offset = ((i - (self.n_frames // 2)) * self.stride_frames)
                    filenames_frame[n].append(f'{filename_frame}/{(center_frameN + idx_offset):06d}.jpg')
            filenames_audio[n] = filename_audio
        try:
            for n in range(N):
                if (not self.audio_only):
                    frames[n] = self._load_frames(filenames_frame[n])
                center_time = ((center_frames[n] - 0.5) / self.fps)
                audios[n] = self._load_audio(filenames_audio[n], center_time)
            # Scale each source down so the sum stays in range.
            for n in range(N):
                audios[n] /= N
            if self.return_waveform:
                audio_mix = sum(audios)
            else:
                # Mix in the STFT domain, keeping per-source magnitudes and
                # the mixture magnitude/phase.
                spec_mix = 0
                mags = []
                for n in range(N):
                    spec = librosa.stft(audios[n], n_fft=self.n_fft, hop_length=self.hop_len, win_length=self.win_len)
                    spec_mix += spec
                    mags.append(torch.tensor(np.abs(spec)).unsqueeze(0))
                mag_mix = torch.tensor(np.abs(spec_mix)).unsqueeze(0)
                phase_mix = torch.tensor(np.angle(spec_mix)).unsqueeze(0)
            for n in range(N):
                audios[n] = torch.tensor(audios[n])
            if self.return_waveform:
                audio_mix = torch.tensor(audio_mix)
        except Exception as e:
            # Fallback: zero tensors with the expected shapes.
            logging.debug(f'Failed loading frame/audio: {e}')
            frames = [torch.zeros(self.n_frames, 3, self.img_size, self.img_size) for _ in range(N)]
            audios = [torch.zeros(self.audio_len) for _ in range(N)]
            mags = [torch.zeros(1, self.HS, self.WS) for _ in range(N)]
            if self.return_waveform:
                audio_mix = torch.zeros(self.audio_len)
            else:
                mag_mix = torch.zeros(1, self.HS, self.WS)
                phase_mix = torch.zeros(1, self.HS, self.WS)
        ret_dict = {'infos': infos}
        if (not self.audio_only):
            ret_dict['frames'] = frames
        if self.return_waveform:
            ret_dict['audio_mix'] = audio_mix
            ret_dict['audios'] = audios
        else:
            ret_dict['mag_mix'] = mag_mix
            ret_dict['mags'] = mags
        # NOTE(review): indentation reconstructed from flattened source — at
        # evaluation time the individual audios (and, in spectrogram mode, the
        # mixture phase) are also returned; confirm against the original file.
        if (self.split != 'train'):
            ret_dict['audios'] = audios
            if (not self.return_waveform):
                ret_dict['phase_mix'] = phase_mix
        return ret_dict

    def _load_frames(self, filenames):
        """Load the given images as RGB and stack them into one tensor,
        applying `preprocess_func` to each frame when set.

        NOTE(review): with preprocess_func=None this stacks raw PIL images,
        which torch.stack cannot do — presumably a transform is always set.
        """
        frames = [Image.open(filename).convert('RGB') for filename in filenames]
        if (self.preprocess_func is None):
            return torch.stack(frames)
        return torch.stack([self.preprocess_func(frame) for frame in frames])

    def _load_audio(self, filename, center_time):
        """Load a fixed-length mono window from a wav file, centered at
        `center_time` seconds; pads with zeros / tiles short clips."""
        audio = np.zeros(self.audio_len, dtype=np.float32)
        (rate, audio_raw) = scipy.io.wavfile.read(filename)
        # Normalize int16 PCM into [-1, 1).
        audio_raw = torch.tensor((audio_raw / (- np.iinfo(np.int16).min)))
        assert (rate == self.audio_rate), f'Found an unexpected sampling rate of {rate} for (unknown) (expected {self.audio_rate})'
        # Tile clips shorter than the requested duration.
        if (audio_raw.shape[0] < (rate * self.audio_sec)):
            repeats = (int(((rate * self.audio_sec) / audio_raw.shape[0])) + 1)
            audio_raw = np.tile(audio_raw, repeats)
        len_raw = audio_raw.shape[0]
        center = int((center_time * self.audio_rate))
        start = max(0, (center - (self.audio_len // 2)))
        end = min(len_raw, (center + (self.audio_len // 2)))
        # Copy the window into the centered region of the zero buffer.
        audio[((self.audio_len // 2) - (center - start)):((self.audio_len // 2) + (end - center))] = audio_raw[start:end]
        if (self.split == 'train'):
            # Random gain augmentation in [0.5, 1.5).
            audio *= (random.random() + 0.5)
        audio = np.clip(audio, (- 1), 1)
        return audio
class ModeFilter(Filter):
    """Filter that replaces each pixel with the mode of its neighborhood."""
    name = 'Mode'

    def __init__(self, size=3):
        # Edge length of the square window the mode is taken over.
        self.size = size

    def filter(self, image):
        """Apply the mode filter and return the filtered image."""
        return image.modefilter(self.size)
def align(input_file, output_file, nbest):
    """Build a word-alignment table from tab-separated parallel lines.

    Each input line holds source and target token sequences of equal length;
    mismatched lines are reported and skipped. For every source token, the
    `nbest` most frequent aligned target tokens are written to `output_file`
    as 'source<TAB>target<TAB>freq' lines. Returns (skipped, total) counts.
    """
    skipped = 0
    total = 0
    source_to_targets = defaultdict(list)
    for line in input_file:
        fields = line.rstrip().split('\t')
        source_tokens = fields[0].split()
        target_tokens = fields[1].split()
        total += 1
        if len(source_tokens) != len(target_tokens):
            print('Skipping line, counts? ', len(source_tokens), len(target_tokens), '[', source_tokens, ' vs ', target_tokens, ']', line)
            skipped += 1
            continue
        # IDIOM FIX: iterate the pairs directly instead of indexing with an
        # unused enumerate value.
        for src_tok, tgt_tok in zip(source_tokens, target_tokens):
            source_to_targets[src_tok].append(tgt_tok)
    for key, vals in source_to_targets.items():
        for target, freq in Counter(vals).most_common(nbest):
            output_file.write(f'{key}\t{target}\t{freq}')
            output_file.write('\n')
    return (skipped, total)
def print_node_family_to_file(G, f, nodetype):
    """Write the requested family of nodes of graph G to the open file f.

    nodetype selects: 'root' (out-degree>0, in-degree==0), 'leaf'
    (out-degree==0, in-degree>=1), 'isolated' (degree==0), or any other value,
    which matches nodes whose 'id' attribute equals nodetype.
    """
    degree_tests = {
        'root': lambda n: G.out_degree(n) > 0 and G.in_degree(n) == 0,
        'leaf': lambda n: G.out_degree(n) == 0 and G.in_degree(n) >= 1,
        'isolated': lambda n: G.degree(n) == 0,
    }
    if nodetype in degree_tests:
        keep = degree_tests[nodetype]
        node_family = sorted((n for n in G.nodes() if keep(n)), key=sort_key)
    else:
        # Note: here sort_key receives the (node, data) tuple, as in the original.
        node_family = [n for n, attrs in sorted(G.nodes(data=True), key=sort_key) if attrs['id'] == nodetype]
    f.write(f'#nodes: {len(node_family)}\n')
    f.write(('-' * 80) + '\n')
    for n in node_family:
        f.write(f'{n:<60}\n')
def get_dataset(dataset, task):
    """Return the dataset class for `task` ('mot' -> JointDataset), else None.

    The `dataset` argument is currently unused but kept for interface
    compatibility with callers.
    """
    return JointDataset if task == 'mot' else None
def _hard_rmtree(path):
    """Remove `path` aggressively.

    First a best-effort rmtree; then any remnant is moved into a temporary
    directory that is deleted when the context exits — presumably to cope
    with entries that resist direct deletion (e.g. transiently locked files).
    """
    shutil.rmtree(path, ignore_errors=True)
    try:
        with tempfile.TemporaryDirectory() as trash_dir:
            shutil.move(str(path), trash_dir)
    except FileNotFoundError:
        # rmtree already removed everything — nothing left to move.
        pass
class CFGDenoiser(nn.Module):
    """Classifier-free-guidance wrapper around a denoiser, with separate text
    and image guidance scales (InstructPix2Pix-style) when cross-attention
    conditioning is present."""

    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale=1.0):
        if ('c_crossattn' in cond):
            # Text+image conditioning: batch three passes into one call —
            # (full cond, image-only cond, fully unconditional).
            cfg_z = einops.repeat(z, '1 ... -> n ...', n=3)
            cfg_sigma = einops.repeat(sigma, '1 ... -> n ...', n=3)
            cfg_cond = {'c_crossattn': [torch.cat([cond['c_crossattn'][0], uncond['c_crossattn'][0], uncond['c_crossattn'][0]])], 'c_concat': [torch.cat([cond['c_concat'][0], cond['c_concat'][0], uncond['c_concat'][0]])]}
            (out_cond, out_img_cond, out_uncond) = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3)
            # uncond + text_scale * (text direction) + image_scale * (image direction).
            return ((out_uncond + (text_cfg_scale * (out_cond - out_img_cond))) + (image_cfg_scale * (out_img_cond - out_uncond)))
        else:
            # Image-only conditioning: standard two-pass CFG.
            cfg_z = einops.repeat(z, '1 ... -> n ...', n=2)
            cfg_sigma = einops.repeat(sigma, '1 ... -> n ...', n=2)
            cfg_cond = {'c_concat': [torch.cat([cond['c_concat'][0], uncond['c_concat'][0]])]}
            (out_cond, out_uncond) = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(2)
            return (out_uncond + (text_cfg_scale * (out_cond - out_uncond)))