code
stringlengths
101
5.91M
def get_defense_summary(defense, trace_ids, traces):
    """Summarize each defended trace against its original, undefended version.

    Args:
        defense: name of the defense, recorded in every row.
        trace_ids: identifiers used to look up the original traces.
        traces: defended packet-direction traces (signed ints; sign = direction).

    Returns:
        pd.DataFrame with one row per trace: packet overhead, direction ratio,
        classifier confidence before/after the defense, and trace length.
    """
    rows = []
    for trace_id, trace in tqdm.tqdm_notebook(list(zip(trace_ids, traces))):
        x = extract(trace)
        orig_trace = traces_by_trace_id[trace_id]
        orig_x = featurevecs_by_trace_id[trace_id]
        arr = np.array(trace)
        # BUG FIX: the original computed len(arr < 0) / (len(arr > 0) + 1),
        # but len() of a boolean mask is just the trace length, so the ratio
        # was always len/(len+1) regardless of direction. Count the packets
        # in each direction instead.
        n_neg = int((arr < 0).sum())
        n_pos = int((arr > 0).sum())
        rows.append({'filename': trace_id,
                     'defense': defense,
                     'packets_added': (len(trace) - len(orig_trace)),
                     'dirratio': (n_neg / (n_pos + 1)),
                     'svmpred': get_confidence(x),
                     'orig_svmpred': get_confidence(orig_x),
                     'tracelen': len(orig_trace)})
    # DataFrame.append was removed in pandas 2.0; build the frame once via
    # concat so the summary_columns ordering is preserved (extra keys, if any,
    # are appended after them, matching append's old behavior).
    summary = pd.concat([pd.DataFrame(columns=summary_columns), pd.DataFrame(rows)],
                        ignore_index=True)
    return summary
def zip_item_is_executable(info):
    """Return True iff *info* (a ZipInfo) describes a regular file with any execute bit set."""
    # The high 16 bits of external_attr hold the Unix st_mode for entries
    # written on POSIX systems; zero means no mode was recorded.
    unix_mode = info.external_attr >> 16
    if not unix_mode:
        return False
    # 0o111 == any of the user/group/other execute bits.
    return bool(stat.S_ISREG(unix_mode) and (unix_mode & 0o111))
def summarize(writer, global_step, scalars=None, histograms=None, images=None, audios=None, audio_sampling_rate=22050):
    """Log batches of values to a TensorBoard-style writer at *global_step*.

    Args:
        writer: object exposing add_scalar/add_histogram/add_image/add_audio.
        global_step: step index attached to every logged entry.
        scalars, histograms, images, audios: optional {tag: value} dicts.
        audio_sampling_rate: sample rate forwarded with every audio clip.
    """
    # BUG FIX: the original used mutable default arguments ({}), which are
    # shared across all calls; use None sentinels and substitute fresh dicts.
    for k, v in (scalars or {}).items():
        writer.add_scalar(k, v, global_step)
    for k, v in (histograms or {}).items():
        writer.add_histogram(k, v, global_step)
    for k, v in (images or {}).items():
        # Images are logged channel-last (HWC).
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in (audios or {}).items():
        writer.add_audio(k, v, global_step, audio_sampling_rate)
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a RemBERT TensorFlow checkpoint into a PyTorch weights file.

    Args:
        tf_checkpoint_path: path to the TF checkpoint to read.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where the resulting state_dict is saved.
    """
    # Build an empty PyTorch model from the JSON configuration.
    rembert_config = RemBertConfig.from_json_file(bert_config_file)
    print('Building PyTorch model from configuration: {}'.format(str(rembert_config)))
    pt_model = RemBertModel(rembert_config)
    # Copy the TF weights into the PyTorch model, then serialize it.
    load_tf_weights_in_rembert(pt_model, rembert_config, tf_checkpoint_path)
    print('Save PyTorch model to {}'.format(pytorch_dump_path))
    torch.save(pt_model.state_dict(), pytorch_dump_path)
class Seq(RE):
    """Regular expression matching a sequence of sub-expressions in order.

    Part of a Plex-style regex engine: composes its children into an NFA
    fragment and tracks nullability / newline-matching for BOL handling.
    """

    def __init__(self, *re_list):
        # The sequence is nullable only if every component is nullable.
        nullable = 1
        for (i, re) in enumerate(re_list):
            self.check_re(i, re)
            nullable = (nullable and re.nullable)
        self.re_list = re_list
        self.nullable = nullable
        # Scan backwards from the end: the sequence can end in a newline if
        # some component matches '\n' and everything after it is nullable.
        i = len(re_list)
        match_nl = 0
        while i:
            i -= 1
            re = re_list[i]
            if re.match_nl:
                match_nl = 1
                break
            if (not re.nullable):
                # A non-nullable, non-newline component blocks the suffix.
                break
        self.match_nl = match_nl

    def build_machine(self, m, initial_state, final_state, match_bol, nocase):
        # Wire this sequence into machine `m` between the two given states.
        re_list = self.re_list
        if (len(re_list) == 0):
            # Empty sequence: epsilon transition straight through.
            initial_state.link_to(final_state)
        else:
            s1 = initial_state
            n = len(re_list)
            for (i, re) in enumerate(re_list):
                # Chain components through fresh intermediate states; the
                # last component connects directly to final_state.
                if (i < (n - 1)):
                    s2 = m.new_state()
                else:
                    s2 = final_state
                re.build_machine(m, s1, s2, match_bol, nocase)
                s1 = s2
                # A later component starts at beginning-of-line if this one
                # matched a newline, or BOL persisted through nullable REs.
                match_bol = (re.match_nl or (match_bol and re.nullable))

    def calc_str(self):
        # Debug representation, e.g. Seq(a,b,c).
        return ('Seq(%s)' % ','.join(map(str, self.re_list)))
class FastBatchNorm1d(nn.Module):
    """BatchNorm1d accepting either (N, C) "sparse" or (N, L, C) "dense" input.

    nn.BatchNorm1d expects channels in dim 1; this wrapper transposes
    channel-last dense batches around the norm so callers don't have to.
    """

    def __init__(self, num_features, **kwargs):
        super().__init__()
        self.bn = nn.BatchNorm1d(num_features, **kwargs)

    def _forward_dense(self, x):
        # (N, L, C) -> (N, C, L), normalize, then swap back to channel-last.
        channels_first = x.transpose(1, 2)
        normalized = self.bn(channels_first)
        return normalized.transpose(2, 1)

    def _forward_sparse(self, x):
        # (N, C) can be fed to BatchNorm1d directly.
        return self.bn(x)

    def forward(self, x):
        ndim = x.dim()
        if ndim == 2:
            return self._forward_sparse(x)
        if ndim == 3:
            return self._forward_dense(x)
        raise ValueError('Non supported number of dimensions {}'.format(ndim))
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Reformat one MVA value, returning a single-element list.

    Null-like inputs become [nan]; invalid inputs are raised, passed through,
    or nulled out according to *errors*; valid inputs are rendered in the
    requested *output_format* ('compact' or 'standard').
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_no_mva(val):
        # Invalid value: behavior depends on the errors policy.
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    result: Any = []
    if output_format == 'compact':
        result = [mva.compact(val)] + result
    elif output_format == 'standard':
        result = [mva.format(val)] + result
    return result
class StandardTableaux_residue_shape(StandardTableaux_residue):
    """Standard tableau tuples of a fixed shape with a fixed residue sequence."""

    def __init__(self, residue, shape):
        """Initialize from a residue sequence and a (multi)partition shape.

        Raises:
            ValueError: if the residue sequence and shape have different sizes.
        """
        if (residue.size() != shape.size()):
            # BUG FIX: garbled message said "residue defence"; it should read
            # "residue sequence".
            raise ValueError('the size of the shape and the length of the residue sequence must coincide')
        StandardTableauTuples.__init__(self, category=FiniteEnumeratedSets())
        self._level = residue.level()
        self._multicharge = residue.multicharge()
        self._quantum_characteristic = residue.quantum_characteristic()
        self._residue = residue
        self._shape = shape
        self._size = residue.size()

    def __contains__(self, t):
        # Coerce to a StandardTableauTuple when possible, then compare both
        # the shape and the residue sequence.
        if (not isinstance(t, self.element_class)):
            try:
                t = StandardTableauTuple(t)
            except ValueError:
                return False
        return ((t.shape() == self._shape) and
                (t.residue_sequence(self._quantum_characteristic, self._multicharge) == self._residue))

    def _repr_(self):
        return 'Standard ({})-tableaux with {}'.format(self._shape._repr_compact_high(), self._residue.__str__('and'))

    def __iter__(self):
        """Iterate by peeling removable cells whose residue matches the last entry."""
        if (self._size == 0):
            (yield StandardTableauTuple([[] for _ in range(self._level)]))
            return
        for cell in self._shape.removable_cells():
            if (self._residue[self._size] == self._residue.parent().cell_residue(*cell)):
                # Recurse on the restricted residue sequence and smaller shape.
                for t in StandardTableaux_residue_shape(self._residue.restrict((self._size - 1)), self._shape.remove_cell(*cell)):
                    if (self._level == 1):
                        (yield t.add_entry(cell, self._size))
                    else:
                        # Fast path for higher levels skips validity checks.
                        tab = _add_entry_fast(t, cell, self._size)
                        (yield self.element_class(self, tab, check=False))

    def an_element(self):
        # The set may be empty, in which self[0] raises ValueError.
        try:
            return self[0]
        except ValueError:
            return None
# BUG FIX: the decorator had been truncated to ".parametrize(...)", which is a
# syntax error; restored the standard pytest parametrize form.
@pytest.mark.parametrize('sparse_feature_num,dense_feature_num', [(2, 0)])
def test_CCPM_without_seq(sparse_feature_num, dense_feature_num):
    """Smoke-test the CCPM model without sequence features."""
    model_name = 'CCPM'
    sample_size = SAMPLE_SIZE
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num, sequence_feature=())
    model = CCPM(feature_columns, feature_columns, conv_kernel_width=(3, 2), conv_filters=(2, 1), dnn_hidden_units=[32], dnn_dropout=0.5, device=get_device())
    check_model(model, model_name, x, y)
def skip_backend(backend):
    """Return a context manager under which *backend* is skipped for dispatch."""
    # Accept any of the forms _backend_from_arg understands, then delegate.
    resolved = _backend_from_arg(backend)
    return ua.skip_backend(resolved)
def train(epoch, model, dataloader, optimizer, training):
    """Run one epoch of training (training=True) or validation.

    Returns:
        (avg_loss, miou, fb_iou) aggregated over the epoch.
    """
    # IDIOM FIX: the original used conditional *expressions* purely for their
    # side effects; plain if/else statements say the same thing honestly.
    if training:
        # Fresh (unseeded) randomness for training...
        utils.fix_randseed(None)
        model.module.train_mode()
    else:
        # ...but a fixed seed for validation so metrics are reproducible.
        utils.fix_randseed(0)
        model.module.eval()
    average_meter = AverageMeter(dataloader.dataset)
    for idx, batch in enumerate(dataloader):
        batch = utils.to_cuda(batch)
        # Single support shot: squeeze away the shot dimension.
        logit_mask = model(batch['query_img'], batch['support_imgs'].squeeze(1), batch['support_masks'].squeeze(1))
        pred_mask = logit_mask.argmax(dim=1)
        loss = model.module.compute_objective(logit_mask, batch['query_mask'])
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        area_inter, area_union = Evaluator.classify_prediction(pred_mask, batch)
        average_meter.update(area_inter, area_union, batch['class_id'], loss.detach().clone())
        average_meter.write_process(idx, len(dataloader), epoch, write_batch_idx=50)
    average_meter.write_result('Training' if training else 'Validation', epoch)
    avg_loss = utils.mean(average_meter.loss_buf)
    miou, fb_iou = average_meter.compute_iou()
    return (avg_loss, miou, fb_iou)
class AttributeExpandSuggestion(object):
    """A candidate split on one attribute, with its resulting class distributions.

    Attributes:
        att_idx: index of the attribute the split tests.
        att_val: threshold/value the split compares against.
        operator: comparison operator of the test (e.g. '<=').
        resulting_class_distributions: one class-distribution per branch.
        merit: score of this split candidate.
    """

    def __init__(self, att_idx, att_val, operator, resulting_class_distributions, merit):
        self.att_idx = att_idx
        self.att_val = att_val
        self.operator = operator
        self.resulting_class_distributions = resulting_class_distributions
        self.merit = merit

    def num_splits(self):
        """Number of branches this split produces."""
        return len(self.resulting_class_distributions)

    def resulting_stats_from_split(self, split_idx):
        """Class distribution of branch *split_idx*."""
        return self.resulting_class_distributions[split_idx]
class StraightLinePolicy():
    """Policy that steps straight from the current state toward the goal."""

    def __init__(self, env):
        self.env = env
        self.action_space = env.action_space

    def reset(self):
        # Stateless policy: nothing to reset between episodes.
        pass

    def get_action(self, obs):
        """Return a normalized step toward the goal (the observation is ignored)."""
        delta = self.env.goal_state.q - self.env.state.q
        # Only the leading action-space dimensions are actuated.
        step = delta[:self.action_space.low.shape[0]]
        # Normalize, but clamp the divisor at 0.07 so short remaining
        # distances produce proportionally smaller (not exploding) steps.
        scale = max(0.07, np.linalg.norm(step))
        step = step / scale
        return (step, {})
def from_args(func, ns, *args, **kwargs):
    """Call *func*, filling keyword arguments from the argparse namespace *ns*.

    Namespace entries that *func* does not accept are dropped; explicit
    *kwargs* are passed after them (and so may override).
    """
    ns_kwargs = strip_unexpected_kwargs(func, vars(ns))
    return func(*args, **ns_kwargs, **kwargs)
def mediainfo_json(filepath, read_ahead_limit=(- 1)):
    """Return ffprobe/avprobe metadata for *filepath* as a dict (parsed JSON).

    Accepts a filesystem path or a file-like object; file-likes are streamed
    to the prober over stdin. For the first audio stream, sample format /
    bit-depth fields missing from the JSON are recovered from stderr.
    """
    prober = get_prober_name()
    command_args = ['-v', 'info', '-show_format', '-show_streams']
    try:
        command_args += [fsdecode(filepath)]
        stdin_parameter = None
        stdin_data = None
    except TypeError:
        # Not a path: pipe the data in via stdin instead.
        if (prober == 'ffprobe'):
            command_args += ['-read_ahead_limit', str(read_ahead_limit), 'cache:pipe:0']
        else:
            command_args += ['-']
        stdin_parameter = PIPE
        (file, close_file) = _fd_or_path_or_tempfile(filepath, 'rb', tempfile=False)
        file.seek(0)
        stdin_data = file.read()
        if close_file:
            file.close()
    command = ([prober, '-of', 'json'] + command_args)
    res = Popen(command, stdin=stdin_parameter, stdout=PIPE, stderr=PIPE)
    (output, stderr) = res.communicate(input=stdin_data)
    output = output.decode('utf-8', 'ignore')
    stderr = stderr.decode('utf-8', 'ignore')
    info = json.loads(output)
    if (not info):
        return info
    extra_info = get_extra_info(stderr)
    audio_streams = [x for x in info['streams'] if (x['codec_type'] == 'audio')]
    if (len(audio_streams) == 0):
        return info
    # Only the first audio stream gets the stderr-derived enrichment.
    stream = audio_streams[0]

    def set_property(stream, prop, value):
        # Only fill in properties the prober left missing or zeroed.
        if ((prop not in stream) or (stream[prop] == 0)):
            stream[prop] = value

    for token in extra_info[stream['index']]:
        # BUG FIX: this pattern had been split across two source lines,
        # injecting a literal newline into the regex; restored as one string
        # matching e.g. "s16p (24 bit)".
        m = re.match('([su]([0-9]{1,2})p?) \\(([0-9]{1,2}) bit\\)$', token)
        m2 = re.match('([su]([0-9]{1,2})p?)( \\(default\\))?$', token)
        if m:
            set_property(stream, 'sample_fmt', m.group(1))
            set_property(stream, 'bits_per_sample', int(m.group(2)))
            set_property(stream, 'bits_per_raw_sample', int(m.group(3)))
        elif m2:
            set_property(stream, 'sample_fmt', m2.group(1))
            set_property(stream, 'bits_per_sample', int(m2.group(2)))
            set_property(stream, 'bits_per_raw_sample', int(m2.group(2)))
        elif re.match('(flt)p?( \\(default\\))?$', token):
            # 32-bit float formats.
            set_property(stream, 'sample_fmt', token)
            set_property(stream, 'bits_per_sample', 32)
            set_property(stream, 'bits_per_raw_sample', 32)
        elif re.match('(dbl)p?( \\(default\\))?$', token):
            # 64-bit double formats.
            set_property(stream, 'sample_fmt', token)
            set_property(stream, 'bits_per_sample', 64)
            set_property(stream, 'bits_per_raw_sample', 64)
    return info
class PegasusTokenizer(PreTrainedTokenizer):
    """PEGASUS tokenizer backed by a SentencePiece model.

    Ids 0..(offset + 2) are reserved for special tokens (<pad>, </s>, the two
    mask tokens and <unk_i> fillers); raw SentencePiece ids are shifted up by
    ``offset`` to make room for them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    # NOTE(review): duplicated assignment kept verbatim from the original.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        self.offset = offset
        if (additional_special_tokens is not None):
            if (not isinstance(additional_special_tokens, list)):
                raise TypeError(f'additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}')
            # Ensure the sentence-mask token is present, then pad the list
            # with <unk_i> placeholders up to offset - 1 entries.
            additional_special_tokens_extended = (([mask_token_sent] + additional_special_tokens) if ((mask_token_sent not in additional_special_tokens) and (mask_token_sent is not None)) else additional_special_tokens)
            additional_special_tokens_extended += [f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), (self.offset - 1))]
            if (len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended)):
                raise ValueError(f'Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. \nFound {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = ([mask_token_sent] if (mask_token_sent is not None) else [])
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Fixed id -> token table for the reserved special ids (0 and 1 are
        # pad/eos; 2 and 3 the mask tokens when mask_token_sent is set).
        self.encoder: Dict[(int, str)] = {0: self.pad_token, 1: self.eos_token}
        if (self.mask_token_sent is not None):
            self.encoder.update({2: self.mask_token_sent, 3: self.mask_token})
        if (self.offset > 0):
            self.encoder.update({(i + 3): additional_special_tokens[i] for i in range(1, (self.offset - 1))})
        self.decoder: Dict[(str, int)] = {v: k for (k, v) in self.encoder.items()}

    def vocab_size(self) -> int:
        # SentencePiece vocabulary plus the reserved offset slots.
        return (len(self.sp_model) + self.offset)

    def get_vocab(self) -> Dict[(str, int)]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        # Reserved specials first, then user-added tokens, then SentencePiece
        # (with its id shifted up by offset).
        if (token in self.decoder):
            return self.decoder[token]
        elif (token in self.added_tokens_decoder):
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return (sp_id + self.offset)

    def _convert_id_to_token(self, index: int) -> str:
        if (index in self.encoder):
            return self.encoder[index]
        elif (index in self.added_tokens_encoder):
            return self.added_tokens_encoder[index]
        else:
            # Undo the offset shift before asking SentencePiece.
            token = self.sp_model.IdToPiece((index - self.offset))
            return token

    def convert_tokens_to_string(self, tokens):
        # Special tokens are emitted verbatim; runs of ordinary pieces are
        # decoded together so SentencePiece can restore whitespace.
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            if (token in self.all_special_tokens):
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        # Only EOS is ever appended, for both single sequences and pairs.
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)
        # <unk> counts as a regular vocabulary token here, not a control token.
        all_special_ids.remove(self.unk_token_id)
        return [(1 if (x in all_special_ids) else 0) for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
        # The trailing [1] accounts for the EOS appended by
        # build_inputs_with_special_tokens.
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif (token_ids_1 is None):
            return (self._special_token_mask(token_ids_0) + [1])
        else:
            return (self._special_token_mask((token_ids_0 + token_ids_1)) + [1])

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        # PEGASUS simply appends EOS; pairs are concatenated with no separator.
        if (token_ids_1 is None):
            return (token_ids_0 + [self.eos_token_id])
        return ((token_ids_0 + token_ids_1) + [self.eos_token_id])

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # Source file is gone: serialize the in-memory model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def dataset_10x(dataset_name: (str | None)=None, filename: (str | None)=None, save_path: str='data/10X', url: str=None, return_filtered: bool=True, remove_extracted_data: bool=False, **scanpy_read_10x_kwargs) -> anndata.AnnData:
    """Public wrapper around the internal 10x Genomics dataset loader.

    All arguments are forwarded unchanged to ``_load_dataset_10x``; extra
    keyword arguments go straight to ``scanpy.read_10x_*``.
    """
    return _load_dataset_10x(
        dataset_name=dataset_name,
        filename=filename,
        save_path=save_path,
        url=url,
        return_filtered=return_filtered,
        remove_extracted_data=remove_extracted_data,
        **scanpy_read_10x_kwargs,
    )
class DocSettings():
    """Document-side settings (ColBERT-style configuration fields)."""

    # Embedding dimensionality.
    dim: int = DefaultVal(128)
    # Maximum number of document tokens kept.
    doc_maxlen: int = DefaultVal(220)
    # Presumably controls masking of punctuation tokens in document
    # embeddings — confirm against the encoder that reads this setting.
    mask_punctuation: bool = DefaultVal(True)
def rbf_kernel(X: torch.Tensor, Y: torch.Tensor, h_dim: int):
    """Multi-scale RBF MMD statistic between batches X and Y (WAE-style).

    Sums, over several bandwidth scales, the within-batch kernel means
    (diagonal removed) minus twice the cross-batch kernel mean.
    """
    batch_size = X.size(0)

    # Squared pairwise distances via ||a||^2 + ||b||^2 - 2 a.b
    norms_x = X.pow(2).sum(1, keepdim=True)
    norms_y = Y.pow(2).sum(1, keepdim=True)
    dists_x = (norms_x + norms_x.t()) - (2 * torch.mm(X, X.t()))
    dists_y = (norms_y + norms_y.t()) - (2 * torch.mm(Y, Y.t()))
    dists_c = (norms_x + norms_y.t()) - (2 * torch.mm(X, Y.t()))

    # Mask that zeroes the self-distance diagonal (hoisted out of the loop).
    if torch.cuda.is_available():
        off_diagonal = 1 - torch.eye(batch_size).cuda()
    else:
        off_diagonal = 1 - torch.eye(batch_size)

    stats = 0
    for scale in [0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]:
        C = ((2 * h_dim) * 1.0) / scale
        # Within-batch terms with the diagonal removed.
        within = torch.exp((- C) * dists_x)
        within += torch.exp((- C) * dists_y)
        within = (off_diagonal * within).sum() / (batch_size - 1)
        # Cross-batch term.
        cross = (torch.exp((- C) * dists_c).sum() * 2.0) / batch_size
        stats += (within - cross)
    return stats
class Perceptron(BaseSGDClassifier):
    """Perceptron classifier.

    Thin configuration wrapper over the SGD classifier: the loss is pinned to
    'perceptron', the learning rate is constant at eta0, and averaging is
    disabled; all other knobs forward to BaseSGDClassifier.
    """

    # Start from the SGD constraints, dropping the options this estimator
    # fixes internally (loss, average), and re-declaring the ones it exposes.
    _parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints}
    _parameter_constraints.pop('loss')
    _parameter_constraints.pop('average')
    _parameter_constraints.update({'penalty': [StrOptions({'l2', 'l1', 'elasticnet'}), None], 'alpha': [Interval(Real, 0, None, closed='left')], 'l1_ratio': [Interval(Real, 0, 1, closed='both')], 'eta0': [Interval(Real, 0, None, closed='left')]})

    def __init__(self, *, penalty=None, alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, eta0=1.0, n_jobs=None, random_state=0, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False):
        # loss, learning_rate and power_t are fixed; everything else is
        # passed through unchanged.
        super().__init__(loss='perceptron', penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, random_state=random_state, learning_rate='constant', eta0=eta0, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, power_t=0.5, warm_start=warm_start, class_weight=class_weight, n_jobs=n_jobs)
def build(net, netName='net.net.xml'):
    """Write the in-memory network out as SUMO node/edge/connection XML files
    and run netconvert on them to produce *netName*.

    Returns the generated network file name (also stored on net.netName).
    """
    connections = []
    # --- nodes file ---
    nodesFile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    print('<nodes>', file=nodesFile)
    for nid in net._nodes:
        n = net._nodes[nid]
        print((' <node id="%s" x="%s" y="%s" type="%s"/>' % (n.nid, n._x, n._y, n.nodeType)), file=nodesFile)
    print('</nodes>', file=nodesFile)
    nodesFile.close()
    # --- edges file ---
    edgesFile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    print('<edges>', file=edgesFile)
    for eid in net._edges:
        e = net._edges[eid]
        print((' <edge id="%s" from="%s" to="%s" numLanes="%s" speed="%s" shape="%s">' % (e.eid, e.fromNode.nid, e.toNode.nid, e.numLanes, e.maxSpeed, e.shapes)), file=edgesFile)
        for s in e.splits:
            print((' <split pos="%s" lanes="%s"/>' % ((- s.distance), ' '.join(map(str, s.lanes)))), file=edgesFile)
        # NOTE(review): the following string literal is dead commented-out
        # code (old Python 2 lane-permission output) kept as a no-op
        # expression statement; preserved verbatim here.
        '\n for i,l in enumerate(e.lanes):\n if l.allowed==None and l.disallowed==None:\n continue\n ls = \' <lane index="%s" \' % (i)\n if l.allowed!=None:\n ls = ls + \'allow="%s"\' % l.allowed\n if l.disallowed!=None:\n ls = ls + \'disallow="%s"\' % l.disallowed\n print >> edgesFile, ls+\'/>\'\n '
        connections.extend(e.getConnections(net))
        print(' </edge>', file=edgesFile)
        # Detect whether any lane on this edge restricts vehicle classes.
        hadConstraints = False
        for (i, l) in enumerate(e.lanes):
            if ((l.allowed is None) and (l.disallowed is None)):
                continue
            hadConstraints = True
        if hadConstraints:
            # Re-emit per-split <edge> entries carrying the lane permissions;
            # split edges are named "<eid>.<-distance>" by netconvert.
            for s in e.splits:
                eid = e.eid
                if (s.distance != 0):
                    eid = (eid + ('.%s' % (- s.distance)))
                print((' <edge id="%s">' % eid), file=edgesFile)
                for (i, l) in enumerate(e.lanes):
                    if ((l.allowed is None) and (l.disallowed is None)):
                        continue
                    ls = (' <lane index="%s" ' % i)
                    if (l.allowed is not None):
                        ls = (ls + ('allow="%s"' % l.allowed))
                    if (l.disallowed is not None):
                        ls = (ls + ('disallow="%s"' % l.disallowed))
                    print((ls + '/>'), file=edgesFile)
                print(' </edge>', file=edgesFile)
    print('</edges>', file=edgesFile)
    edgesFile.close()
    # --- connections file ---
    connectionsFile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    print('<connections>', file=connectionsFile)
    for c in connections:
        eid = c.fromEdge.eid
        if (len(c.fromEdge.splits) > 1):
            # Connections leave from the last split segment of the edge.
            eid = ((eid + '.-') + str(c.fromEdge.splits[(- 1)].distance))
        print((' <connection from="%s" to="%s" fromLane="%s" toLane="%s"/>' % (eid, c.toEdge.eid, c.fromLane, c.toLane)), file=connectionsFile)
    for n in net._nodes:
        if (len(net._nodes[n].crossings) == 0):
            continue
        for c in net._nodes[n].crossings:
            print((' <crossing node="%s" edges="%s"/>' % (n, ' '.join(c))), file=connectionsFile)
    print('</connections>', file=connectionsFile)
    connectionsFile.close()
    # Run netconvert over the three temp files to build the final network.
    netconvert = sumolib.checkBinary('netconvert')
    subprocess.call([netconvert, '-n', nodesFile.name, '-e', edgesFile.name, '-x', connectionsFile.name, '-o', netName, '--no-turnarounds', 'true'])
    import time
    # HACK: fixed sleep before deleting the temp inputs — presumably to let
    # netconvert finish touching them; subprocess.call already waits, so
    # this looks redundant. Preserved as-is.
    time.sleep(1)
    os.remove(nodesFile.name)
    os.remove(edgesFile.name)
    os.remove(connectionsFile.name)
    net.netName = netName
    return netName
class ActivationTraceHessianCalculatorKeras(TraceHessianCalculatorKeras):
    """Hutchinson-style approximation of the Hessian trace w.r.t. a node's
    activation tensor, for Keras models.

    NOTE(review): reconstructed formatting — the nesting of the tape /
    stop_recording blocks follows the apparent structure; confirm against the
    original module.
    """

    def __init__(self, graph: Graph, input_images: List[tf.Tensor], fw_impl, trace_hessian_request: TraceHessianRequest, num_iterations_for_approximation: int=HESSIAN_NUM_ITERATIONS):
        super(ActivationTraceHessianCalculatorKeras, self).__init__(graph=graph, input_images=input_images, fw_impl=fw_impl, trace_hessian_request=trace_hessian_request, num_iterations_for_approximation=num_iterations_for_approximation)

    def compute(self) -> List[float]:
        """Approximate the Hessian trace for the requested target node.

        Uses Hutchinson's estimator: E_v[ (d/da sum(v * f))^2 ] with random
        normal vectors v, iterated until convergence or the iteration cap.
        """
        if (self.hessian_request.granularity == HessianInfoGranularity.PER_TENSOR):
            model_output_nodes = [ot.node for ot in self.graph.get_outputs()]
            # Differentiating an output w.r.t. itself is meaningless here.
            if (self.hessian_request.target_node in model_output_nodes):
                Logger.exception('Trying to compute activation Hessian approximation with respect to the model output. This operation is not supported. Remove the output node from the set of node targets in the Hessian request.')
            # Build a float model that exposes the target activation as an
            # extra output, ahead of the real model outputs.
            grad_model_outputs = ([self.hessian_request.target_node] + model_output_nodes)
            (model, _) = FloatKerasModelBuilder(graph=self.graph, append2output=grad_model_outputs).build_model()
            # Persistent tape: we take gradients repeatedly inside the loop.
            with tf.GradientTape(persistent=True, watch_accessed_variables=False) as g:
                g.watch(self.input_images)
                if (len(self.input_images) > 1):
                    outputs = model(self.input_images)
                else:
                    outputs = model(*self.input_images)
                if (len(outputs) != len(grad_model_outputs)):
                    Logger.error(f'Model for computing activation Hessian approximation expects {len(grad_model_outputs)} outputs, but got {len(outputs)} output tensors.')
                # First output is the watched target activation; the rest are
                # the model's real outputs, concatenated into one tensor.
                target_activation_tensors = [outputs[0]]
                output_tensors = outputs[1:]
                output = self._concat_tensors(output_tensors)
                trace_approx_by_node = []
                for ipt in tqdm(target_activation_tensors):
                    interest_point_scores = []
                    for j in range(self.num_iterations_for_approximation):
                        # Random probe vector and its scalar projection f_v.
                        v = tf.random.normal(shape=output.shape, dtype=output.dtype)
                        f_v = tf.reduce_sum((v * output))
                        with g.stop_recording():
                            gradients = g.gradient(f_v, ipt, unconnected_gradients=tf.UnconnectedGradients.ZERO)
                            if (not isinstance(gradients, list)):
                                gradients = [gradients]
                            score_approx_per_output = []
                            for grad in gradients:
                                # Squared gradient norm = one Hutchinson sample.
                                score_approx_per_output.append(tf.reduce_sum(tf.pow(grad, 2.0)))
                                del grad
                            del gradients
                            if (j > MIN_HESSIAN_ITER):
                                # Early stop once the running mean has settled
                                # (relative change below HESSIAN_COMP_TOLERANCE).
                                new_mean_per_output = []
                                delta_per_output = []
                                for (output_idx, score_approx) in enumerate(score_approx_per_output):
                                    prev_scores_output = [x[output_idx] for x in interest_point_scores]
                                    new_mean = np.mean([score_approx, *prev_scores_output])
                                    delta = (new_mean - np.mean(prev_scores_output))
                                    new_mean_per_output.append(new_mean)
                                    delta_per_output.append(delta)
                                is_converged = all([((np.abs(delta) / (np.abs(new_mean) + 1e-06)) < HESSIAN_COMP_TOLERANCE) for (delta, new_mean) in zip(delta_per_output, new_mean_per_output)])
                                if is_converged:
                                    interest_point_scores.append(score_approx_per_output)
                                    break
                            interest_point_scores.append(score_approx_per_output)
                    # Average the samples per output, then across outputs.
                    final_approx_per_output = []
                    num_node_outputs = len(interest_point_scores[0])
                    for output_idx in range(num_node_outputs):
                        final_approx_per_output.append(tf.reduce_mean([x[output_idx] for x in interest_point_scores]))
                    trace_approx_by_node.append(tf.reduce_mean(final_approx_per_output))
                trace_approx_by_node = tf.reduce_mean([trace_approx_by_node], axis=0)
            # Release the persistent tape explicitly.
            del g
            return trace_approx_by_node.numpy().tolist()
        else:
            Logger.error(f"{self.hessian_request.granularity} is not supported for Keras activation hessian's trace approx calculator")
def test_isotonic_regression_pickle():
    """A pickled-and-restored IsotonicRegression must predict identically."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    model = IsotonicRegression(increasing='auto', out_of_bounds='clip')
    model.fit(x, y)
    # Round-trip through pickle and compare predictions exactly.
    restored = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    np.testing.assert_array_equal(model.predict(x), restored.predict(x))
def register_methods(root_module):
    """Register the Python method wrappers for every bound ns-3 type.

    Generated binding glue (pybindgen style): each call attaches the method
    wrappers of one C++ type to its entry in ``root_module``. The order and
    the set of registered types must not be changed by hand.
    """
    # Core / helper value types.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Cid_methods(root_module, root_module['ns3::Cid'])
    register_Ns3CidFactory_methods(root_module, root_module['ns3::CidFactory'])
    register_Ns3CsParameters_methods(root_module, root_module['ns3::CsParameters'])
    register_Ns3DcdChannelEncodings_methods(root_module, root_module['ns3::DcdChannelEncodings'])
    # DefaultDeleter template instantiations.
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
    register_Ns3DefaultDeleter__Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::DefaultDeleter< ns3::OutputStreamWrapper >'])
    register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3DlFramePrefixIe_methods(root_module, root_module['ns3::DlFramePrefixIe'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3IpcsClassifierRecord_methods(root_module, root_module['ns3::IpcsClassifierRecord'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3OfdmDcdChannelEncodings_methods(root_module, root_module['ns3::OfdmDcdChannelEncodings'])
    register_Ns3OfdmDlBurstProfile_methods(root_module, root_module['ns3::OfdmDlBurstProfile'])
    register_Ns3OfdmDlMapIe_methods(root_module, root_module['ns3::OfdmDlMapIe'])
    register_Ns3OfdmUlBurstProfile_methods(root_module, root_module['ns3::OfdmUlBurstProfile'])
    register_Ns3OfdmUlMapIe_methods(root_module, root_module['ns3::OfdmUlMapIe'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    # WiMAX-specific plain types.
    register_Ns3SNRToBlockErrorRateManager_methods(root_module, root_module['ns3::SNRToBlockErrorRateManager'])
    register_Ns3SNRToBlockErrorRateRecord_methods(root_module, root_module['ns3::SNRToBlockErrorRateRecord'])
    register_Ns3SSRecord_methods(root_module, root_module['ns3::SSRecord'])
    register_Ns3SendParams_methods(root_module, root_module['ns3::SendParams'])
    register_Ns3ServiceFlow_methods(root_module, root_module['ns3::ServiceFlow'])
    register_Ns3ServiceFlowRecord_methods(root_module, root_module['ns3::ServiceFlowRecord'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TlvValue_methods(root_module, root_module['ns3::TlvValue'])
    register_Ns3TosTlvValue_methods(root_module, root_module['ns3::TosTlvValue'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3U16TlvValue_methods(root_module, root_module['ns3::U16TlvValue'])
    register_Ns3U32TlvValue_methods(root_module, root_module['ns3::U32TlvValue'])
    register_Ns3U8TlvValue_methods(root_module, root_module['ns3::U8TlvValue'])
    register_Ns3UcdChannelEncodings_methods(root_module, root_module['ns3::UcdChannelEncodings'])
    register_Ns3VectorTlvValue_methods(root_module, root_module['ns3::VectorTlvValue'])
    register_Ns3WimaxHelper_methods(root_module, root_module['ns3::WimaxHelper'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3SimpleOfdmSendParam_methods(root_module, root_module['ns3::simpleOfdmSendParam'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3ClassificationRuleVectorTlvValue_methods(root_module, root_module['ns3::ClassificationRuleVectorTlvValue'])
    register_Ns3CsParamVectorTlvValue_methods(root_module, root_module['ns3::CsParamVectorTlvValue'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4AddressTlvValue_methods(root_module, root_module['ns3::Ipv4AddressTlvValue'])
    register_Ns3Ipv4AddressTlvValueIpv4Addr_methods(root_module, root_module['ns3::Ipv4AddressTlvValue::ipv4Addr'])
    register_Ns3MacHeaderType_methods(root_module, root_module['ns3::MacHeaderType'])
    register_Ns3ManagementMessageType_methods(root_module, root_module['ns3::ManagementMessageType'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3OfdmDownlinkFramePrefix_methods(root_module, root_module['ns3::OfdmDownlinkFramePrefix'])
    register_Ns3OfdmSendParams_methods(root_module, root_module['ns3::OfdmSendParams'])
    register_Ns3OfdmUcdChannelEncodings_methods(root_module, root_module['ns3::OfdmUcdChannelEncodings'])
    register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3PortRangeTlvValue_methods(root_module, root_module['ns3::PortRangeTlvValue'])
    register_Ns3PortRangeTlvValuePortRange_methods(root_module, root_module['ns3::PortRangeTlvValue::PortRange'])
    register_Ns3PriorityUlJob_methods(root_module, root_module['ns3::PriorityUlJob'])
    register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
    register_Ns3ProtocolTlvValue_methods(root_module, root_module['ns3::ProtocolTlvValue'])
    register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
    register_Ns3RngReq_methods(root_module, root_module['ns3::RngReq'])
    register_Ns3RngRsp_methods(root_module, root_module['ns3::RngRsp'])
    register_Ns3SSManager_methods(root_module, root_module['ns3::SSManager'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3ServiceFlowManager_methods(root_module, root_module['ns3::ServiceFlowManager'])
    register_Ns3SfVectorTlvValue_methods(root_module, root_module['ns3::SfVectorTlvValue'])
    # SimpleRefCount template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SsServiceFlowManager_methods(root_module, root_module['ns3::SsServiceFlowManager'])
    register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3Tlv_methods(root_module, root_module['ns3::Tlv'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
    register_Ns3Ucd_methods(root_module, root_module['ns3::Ucd'])
    register_Ns3UlJob_methods(root_module, root_module['ns3::UlJob'])
    register_Ns3UlMap_methods(root_module, root_module['ns3::UlMap'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3UplinkScheduler_methods(root_module, root_module['ns3::UplinkScheduler'])
    register_Ns3UplinkSchedulerMBQoS_methods(root_module, root_module['ns3::UplinkSchedulerMBQoS'])
    register_Ns3UplinkSchedulerRtps_methods(root_module, root_module['ns3::UplinkSchedulerRtps'])
    register_Ns3UplinkSchedulerSimple_methods(root_module, root_module['ns3::UplinkSchedulerSimple'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3WimaxConnection_methods(root_module, root_module['ns3::WimaxConnection'])
    register_Ns3WimaxMacQueue_methods(root_module, root_module['ns3::WimaxMacQueue'])
    register_Ns3WimaxMacQueueQueueElement_methods(root_module, root_module['ns3::WimaxMacQueue::QueueElement'])
    register_Ns3WimaxMacToMacHeader_methods(root_module, root_module['ns3::WimaxMacToMacHeader'])
    register_Ns3WimaxPhy_methods(root_module, root_module['ns3::WimaxPhy'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    # Attribute system and Object-derived types.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BSScheduler_methods(root_module, root_module['ns3::BSScheduler'])
    register_Ns3BSSchedulerRtps_methods(root_module, root_module['ns3::BSSchedulerRtps'])
    register_Ns3BSSchedulerSimple_methods(root_module, root_module['ns3::BSSchedulerSimple'])
    register_Ns3BandwidthRequestHeader_methods(root_module, root_module['ns3::BandwidthRequestHeader'])
    register_Ns3BsServiceFlowManager_methods(root_module, root_module['ns3::BsServiceFlowManager'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3ConnectionManager_methods(root_module, root_module['ns3::ConnectionManager'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3Dcd_methods(root_module, root_module['ns3::Dcd'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3DlMap_methods(root_module, root_module['ns3::DlMap'])
    register_Ns3DsaAck_methods(root_module, root_module['ns3::DsaAck'])
    register_Ns3DsaReq_methods(root_module, root_module['ns3::DsaReq'])
    register_Ns3DsaRsp_methods(root_module, root_module['ns3::DsaRsp'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
    register_Ns3FragmentationSubheader_methods(root_module, root_module['ns3::FragmentationSubheader'])
    register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3GenericMacHeader_methods(root_module, root_module['ns3::GenericMacHeader'])
    register_Ns3GrantManagementSubheader_methods(root_module, root_module['ns3::GrantManagementSubheader'])
    register_Ns3IpcsClassifier_methods(root_module, root_module['ns3::IpcsClassifier'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
    register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3SimpleOfdmWimaxPhy_methods(root_module, root_module['ns3::SimpleOfdmWimaxPhy'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3WimaxChannel_methods(root_module, root_module['ns3::WimaxChannel'])
    register_Ns3WimaxNetDevice_methods(root_module, root_module['ns3::WimaxNetDevice'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BaseStationNetDevice_methods(root_module, root_module['ns3::BaseStationNetDevice'])
    # CallbackImpl template instantiations.
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Bool_Unsigned_long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, bool, unsigned long long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Mac48Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Const_ns3Cid___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Ns3Cid_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3SimpleOfdmWimaxChannel_methods(root_module, root_module['ns3::SimpleOfdmWimaxChannel'])
    register_Ns3SubscriberStationNetDevice_methods(root_module, root_module['ns3::SubscriberStationNetDevice'])
    # Hash implementations.
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def model_config(input_file_pattern=None, input_queue_capacity=640000, num_input_reader_threads=1, shuffle_input_data=True, uniform_init_scale=0.1, vocab_size=20000, batch_size=128, word_embedding_dim=620, bidirectional_encoder=False, encoder_dim=2400):
    """Bundle the model hyperparameters into a fresh _HParams object.

    Every keyword argument is copied verbatim onto the returned config
    object under the same attribute name.
    """
    config = _HParams()
    fields = (
        ('input_file_pattern', input_file_pattern),
        ('input_queue_capacity', input_queue_capacity),
        ('num_input_reader_threads', num_input_reader_threads),
        ('shuffle_input_data', shuffle_input_data),
        ('uniform_init_scale', uniform_init_scale),
        ('vocab_size', vocab_size),
        ('batch_size', batch_size),
        ('word_embedding_dim', word_embedding_dim),
        ('bidirectional_encoder', bidirectional_encoder),
        ('encoder_dim', encoder_dim),
    )
    for attr_name, attr_value in fields:
        setattr(config, attr_name, attr_value)
    return config
class _DeprecatedBool(object): def __init__(self, name, version, value): self.message = "'{}' is deprecated and will be removed in version {}.".format(name, version) self.value = value def _warn(self): import warnings warnings.warn(self.message, DeprecationWarning, stacklevel=2) def __eq__(self, other): self._warn() return (other == self.value) def __ne__(self, other): self._warn() return (other != self.value) def __bool__(self): self._warn() return self.value __nonzero__ = __bool__
def randint_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, low=0, high=1, shape=(), seed=(- 1)):
    """Gradient rule for a random-integer sampling op.

    Integer sampling is not differentiable, so no gradient flows to any
    input: one ``None`` is returned per incoming gradient and per op input.

    Args:
        grad_inputs: Gradients flowing in from downstream ops.
        inputs: The forward op's input tensors.
        input_shapes / outputs / output_shapes: Unused forward metadata.
        low, high, shape, seed: Unused forward op attributes.
            ``shape`` defaults to an empty tuple (the original used a
            mutable ``[]`` default, which is a shared-state hazard).

    Returns:
        A list of ``None`` of length ``len(grad_inputs) + len(inputs)``.
    """
    return ([None] * (len(grad_inputs) + len(inputs)))
def helio_jd(date, ra, dec, b1950=False, time_diff=False):
    """Convert a geocentric (reduced) Julian date to a heliocentric one.

    Args:
        date: Reduced Julian date (JD - 2400000), scalar or array-like.
        ra, dec: Target coordinates in degrees (J2000 unless ``b1950``).
        b1950: If True, ``ra``/``dec`` are already B1950 and are used as-is.
        time_diff: If True, return the light-travel correction in seconds
            instead of the corrected date.

    Returns:
        Heliocentric reduced JD, or the time difference in seconds when
        ``time_diff`` is set.
    """
    if b1950:
        ra_use, dec_use = ra, dec
    else:
        # Precess the J2000 input coordinates back to B1950 first.
        ra_use, dec_use = bprecess(ra, dec)
    jd = array(date).astype(float)
    t = (jd - 33282.) / 36525.0
    # Obliquity correction in arcseconds as a cubic in t; coefficients are
    # listed highest power first, as poly1d expects.
    eps_sec = poly1d([0.00181, (- 0.00429), (- 46.8495), 44.836])(t)
    eps = deg2rad(23.433333 + (eps_sec / 3600.0))
    ra_r = deg2rad(ra_use)
    dec_r = deg2rad(dec_use)
    # Heliocentric position of the Earth; only x and y are needed here.
    x, y, z, _, _, _ = xyz(date)
    # Light-travel-time correction in seconds (-499.00522 s per AU).
    delta = (- 499.00522) * (((cos(dec_r) * cos(ra_r)) * x) + (((tan(eps) * sin(dec_r)) + (cos(dec_r) * sin(ra_r))) * y))
    if time_diff:
        return delta
    return jd + (delta / 86400.0)
def _main(config, num_trials):
    """Run up to ``num_trials`` train/eval trials on a single task and
    return a one-line summary string.

    In train mode each trial trains a fresh model and records its
    validation loss; in eval-only mode a saved model is loaded instead.
    Every trial evaluates on the test set, and the loop stops early once a
    trial reaches 100% test accuracy.
    """
    load_metadata(config)
    if config.train:
        comb_train_ds = read_data(config, 'train', config.task)
        comb_dev_ds = read_data(config, 'dev', config.task)
    # Test data is needed in both train and eval-only modes.
    comb_test_ds = read_data(config, 'test', config.task)
    if config.draft:
        # Draft mode: shrink everything for a quick smoke run.
        config.train_num_batches = 1
        config.val_num_batches = 1
        config.test_num_batches = 1
        config.num_epochs = 2
        config.val_period = 1
        config.save_period = 1
    pprint(config.__dict__)
    eval_tensor_names = ['a', 's', 'of', 'ob', 'correct', 'yp']
    eval_ph_names = ['q', 'q_mask', 'x', 'x_mask', 'y']

    def get_best_trial_idx(_val_losses):
        # Index (0-based) of the trial with the smallest validation loss.
        return min(enumerate(_val_losses), key=(lambda x: x[1]))[0]

    val_losses = []
    test_accs = []
    for trial_idx in range(1, (num_trials + 1)):
        if config.train:
            print(('-' * 80))
            print('Task {} trial {}'.format(config.task, trial_idx))
        mkdirs(config, trial_idx)
        # Fresh graph/session/towers per trial.
        graph = tf.Graph()
        towers = [Tower(config) for _ in range(config.num_devices)]
        sess = tf.Session(graph=graph, config=tf.ConfigProto(allow_soft_placement=True))
        runner = Runner(config, sess, towers)
        with graph.as_default(), tf.device('/cpu:0'):
            runner.initialize()
        if config.train:
            if config.load:
                runner.load()
            (val_loss, val_acc) = runner.train(comb_train_ds, config.num_epochs, val_data_set=comb_dev_ds, eval_tensor_names=eval_tensor_names, num_batches=config.train_num_batches, val_num_batches=config.val_num_batches, eval_ph_names=eval_ph_names)
            val_losses.append(val_loss)
        else:
            runner.load()
        (test_loss, test_acc) = runner.eval(comb_test_ds, eval_tensor_names=eval_tensor_names, num_batches=config.test_num_batches, eval_ph_names=eval_ph_names)
        test_accs.append(test_acc)
        if config.train:
            best_trial_idx = get_best_trial_idx(val_losses)
            print(('-' * 80))
            print('Num trials: {}'.format(trial_idx))
            print('Min val loss: {:.4f}'.format(min(val_losses)))
            print('Test acc at min val acc: {:.2f}%'.format((100 * test_accs[best_trial_idx])))
            # NOTE(review): this prints a 1-based trial index while the
            # summary below reports the 0-based one — confirm intended.
            print('Trial idx: {}'.format((best_trial_idx + 1)))
        # Early stop: perfect test accuracy means no further trials needed.
        if (test_acc == 1.0):
            break
    # NOTE(review): in eval-only mode val_losses is empty, so this min()
    # over an empty sequence would raise — confirm eval-only path is used
    # with num_trials handling that case.
    best_trial_idx = get_best_trial_idx(val_losses)
    summary = 'Task {}: {:.2f}% at trial {}'.format(config.task, (test_accs[best_trial_idx] * 100), best_trial_idx)
    return summary
def train_model(model, train_dl, val_dl, epochs: int=10, lr: float=0.0003, name: str='no_name', mcat_ratio: float=0.1, ema: float=0.99, pbar_width: int=None, use_wandb: bool=True, overwrite_model: bool=True):
    """Train a multi-label classifier with nested category (cat) and
    master-category (mcat) targets, track EMA-smoothed metrics, validate
    each epoch, and checkpoint whenever validation cat-F1 improves.

    Args:
        model: The network; must expose ``params``, ``pbar_width``,
            ``data_dir``, ``is_cuda()``, ``trainable_params()`` and
            ``evaluate()``.
        train_dl / val_dl: Training and validation dataloaders.
        epochs: Number of training epochs.
        lr: Adam learning rate.
        name: Run name forwarded to wandb config.
        mcat_ratio: Weight of the mcat term inside NestedBCELoss.
        ema: Exponential-moving-average factor for the reported metrics.
        pbar_width: Unused here; progress bars use ``model.pbar_width``.
        use_wandb: If True, log metrics and checkpoints to Weights&Biases.
        overwrite_model: If True, delete previous ``model*.pt`` files
            before saving a new best checkpoint.

    Returns:
        The trained model.
    """
    from sklearn.metrics import f1_score
    import warnings
    from pathlib import Path
    print(f'Training on {(len(train_dl) * train_dl.batch_size):,d} samples and validating on {(len(val_dl) * val_dl.batch_size):,d} samples.')
    print(f'Number of trainable parameters: {model.trainable_params():,d}')
    if use_wandb:
        # Lazy import so wandb is only required when logging is enabled.
        import wandb
        config = {'name': name, 'mcat_ratio': mcat_ratio, 'epochs': epochs, 'lr': lr, 'batch_size': train_dl.batch_size, 'ema': ema, 'vectors': train_dl.vectors, 'dropout': model.params['dropout'], 'nlayers': model.params['nlayers'], 'dim': model.params['dim'], 'boom_dim': model.params['boom_dim'], 'emb_dim': model.params['vocab'].vectors.shape[1]}
        wandb.init(project='scholarly', config=config)
        wandb.watch(model)
    # Class-weighted nested BCE loss over cat and mcat targets.
    weights = get_class_weights(train_dl, pbar_width=model.pbar_width, data_dir=model.data_dir)
    criterion = NestedBCELoss(**weights, mcat_ratio=mcat_ratio, data_dir=model.data_dir)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    mcat_masks = get_mcat_masks(data_dir=model.data_dir)
    if model.is_cuda():
        mcat_masks = mcat_masks.cuda()
        criterion = criterion.cuda()
    (avg_loss, avg_cat_f1, avg_mcat_f1, best_score) = (0, 0, 0, 0)
    for epoch in range(epochs):
        with tqdm(total=(len(train_dl) * train_dl.batch_size), ncols=model.pbar_width) as pbar:
            model.train()
            for (idx, (x_train, y_train)) in enumerate(train_dl):
                optimizer.zero_grad()
                if model.is_cuda():
                    x_train = x_train.cuda()
                    y_train = y_train.cuda()
                y_hat = model(x_train)
                preds = torch.sigmoid(y_hat)
                # Aggregate cat logits/targets into mcat logits/targets.
                (my_hat, my_train) = cats2mcats(y_hat, y_train, masks=mcat_masks, data_dir=model.data_dir)
                mpreds = torch.sigmoid(my_hat)
                loss = criterion(y_hat, y_train)
                loss.backward()
                optimizer.step()
                # sklearn emits ill-defined-F1 warnings on all-negative
                # batches; silence them for the running metrics.
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    cat_f1 = f1_score((preds.cpu() > 0.5), y_train.cpu(), average='samples')
                    mcat_f1 = f1_score((mpreds.cpu() > 0.5), my_train.cpu(), average='samples')
                # Bias-corrected exponential moving averages of the metrics.
                iteration = ((epoch * len(train_dl)) * train_dl.batch_size)
                iteration += (idx * train_dl.batch_size)
                avg_loss = ((ema * avg_loss) + ((1 - ema) * float(loss)))
                avg_loss /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                avg_cat_f1 = ((ema * avg_cat_f1) + ((1 - ema) * float(cat_f1)))
                avg_cat_f1 /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                avg_mcat_f1 = ((ema * avg_mcat_f1) + ((1 - ema) * float(mcat_f1)))
                avg_mcat_f1 /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                if use_wandb:
                    wandb.log({'loss': avg_loss, 'cat f1': avg_cat_f1, 'mcat f1': avg_mcat_f1})
                desc = f'Epoch {epoch:2d} - loss {avg_loss:.4f} - cat f1 {avg_cat_f1:.4f} - mcat f1 {avg_mcat_f1:.4f}'
                pbar.set_description(desc)
                pbar.update(train_dl.batch_size)
            # Validation pass: no gradients, model in eval mode.
            with torch.no_grad():
                model.eval()
                (val_loss, val_cat_f1, val_mcat_f1) = (0, 0, 0)
                (y_vals, y_hats) = ([], [])
                for (x_val, y_val) in val_dl:
                    if model.is_cuda():
                        x_val = x_val.cuda()
                        y_val = y_val.cuda()
                    y_hat = model(x_val)
                    preds = torch.sigmoid(y_hat)
                    (my_hat, my_val) = cats2mcats(y_hat, y_val, masks=mcat_masks, data_dir=model.data_dir)
                    mpreds = torch.sigmoid(my_hat)
                    y_vals.append(y_val)
                    y_hats.append((preds > 0.5))
                    # Unweighted loss for comparable validation numbers.
                    val_loss += float(criterion(y_hat, y_val, weighted=False))
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')
                        val_cat_f1 += f1_score((preds.cpu() > 0.5), y_val.cpu(), average='samples')
                        val_mcat_f1 += f1_score((mpreds.cpu() > 0.5), my_val.cpu(), average='samples')
                y_val = torch.cat(y_vals, dim=0)
                y_hat = torch.cat(y_hats, dim=0)
                # Averages over the number of validation batches.
                val_loss /= len(val_dl)
                val_cat_f1 /= len(val_dl)
                val_mcat_f1 /= len(val_dl)
                if use_wandb:
                    wandb.log({'val loss': val_loss, 'val cat f1': val_cat_f1, 'val mcat f1': val_mcat_f1})
                # Checkpoint on validation cat-F1 improvement.
                if (val_cat_f1 > best_score):
                    model_fname = f'model_{(val_cat_f1 * 100):.2f}.pt'
                    best_score = val_cat_f1
                    data = {'params': model.params, 'state_dict': model.state_dict(), 'scores': model.evaluate(val_dl, output_dict=True)}
                    if overwrite_model:
                        # Remove stale checkpoints before writing the new one.
                        for f in get_path(model.data_dir).glob(f'model*.pt'):
                            f.unlink()
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')
                        path = (get_path(model.data_dir) / model_fname)
                        torch.save(data, path)
                    if use_wandb:
                        if overwrite_model:
                            for f in Path(wandb.run.dir).glob(f'model*.pt'):
                                f.unlink()
                        torch.save(data, (Path(wandb.run.dir) / model_fname))
                        wandb.save(model_fname)
                desc = f'Epoch {epoch:2d} - loss {avg_loss:.4f} - cat f1 {avg_cat_f1:.4f} - mcat f1 {avg_mcat_f1:.4f} - val_loss {val_loss:.4f} - val cat f1 {val_cat_f1:.4f} - val mcat f1 {val_mcat_f1:.4f}'
                pbar.set_description(desc)
    return model
class MLP_2HL(nn.Module):
    """Two-hidden-layer MLP weak learner.

    ``forward`` returns both the penultimate representation (so it can be fed
    to the next learner) and a scalar score per sample.
    """

    def __init__(self, dim_in, dim_hidden1, dim_hidden2, sparse=False, bn=True):
        super(MLP_2HL, self).__init__()
        # Sparse inputs go through the project-specific SpLinear layer.
        first = SpLinear(dim_in, dim_hidden1) if sparse else nn.Linear(dim_in, dim_hidden1)
        self.in_layer = first
        self.dropout_layer = nn.Dropout(0.0)  # kept for interface parity; p=0 is a no-op
        self.lrelu = nn.LeakyReLU(0.1)
        self.relu = nn.ReLU()
        self.hidden_layer = nn.Linear(dim_hidden1, dim_hidden2)
        self.out_layer = nn.Linear(dim_hidden2, 1)
        self.bn = nn.BatchNorm1d(dim_hidden1)
        self.bn2 = nn.BatchNorm1d(dim_in)

    def forward(self, x, lower_f):
        """Score ``x``; when ``lower_f`` is given it is concatenated as extra features."""
        if lower_f is not None:
            x = torch.cat([x, lower_f], dim=1)
        normed = self.bn2(x)
        hidden = self.bn(self.lrelu(self.in_layer(normed)))
        penult = self.hidden_layer(hidden)
        score = self.out_layer(self.relu(penult)).squeeze()
        return (penult, score)

    def get_model(cls, stage, opt):
        """Build a learner for the given boosting stage (stage 0 sees raw features only)."""
        dim_in = opt.feat_d if stage == 0 else opt.feat_d + opt.hidden_d
        return MLP_2HL(dim_in, opt.hidden_d, opt.hidden_d, opt.sparse)
# NOTE(review): the decorator prefix (presumably "@pytest.mark") appears truncated
# in this chunk — confirm against the full file. The parametrization runs the test
# once per available task, skipping tasks whose name matches "lotka" or "sir".
.parametrize('task_name', [tn for tn in get_available_tasks() if (not re.search('lotka|sir', tn))])
def test_benchmark_metrics_selfobserved(task_name):
    """Run the benchmark on a self-generated observation and sanity-check outputs."""
    task = get_task(task_name)
    nobs = 1
    # Draw one ground-truth parameter from the prior and simulate its observation.
    theta_o = task.get_prior()(num_samples=nobs)
    sim = task.get_simulator()
    x_o = sim(theta_o)
    (outputs, nsim, logprob_truep) = run(task, observation=x_o, num_samples=16, num_simulations=64, neural_net='mdn', num_rounds=1)
    assert outputs.shape
    assert (outputs.shape[0] > 0)
    # No reference posterior exists for a self-observed x_o, so no log-prob is returned.
    assert (logprob_truep == None)
    # Posterior-predictive samples should lie at positive median distance from x_o.
    predictive_samples = sim(outputs)
    value = median_distance(predictive_samples, x_o)
    assert (value > 0)
class AsyncRenderer():
    """Wrapper around ``renderer.Renderer`` with an optional background-process mode.

    In synchronous mode :meth:`set_args` renders in-process. In asynchronous mode
    arguments are shipped to a spawned worker through a queue; results carry a
    monotonically increasing stamp so stale frames are discarded by
    :meth:`get_result`.
    """

    def __init__(self):
        self._closed = False
        self._is_async = False
        self._cur_args = None      # last args submitted via set_args()
        self._cur_result = None    # most recent render result
        self._cur_stamp = 0        # generation counter, bumped by clear_result()
        self._renderer_obj = None  # lazily created in-process renderer (sync mode)
        self._args_queue = None
        self._result_queue = None
        self._process = None

    def close(self):
        """Release the renderer and terminate any worker process; object is unusable after."""
        self._closed = True
        self._renderer_obj = None
        if self._process is not None:
            self._process.terminate()
        self._process = None
        self._args_queue = None
        self._result_queue = None

    def is_async(self):
        return self._is_async

    def set_async(self, is_async):
        self._is_async = is_async

    def set_args(self, **args):
        """Submit render arguments; skipped when identical to the previous submission."""
        assert not self._closed
        if args != self._cur_args:
            if self._is_async:
                self._set_args_async(**args)
            else:
                self._set_args_sync(**args)
            self._cur_args = args

    def _set_args_async(self, **args):
        # Lazily start the worker on the first async submission.
        if self._process is None:
            self._args_queue = multiprocessing.Queue()
            self._result_queue = multiprocessing.Queue()
            try:
                multiprocessing.set_start_method('spawn')
            except RuntimeError:
                pass  # start method already fixed earlier in the program
            self._process = multiprocessing.Process(target=self._process_fn, args=(self._args_queue, self._result_queue), daemon=True)
            self._process.start()
        self._args_queue.put([args, self._cur_stamp])

    def _set_args_sync(self, **args):
        if self._renderer_obj is None:
            self._renderer_obj = renderer.Renderer()
        self._cur_result = self._renderer_obj.render(**args)

    def get_result(self):
        """Return the newest result; drains the queue, keeping only current-stamp frames."""
        assert not self._closed
        if self._result_queue is not None:
            while self._result_queue.qsize() > 0:
                (result, stamp) = self._result_queue.get()
                if stamp == self._cur_stamp:
                    self._cur_result = result
        return self._cur_result

    def clear_result(self):
        """Drop cached args/result and invalidate in-flight async frames."""
        assert not self._closed
        self._cur_args = None
        self._cur_result = None
        self._cur_stamp += 1

    # BUG FIX: this worker function takes no `self`, yet it is passed as
    # `target=self._process_fn` with exactly (args_queue, result_queue) — as a
    # bound method the implicit `self` would make the call arity wrong and crash
    # the worker. It must be a staticmethod.
    @staticmethod
    def _process_fn(args_queue, result_queue):
        """Worker loop: render the latest queued args, wrap errors, post results with stamps."""
        renderer_obj = renderer.Renderer()
        cur_args = None
        cur_stamp = None
        while True:
            (args, stamp) = args_queue.get()
            # Skip ahead to the most recently queued request.
            while args_queue.qsize() > 0:
                (args, stamp) = args_queue.get()
            if args != cur_args or stamp != cur_stamp:
                result = renderer_obj.render(**args)
                if 'error' in result:
                    result.error = renderer.CapturedException(result.error)
                result_queue.put([result, stamp])
                cur_args = args
                cur_stamp = stamp
def test_byte():
    """Round-trip a byte-flavoured uint8 NumpyType through the type parser."""
    original = NumpyType('uint8', {'__array__': 'byte'})
    reparsed = parser.parse(str(original))
    assert str(reparsed) == str(original)
def train_G_D1(fake, D1, optimizer, **kwargs):
    """Generator update against discriminator D1: push D1(fake) towards 'real'.

    Returns a 6-tuple matching the sibling training helpers; unused slots are 0.0.
    """
    prediction = D1(fake)
    gen_err = loss._loss(y=prediction, target=True)
    gen_err.backward()
    optimizer.step()
    mean_pred = prediction.detach().mean().item()
    return (0.0, mean_pred, 0.0, 0.0, gen_err.item(), 0.0)
class Subwords_w(Parent):
    """The finite enumerated set of all subwords of a fixed word ``w``."""

    def __init__(self, w, element_constructor):
        Parent.__init__(self, category=FiniteEnumeratedSets())
        self._w = w
        self._build = element_constructor

    def __eq__(self, other) -> bool:
        same_class = self.__class__ == other.__class__
        return same_class and self._w == other._w and self._build == other._build

    def __ne__(self, other) -> bool:
        equal = (self == other)
        return not equal

    def __reduce__(self):
        # Pickle support: rebuild from the word and the element constructor.
        return (Subwords_w, (self._w, self._build))

    def _repr_(self) -> str:
        return 'Subwords of {!r}'.format(self._w)

    def __contains__(self, w) -> bool:
        # w is a subword of self._w iff an embedding of positions exists.
        return smallest_positions(self._w, w) is not False

    def cardinality(self) -> Integer:
        # Every subset of positions yields one subword: 2^len(w).
        return Integer(1) << len(self._w)

    def first(self):
        return self._build([])

    def last(self):
        return self._build(self._w)

    def random_element(self):
        # Keep each letter independently with probability 1/2.
        kept = (letter for letter in self._w if prandom.randint(0, 1))
        return self._build(kept)

    def __iter__(self) -> Iterator:
        by_length = (Subwords_wk(self._w, k, self._build) for k in range(len(self._w) + 1))
        return itertools.chain.from_iterable(by_length)
def main(argv):
    """Entry point: run beam search over a dataset and dump scores (and optionally
    attention weights) to an HDF file.

    :param list[str] argv: sys.argv-style arguments; argv[0] is ignored.
    """
    arg_parser = argparse.ArgumentParser(description='Dump search scores and other info to HDF file.')
    arg_parser.add_argument('config', help='filename to config-file')
    arg_parser.add_argument('--dataset', default='config:train')
    arg_parser.add_argument('--epoch', type=int, default=(- 1), help='-1 for last epoch')
    arg_parser.add_argument('--output_file', help='hdf', required=True)
    arg_parser.add_argument('--rec_layer_name', default='output')
    arg_parser.add_argument('--cheating', action='store_true', help='add ground truth to the beam')
    arg_parser.add_argument('--att_weights', action='store_true', help='dump all softmax_over_spatial layers')
    arg_parser.add_argument('--verbosity', default=4, type=int, help='5 for all seqs (default: 4)')
    arg_parser.add_argument('--seq_list', nargs='+', help='use only these seqs')
    (args, remaining_args) = arg_parser.parse_known_args(argv[1:])
    init(config_filename=args.config, log_verbosity=args.verbosity, remaining_args=remaining_args)
    dataset = init_dataset(args.dataset)
    print('Dataset:')
    pprint(dataset)
    if args.seq_list:
        # Restrict to the requested sequences and disable epoch partitioning so a
        # single epoch covers all of them.
        dataset.seq_tags_filter = set(args.seq_list)
        dataset.partition_epoch = 1
        if isinstance(dataset, MetaDataset):
            for sub_dataset in dataset.datasets.values():
                # BUG FIX: this previously assigned to `dataset.seq_tags_filter`
                # (a no-op re-assignment), leaving the sub-datasets unfiltered.
                sub_dataset.seq_tags_filter = set(args.seq_list)
                sub_dataset.partition_epoch = 1
        dataset.finish_epoch()
    if (dataset.seq_tags_filter is not None):
        print('Using sequences:')
        pprint(dataset.seq_tags_filter)
    if (args.epoch >= 1):
        config.set('load_epoch', args.epoch)

    def net_dict_post_proc(net_dict):
        # Patch the network for search: cheating beam, att-weight dumping, HDF output.
        prepare_compile(rec_layer_name=args.rec_layer_name, net_dict=net_dict, cheating=args.cheating, dump_att_weights=args.att_weights, hdf_filename=args.output_file, possible_labels=dataset.labels)
        return net_dict

    engine = Engine(config=config)
    engine.use_search_flag = True
    engine.init_network_from_config(config, net_dict_post_proc=net_dict_post_proc)
    engine.search(dataset, do_eval=config.bool('search_do_eval', True), output_layer_names=args.rec_layer_name)
    engine.finalize()
    print('Search finished.')
    assert os.path.exists(args.output_file), 'hdf file not dumped?'
class CamEncode(nn.Module):
    """Camera image encoder: EfficientNet-B0 trunk whose last two reduction
    levels are fused by an Up block into C output channels."""

    def __init__(self, C):
        super(CamEncode, self).__init__()
        self.C = C
        self.trunk = EfficientNet.from_pretrained('efficientnet-b0')
        # 320 and 112 are the channel counts of the trunk's last two reductions.
        self.up1 = Up((320 + 112), self.C)

    def get_eff_depth(self, x):
        """Run the trunk, collect features just before each spatial downsampling,
        then fuse reduction_5 with reduction_4."""
        trunk = self.trunk
        endpoints = dict()
        feat = trunk._swish(trunk._bn0(trunk._conv_stem(x)))
        previous = feat
        total = len(trunk._blocks)
        for block_idx, block in enumerate(trunk._blocks):
            # Linearly scaled stochastic-depth rate, as in the EfficientNet paper.
            drop_rate = trunk._global_params.drop_connect_rate
            if drop_rate:
                drop_rate *= float(block_idx) / total
            feat = block(previous, drop_connect_rate=drop_rate)
            if previous.size(2) > feat.size(2):
                # Spatial resolution just dropped: record the pre-drop feature.
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = previous
            previous = feat
        endpoints['reduction_{}'.format(len(endpoints) + 1)] = feat
        return self.up1(endpoints['reduction_5'], endpoints['reduction_4'])

    def forward(self, x):
        return self.get_eff_depth(x)
def scrape_index_pages(seed_page):
    """Scrape {'title', 'url'} dicts from an A-Z index page.

    On any fetch/parse failure the page is logged to ``errors_file`` and an
    empty list is returned.
    """
    try:
        page = urllib.request.urlopen(seed_page)
        soup = BeautifulSoup(page, 'html.parser')
    except Exception as e:
        print('Skipping: ', seed_page)
        errors_file.write(seed_page + '\t' + str(e) + '\n')
        return []
    cards = soup.findAll('li', {'class': 'hub-AZ-list__card hub-AZ-list__card--byTitle'})
    print('Found %d items.' % len(cards))
    results = []
    for card in cards:
        card_title = card.find('a', href=True).text
        card_href = card.find('a').get('href')
        results.append({
            'title': card_title.strip().replace(',', ''),
            'url': urllib.parse.urljoin(MAIN_SITE, card_href.strip()),
        })
    return results
class AffordanceCVAE(nn.Module):
    """Conditional VAE over contact points, optionally conditioned on a hand
    trajectory fused into the visual context."""

    def __init__(self, in_dim, hidden_dim, latent_dim, condition_dim, coord_dim=None, pred_len=4, condition_traj=True, z_scale=2.0):
        super().__init__()
        self.latent_dim = latent_dim
        self.condition_traj = condition_traj
        self.z_scale = z_scale  # std-dev multiplier for the sampled latent at inference
        if self.condition_traj:
            if coord_dim is None:
                coord_dim = hidden_dim // 2
            self.coord_dim = coord_dim
            traj_in = 2 * (pred_len + 1)            # (x, y) per trajectory step
            traj_out = coord_dim * (pred_len + 1)
            self.traj_to_feature = nn.Sequential(
                nn.Linear(traj_in, traj_out, bias=False),
                nn.ELU(inplace=True))
            self.traj_context_fusion = nn.Sequential(
                nn.Linear(condition_dim + traj_out, condition_dim, bias=False),
                nn.ELU(inplace=True))
        self.cvae = VAE(in_dim=in_dim, hidden_dim=hidden_dim, latent_dim=latent_dim, conditional=True, condition_dim=condition_dim)

    def _condition(self, context, hand_traj):
        # Build the CVAE condition: fuse trajectory features when enabled.
        if not self.condition_traj:
            return context
        assert hand_traj is not None
        flat_traj = hand_traj.reshape(context.shape[0], -1)
        traj_feat = self.traj_to_feature(flat_traj)
        fused = torch.cat([context, traj_feat], dim=1)
        return self.traj_context_fusion(fused)

    def forward(self, context, contact_point, hand_traj=None, return_pred=False):
        """Return (recon_loss, KLD), or (pred_contact, recon_loss, KLD) when return_pred."""
        cond = self._condition(context, hand_traj)
        if return_pred:
            (pred_contact, recon_loss, KLD) = self.cvae(contact_point, c=cond, return_pred=return_pred)
            return (pred_contact, recon_loss, KLD)
        (recon_loss, KLD) = self.cvae(contact_point, c=cond)
        return (recon_loss, KLD)

    def inference(self, context, hand_traj=None):
        """Sample a latent (scaled by z_scale) and decode a contact point."""
        cond = self._condition(context, hand_traj)
        noise = torch.randn([cond.shape[0], self.latent_dim], device=cond.device)
        return self.cvae.inference(self.z_scale * noise, c=cond)
class LogisticMatrixFactorization(RecMixin, BaseRecommenderModel):
    """Logistic Matrix Factorization recommender (elliot-style wrapper).

    Hyper-parameters (via ``_params_list``): learning rate, number of factors,
    L2 regularisation weight and the confidence scaling ``alpha``.
    """
    # NOTE(review): bare "_charger" looks like a truncated decorator on __init__
    # (elliot uses "@init_charger") — confirm against the full file.
    _charger

    def __init__(self, data, config, params, *args, **kwargs):
        # (attribute name, shortcut, param name, default, cast, reader)
        self._params_list = [('_learning_rate', 'lr', 'lr', 0.001, None, None), ('_factors', 'factors', 'factors', 10, None, None), ('_l_w', 'reg', 'reg', 0.1, None, None), ('_alpha', 'alpha', 'alpha', 0.5, None, None)]
        self.autoset_params()
        if (self._batch_size < 1):
            # Non-positive batch size means full-batch training.
            self._batch_size = self._data.transactions
        self._ratings = self._data.train_dict
        self._sp_i_train = self._data.sp_i_train
        self._i_items_set = list(range(self._num_items))
        self._sampler = pws.Sampler(self._data.i_train_dict)
        self._model = LogisticMatrixFactorizationModel(self._num_users, self._num_items, self._factors, self._l_w, self._alpha, self._learning_rate, self._seed)

    def name(self):
        """Unique model name including base and model hyper-parameter shortcuts."""
        return (('LMF' + f'_{self.get_base_params_shortcut()}') + f'_{self.get_params_shortcut()}')

    def predict(self, u: int, i: int):
        # Single-pair prediction is not implemented for this model.
        pass

    def train(self):
        """Alternating training: each epoch first updates item factors, then user factors."""
        if self._restore:
            return self.restore_weights()
        for it in self.iterate(self._epochs):
            loss = 0
            steps = 0
            # Two passes per epoch, hence transactions * 2 in the progress total.
            with tqdm(total=int(((self._data.transactions * 2) // self._batch_size)), disable=(not self._verbose)) as t:
                # Pass 1: freeze user factors, update item side.
                for batch in self._sampler.step(self._data.transactions, self._batch_size):
                    steps += 1
                    self._model.set_update_user(False)
                    loss += self._model.train_step(batch)
                    t.set_postfix({'loss': f'{(loss.numpy() / steps):.5f}'})
                    t.update()
                # Pass 2: update user factors.
                for batch in self._sampler.step(self._data.transactions, self._batch_size):
                    steps += 1
                    self._model.set_update_user(True)
                    loss += self._model.train_step(batch)
                    t.set_postfix({'loss': f'{(loss.numpy() / steps):.5f}'})
                    t.update()
            # NOTE(review): loss is averaged by (it + 1), not by steps — confirm intended.
            self.evaluate(it, (loss.numpy() / (it + 1)))

    def get_recommendations(self, k: int=100):
        """Compute top-k recommendations for all users, batched over user blocks."""
        predictions_top_k_test = {}
        predictions_top_k_val = {}
        for (index, offset) in enumerate(range(0, self._num_users, self._batch_size)):
            offset_stop = min((offset + self._batch_size), self._num_users)
            predictions = self._model.predict_batch(offset, offset_stop)
            (recs_val, recs_test) = self.process_protocol(k, predictions, offset, offset_stop)
            predictions_top_k_val.update(recs_val)
            predictions_top_k_test.update(recs_test)
        return (predictions_top_k_val, predictions_top_k_test)
def array2csv(array, filename, **kwargs):
    """Write a 2-D array-like to ``filename`` as CSV (no index column).

    Extra keyword arguments are forwarded to ``DataFrame.to_csv``; returns its
    result (``None`` when a path is given).
    """
    return pd.DataFrame(array).to_csv(filename, index=False, **kwargs)
class DecreasingHeckeFactorization(Element, metaclass=InheritComparisonClasscallMetaclass):
    """A decreasing factorization of a word in a 0-Hecke monoid.

    The value is stored as a tuple of tuples (one tuple per factor). The parent
    is chosen automatically: fully commutative words (no 321 pattern) go into
    the crystal parent, all others into the generic factorizations parent.
    """

    def __classcall_private__(self, t, max_value=None, parent=None):
        # Validate the input shape before any construction.
        _check_decreasing_hecke_factorization(t)
        if isinstance(t, DecreasingHeckeFactorization):
            u = t.value
            if (parent is None):
                parent = t.parent()
        else:
            u = t
            if (parent is None):
                if (max_value is None):
                    # Largest letter occurring in t; default to 1 for the empty word.
                    letters = [x for factor in t for x in factor]
                    max_value = (max(letters) if letters else 1)
                from sage.monoids.hecke_monoid import HeckeMonoid
                S = SymmetricGroup((max_value + 1))
                H = HeckeMonoid(S)
                # Canonical reduced word of the Hecke-monoid element of t.
                word = H.from_reduced_word((x for factor in t for x in factor)).reduced_word()
                factors = len(t)
                # Excess = number of letters beyond the reduced-word length.
                excess = (sum((len(l) for l in t)) - len(word))
                p = permutation.from_reduced_word(word)
                if p.has_pattern([3, 2, 1]):
                    # Not fully commutative: generic parent.
                    word = S.from_reduced_word(word)
                    parent = DecreasingHeckeFactorizations(word, factors, excess)
                else:
                    # Fully commutative: crystal parent.
                    word = S.from_reduced_word(word)
                    parent = FullyCommutativeStableGrothendieckCrystal(word, factors, excess)
        return parent.element_class(parent, u)

    def __init__(self, parent, t):
        """Store the factorization (as a tuple of tuples) and cache parent data."""
        Element.__init__(self, parent)
        self.factors = parent.factors
        self.max_value = parent.max_value
        self.w = parent.w
        self.excess = parent.excess
        self.value = tuple((tuple(factors) for factors in t))

    def _repr_(self):
        # Each factor printed as "(a, b, ...)" with no separators between factors.
        return ''.join(((('(' + repr(list(factor))[1:(- 1)]) + ')') for factor in self.value))

    def __hash__(self):
        return hash((self.max_value, self.value))

    # Rich comparisons derived from __eq__ and __lt__ below.
    _richcmp_ = richcmp_by_eq_and_lt('__eq__', '__lt__')

    def __eq__(self, other):
        return (isinstance(self, type(other)) and (self.value == other.value))

    def __lt__(self, other):
        # Order by weight first, then lexicographically by value.
        return ((self.weight(), self.value) < (other.weight(), other.value))

    def _latex_(self):
        """LaTeX form; empty factors render as "\\left(\\;\\right)"."""
        s = ''
        for factor in self.value:
            if factor:
                s += (('\\left(' + repr(list(factor))[1:(- 1)]) + '\\right)')
            else:
                s += '\\left(\\;\\right)'
        return s

    def weight(self):
        """Tuple of factor lengths, read from the last factor to the first."""
        return tuple([len(l) for l in reversed(self.value)])

    def to_word(self):
        """Concatenate all factors into a single flat word."""
        return [j for factors in self.value for j in factors]

    def to_increasing_hecke_biword(self):
        """Return the biword [row indices, letters] reading factors right-to-left,
        each factor reversed so the letter row is increasing within a block."""
        L = [[], []]
        for j in range(len(self.value)):
            L[1] += list(self.value[((- j) - 1)][::(- 1)])
            L[0] += ([(j + 1)] * len(self.value[((- j) - 1)]))
        return L
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Expand a list of paths into concrete files.

    Directories are walked only when ``recursive`` is set; walked files must
    match one of ``extensions`` (without the dot) and may be pruned by fnmatch
    ``exclude`` patterns. Non-directory entries are passed through unchanged.
    """
    extensions = [] if extensions is None else extensions
    exclude = [] if exclude is None else exclude
    collected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            # Explicitly listed files are kept regardless of extension.
            collected.append(entry)
            continue
        for dirpath, dirnames, filenames in os.walk(entry):
            candidates = [os.path.join(dirpath, name) for name in filenames]
            for pattern in exclude:
                # Prune excluded subdirectories in-place so os.walk skips them.
                dirnames[:] = [d for d in dirnames if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)]
                candidates = [c for c in candidates if not fnmatch.fnmatch(c, pattern)]
            for candidate in candidates:
                suffix = os.path.splitext(candidate)[1][1:]
                if suffix in extensions:
                    collected.append(candidate)
    return collected
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
    """Label-smoothed negative log-likelihood.

    ``lprobs`` are log-probabilities with the class axis last; ``target`` holds
    class indices (optionally already unsqueezed). Positions equal to
    ``ignore_index`` contribute zero. Returns ``(loss, nll_loss)``; both are
    summed when ``reduce`` is true.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    # Smoothing term: uniform mass over all classes.
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is None:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    else:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    if reduce:
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return (loss, nll_loss)
def merge_datasets(datasets):
    """Merge several datasets into one by concatenating data and targets.

    Accepts a list or a dict (dict values are merged in sorted-key order).
    NOTE: the first dataset is mutated in place and returned.
    """
    if isinstance(datasets, dict):
        ordered = sorted(datasets)
        datasets = [datasets[key] for key in ordered]
    merged = datasets[0]
    data_parts = [ds.data for ds in datasets]
    if torch.is_tensor(data_parts[0]):
        merged.data = torch.cat(data_parts, dim=0)
    else:
        merged.data = np.concatenate(data_parts, axis=0)
    merged.targets = [t for ds in datasets for t in ds.targets]
    return merged
def calculate_diff_w_significance(A_scores, B_scores, alpha=1e-05):
    """Mean difference A - B with a one-sided t-test p-value and a normal
    confidence interval for the difference of means.

    Returns a dict with keys 'mu', 'p_value', 'mu_std', 'lo', 'hi'.
    """
    a = np.array(A_scores)
    b = np.array(B_scores)
    mu = a.mean() - b.mean()
    # One-sided test: H1 is mean(A) > mean(B).
    p_value = stats.ttest_ind(a, b, alternative='greater')[1]
    # Std-dev of the difference of sample means (population variances).
    mu_std = np.sqrt(a.var() / len(a) + b.var() / len(b))
    target_z = norm.ppf(1 - alpha / 2)
    lo = mu - target_z * mu_std
    hi = mu + target_z * mu_std
    return {'mu': mu, 'p_value': p_value, 'mu_std': mu_std, 'lo': lo, 'hi': hi}
class CPM(object):
    """Convolutional Pose Machine (TensorFlow 1.x graph code).

    A VGG-style trunk produces features; a first prediction head and
    ``numStage`` refinement stages produce keypoint score maps, optionally with
    a second branch emitting Part Affinity Fields (PAF).
    """

    def __init__(self, crop_size=256, out_chan=21, withPAF=False, PAFdim=2, numPAF=19, numStage=5, input_chan=3, withDirVec=False, withConf=False, withSeg=False):
        # NOTE(review): input_chan is accepted but not stored, and withDirVec /
        # withConf / withSeg are stored but never read in the visible code —
        # confirm against the full file.
        self.name = 'CPM'
        self.out_chan = out_chan        # number of keypoint score-map channels
        self.crop_size = crop_size      # expected square input height/width
        self.withPAF = withPAF          # select the two-branch (PAF) head in inference()
        self.PAFdim = PAFdim            # vector components per PAF field
        self.numPAF = numPAF            # number of PAF fields
        self.numStage = numStage        # refinement stages after the first prediction
        self.withDirVec = withDirVec
        self.withConf = withConf
        self.withSeg = withSeg

    def init(self, weight_path, sess):
        """Assign all variables under scope 'CPM' from a .npy weight dict
        (maps op name -> {param name -> array})."""
        with tf.variable_scope('CPM'):
            data_dict = np.load(weight_path, encoding='latin1').item()
            for op_name in data_dict:
                # reuse=True: variables already exist; we only assign values.
                with tf.variable_scope(op_name, reuse=True):
                    for (param_name, data) in data_dict[op_name].items():
                        var = tf.get_variable(param_name)
                        sess.run(var.assign(data))
            print('Finish loading weight from {}'.format(weight_path))

    def init_pickle(self, session, weight_files=None, exclude_var_list=None):
        """Load variables from pickled {variable name -> value} files, skipping
        any name containing a substring from ``exclude_var_list``."""
        if (exclude_var_list is None):
            exclude_var_list = list()
        import pickle
        import os
        for file_name in weight_files:
            assert os.path.exists(file_name), 'File not found.'
            with open(file_name, 'rb') as fi:
                weight_dict = pickle.load(fi)
                weight_dict = {k: v for (k, v) in weight_dict.items() if (not any([(x in k) for x in exclude_var_list]))}
                if (len(weight_dict) > 0):
                    (init_op, init_feed) = tf.contrib.framework.assign_from_values(weight_dict)
                    session.run(init_op, init_feed)
                    print(('Loaded %d variables from %s' % (len(weight_dict), file_name)))

    def init_vgg(self, sess, weight_path='./weights/vgg16.npy'):
        """Initialise the conv trunk from ImageNet-pretrained VGG16 weights,
        skipping non-conv ops and conv5_3."""
        print('initialize from ImageNet pretrained VGG')
        with tf.variable_scope('CPM'):
            data_dict = np.load(weight_path, encoding='latin1').item()
            for op_name in data_dict:
                if ((not op_name.startswith('conv')) or (op_name == 'conv5_3')):
                    continue
                with tf.variable_scope(op_name, reuse=True):
                    assert (len(data_dict[op_name]) == 2)
                    for data in data_dict[op_name]:
                        try:
                            # Distinguish kernel (4-D) from bias (1-D) by rank.
                            if (data.ndim == 4):
                                var = tf.get_variable('weights')
                            elif (data.ndim == 1):
                                var = tf.get_variable('biases')
                            else:
                                raise Exception
                            sess.run(var.assign(data))
                        except Exception:
                            print('Fail to load {}'.format(op_name))
        print('Finish loading weight from {}'.format(weight_path))

    def inference(self, input_image, train=False):
        """Build the forward graph.

        Returns (scoremaps, conv_feature, PAF, fcOut, fcOutConf, logitsSeg);
        the last three are always None in the visible code.
        """
        with tf.variable_scope('CPM'):
            s = input_image.get_shape().as_list()
            assert ((s[1] == self.crop_size) and (s[2] == self.crop_size))
            # VGG-style trunk configuration: convs per block / widths / pooling.
            layers_per_block = [2, 2, 4, 2]
            out_chan_list = [64, 128, 256, 512]
            pool_list = [True, True, True, False]
            scMaps = []  # pre-pool features per block (collected but not returned here)
            x = input_image
            for (block_id, (layer_num, chan_num, pool)) in enumerate(zip(layers_per_block, out_chan_list, pool_list), 1):
                for layer_id in range(layer_num):
                    x = ops.conv_relu(x, ('conv%d_%d' % (block_id, (layer_id + 1))), kernel_size=3, stride=1, out_chan=chan_num, leaky=False, trainable=train)
                if pool:
                    scMaps.append(x)
                    x = ops.max_pool(x, ('pool%d' % block_id))
            PAF = []
            fcOut = None
            fcOutConf = None
            logitsSeg = None
            if (not self.withPAF):
                # --- Single-branch head: keypoint score maps only. ---
                x = ops.conv_relu(x, 'conv4_3', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
                x = ops.conv_relu(x, 'conv4_4', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
                x = ops.conv_relu(x, 'conv5_1', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
                x = ops.conv_relu(x, 'conv5_2', kernel_size=3, stride=1, out_chan=512, leaky=False, trainable=train)
                # Shared feature reused as input to every refinement stage.
                conv_feature = ops.conv_relu(x, 'conv5_3_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x = ops.conv_relu(conv_feature, 'conv6_1_CPM', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
                x = ops.conv(x, 'conv6_2_CPM', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
                scoremaps = [x]
                for stage_id in range(2, (2 + self.numStage)):
                    # Each stage refines the previous prediction given the shared feature.
                    x = tf.concat([x, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
                    for layer_id in range(1, 6):
                        x = ops.conv_relu(x, 'Mconv{}_stage{}'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
                    x = ops.conv_relu(x, 'Mconv6_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
                    x = ops.conv(x, 'Mconv7_stage{}'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
                    scoremaps.append(x)
                xKps = tf.identity(x)
            else:
                # --- Two-branch head: L1 predicts PAFs, L2 predicts score maps. ---
                x = ops.conv_relu(x, 'conv4_3_CPM', kernel_size=3, stride=1, out_chan=256, leaky=False, trainable=train)
                conv_feature = ops.conv_relu(x, 'conv4_4_CPM', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x1 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x1 = ops.conv_relu(x1, 'conv5_2_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x1 = ops.conv_relu(x1, 'conv5_3_CPM_L1', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x1 = ops.conv_relu(x1, 'conv5_4_CPM_L1', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
                x1 = ops.conv(x1, 'conv5_5_CPM_L1', kernel_size=1, stride=1, out_chan=(self.PAFdim * self.numPAF), trainable=train)
                x2 = ops.conv_relu(conv_feature, 'conv5_1_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x2 = ops.conv_relu(x2, 'conv5_2_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x2 = ops.conv_relu(x2, 'conv5_3_CPM_L2', kernel_size=3, stride=1, out_chan=128, leaky=False, trainable=train)
                x2 = ops.conv_relu(x2, 'conv5_4_CPM_L2', kernel_size=1, stride=1, out_chan=512, leaky=False, trainable=train)
                x2 = ops.conv(x2, 'conv5_5_CPM_L2', kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
                scoremaps = [x2]
                PAF.append(x1)
                for stage_id in range(2, (2 + self.numStage)):
                    # Each stage sees both previous branch outputs plus the shared feature.
                    x = tf.concat([x1, x2, conv_feature], axis=3, name='concat_stage{}'.format(stage_id))
                    x1 = ops.conv_relu(x, 'Mconv{}_stage{}_L1'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
                    x2 = ops.conv_relu(x, 'Mconv{}_stage{}_L2'.format(1, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
                    for layer_id in range(2, 6):
                        x1 = ops.conv_relu(x1, 'Mconv{}_stage{}_L1'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
                        x2 = ops.conv_relu(x2, 'Mconv{}_stage{}_L2'.format(layer_id, stage_id), kernel_size=7, stride=1, out_chan=128, leaky=False, trainable=train)
                    x1 = ops.conv_relu(x1, 'Mconv6_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
                    x2 = ops.conv_relu(x2, 'Mconv6_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=128, leaky=False, trainable=train)
                    x1 = ops.conv(x1, 'Mconv7_stage{}_L1'.format(stage_id), kernel_size=1, stride=1, out_chan=(self.PAFdim * self.numPAF), trainable=train)
                    x2 = ops.conv(x2, 'Mconv7_stage{}_L2'.format(stage_id), kernel_size=1, stride=1, out_chan=self.out_chan, trainable=train)
                    scoremaps.append(x2)
                    PAF.append(x1)
                xKps = tf.identity(x2)
            return (scoremaps, conv_feature, PAF, fcOut, fcOutConf, logitsSeg)
class TestGF2Ops(unittest.TestCase):
    """Randomised algebraic checks for GF2Ops over field sizes 2..64."""

    def field_size_test(self, field_size):
        """Check GF(2^field_size) multiplication, squaring and inversion laws."""
        gf = GF2Ops(field_size)
        limit = 1 << field_size
        for trial in range(100):
            x = random.randrange(limit)
            y = random.randrange(limit)
            doubled = gf.mul2(x)
            product = gf.mul(x, y)
            # mul2 agrees with multiplication by the constant 2, both ways.
            self.assertEqual(doubled, gf.mul(x, 2))
            self.assertEqual(doubled, gf.mul(2, x))
            # Zero divisors do not exist; 1 is the identity; mul is commutative.
            self.assertEqual(product == 0, x == 0 or y == 0)
            self.assertEqual(product == x, y == 1 or x == 0)
            self.assertEqual(product == y, x == 1 or y == 0)
            self.assertEqual(product, gf.mul(y, x))
            if trial < 10:
                # Frobenius: squaring field_size times is the identity map.
                frob = x
                for _ in range(field_size):
                    frob = gf.sqr(frob)
                self.assertEqual(frob, x)
            if y != 0:
                y_inv = gf.inv(y)
                # Only 1 is its own inverse among tested equalities; inv is involutive.
                self.assertEqual(y == y_inv, y == 1)
                self.assertEqual(gf.mul(y, y_inv), 1)
                yii = gf.inv(y_inv)
                self.assertEqual(y, yii)
                if x != 0:
                    # Inverse of a product is the product of inverses.
                    x_inv = gf.inv(x)
                    self.assertEqual(gf.inv(product), gf.mul(x_inv, y_inv))

    def test(self):
        for field_size in range(2, 65):
            self.field_size_test(field_size)
def inference(args, model, test_save_path=None):
    """Evaluate ``model`` volume-by-volume on the test split and log metrics.

    Per case, ``test_single_volume`` returns per-class metrics plus big/small
    dice lists; aggregates mean dice, HD95 and boundary IoU over the dataset.
    Returns the string 'Testing Finished!'.
    """
    db_test = args.Dataset(base_dir=args.volume_path, split='test', list_dir=args.list_dir)
    testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
    logging.info('{} test iterations per epoch'.format(len(testloader)))
    model.eval()
    metric_list = 0.0       # accumulates per-class metric arrays across cases
    big_list = []           # per-case dice for "big" structures
    small_list = []         # per-case dice for "small" structures
    for (i_batch, sampled_batch) in tqdm(enumerate(testloader)):
        (h, w) = sampled_batch['image'].size()[2:]
        (image, label, case_name) = (sampled_batch['image'], sampled_batch['label'], sampled_batch['case_name'][0])
        (metric_i, big_i, small_i) = test_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], test_save_path=test_save_path, case=case_name, z_spacing=args.z_spacing)
        metric_list += np.array(metric_i)
        big_list += big_i
        small_list += small_i
        logging.info(('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1])))
    # Average the accumulated metrics over the number of cases.
    metric_list = (metric_list / len(db_test))
    big_dice = np.mean(big_list)
    small_dice = np.mean(small_list)
    for i in range(1, args.num_classes):
        logging.info(('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[(i - 1)][0], metric_list[(i - 1)][1])))
    performance = np.mean(metric_list, axis=0)[0]
    mean_hd95 = np.mean(metric_list, axis=0)[1]
    boundary_IoU = np.mean(metric_list, axis=0)[2]
    logging.info(('Testing performance in best val model: mean_dice : %f mean_hd95 : %f, boundary_IoU: %f' % (performance, mean_hd95, boundary_IoU)))
    logging.info(('big : %f, small: %f' % (big_dice, small_dice)))
    return 'Testing Finished!'
def _cvt_variable(v):
    """Unwrap a chainer Variable to its raw data, copying device arrays to host.

    Values with a ``.get()`` method (GPU arrays) are transferred; everything
    else is returned unchanged.
    """
    if isinstance(v, variable.Variable):
        v = v.data
    return v.get() if hasattr(v, 'get') else v
def register_Ns3SchedulerEvent_methods(root_module, cls):
    """Register Python bindings for ns3::Scheduler::Event (pybindgen-generated).

    Adds the '<' comparison, default and copy constructors, and the public
    'impl'/'key' instance attributes.
    """
    cls.add_binary_comparison_operator('<')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor from another Event.
    cls.add_constructor([param('ns3::Scheduler::Event const &', 'arg0')])
    cls.add_instance_attribute('impl', 'ns3::EventImpl *', is_const=False)
    cls.add_instance_attribute('key', 'ns3::Scheduler::EventKey', is_const=False)
    return
def _merge_entity_utterances(raw_utterances, stemmed_utterances):
    """Merge stemmed utterances into the raw mapping without overwriting.

    Entries are processed in resolved-value order for determinism; a stemmed
    key is added only when absent from ``raw_utterances``. The (mutated)
    ``raw_utterances`` dict is returned.
    """
    ordered = sorted(iteritems(stemmed_utterances), key=operator.itemgetter(1))
    for stemmed_key, resolved in ordered:
        if stemmed_key not in raw_utterances:
            raw_utterances[stemmed_key] = resolved
    return raw_utterances
def get_optimizer(name, model, **kwargs):
    """Build a torch optimizer over the model's trainable parameters.

    :param name: one of 'adam', 'sgd', 'rmsprop' (case/whitespace-insensitive).
    :param model: model whose trainable parameters are optimized.
    :param kwargs: optional 'lr', 'weight_decay', 'nesterov' overrides.
    :raises ValueError: for an unknown optimizer name.
    """
    name = name.lower().strip()
    parameters = get_trainable_params(model)
    if name == 'adam':
        # kwargs.get replaces the verbose "x if k in kwargs else default" ternaries.
        lr = kwargs.get('lr', 0.001)
        wd = kwargs.get('weight_decay', 0)
        print('Using Adam optimizer: Lr=', lr, 'Wd=', wd)
        return optim.Adam(parameters, lr=lr, weight_decay=wd)
    if name == 'sgd':
        print('Using SGD optimizer')
        lr = kwargs.get('lr', 0.01)
        wd = kwargs.get('weight_decay', 0)
        nesterov = kwargs.get('nesterov', True)
        return optim.SGD(parameters, lr=lr, momentum=0.9, weight_decay=wd, nesterov=nesterov)
    if name == 'rmsprop':
        lr = kwargs.get('lr', 0.005)
        return optim.RMSprop(parameters, lr=lr, momentum=0.0, eps=1e-10)
    raise ValueError('Unknown optimizer', name)
class SerializationInterop(FileSetup):
    """Fixture: save a (2x2 ones, 3x5 twos) tensor tuple to ivalue.pt using the
    zipfile serialization format."""
    path = 'ivalue.pt'

    def setup(self):
        first = torch.ones(2, 2)
        second = torch.ones(3, 5).mul(2)
        torch.save((first, second), self.path, _use_new_zipfile_serialization=True)
def SamplePairing(imgs):
    """Return an augmentation op that blends the input image with a randomly
    drawn image from ``imgs`` at mixing weight ``v``."""
    def blend_with_random(img1, v):
        pick = np.random.choice(len(imgs))
        partner = PIL.Image.fromarray(imgs[pick])
        return PIL.Image.blend(img1, partner, v)
    return blend_with_random
def yuv2rgb(Y, U, V, height, width):
    """Convert planar YUV (128-centred chroma) to 8-bit R, G, B planes.

    U and V are bilinearly resized to (height, width) before the standard
    YCbCr->RGB linear transform; channels are clipped to [0, 255].
    """
    U = imresize(U, [height, width], 'bilinear', mode='F')
    V = imresize(V, [height, width], 'bilinear', mode='F')
    rf = (Y + (1.4075 * (V - 128.0)))
    gf = ((Y - (0.3455 * (U - 128.0))) - (0.7169 * (V - 128.0)))
    bf = (Y + (1.779 * (U - 128.0)))
    # Vectorized clipping replaces the original O(height*width) per-pixel
    # Python loops; ndarray.clip bounds each channel to [0, 255] in one pass.
    r = rf.clip(0, 255).astype(uint8)
    g = gf.clip(0, 255).astype(uint8)
    b = bf.clip(0, 255).astype(uint8)
    return (r, g, b)
# NOTE(review): "_module()" looks like a truncated registry decorator
# (e.g. "@NECKS.register_module()") — confirm against the full file.
_module()
class DilatedEncoder(nn.Module):
    """YOLOF-style dilated encoder neck.

    Projects the last backbone feature map with a 1x1 conv, smooths it with a
    3x3 conv, then refines it through a stack of dilated residual bottlenecks.
    """

    def __init__(self, in_channels, out_channels, block_mid_channels, num_residual_blocks, block_dilations):
        super(DilatedEncoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.block_mid_channels = block_mid_channels      # bottleneck mid width
        self.num_residual_blocks = num_residual_blocks
        self.block_dilations = block_dilations            # one dilation per residual block
        self._init_layers()

    def _init_layers(self):
        # 1x1 lateral projection + 3x3 smoothing conv, each followed by BN.
        self.lateral_conv = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
        self.lateral_norm = BatchNorm2d(self.out_channels)
        self.fpn_conv = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1)
        self.fpn_norm = BatchNorm2d(self.out_channels)
        encoder_blocks = []
        for i in range(self.num_residual_blocks):
            dilation = self.block_dilations[i]
            encoder_blocks.append(Bottleneck(self.out_channels, self.block_mid_channels, dilation=dilation))
        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)

    def init_weights(self):
        """Caffe2-Xavier init for projection convs, N(0, 0.01) for block convs,
        constants for all norm layers."""
        caffe2_xavier_init(self.lateral_conv)
        caffe2_xavier_init(self.fpn_conv)
        for m in [self.lateral_norm, self.fpn_norm]:
            constant_init(m, 1)
        for m in self.dilated_encoder_blocks.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)

    def forward(self, feature):
        # Only the last (coarsest) input level is consumed; output is a 1-tuple.
        out = self.lateral_norm(self.lateral_conv(feature[(- 1)]))
        out = self.fpn_norm(self.fpn_conv(out))
        return (self.dilated_encoder_blocks(out),)
class Test_init_nd_shape_and_axes(object):
    """Tests for ``_init_nd_shape_and_axes``: default shape/axes resolution
    for 0-5d inputs, explicit shape/axes combinations (including -1 as
    "keep input size"), and the error paths for malformed arguments."""

    def test_py_0d_defaults(self):
        # 0-d input: both shape and axes resolve to empty arrays.
        x = np.array(4)
        shape = None
        axes = None
        shape_expected = np.array([])
        axes_expected = np.array([])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_0d_defaults(self):
        x = np.array(7.0)
        shape = None
        axes = None
        shape_expected = np.array([])
        axes_expected = np.array([])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_py_1d_defaults(self):
        # Defaults mirror the input's own shape and all axes.
        x = np.array([1, 2, 3])
        shape = None
        axes = None
        shape_expected = np.array([3])
        axes_expected = np.array([0])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_1d_defaults(self):
        x = np.arange(0, 1, 0.1)
        shape = None
        axes = None
        shape_expected = np.array([10])
        axes_expected = np.array([0])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_py_2d_defaults(self):
        x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        shape = None
        axes = None
        shape_expected = np.array([2, 4])
        axes_expected = np.array([0, 1])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_2d_defaults(self):
        x = np.arange(0, 1, 0.1).reshape(5, 2)
        shape = None
        axes = None
        shape_expected = np.array([5, 2])
        axes_expected = np.array([0, 1])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_5d_defaults(self):
        x = np.zeros([6, 2, 5, 3, 4])
        shape = None
        axes = None
        shape_expected = np.array([6, 2, 5, 3, 4])
        axes_expected = np.array([0, 1, 2, 3, 4])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_5d_set_shape(self):
        # -1 entries in shape are replaced by the input's size on that axis.
        x = np.zeros([6, 2, 5, 3, 4])
        shape = [10, (- 1), (- 1), 1, 4]
        axes = None
        shape_expected = np.array([10, 2, 5, 1, 4])
        axes_expected = np.array([0, 1, 2, 3, 4])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_5d_set_axes(self):
        # Explicit axes: shape follows the input sizes in the given axis order.
        x = np.zeros([6, 2, 5, 3, 4])
        shape = None
        axes = [4, 1, 2]
        shape_expected = np.array([4, 2, 5])
        axes_expected = np.array([4, 1, 2])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_np_5d_set_shape_axes(self):
        x = np.zeros([6, 2, 5, 3, 4])
        shape = [10, (- 1), 2]
        axes = [1, 0, 3]
        shape_expected = np.array([10, 6, 2])
        axes_expected = np.array([1, 0, 3])
        (shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
        assert_equal(shape_res, shape_expected)
        assert_equal(axes_res, axes_expected)

    def test_shape_axes_subset(self):
        # shape shorter than ndim with axes=None: applies to the LAST dims.
        x = np.zeros((2, 3, 4, 5))
        (shape, axes) = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
        assert_array_equal(shape, [5, 5, 5])
        assert_array_equal(axes, [1, 2, 3])

    def test_errors(self):
        x = np.zeros(1)
        with assert_raises(ValueError, match='axes must be a scalar or iterable of integers'):
            _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
        with assert_raises(ValueError, match='axes must be a scalar or iterable of integers'):
            _init_nd_shape_and_axes(x, shape=None, axes=[1.0, 2.0, 3.0, 4.0])
        with assert_raises(ValueError, match='axes exceeds dimensionality of input'):
            _init_nd_shape_and_axes(x, shape=None, axes=[1])
        with assert_raises(ValueError, match='axes exceeds dimensionality of input'):
            _init_nd_shape_and_axes(x, shape=None, axes=[(- 2)])
        with assert_raises(ValueError, match='all axes must be unique'):
            _init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
        with assert_raises(ValueError, match='shape must be a scalar or iterable of integers'):
            _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
        with assert_raises(ValueError, match='shape must be a scalar or iterable of integers'):
            _init_nd_shape_and_axes(x, shape=[1.0, 2.0, 3.0, 4.0], axes=None)
        with assert_raises(ValueError, match='when given, axes and shape arguments have to be of the same length'):
            _init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]), shape=[1, 2, 3], axes=[1])
        with assert_raises(ValueError, match='invalid number of data points \\(\\[0\\]\\) specified'):
            _init_nd_shape_and_axes(x, shape=[0], axes=None)
        with assert_raises(ValueError, match='invalid number of data points \\(\\[-2\\]\\) specified'):
            _init_nd_shape_and_axes(x, shape=(- 2), axes=None)
def LF_travel(s):
    """Labeling function for travel mentions.

    Returns TRAVEL when an un-negated travel keyword matches in ``s``,
    NO_TRAVEL when the match is negated, and ABSTAIN when nothing matches.
    """
    pattern = '\\b(travel(s|ed|ing)*|vacation|trip)\\b'
    hit = match_regex(pattern, s)
    if hit:
        return NO_TRAVEL if is_negated(hit) else TRAVEL
    return ABSTAIN
def _pairwise_emd_cd_(sample_pcs, ref_pcs, batch_size):
    """Compute full pairwise Chamfer-distance and approximate-EMD matrices
    between two sets of point clouds.

    For each sample cloud, the reference set is processed in chunks of
    ``batch_size`` and the single sample cloud is expanded to match each
    chunk's batch dimension.

    Returns:
        (all_cd, all_emd): tensors of shape (n_sample, n_ref).
    """
    print('computing Earth Mover and Chamfer distances')
    n_sample = sample_pcs.shape[0]
    n_ref = ref_pcs.shape[0]
    all_cd = []
    all_emd = []
    iterator = range(n_sample)
    for sample_b_start in tqdm.tqdm(iterator):
        sample_batch = sample_pcs[sample_b_start]
        cd_lst = []
        emd_lst = []
        for ref_b_start in range(0, n_ref, batch_size):
            ref_b_end = min(n_ref, (ref_b_start + batch_size))
            ref_batch = ref_pcs[ref_b_start:ref_b_end]
            batch_size_ref = ref_batch.size(0)
            # Replicate the single sample cloud across the reference chunk;
            # contiguous() because expand() returns a non-contiguous view.
            sample_batch_exp = sample_batch.view(1, (- 1), 3).expand(batch_size_ref, (- 1), (- 1))
            sample_batch_exp = sample_batch_exp.contiguous()
            cd_lst.append(chamfer_distance(sample_batch_exp, ref_batch, batch_reduction=None)[0].unsqueeze(0))
            emd_batch = emd_approx(sample_batch_exp, ref_batch)
            emd_lst.append(emd_batch.view(1, (- 1)))
        # Concatenate chunks into one row per sample cloud.
        cd_lst = torch.cat(cd_lst, dim=(- 1))
        emd_lst = torch.cat(emd_lst, dim=(- 1))
        all_cd.append(cd_lst)
        all_emd.append(emd_lst)
    all_cd = torch.cat(all_cd, dim=0)
    all_emd = torch.cat(all_emd, dim=0)
    return (all_cd, all_emd)
def get(tag_like: (str | Tag)) -> Model:
    """Fetch a stored model by tag and verify it belongs to this framework module.

    Raises:
        NotFound: when the model was saved by a different module.
    """
    model = bentoml.models.get(tag_like)
    if model.info.module in (MODULE_NAME, __name__):
        return model
    raise NotFound(f'Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}.')
class BasicBlock(nn.Module):
    """Depthwise-separable conv block: depthwise kxk conv then pointwise 1x1
    conv, each followed by normalization and activation, with an optional
    squeeze-and-excitation module. No residual/skip connection is added.
    """

    def __init__(self, inplanes, planes, stride=1, dilation=1, kernel=3, norm='bn', use_se=False, activation=nn.ReLU):
        super(BasicBlock, self).__init__()
        # "Same" padding for a dilated kernel.
        padding = ((dilation * kernel) - dilation) // 2
        (self.inplanes, self.planes) = (int(inplanes), int(planes))
        # Depthwise conv: groups == input channels.
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size=kernel, padding=padding, stride=stride, dilation=dilation, groups=inplanes, bias=False)
        self.bn1 = make_norm(inplanes, norm=norm)
        # Pointwise projection to the output channel count.
        self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = make_norm(planes, norm=norm)
        self.se = (ops.Se2d(planes, reduction=4) if use_se else None)
        # Prefer an in-place activation; fall back for activation classes
        # whose constructor has no `inplace` argument. The original used a
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        # and any genuine construction error.
        try:
            self.activation = activation(inplace=True)
        except TypeError:
            self.activation = activation()

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.activation(out)
        if self.se is not None:
            out = self.se(out)
        return out
class TestOpenAIWindowService():
    """Checks that the gpt-3.5-turbo window service round-trips the test
    prompt and reproduces the precomputed token ids/strings."""

    def setup_method(self):
        # Fresh temporary directory per test for the tokenizer service state.
        self.path: str = tempfile.mkdtemp()
        service: TokenizerService = get_tokenizer_service(self.path)
        self.window_service = WindowServiceFactory.get_window_service('openai/gpt-3.5-turbo-0301', service)

    def teardown_method(self, method):
        shutil.rmtree(self.path)

    def test_encode(self):
        assert (self.window_service.encode(TEST_PROMPT).token_values == GPT4_TEST_TOKEN_IDS)

    def test_decode(self):
        # decode(encode(x)) must be the identity on the prompt text.
        assert (self.window_service.decode(self.window_service.encode(TEST_PROMPT).tokens) == TEST_PROMPT)

    def test_tokenize(self):
        assert (self.window_service.tokenize(TEST_PROMPT) == GPT4_TEST_TOKENS)
class Encoder(nn.Module):
    """Convolutional encoder mapping an image batch to latent mean vectors.

    Two stride-2 convolutions reduce the spatial size by 4x overall; an
    asymmetrically zero-padded stride-1 conv keeps that size; a final linear
    layer produces a ``z_dim``-dimensional mean per example.
    """

    def __init__(self, z_dim, c_dim, img_size):
        super(Encoder, self).__init__()
        stages = [
            nn.Conv2d(int(c_dim), 64, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 4, stride=2, padding=1),
            nn.ReLU(),
            # Pad (left=1, right=2, top=1, bottom=2) so the following 4x4
            # stride-1 conv preserves the spatial size.
            nn.ZeroPad2d((1, 2, 1, 2)),
            nn.Conv2d(64, 64, 4, stride=1, padding=0),
            nn.ReLU(),
        ]
        self.model_enc = nn.Sequential(*stages)
        # Feature map after downsampling: 64 x (img_size/4) x (img_size/4).
        self.fc_mean = nn.Linear(int(64 * img_size * img_size / 16), z_dim)

    def forward(self, x):
        features = self.model_enc(x)
        flat = features.view(features.size(0), -1)
        return self.fc_mean(flat)
def make_buff(comm_handler: BufferSimpleCommBase, is_bwd, shapes, dtypes=None, max_buffers=1, create=False, prev_stream_to_use=None):
    """Build a receive-buffer pool (gradients or activations) for the given
    communication handler.

    Args:
        comm_handler: handler that allocates and fills the buffers.
        is_bwd: True for gradient buffers, False for activation buffers.
        shapes / dtypes: tensor metadata registered on the handler.
        max_buffers: pool size.
        create: when True, allocate the buffers eagerly.
        prev_stream_to_use: optional stream forwarded to the pool.
    """
    comm_handler.set_tensor_shapes(shapes)
    comm_handler.set_tensor_dtypes(dtypes)
    if is_bwd:
        alloc_fn = comm_handler.create_gradients_rcv_buffers
        recv_fn = comm_handler.recv_gradients
    else:
        alloc_fn = comm_handler.create_activations_recv_buffers
        recv_fn = comm_handler.recv_activations
    pool = Buffers(max_buffers, alloc_fn, recv_fn, is_grad=bool(is_bwd), prev_stream_to_use=prev_stream_to_use)
    if create:
        pool.create()
    return pool
def box_voting(top_boxes, top_scores, all_boxes, all_scores, overlap_thresh, method='ID', beta=1.0):
    """Thin wrapper over the native box-voting op.

    Validates ``method`` against BOX_VOTING_METHODS before dispatching to
    the compiled implementation.
    """
    assert method in BOX_VOTING_METHODS, 'Unknown box_voting method: {}'.format(method)
    method_id = BOX_VOTING_METHODS[method]
    return _C.box_voting(top_boxes, top_scores, all_boxes, all_scores, method_id, beta, overlap_thresh)
class JsonConfig(dict):
    """Read-only, attribute-accessible configuration tree.

    Can be built from a JSON file path, a dict, or keyword arguments.
    Keys beginning with '__' are bookkeeping ('__name' records the config's
    origin). Item/attribute assignment is forbidden after construction; two
    configs can be merged with `+`.
    """
    Indent = 2  # spaces per nesting level used by __str__ and dump()

    def __init__(self, *argv, **kwargs):
        super().__init__()
        super().__setitem__('__name', 'default')
        assert ((len(argv) == 0) or (len(kwargs) == 0)), '[JsonConfig]: Cannot initialize with position parameters (json file or a dict) and named parameters (key and values) at the same time.'
        if (len(argv) > 0):
            assert (len(argv) == 1), '[JsonConfig]: Need one positional parameters, found two.'
            arg = argv[0]
        else:
            arg = kwargs
        if isinstance(arg, str):
            # A string argument is a JSON file path; its basename becomes the name.
            super().__setitem__('__name', os.path.splitext(os.path.basename(arg))[0])
            with open(arg, 'r') as load_f:
                arg = json.load(load_f)
        if isinstance(arg, dict):
            for key in arg:
                value = arg[key]
                if isinstance(value, dict):
                    # Wrap nested dicts recursively so attribute access works all the way down.
                    value = JsonConfig(value)
                super().__setitem__(key, value)
        else:
            raise TypeError('[JsonConfig]: Do not support given input with type {}'.format(type(arg)))

    def __setattr__(self, attr, value):
        # Configs are immutable through the public interface.
        raise Exception("[JsonConfig]: Can't set constant key {}".format(attr))

    def __setitem__(self, item, value):
        raise Exception("[JsonConfig]: Can't set constant key {}".format(item))

    def __getattr__(self, attr):
        # Attribute access falls through to dict lookup (KeyError if absent).
        return super().__getitem__(attr)

    def __str__(self):
        return self.__to_string('', 0)

    def __to_string(self, name, intent):
        # Recursive pretty-printer; `intent` is the current indent width.
        ret = (((' ' * intent) + name) + ' {\n')
        for key in self:
            if (key.find('__') == 0):
                continue  # skip bookkeeping keys
            value = self[key]
            line = (' ' * intent)
            if isinstance(value, JsonConfig):
                line += value.__to_string(key, (intent + JsonConfig.Indent))
            else:
                line += ((((' ' * JsonConfig.Indent) + key) + ': ') + str(value))
            ret += (line + '\n')
        ret += ((' ' * intent) + '}')
        return ret

    def __add__(self, b):
        # NOTE(review): despite the `+` spelling this MUTATES self (and
        # returns it). Conflicting scalar values must be equal; nested
        # configs are merged recursively and '__name's are joined with '&'.
        assert isinstance(b, JsonConfig)
        for k in b:
            v = b[k]
            if (k in self):
                if isinstance(v, JsonConfig):
                    super().__setitem__(k, (self[k] + v))
                elif (k == '__name'):
                    super().__setitem__(k, ((self[k] + '&') + v))
                else:
                    assert (v == self[k]), '[JsonConfig]: Two config conflicts at`{}`, {} != {}'.format(k, self[k], v)
            else:
                super().__setitem__(k, v)
        return self

    def date_name(self):
        # Produce e.g. "20240101_1230_<name>.json" (minute resolution).
        date = str(datetime.datetime.now())
        date = date[:date.rfind(':')].replace('-', '').replace(':', '').replace(' ', '_')
        return (((date + '_') + super().__getitem__('__name')) + '.json')

    def dump(self, dir_path, json_name=None):
        """Write the config as JSON into ``dir_path`` (date-stamped name by default)."""
        if (json_name is None):
            json_name = self.date_name()
        json_path = os.path.join(dir_path, json_name)
        with open(json_path, 'w') as fout:
            # NOTE(review): prints to stdout, not to the file — looks like a
            # debugging leftover.
            print(str(self))
            json.dump(self.to_dict(), fout, indent=JsonConfig.Indent)

    def to_dict(self):
        """Recursively convert back to plain dicts, dropping '__' keys."""
        ret = {}
        for k in self:
            if (k.find('__') == 0):
                continue
            v = self[k]
            if isinstance(v, JsonConfig):
                ret[k] = v.to_dict()
            else:
                ret[k] = v
        return ret
def test_standard_represent():
    """DeepFace.represent with default settings should yield 2622-dimensional
    embeddings (presumably the default model's embedding size — the test
    pins it) for every face found in the image."""
    img_path = 'dataset/img1.jpg'
    embedding_objs = DeepFace.represent(img_path)
    for embedding_obj in embedding_objs:
        embedding = embedding_obj['embedding']
        logger.debug(f'Function returned {len(embedding)} dimensional vector')
        assert (len(embedding) == 2622)
    logger.info(' test standard represent function done')
_module()
# NOTE(review): the bare `_module()` call above looks like the residue of a
# registry decorator line (e.g. `@...register_module()`); left untouched.
class ABIFuser(BaseModule):
    """Fuses language and vision feature streams with a learned gate.

    A sigmoid gate computed from the concatenated features mixes the two
    streams elementwise; a linear head maps the result to character logits.
    """

    def __init__(self, d_model=512, max_seq_len=40, num_chars=90, init_cfg=None, **kwargs):
        super().__init__(init_cfg=init_cfg)
        # +1 reserves an extra sequence slot (presumably an end-of-sequence
        # position — TODO confirm against the caller).
        self.max_seq_len = (max_seq_len + 1)
        self.w_att = nn.Linear((2 * d_model), d_model)
        self.cls = nn.Linear(d_model, num_chars)

    def forward(self, l_feature, v_feature):
        # Gate from the concatenation of both streams along the feature dim.
        f = torch.cat((l_feature, v_feature), dim=2)
        f_att = torch.sigmoid(self.w_att(f))
        # Convex elementwise blend: gate weights vision, (1-gate) language.
        output = ((f_att * v_feature) + ((1 - f_att) * l_feature))
        logits = self.cls(output)
        return {'logits': logits}
def read_all_throughputs_json(throughputs_file):
    """Load and return the JSON-encoded throughputs mapping from ``throughputs_file``."""
    with open(throughputs_file, 'r') as handle:
        return json.load(handle)
def traindata_to_tfrecord(npz_path='../deepsea_filtered.npz', out_pattern='./data/traindata-%.2d.tfrecord', num_files=1, examples_per_file=71753):
    """Serialize a training split from an ``.npz`` archive into TFRecord shard(s).

    The defaults reproduce the original hard-coded behavior (one shard of
    71753 examples from '../deepsea_filtered.npz'); the parameters make the
    paths and shard layout reusable for other splits.

    Args:
        npz_path: archive containing ``x_train`` and ``y_train`` arrays.
        out_pattern: %%-style filename pattern taking the shard index.
        num_files: number of shards to write.
        examples_per_file: contiguous examples written per shard.
    """
    with np.load(npz_path) as archive:
        x = archive['x_train']
        y = archive['y_train']
        for file_num in range(num_files):
            with tf.io.TFRecordWriter((out_pattern % file_num)) as writer:
                # Each shard covers a contiguous [start, end) slice of examples.
                start = file_num * examples_per_file
                end = (file_num + 1) * examples_per_file
                for i in tqdm(range(start, end), desc='Processing Train Data {}'.format(file_num), ascii=True):
                    example_proto = serialize_example(x[i], y[i])
                    writer.write(example_proto)
def test_round_trip_placeholder():
    """to_buffers/from_buffers must carry PlaceholderArray-backed buffers
    through unchanged — no materialization, same shape and dtype."""
    layout = ak.contents.RecordArray([ak.contents.NumpyArray(PlaceholderArray(nplike, (4,), np.dtype(np.float64))), ak.contents.NumpyArray([1, 2, 3, 4])], ['x', 'y'])
    (form, length, container) = ak.to_buffers(layout)
    result = ak.from_buffers(form, length, container, highlevel=False)
    # The placeholder must survive the round trip as a placeholder.
    assert isinstance(result.content('x').data, PlaceholderArray)
    assert (result.content('x').data.shape == layout.content('x').data.shape)
    assert (result.content('x').data.dtype == layout.content('x').data.dtype)
def make_scorecard(scores):
    """Render per-gender recall/precision/F1 and confusion counts, plus a
    feminine/masculine F1 ratio, as a multi-line report string.

    ``scores`` maps a gender key (None = overall) to a Scores object;
    missing keys fall back to an empty Scores. The bias ratio is reported
    as '-' when either F1 is zero.
    """
    lines = []
    f1_by_gender = {}
    for gender, label in ((None, 'Overall'), (Gender.MASCULINE, 'Masculine'), (Gender.FEMININE, 'Feminine')):
        gender_scores = scores.get(gender, Scores())
        recall_v = gender_scores.recall()
        precision_v = gender_scores.precision()
        f1_v = gender_scores.f1()
        f1_by_gender[gender] = f1_v
        lines.append('{} recall: {:.1f} precision: {:.1f} f1: {:.1f}'.format(label, recall_v, precision_v, f1_v))
        lines.append('\t\ttp {:d}\tfp {:d}'.format(gender_scores.true_positives, gender_scores.false_positives))
        lines.append('\t\tfn {:d}\ttn {:d}'.format(gender_scores.false_negatives, gender_scores.true_negatives))
    ratio = '-'
    if f1_by_gender[Gender.MASCULINE] and f1_by_gender[Gender.FEMININE]:
        ratio = '{:.2f}'.format(f1_by_gender[Gender.FEMININE] / f1_by_gender[Gender.MASCULINE])
    lines.append('Bias (F/M): {}\n'.format(ratio))
    return '\n'.join(lines)
def compute_metrics(seg_pred, seg_gt, n_cls, ignore_index=None, ret_cat_iou=False, tmp_dir=None, distributed=False):
    """Aggregate segmentation metrics (pixel accuracy, mean accuracy, mIoU).

    Only rank 0 computes the metrics from the prediction/GT dicts; when
    ``distributed`` is set, the resulting 3-vector is broadcast to all ranks.

    Args:
        seg_pred / seg_gt: dicts mapping image keys to label maps.
        n_cls: number of classes.
        ignore_index: label excluded from the IoU computation.
        ret_cat_iou: also return per-category IoU (rank 0 only).
        tmp_dir: unused; kept for interface compatibility.
        distributed: broadcast the metrics from rank 0.

    Returns:
        dict with pixel_accuracy, mean_accuracy, mean_iou (percentages,
        rounded to 2 decimals), plus 'cat_iou' when requested on rank 0.
    """
    ret_metrics_mean = torch.zeros(3, dtype=float, device=ptu.device)
    if ptu.dist_rank == 0:
        list_seg_pred = []
        list_seg_gt = []
        # Sort keys so pred and gt are paired in the same order.
        keys = sorted(seg_pred.keys())
        for k in keys:
            list_seg_pred.append(np.asarray(seg_pred[k]))
            list_seg_gt.append(np.asarray(seg_gt[k]))
        ret_metrics = mean_iou(results=list_seg_pred, gt_seg_maps=list_seg_gt, num_classes=n_cls, ignore_index=ignore_index)
        ret_metrics = [ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics['IoU']]
        # `float` replaces the `np.float` alias, which was removed in
        # NumPy >= 1.24 (deprecated since 1.20).
        ret_metrics_mean = torch.tensor([np.round((np.nanmean(ret_metric.astype(float)) * 100), 2) for ret_metric in ret_metrics], dtype=float, device=ptu.device)
        cat_iou = ret_metrics[2]
    if distributed:
        dist.broadcast(ret_metrics_mean, 0)
    (pix_acc, mean_acc, miou) = ret_metrics_mean
    ret = dict(pixel_accuracy=pix_acc, mean_accuracy=mean_acc, mean_iou=miou)
    if ret_cat_iou and (ptu.dist_rank == 0):
        ret['cat_iou'] = cat_iou
    return ret
def insert_indent(s: str, indent: str='\t', insert_first=True) -> str:
    """Indent every line of ``s`` and normalize its tail.

    Trailing newlines and spaces are stripped before indenting, and exactly
    one trailing newline is appended. When ``insert_first`` is False the
    first line is left unindented.
    """
    body = s.rstrip('\n ').replace('\n', '\n' + indent)
    head = indent if insert_first else ''
    return head + body + '\n'
class GlobalNode(StatNode):
    """Statement node for a ``global`` declaration.

    Registers each listed name as a module-level binding during declaration
    analysis; the statement itself generates no runtime code.
    """
    child_attrs = []

    def analyse_declarations(self, env):
        # Declare every listed name as global in the enclosing environment.
        for name in self.names:
            env.declare_global(name, self.pos)

    def analyse_expressions(self, env):
        # Nothing to transform; return the node unchanged.
        return self

    def generate_execution_code(self, code):
        # A `global` statement emits no executable code.
        pass
def sliced_fun(f, n_slices):
    """Wrap ``f`` so it is evaluated over ~``n_slices`` chunks of its sliced
    inputs and the per-chunk results are averaged, weighted by chunk length.

    The wrapper accepts ``sliced_inputs`` (sequences sliced in lockstep) and
    optional ``non_sliced_inputs`` forwarded unchanged to every chunk call.
    ``f`` may return a scalar, list, or tuple; the wrapper mirrors that
    container type.
    """
    def sliced_f(sliced_inputs, non_sliced_inputs=None):
        extra = non_sliced_inputs
        if extra is None:
            extra = []
        elif isinstance(extra, tuple):
            extra = list(extra)
        total = len(sliced_inputs[0])
        chunk = max(1, total // n_slices)
        acc = None
        for lo in range(0, total, chunk):
            pieces = [seq[lo:lo + chunk] for seq in sliced_inputs]
            out = f(*(pieces + extra))
            out_seq = out if isinstance(out, (tuple, list)) else [out]
            # Weight each chunk's result by the number of paths it covered.
            weighted = [np.asarray(v) * len(pieces[0]) for v in out_seq]
            if acc is None:
                acc = weighted
            else:
                acc = [a + w for (a, w) in zip(acc, weighted)]
        averaged = [v / total for v in acc]
        # Mirror the container type of f's raw return value.
        if not isinstance(out, (tuple, list)):
            return averaged[0]
        if isinstance(out, tuple):
            return tuple(averaged)
        return averaged
    return sliced_f
class InputFeatures(object):
    """Plain container for one tokenized training example.

    Holds the token ids, attention mask, per-column labels, and masked-LM
    labels exactly as supplied by the feature-conversion step.
    """

    def __init__(self, input_ids, input_mask, col_label_ids, lm_label_ids):
        # Stored verbatim; no validation or copying is performed.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.col_label_ids = col_label_ids
        self.lm_label_ids = lm_label_ids
def load_refcoco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    """Load a RefCOCO-style COCO-format json into detectron2 dataset dicts.

    Each record carries the image path/size, optional referring
    'expressions', and per-annotation objects (bbox in XYWH_ABS,
    segmentation, keypoints, optional 'phrase'), tagged with
    ``task='grounding'``.

    Args:
        json_file: COCO-format annotation file.
        image_root: directory containing the images.
        dataset_name: when given, fills MetadataCatalog with thing_classes
            and a contiguous category-id mapping.
        extra_annotation_keys: additional per-annotation keys to copy through.

    Returns:
        list of dataset dicts.
    """
    from pycocotools.coco import COCO
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # COCO() prints to stdout while loading; silence it.
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    id_map = None
    if (dataset_name is not None):
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(coco_api.getCatIds())
        cats = coco_api.loadCats(cat_ids)
        thing_classes = [c['name'] for c in sorted(cats, key=(lambda x: x['id']))]
        meta.thing_classes = thing_classes
        if (not ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids)))):
            if ('coco' not in dataset_name):
                logger.warning("\nCategory ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n")
        id_map = {v: i for (i, v) in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map
        # NOTE(review): the ids are then padded up to 100000 and the map
        # rebuilt, which silently diverges from the mapping just stored on
        # `meta` above — confirm this is intended.
        cat_ids = (cat_ids + list(range((max(cat_ids) + 1), 100000)))
        id_map = {v: i for (i, v) in enumerate(cat_ids)}
    img_ids = sorted(coco_api.imgs.keys())
    imgs = coco_api.loadImgs(img_ids)
    anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
    total_num_valid_anns = sum([len(x) for x in anns])
    total_num_anns = len(coco_api.anns)
    if (total_num_valid_anns < total_num_anns):
        logger.warning(f'{json_file} contains {total_num_anns} annotations, but only {total_num_valid_anns} of them match to images in the file.')
    if ('minival' not in json_file):
        # Uniqueness check is skipped for 'minival' files (presumably known
        # to contain duplicated annotation ids).
        ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
        assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique!".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info('Loaded {} images in COCO format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    ann_keys = (['iscrowd', 'bbox', 'keypoints', 'category_id'] + (extra_annotation_keys or []))
    num_instances_without_valid_segmentation = 0
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record['file_name'] = os.path.join(image_root, img_dict['file_name'])
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        if ('expressions' in img_dict):
            record['expressions'] = img_dict['expressions']
        image_id = record['image_id'] = img_dict['id']
        objs = []
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            assert (anno.get('ignore', 0) == 0), '"ignore" in COCO json file is not supported.'
            # Copy only the whitelisted keys that are actually present.
            obj = {key: anno[key] for key in ann_keys if (key in anno)}
            if (('bbox' in obj) and (len(obj['bbox']) == 0)):
                raise ValueError(f"One annotation of image {image_id} contains empty 'bbox' value! This json does not have valid COCO format.")
            segm = anno.get('segmentation', None)
            if segm:
                if isinstance(segm, dict):
                    if isinstance(segm['counts'], list):
                        # Uncompressed RLE -> compressed RLE.
                        segm = mask_util.frPyObjects(segm, *segm['size'])
                else:
                    # Polygon list: keep only valid polygons (>= 3 points,
                    # even coordinate count).
                    segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                    if (len(segm) == 0):
                        num_instances_without_valid_segmentation += 1
                        continue
                obj['segmentation'] = segm
            keypts = anno.get('keypoints', None)
            if keypts:
                for (idx, v) in enumerate(keypts):
                    if ((idx % 3) != 2):
                        # Half-pixel shift on x/y only (every 3rd value is
                        # visibility) — presumably a discrete-to-continuous
                        # coordinate convention.
                        keypts[idx] = (v + 0.5)
                obj['keypoints'] = keypts
            phrase = anno.get('phrase', None)
            if phrase:
                obj['phrase'] = phrase
            obj['bbox_mode'] = BoxMode.XYWH_ABS
            if id_map:
                annotation_category_id = obj['category_id']
                try:
                    obj['category_id'] = id_map[annotation_category_id]
                except KeyError as e:
                    raise KeyError(f"Encountered category_id={annotation_category_id} but this id does not exist in 'categories' of the json file.") from e
            objs.append(obj)
        record['annotations'] = objs
        record['task'] = 'grounding'
        dataset_dicts.append(record)
    if (num_instances_without_valid_segmentation > 0):
        logger.warning(('Filtered out {} instances without valid segmentation. '.format(num_instances_without_valid_segmentation) + 'There might be issues in your dataset generation process. Please check carefully'))
    return dataset_dicts
class ReconNetWrapper(nn.Module):
    """Wraps a backbone from ``func_dict`` to regress a 257-d coefficient vector.

    When ``use_last_fc`` is False, seven zero-initialized 1x1 heads of sizes
    80 + 64 + 80 + 3 + 27 + 2 + 1 (= 257 = ``fc_dim``) replace the backbone's
    final FC and their outputs are concatenated.
    """
    fc_dim = 257

    def __init__(self, net_recon, use_last_fc=False, init_path=None):
        super(ReconNetWrapper, self).__init__()
        self.use_last_fc = use_last_fc
        if net_recon not in func_dict:
            # BUG FIX: the original did `return NotImplementedError(...)`,
            # which never raised (and returning non-None from __init__ is a
            # TypeError); it also passed the %-args unformatted.
            raise NotImplementedError('network [%s] is not implemented' % net_recon)
        (func, last_dim) = func_dict[net_recon]
        backbone = func(use_last_fc=use_last_fc, num_classes=self.fc_dim)
        if init_path and os.path.isfile(init_path):
            # Optional warm start from a checkpoint on disk.
            state_dict = filter_state_dict(torch.load(init_path, map_location='cpu'))
            backbone.load_state_dict(state_dict)
            print('loading init net_recon %s from %s' % (net_recon, init_path))
        self.backbone = backbone
        if not use_last_fc:
            # Coefficient heads; sizes sum to fc_dim (80+64+80+3+27+2+1=257).
            # Zero-initialized so the initial prediction is all-zeros.
            self.final_layers = nn.ModuleList([
                conv1x1(last_dim, 80, bias=True),
                conv1x1(last_dim, 64, bias=True),
                conv1x1(last_dim, 80, bias=True),
                conv1x1(last_dim, 3, bias=True),
                conv1x1(last_dim, 27, bias=True),
                conv1x1(last_dim, 2, bias=True),
                conv1x1(last_dim, 1, bias=True),
            ])
            for m in self.final_layers:
                nn.init.constant_(m.weight, 0.0)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, x):
        x = self.backbone(x)
        if not self.use_last_fc:
            # Run every head on the shared backbone features, concatenate
            # along channels, then flatten to (batch, fc_dim).
            output = []
            for layer in self.final_layers:
                output.append(layer(x))
            x = torch.flatten(torch.cat(output, dim=1), 1)
        return x
_level_function()
# NOTE(review): `_level_function()` above looks like the residue of a
# decorator line (e.g. `@high_level_function()`); left untouched.
def argmax(array, axis=None, *, keepdims=False, mask_identity=True, highlevel=True, behavior=None, attrs=None):
    """Return the index positions of maximum values along ``axis``.

    The leading bare ``yield`` hands the dispatchable argument tuple to the
    dispatcher machinery before delegating the real work to ``_impl``.
    """
    (yield (array,))
    return _impl(array, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def set_framework_dependencies(x):
    """Pick the array framework matching the input's type.

    Args:
        x: a numpy.ndarray or a torch tensor.

    Returns:
        (fw, to_dtype, eps): the framework module (numpy or torch), a
        function casting an array to ``x``'s dtype (identity for numpy),
        and float32 machine epsilon for that framework.
    """
    # isinstance instead of `type(x) is numpy.ndarray` (the original), so
    # ndarray subclasses also take the numpy path instead of crashing on
    # the torch-only `.to(...)` call.
    if isinstance(x, numpy.ndarray):
        to_dtype = (lambda a: a)
        fw = numpy
    else:
        to_dtype = (lambda a: a.to(x.dtype))
        fw = torch
    eps = fw.finfo(fw.float32).eps
    return (fw, to_dtype, eps)
class storage():
    """Lazy singleton wrapper around a MinIO object-storage client.

    The client is configured from the MINIO_ADDRESS / MINIO_ACCESS_KEY /
    MINIO_SECRET_KEY environment variables; when MINIO_ADDRESS is unset the
    instance is created with ``client = None``.
    """
    instance = None  # singleton cache used by get_instance()
    client = None

    def __init__(self):
        if 'MINIO_ADDRESS' in os.environ:
            address = os.environ['MINIO_ADDRESS']
            access_key = os.environ['MINIO_ACCESS_KEY']
            secret_key = os.environ['MINIO_SECRET_KEY']
            self.client = minio.Minio(address, access_key=access_key, secret_key=secret_key, secure=False)

    @staticmethod
    def unique_name(name):
        """Insert a random token before the extension: 'a.txt' -> 'a.<rand>.txt'.

        Uses rsplit so filenames containing dots ('a.b.txt') keep their full
        stem — the original `split('.')` raised ValueError on such names.
        Marked @staticmethod (it takes no self; callers use storage.unique_name).
        """
        (name, extension) = name.rsplit('.', 1)
        return '{name}.{random}.{extension}'.format(name=name, extension=extension, random=str(uuid.uuid4()).split('-')[0])

    def upload(self, bucket, file, filepath):
        """Upload a local file under a randomized key; returns the key."""
        key_name = storage.unique_name(file)
        self.client.fput_object(bucket, key_name, filepath)
        return key_name

    def download(self, bucket, file, filepath):
        self.client.fget_object(bucket, file, filepath)

    def download_directory(self, bucket, prefix, path):
        """Download every object under `prefix` into `path`, keeping names."""
        objects = self.client.list_objects_v2(bucket, prefix, recursive=True)
        for obj in objects:
            file_name = obj.object_name
            self.download(bucket, file_name, os.path.join(path, file_name))

    def upload_stream(self, bucket, file, bytes_data):
        """Upload an in-memory BytesIO under a randomized key; returns the key."""
        key_name = storage.unique_name(file)
        self.client.put_object(bucket, key_name, bytes_data, bytes_data.getbuffer().nbytes)
        return key_name

    def download_stream(self, bucket, file):
        data = self.client.get_object(bucket, file)
        return data.read()

    @staticmethod
    def get_instance():
        """Return the shared storage instance, creating it on first use."""
        if storage.instance is None:
            storage.instance = storage()
        return storage.instance
def hashing_trick(text, n, hash_function=None, filters='!"#$%&()*+,-./:;<=>?[\\]^_`{|}~\t\n', lower=True, split=' '):
    """Hash each word of ``text`` into the integer range [1, n).

    ``hash_function`` may be None (builtin ``hash``; not stable across
    runs), the string 'md5' (stable), or any callable mapping a word to an
    int. Index 0 is reserved and never produced.
    """
    if hash_function is None:
        hasher = hash
    elif hash_function == 'md5':
        hasher = (lambda w: int(md5(w.encode()).hexdigest(), 16))
    else:
        hasher = hash_function
    words = text_to_word_sequence(text, filters=filters, lower=lower, split=split)
    return [(hasher(w) % (n - 1)) + 1 for w in words]
def read_element_wav(elem: Dict[(str, Any)], audio_dir, dataset_info: DatasetInfo, target_sr=44100, duration: Optional[float]=None) -> Dict[(str, Any)]:
    """Load the wav file for a dataset element and attach its audio in place.

    The element's id column (per ``dataset_info``) is mapped to a file path,
    the file is read (resampled to ``target_sr``, optionally truncated to
    ``duration`` seconds), and 'audio' / 'audio_sample_rate' are written
    onto ``elem``, which is returned.
    """
    track_id = elem[dataset_info.id_col]
    filepath = dataset_info.id_to_filename(track_id, audio_dir)
    samples, sr = read_wav(filepath=filepath, target_sr=target_sr, duration=duration)
    elem['audio'] = samples
    elem['audio_sample_rate'] = sr
    return elem
class CenterPivotConv4d(nn.Module):
    """Appears to implement a center-pivot 4D convolution: a full 4D conv on a
    correlation tensor (bsz, ch, ha, wa, hb, wb) is approximated by two 2D
    convs — one over the (ha, wa) plane and one over the (hb, wb) plane —
    evaluated at strided pivot positions of the *other* plane, then summed."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True):
        super(CenterPivotConv4d, self).__init__()
        # conv1 acts on the first spatial pair (kernel/stride/padding [:2]),
        # conv2 on the second pair ([2:]).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size[:2], stride=stride[:2], bias=bias, padding=padding[:2])
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size[2:], stride=stride[2:], bias=bias, padding=padding[2:])
        self.stride34 = stride[2:]
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.idx_initialized = False
        self.idx_initialized_2 = False

    def prune(self, ct):
        # Subsample the (hb, wb) plane at conv2's stride; caches len_h/len_w
        # and the flattened (row-major) indices of kept positions on self.
        (bsz, ch, ha, wa, hb, wb) = ct.size()
        idxh = torch.arange(start=0, end=hb, step=self.stride[2:][0], device=ct.device)
        idxw = torch.arange(start=0, end=wb, step=self.stride[2:][1], device=ct.device)
        self.len_h = len(idxh)
        self.len_w = len(idxw)
        self.idx = (idxw.repeat(self.len_h, 1) + (idxh.repeat(self.len_w, 1).t() * wb)).view((- 1))
        self.idx_initialized = True
        ct_pruned = ct.view(bsz, ch, ha, wa, (- 1)).index_select(4, self.idx).view(bsz, ch, ha, wa, self.len_h, self.len_w)
        return ct_pruned

    def prune_out2(self, ct):
        # Same subsampling, but over the (ha, wa) plane at conv1's stride.
        (bsz, ch, ha, wa, hb, wb) = ct.size()
        idxh = torch.arange(start=0, end=ha, step=self.stride[:2][0], device=ct.device)
        idxw = torch.arange(start=0, end=wa, step=self.stride[:2][1], device=ct.device)
        self.len_h = len(idxh)
        self.len_w = len(idxw)
        self.idx = (idxw.repeat(self.len_h, 1) + (idxh.repeat(self.len_w, 1).t() * wa)).view((- 1))
        self.idx_initialized_2 = True
        ct_pruned = ct.view(bsz, ch, (- 1), hb, wb).permute(0, 1, 3, 4, 2).index_select(4, self.idx).permute(0, 1, 4, 2, 3).view(bsz, ch, self.len_h, self.len_w, hb, wb)
        return ct_pruned

    def forward(self, x):
        # Branch 1: subsample (hb, wb) if strided there, fold it into the
        # batch dim, and 2D-convolve over (ha, wa).
        if (self.stride[2:][(- 1)] > 1):
            out1 = self.prune(x)
        else:
            out1 = x
        (bsz, inch, ha, wa, hb, wb) = out1.size()
        out1 = out1.permute(0, 4, 5, 1, 2, 3).contiguous().view((- 1), inch, ha, wa)
        out1 = self.conv1(out1)
        (outch, o_ha, o_wa) = (out1.size((- 3)), out1.size((- 2)), out1.size((- 1)))
        out1 = out1.view(bsz, hb, wb, outch, o_ha, o_wa).permute(0, 3, 4, 5, 1, 2).contiguous()
        # Branch 2: mirror image — subsample (ha, wa), convolve over (hb, wb).
        if (self.stride[:2][(- 1)] > 1):
            out2 = self.prune_out2(x)
        else:
            out2 = x
        (bsz, inch, ha, wa, hb, wb) = out2.size()
        out2 = out2.permute(0, 2, 3, 1, 4, 5).contiguous().view((- 1), inch, hb, wb)
        out2 = self.conv2(out2)
        (outch, o_hb, o_wb) = (out2.size((- 3)), out2.size((- 2)), out2.size((- 1)))
        out2 = out2.view(bsz, ha, wa, outch, o_hb, o_wb).permute(0, 3, 1, 2, 4, 5).contiguous()
        if ((out1.size()[(- 2):] != out2.size()[(- 2):]) and (self.padding[(- 2):] == (0, 0))):
            # Shapes diverged because the unpadded conv shrank a plane:
            # collapse branch 1's trailing plane by summation.
            out1 = out1.view(bsz, outch, o_ha, o_wa, (- 1)).sum(dim=(- 1))
            # NOTE(review): `.squeeze()` drops ALL size-1 dims, including a
            # batch of 1 — confirm the intended shapes here.
            out2 = out2.squeeze()
        y = (out1 + out2)
        return y
class DependencyGraph(object):
    """Directed graph of distributions and their dependency requirements.

    ``adjacency_list`` maps dist -> [(dependency, requirement-label)],
    ``reverse_list`` maps dist -> [dependents], and ``missing`` maps
    dist -> unmatched requirement strings.
    """

    def __init__(self):
        self.adjacency_list = {}
        self.reverse_list = {}
        self.missing = {}

    def add_distribution(self, distribution):
        # Register a node with no edges yet.
        self.adjacency_list[distribution] = []
        self.reverse_list[distribution] = []

    def add_edge(self, x, y, label=None):
        # x depends on y; `label` is the requirement string (may be None).
        self.adjacency_list[x].append((y, label))
        if (x not in self.reverse_list[y]):
            self.reverse_list[y].append(x)

    def add_missing(self, distribution, requirement):
        # Record a requirement that could not be matched to any distribution.
        logger.debug('%s missing %r', distribution, requirement)
        self.missing.setdefault(distribution, []).append(requirement)

    def _repr_dist(self, dist):
        return ('%s %s' % (dist.name, dist.version))

    def repr_node(self, dist, level=1):
        """Render ``dist`` and its dependency subtree as indented text."""
        output = [self._repr_dist(dist)]
        for (other, label) in self.adjacency_list[dist]:
            # NOTE: reuses (shadows) the `dist` parameter from here on.
            dist = self._repr_dist(other)
            if (label is not None):
                dist = ('%s [%s]' % (dist, label))
            output.append(((' ' * level) + str(dist)))
            suboutput = self.repr_node(other, (level + 1))
            subs = suboutput.split('\n')
            # Skip the child's own header line; it was already emitted above.
            output.extend(subs[1:])
        return '\n'.join(output)

    def to_dot(self, f, skip_disconnected=True):
        """Write the graph in DOT format to the file-like object ``f``.

        Nodes without outgoing edges are collected into a red 'disconnected'
        subgraph unless ``skip_disconnected`` is set.
        """
        disconnected = []
        f.write('digraph dependencies {\n')
        for (dist, adjs) in self.adjacency_list.items():
            if ((len(adjs) == 0) and (not skip_disconnected)):
                disconnected.append(dist)
            for (other, label) in adjs:
                if (not (label is None)):
                    f.write(('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)))
                else:
                    f.write(('"%s" -> "%s"\n' % (dist.name, other.name)))
        if ((not skip_disconnected) and (len(disconnected) > 0)):
            f.write('subgraph disconnected {\n')
            f.write('label = "Disconnected"\n')
            f.write('bgcolor = red\n')
            for dist in disconnected:
                f.write(('"%s"' % dist.name))
                f.write('\n')
            f.write('}\n')
        f.write('}\n')

    def topological_sort(self):
        """Kahn-style topological sort over a working copy of the graph.

        Returns:
            (ordered, remainder): dists in dependency-first order, plus any
            leftover dists that participate in cycles.
        """
        result = []
        alist = {}
        for (k, v) in self.adjacency_list.items():
            # Shallow-copy the edge lists so the graph itself is untouched.
            alist[k] = v[:]
        while True:
            to_remove = []
            for (k, v) in list(alist.items())[:]:
                if (not v):
                    # No outstanding dependencies: this node can be emitted.
                    to_remove.append(k)
                    del alist[k]
            if (not to_remove):
                # Nothing removable left; any remainder is cyclic.
                break
            for (k, v) in alist.items():
                alist[k] = [(d, r) for (d, r) in v if (d not in to_remove)]
            logger.debug('Moving to result: %s', [('%s (%s)' % (d.name, d.version)) for d in to_remove])
            result.extend(to_remove)
        return (result, list(alist.keys()))

    def __repr__(self):
        """Multi-line textual dump of every node's dependency subtree."""
        output = []
        for (dist, adjs) in self.adjacency_list.items():
            output.append(self.repr_node(dist))
        return '\n'.join(output)
def _cfg(url='', **kwargs):
    """Build a default pretrained-model config dict; ``kwargs`` override any field."""
    base = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'layer0.conv1',
        'classifier': 'last_linear',
    }
    base.update(kwargs)
    return base
def initialize_cuda_kernels(cupy):
    """Lazily compile and cache the Awkward CUDA kernels as a cupy RawModule.

    On first call: concatenates the error-bit #defines, the common header,
    and every ``awkward_*.cu`` source; registers each kernel's error table;
    compiles the module; and builds the signature->kernel mapping stored in
    the module-level ``kernel`` global. Later calls return the cached map.

    Raises:
        ModuleNotFoundError: when ``cupy`` is None (CUDA backend unavailable).
    """
    if (cupy is not None):
        global kernel
        if (kernel is None):
            import awkward._connect.cuda._kernel_signatures
            cuda_src = f'''#define ERROR_BITS {ERROR_BITS}
#define NO_ERROR {NO_ERROR}'''
            cuda_kernels_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cuda_kernels')
            with open(os.path.join(cuda_kernels_path, 'cuda_common.cu'), encoding='utf-8') as header_file:
                cuda_src = ((cuda_src + '\n') + header_file.read())
            for filename in glob.glob(os.path.join(cuda_kernels_path, 'awkward_*.cu')):
                with open(filename, encoding='utf-8') as cu_file:
                    cu_code = cu_file.read()
                # Kernel name = path slice between 'awkward_' and '.cu'.
                populate_kernel_errors(filename[filename.find('awkward_'):filename.find('.cu')], cu_code)
                cuda_src = ((cuda_src + '\n') + cu_code)
            template_specializations = fetch_template_specializations(awkward._connect.cuda._kernel_signatures.by_signature(None))
            cuda_kernel_templates = cupy.RawModule(code=cuda_src, options=('--std=c++11',), name_expressions=template_specializations)
            kernel = awkward._connect.cuda._kernel_signatures.by_signature(cuda_kernel_templates)
        return kernel
    else:
        raise ModuleNotFoundError(error_message.format('Awkward Arrays with CUDA'))
def test(file):
    """Run the MiniIC3 model checker on a Horn-clause input file.

    Prints one of: a counterexample trace (walking a Goal's parent chain),
    an inductive invariant (an ExprRef formula), or the raw result.
    """
    h2t = Horn2Transitions()
    h2t.parse(file)
    mp = MiniIC3(h2t.init, h2t.trans, h2t.goal, h2t.xs, h2t.inputs, h2t.xns)
    result = mp.run()
    if isinstance(result, Goal):
        g = result
        print('Trace')
        # Walk parent links from the failing goal back to the start.
        while g:
            print(g.level, g.cube)
            g = g.parent
        return
    if isinstance(result, ExprRef):
        print(('Invariant:\n%s ' % result))
        return
    print(result)
class FSD50k(HearScene):
    """FSD50k multilabel audio-tagging recipe built on HearScene.

    NOTE(review): each bare ``_cfg(**...)`` expression below looks like a
    decorator line whose '@' was lost in extraction (and the ``cls``-taking
    methods are presumably classmethods); preserved verbatim.
    """
    # Setup defaults: corpus class/root, fixed-batch sampler (bs=10, shuffled),
    # multilabel task scored with mAP / top1_acc / d_prime / aucroc.
    _cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_trainvaltest, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_sampler=newdict(CLS=FixedBatchSizeBatchSampler, batch_size=10, shuffle=True), task=dict(prediction_type='multilabel', scores=['mAP', 'top1_acc', 'd_prime', 'aucroc'])))
    def setup(cls, **cfg):
        super().setup(**cfg)

    # Training defaults: 40k steps, validated on mAP (higher is better).
    _cfg(**HearScene.train.default_except(trainer=dict(total_steps=40000, log_step=100, eval_step=1000, save_step=100, valid_metric='mAP', valid_higher_better=True)))
    def train(cls, **cfg):
        super().train(**cfg)

    _cfg(**HearScene.inference.default_cfg)
    def inference(cls, **cfg):
        super().inference(**cfg)

    # Full pipeline: setup -> train -> inference, sharing workspace/resume.
    _cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    def run(cls, **cfg):
        super().run(**cfg)
class TestClearInput():
    """Tests that forward(clear_no_need_grad=True) clears exactly the input
    buffers that are no longer needed, as observed via the
    clear_called_flag_recorder hooks."""

    def check_input_data_clear_called_flags(self, answer):
        """Compare the recorded per-function input-clear flags against
        ``answer`` (a list, per executed function, of expected booleans,
        one per input)."""
        result = clear_called_flag_recorder.get_input_clear_called_flags()
        assert (len(result) == len(answer))
        for (i, flags) in enumerate(answer):
            assert (len(result[i]) == len(flags))
            for (j, flag) in enumerate(flags):
                # result[i][j] is a tuple whose first element is the flag.
                assert (flag == result[i][j][0])

    def setup_method(self):
        # Start recording clear() calls before each test.
        clear_called_flag_recorder.activate_clear_called_flag_recorder()

    def teardown_method(self):
        # Stop recording after each test.
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()

    def test_clear_input_if_no_need_grad0(self):
        # Single chain: identity -> add_scalar.
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        answer = []
        answer.append([False])  # x1 (root variable) is kept
        answer.append([True])   # xx1 cleared after add_scalar consumed it
        y1.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    def test_clear_input_if_no_need_grad1(self):
        # Chain of two add_scalar ops.
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(y1)
        answer = []
        answer.append([False])
        answer.append([True])
        answer.append([True])
        y2.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    def test_clear_input_if_no_need_grad2(self):
        # tanh needs its input for backward, so y1 is not cleared.
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.tanh(xx1)
        y2 = F.add_scalar(y1)
        answer = []
        answer.append([False])
        answer.append([True])
        answer.append([False])  # tanh output retained for gradient computation
        y2.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    def test_clear_input_if_no_need_grad_branch0(self):
        # xx1 feeds two branches, so it is only cleared once both consumed it.
        x1 = nn.Variable([1, 5], need_grad=True)
        x2 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(xx1)
        y3 = F.add2(y1, y2)
        answer = []
        answer.append([False])
        answer.append([False])      # first consumer: xx1 still needed
        answer.append([True])       # second consumer: xx1 can be cleared
        answer.append([True, True])
        y3.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    def test_clear_input_if_no_need_grad_branch1(self):
        # mul2 keeps its inputs for backward; add2 inputs can be cleared.
        x1 = nn.Variable([1, 5], need_grad=True)
        x2 = nn.Variable([1, 5], need_grad=True)
        x3 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        xx2 = F.identity(x2)
        y1 = F.mul2(xx1, xx2)
        xx3 = F.identity(x3)
        y2 = F.add2(xx2, xx3)
        y3 = F.add2(y1, y2)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False, False])  # mul2 inputs retained for gradients
        answer.append([False])
        answer.append([False, True])   # xx2 still shared; xx3 cleared
        answer.append([True, True])
        y3.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    def test_clear_input_if_no_need_grad_convolution(self):
        # Convolution keeps input and weight for backward; bias is cleared.
        x1 = nn.Variable([1, 1, 2], need_grad=True)
        x2 = nn.Variable([1, 1, 2], need_grad=True)
        x3 = nn.Variable([1], need_grad=True)
        inp = F.identity(x1)
        weight = F.identity(x2)
        bias = F.identity(x3)
        y = F.convolution(inp, weight, bias)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False])
        answer.append([False, False, True])
        y.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)

    # NOTE(review): this looks like a stripped ``@pytest.mark.parametrize``
    # decorator (the ``@pytest.mark`` prefix was lost in extraction) —
    # confirm against the upstream test file.
    .parametrize('batch_stat', [False, True])
    def test_clear_input_if_no_need_grad_batch_normalization(self, batch_stat):
        # Batch normalization: only beta can be cleared; x/gamma (and the
        # running mean/var in inference mode) are retained.
        x1 = nn.Variable([1, 1, 2], need_grad=True)
        x2 = nn.Variable([1, 1, 1], need_grad=True)
        x3 = nn.Variable([1, 1, 1], need_grad=True)
        x4 = nn.Variable([1, 1, 1], need_grad=True)
        x5 = nn.Variable([1, 1, 1], need_grad=True)
        x = F.identity(x1)
        beta = F.identity(x2)
        gamma = F.identity(x3)
        if batch_stat:
            # Training mode: running stats are passed as raw variables.
            y = F.batch_normalization(x, beta, gamma, x4, x5, batch_stat=batch_stat)
        else:
            # Inference mode: stats go through identity, adding two graph nodes.
            mean = F.identity(x4)
            var = F.identity(x5)
            y = F.batch_normalization(x, beta, gamma, mean, var, batch_stat=batch_stat)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False])
        if (not batch_stat):
            answer.append([False])
            answer.append([False])
        answer.append([False, True, False, False, False])
        y.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
class LinearModel:
    """A one-dimensional affine map ``y = coefficient * x + bias``."""

    def __init__(self, coefficient, bias):
        # The two parameters fully determine the map.
        self.coefficient = coefficient
        self.bias = bias

    def __repr__(self):
        template = 'LinearModel(coefficient={:.4f}, bias={:.4f})'
        return template.format(self.coefficient, self.bias)

    def evaluate(self, x):
        """Map ``x`` forward through the model."""
        return self.coefficient * x + self.bias

    def inverse(self, y):
        """Recover the ``x`` that ``evaluate`` would map to ``y``."""
        return (y - self.bias) / self.coefficient
class SqueezeNetV11(SqueezeNet):
    """SqueezeNet configured as the 'v1.1' variant of the base architecture."""

    def __init__(self):
        # Zero-argument super() is equivalent to super(SqueezeNetV11, self)
        # inside the class body.
        super().__init__('v1.1')
class SimpleShot(FewShotClassifier):
    """SimpleShot few-shot classifier: scores queries by cosine distance to
    the class prototypes, with no transductive adaptation."""

    def forward(self, query_images: Tensor) -> Tensor:
        """Compute class scores for a batch of query images.

        Args:
            query_images: batch of query images (shape as expected by
                ``compute_features`` — defined on the base class).

        Returns:
            Class scores, optionally softmax-normalized by the base class.
        """
        query_features = self.compute_features(query_images)
        # SimpleShot operates on flat feature vectors only.
        self._raise_error_if_features_are_multi_dimensional(query_features)
        scores = self.cosine_distance_to_prototypes(query_features)
        return self.softmax_if_specified(scores)

    @staticmethod
    def is_transductive() -> bool:
        # FIX: the original defined this without `self` and without
        # @staticmethod, so calling it on an instance raised TypeError.
        return False
def build_transforms_swap(cfg, is_train=True, PIXEL_MEAN=(0.485, 0.456, 0.406), PIXEL_STD=(0.229, 0.224, 0.225)):
    """Build the image transform pipeline for the swap-augmented branch.

    Args:
        cfg: config object providing the target ``height`` and ``width``.
        is_train: when True, include augmentation (random flip, pad+crop,
            RandomSwap); otherwise only resize + normalize.
        PIXEL_MEAN: per-channel normalization mean (ImageNet defaults).
        PIXEL_STD: per-channel normalization std (ImageNet defaults).

    Returns:
        A ``T.Compose`` transform.

    FIX: the defaults were mutable lists (shared-mutable-default pitfall,
    flake8-bugbear B006); tuples are equivalent immutable sequences and
    remain valid inputs for ``T.Normalize``.
    """
    normalize_transform = T.Normalize(mean=PIXEL_MEAN, std=PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize([cfg.height, cfg.width]),
            T.RandomHorizontalFlip(p=0.5),
            T.Pad(10),
            T.RandomCrop([cfg.height, cfg.width]),
            T.ToTensor(),
            normalize_transform,
            # RandomSwap is a project-local augmentation; `mean` is presumably
            # the fill value for swapped regions — confirm in its definition.
            RandomSwap(probability=0.5, mean=PIXEL_MEAN)])
    else:
        transform = T.Compose([
            T.Resize([cfg.height, cfg.width]),
            T.ToTensor(),
            normalize_transform])
    return transform
def _get_codegen_targets(sdfg: SDFG, frame: framecode.DaCeCodeGenerator):
    """Populate ``frame.targets`` with every code-generation dispatcher the
    SDFG needs, and register the instrumentation providers used by its nodes
    in the frame's dispatcher."""
    disp = frame._dispatcher
    provider_mapping = InstrumentationProvider.get_provider_mapping()
    # "No instrumentation" maps to no provider for both enum families.
    disp.instrumentation[dtypes.InstrumentationType.No_Instrumentation] = None
    disp.instrumentation[dtypes.DataInstrumentationType.No_Instrumentation] = None
    for (node, parent) in sdfg.all_nodes_recursive():
        # --- target dispatchers -------------------------------------------
        if isinstance(node, SDFGState):
            frame.targets.add(disp.get_state_dispatcher(parent, node))
        elif isinstance(node, dace.nodes.EntryNode):
            # Scope dispatcher is selected by the entry node's schedule.
            frame.targets.add(disp.get_scope_dispatcher(node.schedule))
        elif isinstance(node, dace.nodes.Node):
            state: SDFGState = parent
            nsdfg = state.parent
            frame.targets.add(disp.get_node_dispatcher(nsdfg, state, node))
        # Access nodes additionally need an array dispatcher for the
        # storage location of the data they reference.
        if isinstance(node, dace.nodes.AccessNode):
            state: SDFGState = parent
            nsdfg = state.parent
            desc = node.desc(nsdfg)
            frame.targets.add(disp.get_array_dispatcher(desc.storage))
        # Copy/communication dispatchers for outgoing memlets of access
        # nodes and tasklets.
        if isinstance(node, (dace.nodes.AccessNode, dace.nodes.Tasklet)):
            state: SDFGState = parent
            for e in state.out_edges(node):
                if e.data.is_empty():
                    continue
                mtree = state.memlet_tree(e)
                if mtree.downwards:
                    # Downward tree: one copy dispatcher per non-empty leaf.
                    for leaf_e in mtree.leaves():
                        dst_node = leaf_e.dst
                        if leaf_e.data.is_empty():
                            continue
                        tgt = disp.get_copy_dispatcher(node, dst_node, leaf_e, state.parent, state)
                        if (tgt is not None):
                            frame.targets.add(tgt)
                else:
                    # Upward tree: dispatch on the root edge's destination.
                    dst_node = mtree.root().edge.dst
                    tgt = disp.get_copy_dispatcher(node, dst_node, e, state.parent, state)
                    if (tgt is not None):
                        frame.targets.add(tgt)
        # --- instrumentation providers ------------------------------------
        if hasattr(node, 'symbol_instrument'):
            disp.instrumentation[node.symbol_instrument] = provider_mapping[node.symbol_instrument]
        # 'instrument' takes precedence; otherwise fall back to the
        # consume/map sub-object's instrumentation setting.
        if hasattr(node, 'instrument'):
            disp.instrumentation[node.instrument] = provider_mapping[node.instrument]
        elif hasattr(node, 'consume'):
            disp.instrumentation[node.consume.instrument] = provider_mapping[node.consume.instrument]
        elif hasattr(node, 'map'):
            disp.instrumentation[node.map.instrument] = provider_mapping[node.map.instrument]
    # SDFG-level instrumentation, if any.
    if (sdfg.instrument != dtypes.InstrumentationType.No_Instrumentation):
        disp.instrumentation[sdfg.instrument] = provider_mapping[sdfg.instrument]
def test_gammaincc_edge_cases():
    """Exercise gammaincc at the x = inf and x = 0 boundaries, including
    non-positive values of the shape parameter a, for both scalar and
    array arguments."""
    inf = np.inf
    # Upper tail vanishes at x = inf regardless of the sign of a.
    for a in (1.2, (- 1.2), 0):
        assert (gammaincc(a, inf) == 0)
    npt.assert_equal(gammaincc([1.2, 2.2], inf), [0, 0])
    npt.assert_equal(gammaincc([(- 1.2), (- 2.2)], inf), [0, 0])
    npt.assert_equal(gammaincc([0.0, 1.0], inf), [0, 0])
    # Negative-integer a with finite positive x.
    for a in ((- 1.0), (- 2.0)):
        assert (gammaincc(a, 0.5) == 0)
    npt.assert_equal(gammaincc([(- 1.0), (- 2.0)], 0.5), [0, 0])
    # Positive a at x = 0: the entire mass sits in the upper tail.
    for a in (0.5, 1.5):
        assert (gammaincc(a, 0) == 1)
    npt.assert_equal(gammaincc([0.5, 1.5], 0), [1, 1])
    # Non-positive integer a at x = 0.
    for a in (0, (- 1), (- 2)):
        assert (gammaincc(a, 0) == 0)
    npt.assert_equal(gammaincc([0, (- 1), (- 2)], 0), [0, 0, 0])
    # Negative half-integer a at x = 0: infinite, with alternating sign.
    for (a, expected) in (((- 0.5), (- inf)), ((- 1.5), inf), ((- 2.5), (- inf))):
        assert (gammaincc(a, 0) == expected)
    npt.assert_equal(gammaincc([(- 0.5), (- 1.5)], 0), [(- inf), inf])
class Tanh_DenseNet(nn.Module):
    """DenseNet variant whose final pre-pooling activation is tanh.

    Args:
        block: dense-layer class, called as ``block(in_planes, growth_rate)``.
        nblocks: number of dense layers in each of the four dense blocks.
        growth_rate: channels added by each dense layer.
        reduction: channel compression factor applied by each Transition.
        num_classes: size of the final linear classifier.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):
        super(Tanh_DenseNet, self).__init__()
        self.growth_rate = growth_rate
        # Stem: 3x3 conv from RGB to 2 * growth_rate channels.
        num_planes = (2 * growth_rate)
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Dense blocks 1-3 are each followed by a compressing Transition.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += (nblocks[0] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += (nblocks[1] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += (nblocks[2] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        # Final dense block has no Transition; BN + classifier follow.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += (nblocks[3] * growth_rate)
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack ``nblock`` dense layers; each adds ``growth_rate`` channels."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # FIX: F.tanh is deprecated (removed from torch.nn.functional in
        # newer PyTorch); Tensor.tanh() is the numerically identical
        # replacement and needs no new imports.
        out = F.avg_pool2d(self.bn(out).tanh(), 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out