code
stringlengths
101
5.91M
def test_last_flag() -> None:
    """last_flag(seq) yields (is_last, value) pairs in order; is_last is
    truthy only for the final element."""
    items = [1, 2, 3, 4, 5]
    final = len(items) - 1
    for idx, (is_last, value) in enumerate(last_flag(items)):
        if idx == final:
            assert is_last
        else:
            assert not is_last
        assert value == items[idx]
class OptimRegime(Regime):
    """An optimization regime: wraps a ``torch`` optimizer whose settings
    (method, lr, momentum, regularizers, lr scheduler, ...) are re-adjusted
    over training time according to a ``regime`` specification.
    """

    def __init__(self, model, regime, defaults={}, filter=None,
                 use_float_copy=False, log=True):
        # NOTE: ``defaults={}`` (mutable default, never mutated here) and
        # ``filter`` (shadows the builtin) are kept for caller compatibility.
        super(OptimRegime, self).__init__(regime, defaults)
        if filter is not None:
            model = FilterParameters(model, **filter)
        if use_float_copy:
            # Optimize a float32 shadow of a (possibly low-precision) model.
            model = ModuleFloatShadow(model)
            self._original_parameters = list(model.original_parameters())
        self.parameters = list(model.parameters())
        # Placeholder optimizer; ``adjust`` replaces/configures it per regime.
        self.optimizer = torch.optim.SGD(self.parameters, lr=0)
        self.regularizer = regularization.Regularizer(model)
        self.use_float_copy = use_float_copy
        self.lr_scheduler = _EmptySchedule(self.optimizer, last_epoch=-1)
        self.schedule_time_frame = 'epoch'
        self.log = log

    def update(self, epoch=None, train_steps=None, metrics=None):
        """Adjust the optimizer for the current epoch/step.

        Returns True if any optimizer setting or the lr was updated.
        """
        updated = False
        if super(OptimRegime, self).update(epoch, train_steps):
            self.adjust(self.setting)
            updated = True
        if self.schedule_time_frame == 'epoch':
            time = int(floor(epoch)) + 1
        elif self.schedule_time_frame == 'step':
            time = train_steps + 1
        else:
            raise ValueError
        if time != self.lr_scheduler.last_epoch:
            prev_lr = self.get_lr()[0]
            if isinstance(self.lr_scheduler,
                          torch.optim.lr_scheduler.ReduceLROnPlateau):
                # ReduceLROnPlateau.step requires the monitored metric.
                self.lr_scheduler.step(metrics, epoch=time)
            else:
                # BUG FIX: previously this step() also ran for
                # ReduceLROnPlateau (no ``else``), which raises TypeError
                # (missing ``metrics``) / double-steps the schedule.
                self.lr_scheduler.step(epoch=time)
            updated = True
            if prev_lr != self.get_lr()[0] and self.log:
                logging.debug('OPTIMIZER - lr scheduled = %s' % self.get_lr()[0])
        return updated

    def adjust(self, setting):
        """Apply a single regime ``setting`` dict to the optimizer."""
        reset = setting.get('reset', False)
        if 'optimizer' in setting or reset:
            optim_method = _OPTIMIZERS[setting.get('optimizer', 'SGD')]
            if reset:
                # Drop accumulated optimizer state (momentum buffers, etc.).
                self.optimizer = torch.optim.SGD(self.parameters, lr=0)
                if self.log:
                    logging.debug('OPTIMIZER - reset setting')
            if not isinstance(self.optimizer, optim_method):
                # Swap optimizer class, keeping the current param groups.
                self.optimizer = optim_method(self.optimizer.param_groups)
                if self.log:
                    logging.debug('OPTIMIZER - setting method = %s' %
                                  setting['optimizer'])
        for param_group in self.optimizer.param_groups:
            for key in param_group.keys():
                if key in setting:
                    new_val = setting[key]
                    if new_val != param_group[key]:
                        if self.log:
                            logging.debug('OPTIMIZER - setting %s = %s' %
                                          (key, setting[key]))
                        param_group[key] = setting[key]
                        if key == 'lr':
                            # Keep scheduler base lrs in sync with a manual lr change.
                            param_group['initial_lr'] = param_group['lr']
                            base_lrs = list(map(lambda group: group['lr'],
                                                self.optimizer.param_groups))
                            self.lr_scheduler.base_lrs = base_lrs
                            if hasattr(self.optimizer, 'base_lrs'):
                                self.optimizer.base_lrs = base_lrs
        if 'regularizer' in setting:
            reg_list = deepcopy(setting['regularizer'])
            if not (isinstance(reg_list, list) or isinstance(reg_list, tuple)):
                reg_list = (reg_list,)
            regularizers = []
            for reg in reg_list:
                if isinstance(reg, dict):
                    # {'name': ..., **kwargs} -> (class, kwargs) pair
                    name = reg.pop('name')
                    regularizers.append((regularization.__dict__[name], reg))
                elif isinstance(reg, regularization.Regularizer):
                    regularizers.append(reg)
                else:
                    # Assume a Regularizer class; instantiate on the model.
                    regularizers.append(reg(self.regularizer._model))
            self.regularizer = regularization.RegularizerList(
                self.regularizer._model, regularizers)
        if 'lr_scheduler' in setting:
            schedule_config = setting['lr_scheduler']
            if isinstance(schedule_config, _LRScheduler):
                self.lr_scheduler = schedule_config
            elif isinstance(schedule_config, dict):
                name = schedule_config.pop('name')
                self.schedule_time_frame = schedule_config.pop('time_frame', 'epoch')
                schedule_config['last_epoch'] = self.lr_scheduler.last_epoch
                self.lr_scheduler = _LRSCHEDULERS[name](self.optimizer,
                                                        **schedule_config)
            elif schedule_config is None:
                self.lr_scheduler = _EmptySchedule(
                    self.optimizer, last_epoch=self.lr_scheduler.last_epoch)
            else:
                raise NotImplementedError

    def __getstate__(self):
        return {'optimizer_state': self.optimizer.__getstate__(),
                'regime': self.regime}

    def __setstate__(self, state):
        self.regime = state.get('regime')
        self.optimizer.__setstate__(state.get('optimizer_state'))

    def state_dict(self):
        """Return the optimizer state plus the regime as a dict."""
        return {'optimizer_state': self.optimizer.state_dict(),
                'regime': self.regime}

    def load_state_dict(self, state_dict):
        """Load a state previously produced by ``state_dict``."""
        optimizer_state_dict = state_dict['optimizer_state']
        self.__setstate__({'optimizer_state': optimizer_state_dict,
                           'regime': state_dict['regime']})

    def zero_grad(self):
        """Clear gradients of optimized params (and of the originals when
        a float shadow copy is in use)."""
        self.optimizer.zero_grad()
        if self.use_float_copy:
            for p in self._original_parameters:
                if p.grad is not None:
                    p.grad.detach().zero_()

    def step(self):
        """Run one optimization step, with regularizer hooks around it."""
        if self.use_float_copy:
            copy_params_grad(self.parameters, self._original_parameters)
        self.regularizer.pre_step()
        self.optimizer.step()
        self.regularizer.post_step()
        if self.use_float_copy:
            copy_params(self._original_parameters, self.parameters)

    def pre_forward(self):
        self.regularizer.pre_forward()

    def pre_backward(self):
        self.regularizer.pre_backward()

    def get_value(self, key):
        """Return ``key``'s value from every param group."""
        return [group[key] for group in self.optimizer.param_groups]

    def get_lr(self):
        return self.get_value('lr')
class PegasusTokenizer(metaclass=DummyObject):
    """Import-time placeholder: the real tokenizer needs sentencepiece."""

    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        # Raises an informative error when sentencepiece is not installed.
        requires_backends(self, ['sentencepiece'])
def q_int(n, q=None):
    """Return the quantum integer ``[n]_q = 1 + q + ... + q^(n-1)``.

    For negative ``n`` this is ``-q^n * [-n]_q``; ``q`` defaults to the
    generator of ``ZZ['q']``.  Raises ValueError for non-integer ``n``.
    """
    if n not in ZZ:
        raise ValueError(f'{n} must be an integer')
    if q is None:
        q = ZZ['q'].gen()
    if n == 0:
        return parent(q)(0)
    if n < 0:
        return -(q ** n) * sum(q ** k for k in range(-n))
    return sum(q ** k for k in range(n))
class FunctionAiryAiGeneral(BuiltinFunction):
    """Symbolic Airy function Ai with an order parameter.

    ``airy_ai_general(alpha, x)`` represents the alpha-th derivative of
    Ai(x); small integer orders are rewritten to the simple/prime forms.
    """

    def __init__(self):
        BuiltinFunction.__init__(self, 'airy_ai', nargs=2,
                                 latex_name='\\operatorname{Ai}')

    def _derivative_(self, alpha, x, diff_param=None):
        # d/dx raises the order by one; the order parameter itself is not
        # differentiable here.
        if (diff_param == 0):
            raise NotImplementedError('cannot differentiate airy_ai in the first parameter')
        return airy_ai_general((alpha + 1), x)

    def _eval_(self, alpha, x):
        """Simplify concrete (non-Expression) arguments; an implicit None
        return keeps the call symbolic."""
        if ((not isinstance(x, Expression)) and (not isinstance(alpha, Expression))):
            if self._is_numerical(x):
                return self._evalf_(alpha, x)
            if (alpha == 0):
                return airy_ai_simple(x)
            if (alpha == 1):
                return airy_ai_prime(x)
            if (alpha == 2):
                # Airy ODE: Ai''(x) = x * Ai(x)
                return (x * airy_ai_simple(x))
            else:
                return None

    def _evalf_(self, alpha, x, parent=None, algorithm=None):
        # Numerical evaluation via mpmath's airyai with derivative=alpha.
        return _mpmath_utils_call(_mpmath_airyai, x, derivative=alpha, parent=parent)
def make_np(x):
    """Coerce ``x`` to a numpy array.

    ndarrays pass through; strings name caffe2 blobs; scalars are wrapped
    in a 1-element array; torch tensors are converted.  Anything else
    raises NotImplementedError.
    """
    if isinstance(x, np.ndarray):
        return x
    # Strings must be checked before np.isscalar: numpy treats a str as
    # "scalar", but here it names a caffe2 blob.
    if isinstance(x, six.string_types):
        return _prepare_caffe2(x)
    if np.isscalar(x):
        return np.array([x])
    if isinstance(x, torch.Tensor):
        return _prepare_pytorch(x)
    message = ('Got {}, but numpy array, torch tensor, or caffe2 blob name '
               'are expected.')
    raise NotImplementedError(message.format(type(x)))
def predict(net, data):
    """Run ``net`` on ``data`` in eval mode.

    Returns ``(preds, probs)`` as numpy arrays, where preds thresholds the
    network outputs at 0.5 and probs is taken from ``net.vote``.
    """
    net.eval()
    outputs = net(data)
    probs = net.vote
    ones = torch.ones(outputs.shape).cuda()
    zeros = torch.zeros(outputs.shape).cuda()
    preds = torch.where(outputs > 0.5, ones, zeros)
    return preds.cpu().detach().numpy(), probs.cpu().detach().numpy()
def test_error():
    """error() must write its message to stderr when _quiet is disabled."""
    global _quiet
    saved_quiet = _quiet
    captured = StringIO()
    saveerr, sys.stderr = sys.stderr, captured
    try:
        _quiet = False
        error('hello, world')
    finally:
        # Restore the flag and swap the real stderr back; ``saveerr`` ends
        # up holding the StringIO we captured into.
        _quiet = saved_quiet
        saveerr, sys.stderr = sys.stderr, saveerr
    print(type(saveerr))
    assert 'hello, world\n' in saveerr.getvalue()
def _make_efficientnet_backbone(effnet):
    """Regroup an EfficientNet's stem and blocks into four sequential
    feature stages (layer1..layer4) on a fresh container module."""
    backbone = nn.Module()
    stem = [effnet.conv_stem, effnet.bn1, effnet.act1]
    backbone.layer1 = nn.Sequential(*stem, *effnet.blocks[0:2])
    backbone.layer2 = nn.Sequential(*effnet.blocks[2:3])
    backbone.layer3 = nn.Sequential(*effnet.blocks[3:5])
    backbone.layer4 = nn.Sequential(*effnet.blocks[5:9])
    return backbone
class CoNLL(Transform):
    """Transform for 10-column CoNLL-X style dependency data.

    NOTE(review): several methods below take ``cls``/have no ``self`` use —
    they look like stripped ``@classmethod`` (and ``src``/``tgt`` stripped
    ``@property``) decorators; confirm against upstream before calling them
    on instances vs. the class.
    """

    # The ten CoNLL-X columns, in file order.
    fields = ['ID', 'FORM', 'LEMMA', 'CPOS', 'POS', 'FEATS', 'HEAD', 'DEPREL', 'PHEAD', 'PDEPREL']

    def __init__(self, ID=None, FORM=None, LEMMA=None, CPOS=None, POS=None,
                 FEATS=None, HEAD=None, DEPREL=None, PHEAD=None, PDEPREL=None):
        super().__init__()
        self.ID = ID
        self.FORM = FORM
        self.LEMMA = LEMMA
        self.CPOS = CPOS
        self.POS = POS
        self.FEATS = FEATS
        self.HEAD = HEAD
        self.DEPREL = DEPREL
        self.PHEAD = PHEAD
        self.PDEPREL = PDEPREL

    def src(self):
        # Source-side fields (model inputs).
        return (self.FORM, self.LEMMA, self.CPOS, self.POS, self.FEATS)

    def tgt(self):
        # Target-side fields (model outputs).
        return (self.HEAD, self.DEPREL, self.PHEAD, self.PDEPREL)

    def get_arcs(cls, sequence):
        """Parse a sequence of head indices (strings) into ints."""
        return [int(i) for i in sequence]

    def get_sibs(cls, sequence):
        """For each token, the index of its nearest same-side sibling
        (same head, same direction), or -1 if none."""
        sibs = ([(- 1)] * (len(sequence) + 1))
        heads = ([0] + [int(i) for i in sequence])
        for i in range(1, len(heads)):
            hi = heads[i]
            for j in range((i + 1), len(heads)):
                hj = heads[j]
                (di, dj) = ((hi - i), (hj - j))
                # Same head and same side of it (offsets share a sign).
                if ((hi >= 0) and (hj >= 0) and (hi == hj) and ((di * dj) > 0)):
                    if (abs(di) > abs(dj)):
                        sibs[i] = j
                    else:
                        sibs[j] = i
                    break
            # NOTE(review): the original indentation of the break relative to
            # the if/else was lost in flattening — verify against upstream.
        return sibs[1:]

    def get_edges(cls, sequence):
        """Adjacency matrix (0/1) from 'head:label|head:label' strings."""
        edges = [([0] * (len(sequence) + 1)) for _ in range((len(sequence) + 1))]
        for (i, s) in enumerate(sequence, 1):
            if (s != '_'):
                for pair in s.split('|'):
                    edges[i][int(pair.split(':')[0])] = 1
        return edges

    def get_labels(cls, sequence):
        """Label matrix (None where no edge) from 'head:label|...' strings."""
        labels = [([None] * (len(sequence) + 1)) for _ in range((len(sequence) + 1))]
        for (i, s) in enumerate(sequence, 1):
            if (s != '_'):
                for pair in s.split('|'):
                    (edge, label) = pair.split(':')
                    labels[i][int(edge)] = label
        return labels

    def build_relations(cls, chart):
        """Inverse of get_labels: serialize a label chart back to
        'head:label|...' strings ('_' for rows with no edges)."""
        sequence = (['_'] * len(chart))
        for (i, row) in enumerate(chart):
            pairs = [(j, label) for (j, label) in enumerate(row) if (label is not None)]
            if (len(pairs) > 0):
                sequence[i] = '|'.join((f'{head}:{label}' for (head, label) in pairs))
        return sequence

    def toconll(cls, tokens):
        """Render raw tokens (words, or (word, tag) pairs) as a minimal
        CoNLL block with '_' placeholders.

        NOTE(review): the column separators inside the f-strings appear as
        plain spaces here while the trailing placeholders are tab-joined —
        CoNLL normally uses tabs throughout; verify against upstream.
        """
        if isinstance(tokens[0], str):
            s = '\n'.join([(f'{i} {word} ' + '\t'.join((['_'] * 8)))
                           for (i, word) in enumerate(tokens, 1)])
        else:
            s = '\n'.join([(f'{i} {word} _ {tag} ' + '\t'.join((['_'] * 6)))
                           for (i, (word, tag)) in enumerate(tokens, 1)])
        return (s + '\n')

    def isprojective(cls, sequence):
        """True iff the head sequence contains no crossing arcs."""
        pairs = [(h, d) for (d, h) in enumerate(sequence, 1) if (h >= 0)]
        for (i, (hi, di)) in enumerate(pairs):
            for (hj, dj) in pairs[(i + 1):]:
                ((li, ri), (lj, rj)) = (sorted([hi, di]), sorted([hj, dj]))
                if ((li <= hj <= ri) and (hi == dj)):
                    return False
                if ((lj <= hi <= rj) and (hj == di)):
                    return False
                # Partially overlapping spans on the same side => crossing.
                if (((li < lj < ri) or (li < rj < ri)) and (((li - lj) * (ri - rj)) > 0)):
                    return False
        return True

    def istree(cls, sequence, proj=False, multiroot=False):
        """True iff the head sequence forms a valid (optionally projective,
        optionally single-rooted) dependency tree with no cycles."""
        from supar.utils.alg import tarjan
        if (proj and (not cls.isprojective(sequence))):
            return False
        n_roots = sum(((head == 0) for head in sequence))
        if (n_roots == 0):
            return False
        if ((not multiroot) and (n_roots > 1)):
            return False
        # No token may head itself.
        if any(((i == head) for (i, head) in enumerate(sequence, 1))):
            return False
        # tarjan yields strongly connected components (cycles); none allowed.
        return (next(tarjan(sequence), None) is None)

    def load(self, data, proj=False, max_len=None, **kwargs):
        """Load sentences from a CoNLL file path or from raw token lists,
        optionally filtering to projective and/or short sentences."""
        if isinstance(data, str):
            with open(data, 'r') as f:
                lines = [line.strip() for line in f]
        else:
            data = ([data] if isinstance(data[0], str) else data)
            lines = '\n'.join([self.toconll(i) for i in data]).split('\n')
        (i, start, sentences) = (0, 0, [])
        # Blank lines delimit sentences.
        for line in progress_bar(lines, leave=False):
            if (not line):
                sentences.append(CoNLLSentence(self, lines[start:i]))
                start = (i + 1)
            i += 1
        if proj:
            sentences = [i for i in sentences if self.isprojective(list(map(int, i.arcs)))]
        if (max_len is not None):
            sentences = [i for i in sentences if (len(i) < max_len)]
        return sentences
def _get_all_bases(class_or_name: Union[(str, Type)]) -> List[str]:
    """Return the names of all classes in the MRO of ``class_or_name``,
    or a one-element list when a bare class name string is given."""
    if isinstance(class_or_name, str):
        return [class_or_name]
    return [klass.__name__ for klass in class_or_name.__mro__]
def compact_array(array, depth=(- 1)):
    """Collect non-empty ``Content`` nodes of a nested array.

    Recurses ``depth`` levels (negative means unlimited); a non-empty
    Content node reached at the cutoff is appended to the result.
    """
    data_items = []

    def walk(node, remaining):
        if isinstance(node, Content) and node.__len__() > 0:
            if remaining != 0:
                for idx in range(node.__len__()):
                    walk(node.__getitem__(idx), remaining - 1)
            else:
                data_items.append(node)

    walk(array, depth)
    return data_items
# FIX: the three decorator lines had been mangled to bare `.parametrize(...)`
# fragments (a syntax error); restored as stacked pytest parametrizations.
@pytest.mark.parametrize('seed', [313, 314])
@pytest.mark.parametrize('op', ['+', '-', '*', '/', '**'])
@pytest.mark.parametrize('shape', [(2, 3, 4), (0,)])
def test_ndarray_arithmetic_scalar_ops(seed, op, shape):
    """NdArray <op> scalar must match numpy, and the in-place variants must
    mutate the same object (except ``**=``, which rebinds)."""
    rng = np.random.RandomState(seed)
    vx = nn.NdArray.from_numpy_array(rng.randn(*shape).astype(np.float32))
    a = rng.randn()
    if op == '**' and vx.size > 0:
        # Shift the data positive so a fractional power is well-defined.
        vx.data += -vx.data.min() + 1.0
    vz = eval('vx {0} a'.format(op))
    ref_z = eval('vx.data {0} a'.format(op))
    assert_allclose(ref_z, vz.data)
    vx_bak = vx
    if op == '+':
        vx += a
    elif op == '-':
        vx -= a
    elif op == '*':
        vx *= a
    elif op == '/':
        vx /= a
    elif op == '**':
        vx **= a
    assert_allclose(vx.data, vz.data)
    if op == '**':
        # '**=' has no in-place implementation here, so skip identity check.
        return
    assert vx is vx_bak
def _parse_bool(value):
    """Parse a CLI boolean: accepts true/false, yes/no, 1/0 (case-insensitive)."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 'yes', '1'):
        return True
    if lowered in ('false', 'no', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def main():
    """CLI entry point: build pretraining examples from raw text files,
    optionally fanning out across multiple processes."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--corpus-dir', required=True,
                        help='Location of pre-training text files.')
    parser.add_argument('--vocab-file', required=True,
                        help='Location of vocabulary file.')
    parser.add_argument('--output-dir', required=True,
                        help='Where to write out the tfrecords.')
    parser.add_argument('--max-seq-length', default=128, type=int,
                        help='Number of tokens per example.')
    parser.add_argument('--num-processes', default=1, type=int,
                        help='Parallelize across multiple processes.')
    # BUG FIX: this flag used ``type=bool`` — argparse would then treat ANY
    # non-empty string (including "False") as True.  _parse_bool parses the
    # value properly; the default is unchanged.
    parser.add_argument('--blanks-separate-docs', default=True, type=_parse_bool,
                        help='Whether blank lines indicate document boundaries.')
    parser.add_argument('--do-lower-case', dest='do_lower_case',
                        action='store_true', help='Lower case input text.')
    parser.add_argument('--no-lower-case', dest='do_lower_case',
                        action='store_false', help="Don't lower case input text.")
    parser.set_defaults(do_lower_case=True)
    args = parser.parse_args()

    # Recreate the output dir from scratch.
    utils.rmkdir(args.output_dir)
    if args.num_processes == 1:
        write_examples(0, args)
        return
    jobs = []
    for i in range(args.num_processes):
        job = multiprocessing.Process(target=write_examples, args=(i, args))
        jobs.append(job)
        job.start()
    for job in jobs:
        job.join()
def simple_total_col_ion_coefficients(simple_index_nlte_ion):
    """Zeroed collisional-ionization-coefficient DataFrame indexed by the
    given NLTE ion index (two entries)."""
    coefficients = [0.0, 0.0]
    return pd.DataFrame(coefficients, index=simple_index_nlte_ion)
class ArgScopeTest(tf.test.TestCase):
    """Tests for ``scopes.arg_scope``: a context manager that injects
    default keyword arguments into decorated functions.

    Scope dicts are keyed by ``(func.__module__, func.__name__)``.
    """

    def testEmptyArgScope(self):
        # Outside any arg_scope the current scope is empty.
        with self.test_session():
            self.assertEqual(scopes._current_arg_scope(), {})

    def testCurrentArgScope(self):
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        key_op = (func1.__module__, func1.__name__)
        current_scope = {key_op: func1_kwargs.copy()}
        with self.test_session():
            with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope:
                self.assertDictEqual(scope, current_scope)

    def testCurrentArgScopeNested(self):
        # Nested scopes merge: both functions' kwargs are visible inside.
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        func2_kwargs = {'b': 2, 'd': [2]}

        def key(f):
            return (f.__module__, f.__name__)
        current_scope = {key(func1): func1_kwargs.copy(), key(func2): func2_kwargs.copy()}
        with self.test_session():
            with scopes.arg_scope([func1], a=1, b=None, c=[1]):
                with scopes.arg_scope([func2], b=2, d=[2]) as scope:
                    self.assertDictEqual(scope, current_scope)

    def testReuseArgScope(self):
        # A captured scope object can be re-entered later.
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        key_op = (func1.__module__, func1.__name__)
        current_scope = {key_op: func1_kwargs.copy()}
        with self.test_session():
            with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1:
                pass
            with scopes.arg_scope(scope1) as scope:
                self.assertDictEqual(scope, current_scope)

    def testReuseArgScopeNested(self):
        # Re-entering an inner captured scope restores the merged contents.
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        func2_kwargs = {'b': 2, 'd': [2]}

        def key(f):
            return (f.__module__, f.__name__)
        current_scope1 = {key(func1): func1_kwargs.copy()}
        current_scope2 = {key(func1): func1_kwargs.copy(), key(func2): func2_kwargs.copy()}
        with self.test_session():
            with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1:
                with scopes.arg_scope([func2], b=2, d=[2]) as scope2:
                    pass
            with scopes.arg_scope(scope1):
                self.assertDictEqual(scopes._current_arg_scope(), current_scope1)
            with scopes.arg_scope(scope2):
                self.assertDictEqual(scopes._current_arg_scope(), current_scope2)

    def testSimpleArgScope(self):
        # Scoped kwargs are injected into the call.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        with self.test_session():
            with scopes.arg_scope([func1], a=1, b=None, c=[1]):
                (args, kwargs) = func1(0)
                self.assertTupleEqual(args, func1_args)
                self.assertDictEqual(kwargs, func1_kwargs)

    def testSimpleArgScopeWithTuple(self):
        # A tuple of functions works the same as a list.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        with self.test_session():
            with scopes.arg_scope((func1,), a=1, b=None, c=[1]):
                (args, kwargs) = func1(0)
                self.assertTupleEqual(args, func1_args)
                self.assertDictEqual(kwargs, func1_kwargs)

    def testOverwriteArgScope(self):
        # An explicit call-site kwarg beats the scoped default.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
        with scopes.arg_scope([func1], a=1, b=None, c=[1]):
            (args, kwargs) = func1(0, b=2)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)

    def testNestedArgScope(self):
        # An inner scope overrides a single kwarg for the same function.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        with scopes.arg_scope([func1], a=1, b=None, c=[1]):
            (args, kwargs) = func1(0)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)
            func1_kwargs['b'] = 2
            with scopes.arg_scope([func1], b=2):
                (args, kwargs) = func1(0)
                self.assertTupleEqual(args, func1_args)
                self.assertDictEqual(kwargs, func1_kwargs)

    def testSharedArgScope(self):
        # One scope can cover several functions at once.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]):
            (args, kwargs) = func1(0)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)
            (args, kwargs) = func2(0)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)

    def testSharedArgScopeTuple(self):
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]):
            (args, kwargs) = func1(0)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)
            (args, kwargs) = func2(0)
            self.assertTupleEqual(args, func1_args)
            self.assertDictEqual(kwargs, func1_kwargs)

    def testPartiallySharedArgScope(self):
        # Shared outer scope plus per-function inner scopes.
        func1_args = (0,)
        func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
        func2_args = (1,)
        func2_kwargs = {'a': 1, 'b': None, 'd': [2]}
        with scopes.arg_scope([func1, func2], a=1, b=None):
            with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]):
                (args, kwargs) = func1(0)
                self.assertTupleEqual(args, func1_args)
                self.assertDictEqual(kwargs, func1_kwargs)
                (args, kwargs) = func2(1)
                self.assertTupleEqual(args, func2_args)
                self.assertDictEqual(kwargs, func2_kwargs)
def _read_array(f, typecode, array_desc):
    """Read an IDL array variable from an open save-file stream ``f``.

    ``array_desc`` supplies nbytes/nelements/ndims/dims from the header.
    Simple numeric dtypes are read in one ``frombuffer`` call; 16-bit ints
    are stored widened to 32 bits; everything else falls back to
    element-by-element reads into an object array.
    """
    if (typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]):
        if (typecode == 1):
            # Byte arrays carry their own length prefix; cross-check it.
            nbytes = _read_int32(f)
            if (nbytes != array_desc['nbytes']):
                warnings.warn('Not able to verify number of bytes from header')
        # Read all the data at once for the simple dtypes.
        array = np.frombuffer(f.read(array_desc['nbytes']),
                              dtype=DTYPE_DICT[typecode])
    elif (typecode in [2, 12]):
        # 16-bit ints are stored as 32-bit words; keep every other halfword.
        array = np.frombuffer(f.read((array_desc['nbytes'] * 2)),
                              dtype=DTYPE_DICT[typecode])[1::2]
    else:
        # Object-like types (strings, structures, ...): one element at a time.
        array = []
        for i in range(array_desc['nelements']):
            dtype = typecode
            data = _read_data(f, dtype)
            array.append(data)
        array = np.array(array, dtype=np.object_)
    if (array_desc['ndims'] > 1):
        # IDL stores dims in Fortran order; reverse for numpy's C order.
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        array = array.reshape(dims)
    # Arrays are padded to 32-bit boundaries in the file.
    _align_32(f)
    return array
def infer_aliasing(node: nodes.NestedSDFG, sdfg: SDFG, state: SDFGState) -> None:
    """Mark inner arrays of a nested SDFG as possibly aliased.

    If one outer data descriptor is reachable through more than one
    connector of ``node`` (or is itself flagged ``may_alias``), every
    corresponding inner Array descriptor gets ``may_alias = True``.
    """
    # Outer array name -> set of connectors of ``node`` that address it.
    data_to_conn: Dict[(str, Set[str])] = defaultdict(set)

    def _infer_aliased_connectors(get_edges: Callable[([nodes.NestedSDFG], List[Edge[Memlet]])], get_conn: Callable[([Edge[Memlet]], str)], outgoing: bool):
        # Record which connector each non-empty memlet's arrays go through.
        for e in get_edges(node):
            if e.data.is_empty():
                continue
            dnames = _get_addressed_arrays(state, e, outgoing=outgoing)
            conn = get_conn(e)
            for dname in dnames:
                data_to_conn[dname].add(conn)

    # Gather connectors for inputs (dst_conn) and outputs (src_conn).
    _infer_aliased_connectors(state.in_edges, (lambda e: e.dst_conn), False)
    _infer_aliased_connectors(state.out_edges, (lambda e: e.src_conn), True)

    # Multiple connectors per outer array (or pre-flagged aliasing) means
    # the matching inner arrays may refer to the same memory.
    for (dname, conns) in data_to_conn.items():
        if ((len(conns) > 1) or sdfg.arrays[dname].may_alias):
            for aname in conns:
                if (aname in node.sdfg.arrays):
                    desc = node.sdfg.arrays[aname]
                    if isinstance(desc, data.Array):
                        desc.may_alias = True
class OnnxifiTest(TestCase):
    """Tests for the Caffe2 'Onnxifi' operator (running serialized ONNX
    models through an ONNXIFI backend).

    NOTE(review): the bare string expressions before each test look like
    stripped decorators (presumably ``@unittest.skip('Need ONNXIFI backend
    support')``) — verify against upstream; as written they are no-ops.
    """

    ('Need ONNXIFI backend support')

    def test_relu_graph(self):
        """A one-node Relu ONNX graph must reproduce np.maximum(X, 0)."""
        batch_size = 1
        X = np.random.randn(batch_size, 1, 3, 2).astype(np.float32)
        graph_def = make_graph(
            [make_node('Relu', ['X'], ['Y'])], name='test',
            inputs=[make_tensor_value_info('X', onnx.TensorProto.FLOAT,
                                           [batch_size, 1, 3, 2])],
            outputs=[make_tensor_value_info('Y', onnx.TensorProto.FLOAT,
                                            [batch_size, 1, 3, 2])])
        model_def = make_model(graph_def, producer_name='relu-test')
        op = core.CreateOperator(
            'Onnxifi', ['X'], ['Y'],
            onnx_model=model_def.SerializeToString(),
            input_names=['X'], output_names=['Y'],
            output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, batch_size, 1, 3, 2])
        workspace.FeedBlob('X', X)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')
        np.testing.assert_almost_equal(Y, np.maximum(X, 0))

    ('Need ONNXIFI backend support')

    def test_conv_graph(self):
        """A 3x3 unpadded Conv; workspace blobs X0/W0 are mapped onto the
        model's X/W names (W supplied via ``initializers``)."""
        X = np.array([[[[0.0, 1.0, 2.0, 3.0, 4.0],
                        [5.0, 6.0, 7.0, 8.0, 9.0],
                        [10.0, 11.0, 12.0, 13.0, 14.0],
                        [15.0, 16.0, 17.0, 18.0, 19.0],
                        [20.0, 21.0, 22.0, 23.0, 24.0]]]]).astype(np.float32)
        W = np.array([[[[1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0]]]]).astype(np.float32)
        Y_without_padding = np.array([[[[54.0, 63.0, 72.0],
                                        [99.0, 108.0, 117.0],
                                        [144.0, 153.0, 162.0]]]]).astype(np.float32)
        graph_def = make_graph(
            [make_node('Conv', inputs=['X', 'W'], outputs=['Y'],
                       kernel_shape=[3, 3], pads=[0, 0, 0, 0])],
            name='test',
            inputs=[make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 1, 5, 5]),
                    make_tensor_value_info('W', onnx.TensorProto.FLOAT, [1, 1, 3, 3])],
            outputs=[make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1, 1, 3, 3])])
        model_def = make_model(graph_def, producer_name='conv-test')
        op = core.CreateOperator(
            'Onnxifi', ['X0'], ['Y0'],
            onnx_model=model_def.SerializeToString(),
            initializers=['W', 'W0'],
            input_names=['X'], output_names=['Y'],
            output_shape_hint_0=[ONNXIFI_DATATYPE_FLOAT32, 1, 1, 3, 3])
        workspace.FeedBlob('X0', X)
        workspace.FeedBlob('W0', W)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y0')
        np.testing.assert_almost_equal(Y, Y_without_padding)
def shrink_simplicial_complex(K):
    """Quotient the simplicial set of ``K`` by a contractible subcomplex,
    producing a smaller homotopy-equivalent simplicial set."""
    contractible = K._contractible_subcomplex()
    return SimplicialSet_finite(K).quotient(contractible)
class ControlSuite():
    """Metadata and environment factory for a DeepMind Control Suite task.

    Exposes per-task feature shapes, the dataset path for its training
    split, and a lazily constructed, wrapped dm_control environment.
    """

    def __init__(self, task_name='humanoid_run'):
        self.task_name = task_name
        self._uint8_features = set([])
        self._environment = None
        # task_name -> (domain_name, task_name, feature shapes).
        task_table = {
            'fish_swim': ('fish', 'swim', {
                'observation/target': (3,), 'observation/velocity': (13,),
                'observation/upright': (1,), 'observation/joint_angles': (7,),
                'action': (5,), 'discount': (), 'reward': (),
                'episodic_reward': (), 'step_type': ()}),
            'humanoid_run': ('humanoid', 'run', {
                'observation/velocity': (27,), 'observation/com_velocity': (3,),
                'observation/torso_vertical': (3,), 'observation/extremities': (12,),
                'observation/head_height': (1,), 'observation/joint_angles': (21,),
                'action': (21,), 'discount': (), 'reward': (),
                'episodic_reward': (), 'step_type': ()}),
            'manipulator_insert_ball': ('manipulator', 'insert_ball', {
                'observation/arm_pos': (16,), 'observation/arm_vel': (8,),
                'observation/touch': (5,), 'observation/hand_pos': (4,),
                'observation/object_pos': (4,), 'observation/object_vel': (3,),
                'observation/target_pos': (4,), 'action': (5,), 'discount': (),
                'reward': (), 'episodic_reward': (), 'step_type': ()}),
            'manipulator_insert_peg': ('manipulator', 'insert_peg', {
                'observation/arm_pos': (16,), 'observation/arm_vel': (8,),
                'observation/touch': (5,), 'observation/hand_pos': (4,),
                'observation/object_pos': (4,), 'observation/object_vel': (3,),
                'observation/target_pos': (4,), 'episodic_reward': (),
                'action': (5,), 'discount': (), 'reward': (), 'step_type': ()}),
            'cartpole_swingup': ('cartpole', 'swingup', {
                'observation/position': (3,), 'observation/velocity': (2,),
                'action': (1,), 'discount': (), 'reward': (),
                'episodic_reward': (), 'step_type': ()}),
            'walker_walk': ('walker', 'walk', {
                'observation/orientations': (14,), 'observation/velocity': (9,),
                'observation/height': (1,), 'action': (6,), 'discount': (),
                'reward': (), 'episodic_reward': (), 'step_type': ()}),
            'walker_stand': ('walker', 'stand', {
                'observation/orientations': (14,), 'observation/velocity': (9,),
                'observation/height': (1,), 'action': (6,), 'discount': (),
                'reward': (), 'episodic_reward': (), 'step_type': ()}),
            'cheetah_run': ('cheetah', 'run', {
                'observation/position': (8,), 'observation/velocity': (9,),
                'action': (6,), 'discount': (), 'reward': (),
                'episodic_reward': (), 'step_type': ()}),
            'finger_turn_hard': ('finger', 'turn_hard', {
                'observation/position': (4,), 'observation/velocity': (3,),
                'observation/touch': (2,), 'observation/target_position': (2,),
                'observation/dist_to_target': (1,), 'action': (2,),
                'discount': (), 'reward': (), 'episodic_reward': (),
                'step_type': ()}),
        }
        if task_name not in task_table:
            raise ValueError("Task '{}' not found.".format(task_name))
        (self._domain_name, self._task_name, self._shapes) = task_table[task_name]
        self._data_path = 'dm_control_suite/{}/train'.format(task_name)

    def shapes(self):
        """Feature-name -> shape mapping for this task."""
        return self._shapes

    def data_path(self):
        """Dataset path of this task's training split."""
        return self._data_path

    def uint8_features(self):
        """Names of features stored as uint8 (empty for these tasks)."""
        return self._uint8_features

    def environment(self):
        """Build (once) and return the wrapped dm_control environment."""
        if self._environment is not None:
            return self._environment
        self._environment = suite.load(domain_name=self._domain_name,
                                       task_name=self._task_name)
        self._environment = wrappers.SinglePrecisionWrapper(self._environment)
        self._environment = NormilizeActionSpecWrapper(self._environment)
        return self._environment
# NOTE(review): the line below looks like a stripped decorator application —
# presumably ``@register_lr_scheduler('reduce_lr_on_plateau')`` — verify
# against upstream; as written it is a bare call.
_lr_scheduler('reduce_lr_on_plateau')


class ReduceLROnPlateau(FairseqLRScheduler):
    """Decrease the learning rate when a validation metric stops improving.

    Thin wrapper around ``torch.optim.lr_scheduler.ReduceLROnPlateau``.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with reduce_lr_on_plateau. Consider --lr-scheduler=fixed instead.')
        # patience=0: anneal as soon as the metric fails to improve.
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer.optimizer, patience=0, factor=args.lr_shrink,
            threshold=args.lr_threshold)

    def add_args(parser):
        """Add scheduler-specific CLI arguments.

        NOTE(review): takes a bare ``parser`` (no self) — presumably a
        stripped ``@staticmethod``; confirm before calling on an instance.
        """
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--lr-threshold', default=0.0001, type=float, metavar='LT',
                            help='Threshold for measuring the new optimum, to only focus on significant changes')

    def state_dict(self):
        # Persist scheduler progress (best metric seen and last epoch).
        return {'best': self.lr_scheduler.best,
                'last_epoch': self.lr_scheduler.last_epoch}

    def load_state_dict(self, state_dict):
        self.lr_scheduler.best = state_dict['best']
        # Older checkpoints may lack 'last_epoch'.
        if ('last_epoch' in state_dict):
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    def step(self, epoch, val_loss=None):
        """Update at the end of ``epoch``; only a real val_loss can trigger
        annealing.  Returns the (possibly reduced) learning rate."""
        if (val_loss is not None):
            self.lr_scheduler.step(val_loss, epoch)
        else:
            self.lr_scheduler.last_epoch = epoch
        return self.optimizer.get_lr()
def brightness(image, factor):
    """Adjust image brightness.

    The policy ``factor`` (0..MAX_LEVEL) is rescaled to a PIL enhancement
    factor in [0.1, 1.9]; the result is returned as a numpy array.
    """
    enhancement = (factor / MAX_LEVEL) * 1.8 + 0.1
    pil_image = Image.fromarray(image)
    enhanced = ImageEnhance.Brightness(pil_image).enhance(enhancement)
    return np.asarray(enhanced)
class M2M100ForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder: the real model needs torch installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error when torch is not available.
        requires_backends(self, ['torch'])
class ConvReLU3d(torch.nn.Sequential):
    """Sequential container fusing a Conv3d followed by a ReLU (used when
    fusing modules for quantization)."""

    def __init__(self, conv, relu):
        # Exact type checks (not isinstance): fusion must not silently
        # accept subclasses.
        types_ok = type(conv) == Conv3d and type(relu) == ReLU
        assert types_ok, 'Incorrect types for input modules{}{}'.format(
            type(conv), type(relu))
        super(ConvReLU3d, self).__init__(conv, relu)
def test_recovery_custom_io(tmpdir):
    """Round-trip a custom recoverable object through Checkpointer
    save_checkpoint / recover_if_possible."""
    from speechbrain.utils.checkpoints import register_checkpoint_hooks
    from speechbrain.utils.checkpoints import mark_as_saver
    from speechbrain.utils.checkpoints import mark_as_loader
    from speechbrain.utils.checkpoints import Checkpointer

    # FIX: the three decorators below had been mangled into bare expressions
    # (`_checkpoint_hooks`, `_as_saver`, `_as_loader`), raising NameError;
    # restored to the hook decorators imported just above.
    @register_checkpoint_hooks
    class CustomRecoverable():

        def __init__(self, param):
            self.param = int(param)

        @mark_as_saver
        def save(self, path):
            with open(path, 'w') as fo:
                fo.write(str(self.param))

        @mark_as_loader
        def load(self, path, end_of_epoch, device):
            del end_of_epoch
            del device
            with open(path) as fi:
                self.param = int(fi.read())

    custom_recoverable = CustomRecoverable(0)
    recoverer = Checkpointer(tmpdir, {'custom_recoverable': custom_recoverable})
    custom_recoverable.param = 1
    # With no checkpoint saved yet, recovery is a no-op.
    ckpt = recoverer.recover_if_possible()
    assert ckpt is None
    ckpt = recoverer.save_checkpoint()
    custom_recoverable.param = 2
    loaded_ckpt = recoverer.recover_if_possible()
    # The checkpoint just saved is the one recovered, restoring param == 1.
    assert ckpt == loaded_ckpt
    assert custom_recoverable.param == 1
def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for the ``dsolve`` subpackage:
    compiles the bundled SuperLU sources and the ``_superlu`` extension."""
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils.system_info import get_info
    from scipy._build_utils import numpy_nodepr_api

    config = Configuration('dsolve', parent_package, top_path)
    config.add_data_dir('tests')
    lapack_opt = get_info('lapack_opt', notfound_action=2)

    # SuperLU's internal timer is unavailable on Windows.
    superlu_defs = [('NO_TIMER', 1)] if sys.platform == 'win32' else []
    superlu_defs.append(('USE_VENDOR_BLAS', 1))

    superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
    sources = sorted(glob.glob(join(superlu_src, '*.c')))
    headers = list(glob.glob(join(superlu_src, '*.h')))
    config.add_library('superlu_src', sources=sources, macros=superlu_defs,
                       include_dirs=[superlu_src])

    ext_sources = ['_superlumodule.c', '_superlu_utils.c', '_superluobject.c']
    config.add_extension('_superlu', sources=ext_sources,
                         libraries=['superlu_src'],
                         depends=(sources + headers),
                         extra_info=lapack_opt, **numpy_nodepr_api)
    config.add_data_files('SuperLU/License.txt')
    return config
class Gpt2Embeddings(StateDictSerializationMixin, eqx.Module):
    """GPT-2 token + learned position embeddings with dropout, and the tied
    unembedding projection."""
    Vocab: Axis = eqx.static_field()
    config: Gpt2Config = eqx.static_field()
    token_embeddings: NamedArray
    position_embeddings: NamedArray
    dropout: hnn.Dropout

    # FIX: ``init`` takes no self/cls, so it must be a @staticmethod (the
    # decorator had been stripped during flattening).
    @staticmethod
    def init(Vocab: Axis, config: Gpt2Config, *, key) -> 'Gpt2Embeddings':
        """Randomly initialize embeddings; positions get half the std of tokens."""
        (k_wte, k_wpe, k_out) = jrandom.split(key, 3)
        token_embeddings = (hax.random.normal(k_wte, (Vocab, config.Embed))
                            * config.initializer_range)
        position_embeddings = (hax.random.normal(k_wpe, (config.Pos, config.Embed))
                               * (config.initializer_range / 2))
        dropout = hnn.Dropout(pdrop=config.embed_pdrop)
        return Gpt2Embeddings(Vocab, config, token_embeddings,
                              position_embeddings, dropout)

    # FIX: a stray bare ``_call`` expression stood here (a mangled decorator
    # remnant) and raised NameError at class creation; removed.
    def embed(self, input_ids, *, key):
        """Token lookup plus position embeddings (truncated to the input
        length), followed by dropout."""
        input_embeds = self.token_embeddings.take('vocab', input_ids)
        position_embeds = self.position_embeddings
        input_len = input_ids.resolve_axis('position').size
        x = (input_embeds + position_embeds[('position', hax.dslice(0, input_len))])
        x = self.dropout(x, key=key)
        return x

    def unembed(self, x: NamedArray):
        """Project hidden states back to vocabulary logits (weight tying)."""
        return hax.dot('embed', x, self.token_embeddings)

    def _state_dict_key_map(self) -> Dict[(str, Optional[str])]:
        # Map field names onto HF GPT-2 checkpoint keys.
        return {'token_embeddings': 'wte.weight',
                'position_embeddings': 'wpe.weight'}

    def resize_embeddings(self, new_size: int, key: Optional[PRNGKeyArray]=None):
        """Return a copy with the vocab axis resized (new rows drawn with ``key``)."""
        new_weights = hax.tree_util.resize_axis(self.token_embeddings,
                                                self.Vocab, new_size, key=key)
        return dataclasses.replace(self, Vocab=self.Vocab.resize(new_size),
                                   token_embeddings=new_weights)
class Func_assoc_legendre_Q(BuiltinFunction):
    """Symbolic associated Legendre function of the second kind, Q_n^m(x)."""

    def __init__(self):
        # nargs=3: arguments are (degree n, order m, argument x); the
        # 'conversions' dict maps this function to its name in external CASes.
        BuiltinFunction.__init__(self, 'gen_legendre_Q', nargs=3, latex_name='Q', conversions={'maxima': 'assoc_legendre_q', 'mathematica': 'LegendreQ', 'maple': 'LegendreQ'})

    def _eval_(self, n, m, x, *args, **kwds):
        """Symbolic evaluation hook.

        Tries closed-form special values first; otherwise evaluates by the
        recurrence for non-negative integer n and m when x is exact (integer)
        or symbolic.  Returns None to leave the call unevaluated.
        """
        ret = self._eval_special_values_(n, m, x)
        if (ret is not None):
            return ret
        # The recursion only terminates for integer n, m >= 0; x must not be
        # an inexact numeric, so precision is not silently lost.
        if ((n in ZZ) and (m in ZZ) and (n >= 0) and (m >= 0) and ((x in ZZ) or (not SR(x).is_numeric()))):
            return self.eval_recursive(n, m, x)

    def _eval_special_values_(self, n, m, x):
        """Closed forms: m == 0 reduces to plain legendre_Q; x == 0 has an
        explicit gamma/sine expression (rational and symbolic variants)."""
        if (m == 0):
            return legendre_Q(n, x)
        if x.is_zero():
            from .gamma import gamma
            from .other import sqrt
            from .trig import sin
            if ((m in QQ) and (n in QQ)):
                # Rational parameters: keep gamma arguments in QQ for exactness.
                return (((((- sqrt(SR.pi())) * sin(((SR.pi() / 2) * (m + n)))) * gamma((QQ(((m + n) + 1)) / 2))) / gamma(((QQ((n - m)) / 2) + 1))) * (2 ** (m - 1)))
            elif (isinstance(n, Expression) or isinstance(m, Expression)):
                # Symbolic parameters: same formula without QQ coercion.
                return (((((- sqrt(SR.pi())) * sin(((SR.pi() / 2) * (m + n)))) * gamma((((m + n) + 1) / 2))) / gamma((((n - m) / 2) + 1))) * (2 ** (m - 1)))

    def _evalf_(self, n, m, x, parent=None, **kwds):
        """Numerical evaluation: special values first, then mpmath's legenq."""
        ret = self._eval_special_values_(n, m, x)
        if (ret is not None):
            return ret
        return _mpmath_utils_call(_mpmath_legenq, n, m, x, parent=parent)

    def eval_recursive(self, n, m, x, **kwds):
        """Evaluate by the order-lowering recurrence.

        Base cases are m == n + 1 and n == 0; otherwise m (and n) are lowered
        until a base case is reached.
        """
        from sage.misc.functional import sqrt
        if ((m == (n + 1)) or (n == 0)):
            # Even m avoids the explicit square root in the denominator.
            if m.mod(2).is_zero():
                denom = ((1 - (x ** 2)) ** (m / 2))
            else:
                denom = (sqrt((1 - (x ** 2))) * ((1 - (x ** 2)) ** ((m - 1) / 2)))
            if (m == (n + 1)):
                return (((((- 1) ** m) * (m - 1).factorial()) * (2 ** n)) / denom)
            else:
                return (((((- 1) ** m) * (m - 1).factorial()) * (((x + 1) ** m) - ((x - 1) ** m))) / (2 * denom))
        else:
            # Recurrence in (n, m) lowering the order by one each step.
            return ((((((n - m) + 1) * x) * gen_legendre_Q(n, (m - 1), x)) - (((n + m) - 1) * gen_legendre_Q((n - 1), (m - 1), x))) / sqrt((1 - (x ** 2))))

    def _derivative_(self, n, m, x, *args, **kwds):
        """Derivative: only d/dx (diff_param != 0) is supported; derivatives
        with respect to the indices n, m are not implemented."""
        diff_param = kwds['diff_param']
        if (diff_param == 0):
            raise NotImplementedError('Derivative w.r.t. to the index is not supported.')
        else:
            return (((((n - m) + 1) * gen_legendre_Q((n + 1), m, x)) - (((n + 1) * x) * gen_legendre_Q(n, m, x))) / ((x ** 2) - 1))
def _catalog_shared_params(module, memo=None, prefix=''): if (memo is None): first_call = True memo = {} else: first_call = False for (name, param) in module._parameters.items(): param_prefix = ((prefix + ('.' if prefix else '')) + name) if (param not in memo): memo[param] = [] memo[param].append(param_prefix) for (name, m) in module._modules.items(): if (m is None): continue submodule_prefix = ((prefix + ('.' if prefix else '')) + name) _catalog_shared_params(m, memo, submodule_prefix) if first_call: return [x for x in memo.values() if (len(x) > 1)]
def f(f_string):
    """Interpolate *f_string* like an f-string, using the caller's scope.

    Grabs the immediate caller's frame and formats the template against its
    globals and locals via the project's Formatter.
    """
    caller_frame = inspect.stack()[1][0]
    formatter = Formatter(caller_frame.f_globals, caller_frame.f_locals)
    return formatter.format(f_string)
def to_pd_datetime(timestamp):
    """Convert a Unix timestamp (in seconds), or a collection of them, to
    pandas datetimes.

    Accepts a ``pd.DatetimeIndex`` (returned unchanged), a scalar int/float,
    an iterable of numbers, or a numeric ``np.ndarray``; any other input is
    handed to ``pd.to_datetime`` as-is.
    """
    if isinstance(timestamp, pd.DatetimeIndex):
        return timestamp
    if isinstance(timestamp, (int, float)):
        # Seconds -> integer milliseconds, preserving sub-second precision.
        return pd.to_datetime(int(timestamp * 1000), unit='ms')
    numeric_iterable = isinstance(timestamp, Iterable) and all(
        isinstance(t, (int, float)) for t in timestamp
    )
    numeric_array = isinstance(timestamp, np.ndarray) and timestamp.dtype in [
        int, np.float32, np.float64
    ]
    if numeric_iterable or numeric_array:
        timestamp = pd.to_datetime(np.asarray(timestamp).astype(float) * 1000, unit='ms')
    return pd.to_datetime(timestamp)
def create_train_val_dataloader(opt, logger):
    """Build train and (optional) val dataloaders from ``opt['datasets']``.

    Returns (train_loader, train_sampler, val_loader, total_epochs,
    total_iters).  NOTE(review): assumes a 'train' phase is always present —
    otherwise train_sampler/total_epochs/total_iters are unbound at the
    return; confirm against callers.
    """
    (train_loader, val_loader) = (None, None)
    for (phase, dataset_opt) in opt['datasets'].items():
        if (phase == 'train'):
            # Optionally repeat the dataset to amortize epoch-restart overhead.
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = build_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio)
            train_loader = build_dataloader(train_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=train_sampler, seed=opt['manual_seed'])
            # Iterations per epoch across all GPUs, used to translate the
            # configured total_iter budget into a number of epochs.
            num_iter_per_epoch = math.ceil(((len(train_set) * dataset_enlarge_ratio) / (dataset_opt['batch_size_per_gpu'] * opt['world_size'])))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil((total_iters / num_iter_per_epoch))
            logger.info(f'''Training statistics: Number of train images: {len(train_set)} Dataset enlarge ratio: {dataset_enlarge_ratio} Batch size per gpu: {dataset_opt['batch_size_per_gpu']} World size (gpu number): {opt['world_size']} Require iter number per epoch: {num_iter_per_epoch} Total epochs: {total_epochs}; iters: {total_iters}.''')
        elif (phase == 'val'):
            # Validation uses no sampler (sequential, un-enlarged).
            val_set = build_dataset(dataset_opt)
            val_loader = build_dataloader(val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
            logger.info(f"Number of val images/folders in {dataset_opt['name']}: {len(val_set)}")
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')
    return (train_loader, train_sampler, val_loader, total_epochs, total_iters)
# Restored '@': the source had a bare decorator-factory call on its own line.
@_get_mesh_stats(mode='generate')
def regular_mesh(n: int = 10, length_x: float = 1.0, length_y: float = 1.0, length_z: Optional[float] = None, diagonal: Literal['left', 'right', 'left/right', 'right/left', 'crossed'] = 'right', comm: Optional[MPI.Comm] = None) -> _typing.MeshTuple:
    """Create a regular 2D rectangle or 3D box mesh with tagged boundaries.

    The shortest side gets ``n`` subdivisions; longer sides are subdivided
    proportionally.  Boundary facets are marked 1/2 (x min/max), 3/4 (y
    min/max) and, in 3D, 5/6 (z min/max).  ``length_z is None`` selects 2D.

    Returns:
        (mesh, subdomains, boundaries, dx, ds, dS) measures/functions tuple.

    Raises:
        InputError: if any supplied length is not positive.
    """
    if length_x <= 0.0:
        raise _exceptions.InputError('cashocs.geometry.regular_mesh', 'length_x', 'length_x needs to be positive')
    if length_y <= 0.0:
        raise _exceptions.InputError('cashocs.geometry.regular_mesh', 'length_y', 'length_y needs to be positive')
    if not ((length_z is None) or (length_z > 0.0)):
        raise _exceptions.InputError('cashocs.geometry.regular_mesh', 'length_z', 'length_z needs to be positive or None (for 2D mesh)')
    n = int(n)
    if comm is None:
        comm = fenics.MPI.comm_world
    if length_z is None:
        sizes = [length_x, length_y]
        dim = 2
    else:
        sizes = [length_x, length_y, length_z]
        dim = 3
    # Scale subdivisions so the shortest edge has n cells and cells stay
    # roughly isotropic.
    size_min = np.min(sizes)
    num_points = [int(np.round((length / size_min) * n)) for length in sizes]
    # NOTE(review): fenics.Point(sizes) relies on Point accepting an
    # iterable of coordinates — confirm against the dolfin API in use.
    if length_z is None:
        mesh = fenics.RectangleMesh(comm, fenics.Point(0, 0), fenics.Point(sizes), num_points[0], num_points[1], diagonal=diagonal)
    else:
        mesh = fenics.BoxMesh(comm, fenics.Point(0, 0, 0), fenics.Point(sizes), num_points[0], num_points[1], num_points[2])
    subdomains = fenics.MeshFunction('size_t', mesh, dim=dim)
    boundaries = fenics.MeshFunction('size_t', mesh, dim=(dim - 1))
    # Tag the axis-aligned boundary facets: 1/2 = x min/max.
    x_min = fenics.CompiledSubDomain('on_boundary && near(x[0], 0, tol)', tol=fenics.DOLFIN_EPS)
    x_max = fenics.CompiledSubDomain('on_boundary && near(x[0], length, tol)', tol=fenics.DOLFIN_EPS, length=sizes[0])
    x_min.mark(boundaries, 1)
    x_max.mark(boundaries, 2)
    # 3/4 = y min/max.
    y_min = fenics.CompiledSubDomain('on_boundary && near(x[1], 0, tol)', tol=fenics.DOLFIN_EPS)
    y_max = fenics.CompiledSubDomain('on_boundary && near(x[1], length, tol)', tol=fenics.DOLFIN_EPS, length=sizes[1])
    y_min.mark(boundaries, 3)
    y_max.mark(boundaries, 4)
    # 5/6 = z min/max (3D only).
    if length_z is not None:
        z_min = fenics.CompiledSubDomain('on_boundary && near(x[2], 0, tol)', tol=fenics.DOLFIN_EPS)
        z_max = fenics.CompiledSubDomain('on_boundary && near(x[2], length, tol)', tol=fenics.DOLFIN_EPS, length=sizes[2])
        z_min.mark(boundaries, 5)
        z_max.mark(boundaries, 6)
    dx = measure.NamedMeasure('dx', mesh, subdomain_data=subdomains)
    ds = measure.NamedMeasure('ds', mesh, subdomain_data=boundaries)
    d_interior_facet = measure.NamedMeasure('dS', mesh)
    return (mesh, subdomains, boundaries, dx, ds, d_interior_facet)
def test_custom_constraints_from_object(tmpdir):
    """End-to-end: a custom constraint class must survive preprocessing,
    fitting, sampling, and a save/load round trip."""
    table = pd.DataFrame({'primary_key': ['user-000', 'user-001', 'user-002'], 'pii_col': ['223 Williams Rd', '75 Waltham St', '77 Mass Ave'], 'numerical_col': [2, 3, 4], 'categorical_col': ['a', 'b', 'a']})
    metadata = SingleTableMetadata()
    metadata.detect_from_dataframe(table)
    metadata.update_column(column_name='pii_col', sdtype='address', pii=True)

    model = GaussianCopulaSynthesizer(metadata, enforce_min_max_values=False, enforce_rounding=False)
    model.add_custom_constraint_class(MyConstraint, 'MyConstraint')
    model.add_constraints([
        {
            'constraint_class': 'MyConstraint',
            'constraint_parameters': {'column_names': ['numerical_col']},
        }
    ])

    # MyConstraint squares the column during the transform step.
    transformed = model.preprocess(table)
    assert all(transformed['numerical_col'] == table['numerical_col'] ** 2)

    model.fit_processed_data(transformed)
    drawn = model.sample(10)
    assert all(drawn['numerical_col'] > 1)

    # The constraint must still be enforced after persisting and reloading.
    model.save(tmpdir / 'test.pkl')
    restored = model.load(tmpdir / 'test.pkl')
    restored_drawn = restored.sample(10)
    assert all(restored_drawn['numerical_col'] > 1)
def set_random_seed(seed):
    """Seed Python's, NumPy's and PyTorch's (CPU and all CUDA) RNGs so runs
    are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def register_emitter(name, emitter_class):
    """Register *emitter_class* in the global emitter registry under *name*.

    Raises:
        RegistrationError: if *name* is already taken.
    """
    if name in _EMITTER_TYPES:
        raise RegistrationError(f"Emitter '{name}' is already registered")
    _EMITTER_TYPES[name] = emitter_class
def get_num_inputs(o):
    """Return the number of tensor-valued arguments of declaration *o* as a
    string, or '*' as soon as a TensorList (variadic) argument is seen."""
    count = 0
    for arg in o['arguments']:
        if arg['type'] == 'TensorList':
            return '*'
        if value_has_tensors(arg):
            count += 1
    return str(count)
class Pct(nn.Module):
    """Point Cloud Transformer (PCT) classifier.

    Pipeline: shared per-point MLP -> two subsample+group stages with local
    feature aggregation -> stacked attention (pt_last) -> feature fusion and
    global max-pool -> three-layer classification head.
    """

    def __init__(self, output_channels=40, dropout=0.5):
        super(Pct, self).__init__()
        # Shared per-point embedding: 3 (xyz) -> 64 -> 64.
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(64)
        # Local aggregation after each subsampling stage (128- then 256-dim).
        self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
        self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
        self.pt_last = Point_Transformer_Last()
        # Fuses concat(attention output, 256-dim skip) = 1280 channels -> 1024.
        self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False), nn.BatchNorm1d(1024), nn.LeakyReLU(negative_slope=0.2))
        # Classification head: 1024 -> 512 -> 256 -> output_channels.
        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=dropout)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=dropout)
        self.linear3 = nn.Linear(256, output_channels)
        # Two-stage subsampling (512 then 256 points, 32 kNN neighbors each).
        self.subsample1 = SubsampleGroup(512, 32, group='knn')
        self.subsample2 = SubsampleGroup(256, 32, group='knn')

    def forward(self, xyz, x):
        # x: per-point features (B, 3, N); xyz: point coordinates.
        (batch_size, _, _) = x.size()
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        # Stage 1: subsample to 512 points and aggregate local neighborhoods.
        (new_xyz, new_feature) = self.subsample1(xyz, x)
        feature_0 = self.gather_local_0(new_feature)
        # Stage 2: subsample to 256 points.
        (new_xyz, new_feature) = self.subsample2(new_xyz, feature_0)
        feature_1 = self.gather_local_1(new_feature)
        # Attention stack, with a skip connection from feature_1.
        x = self.pt_last(feature_1)
        x = torch.cat([x, feature_1], dim=1)
        x = self.conv_fuse(x)
        # Global max pooling over points -> (B, 1024).
        x = F.adaptive_max_pool1d(x, 1).view(batch_size, (- 1))
        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x)
        return x
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1 style)."""

    # Output channels = planes * expansion (no bottleneck widening here).
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv carries the (optional) stride; second preserves size.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = bn(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = bn(planes)
        # Optional projection to match the residual's shape to the main path.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def build_gpr(data: Dataset, search_space: Optional[SearchSpace]=None, kernel_priors: bool=True, likelihood_variance: Optional[float]=None, trainable_likelihood: bool=False, kernel: Optional[gpflow.kernels.Kernel]=None) -> GPR:
    """Build a gpflow GPR model with data-driven defaults.

    The kernel is either supplied explicitly or derived from the empirical
    variance of ``data`` over ``search_space``; exactly one of the two must
    be provided.  The likelihood variance is set from ``likelihood_variance``
    (or the empirical variance) and its trainability is controlled by
    ``trainable_likelihood``.

    Raises:
        ValueError: if neither ``search_space`` nor ``kernel`` is given.
    """
    empirical_mean, empirical_variance, _ = _get_data_stats(data)

    if kernel is None:
        if search_space is None:
            raise ValueError("'build_gpr' function requires one of 'search_space' or 'kernel' arguments, but got neither")
        kernel = _get_kernel(empirical_variance, search_space, kernel_priors, kernel_priors)

    mean = _get_mean_function(empirical_mean)
    assert isinstance(kernel, gpflow.kernels.Kernel)

    model = gpflow.models.GPR(data.astuple(), kernel, mean)
    _set_gaussian_likelihood_variance(model, empirical_variance, likelihood_variance)
    gpflow.set_trainable(model.likelihood, trainable_likelihood)
    return model
class ReadValuesNested(object):
    """Mixin of checks for reading values out of nested structured arrays.

    Subclasses are expected to provide ``self._buffer`` (the raw data),
    ``self._descr`` (the structured dtype) and ``self.multiple_rows``
    (whether the buffer holds one record or two).
    """

    def test_access_top_fields(self):
        """Top-level scalar fields are readable by name with correct dtypes."""
        h = np.array(self._buffer, dtype=self._descr)
        if (not self.multiple_rows):
            # A single record yields a 0-d array.
            assert_((h.shape == ()))
            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
            assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
            assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
        else:
            assert_((len(h) == 2))
            assert_equal(h['x'], np.array([self._buffer[0][0], self._buffer[1][0]], dtype='i4'))
            assert_equal(h['y'], np.array([self._buffer[0][4], self._buffer[1][4]], dtype='f8'))
            assert_equal(h['z'], np.array([self._buffer[0][5], self._buffer[1][5]], dtype='u1'))

    def test_nested1_acessors(self):
        """First-level nested fields ('Info', 'info') are accessible."""
        h = np.array(self._buffer, dtype=self._descr)
        if (not self.multiple_rows):
            assert_equal(h['Info']['value'], np.array(self._buffer[1][0], dtype='c16'))
            assert_equal(h['Info']['y2'], np.array(self._buffer[1][1], dtype='f8'))
            assert_equal(h['info']['Name'], np.array(self._buffer[3][0], dtype='U2'))
            assert_equal(h['info']['Value'], np.array(self._buffer[3][1], dtype='c16'))
        else:
            assert_equal(h['Info']['value'], np.array([self._buffer[0][1][0], self._buffer[1][1][0]], dtype='c16'))
            assert_equal(h['Info']['y2'], np.array([self._buffer[0][1][1], self._buffer[1][1][1]], dtype='f8'))
            assert_equal(h['info']['Name'], np.array([self._buffer[0][3][0], self._buffer[1][3][0]], dtype='U2'))
            assert_equal(h['info']['Value'], np.array([self._buffer[0][3][1], self._buffer[1][3][1]], dtype='c16'))

    def test_nested2_acessors(self):
        """Second-level nested fields ('Info'->'Info2') are accessible."""
        h = np.array(self._buffer, dtype=self._descr)
        if (not self.multiple_rows):
            assert_equal(h['Info']['Info2']['value'], np.array(self._buffer[1][2][1], dtype='c16'))
            assert_equal(h['Info']['Info2']['z3'], np.array(self._buffer[1][2][3], dtype='u4'))
        else:
            assert_equal(h['Info']['Info2']['value'], np.array([self._buffer[0][1][2][1], self._buffer[1][1][2][1]], dtype='c16'))
            assert_equal(h['Info']['Info2']['z3'], np.array([self._buffer[0][1][2][3], self._buffer[1][1][2][3]], dtype='u4'))

    def test_nested1_descriptor(self):
        """Dtype names of first-level nested fields are as declared."""
        h = np.array(self._buffer, dtype=self._descr)
        assert_((h.dtype['Info']['value'].name == 'complex128'))
        assert_((h.dtype['Info']['y2'].name == 'float64'))
        # Python 2 reported unicode dtypes differently; kept for legacy runs.
        if (sys.version_info[0] >= 3):
            assert_((h.dtype['info']['Name'].name == 'str256'))
        else:
            assert_((h.dtype['info']['Name'].name == 'unicode256'))
        assert_((h.dtype['info']['Value'].name == 'complex128'))

    def test_nested2_descriptor(self):
        """Dtype names of second-level nested (sub-array) fields are void types."""
        h = np.array(self._buffer, dtype=self._descr)
        assert_((h.dtype['Info']['Info2']['value'].name == 'void256'))
        assert_((h.dtype['Info']['Info2']['z3'].name == 'void64'))
def findfactor(cp1, cp2):
    """Return the factor F minimizing rms(cp1 - F * cp2) for two equally
    sized fragments of 16-bit samples.

    Raises:
        error: if the fragments are odd-sized or of different lengths.
    """
    size = 2  # bytes per 16-bit sample
    if len(cp1) % 2 != 0:
        raise error('Strings should be even-sized')
    if len(cp1) != len(cp2):
        raise error('Samples should be same size')
    sample_count = _sample_count(cp1, size)
    # Least-squares solution: F = sum(a_i * r_i) / sum(r_i ** 2).
    return _sum2(cp1, cp2, sample_count) / _sum2(cp2, cp2, sample_count)
def process(spans, length, use_fine_grained=False):
    """Fill the gaps inside nested labeled spans with filler spans.

    ``spans`` is a list of (start, end, label) triples describing a (possibly
    nested) bracketing.  For each parent span, the stretches not covered by
    its children get a filler span labeled 'NULL' — or
    '<parent-fine-label><>' when ``use_fine_grained`` — and the combined list
    is returned sorted by position.
    """

    def compare(a, b):
        # Sort by start ascending; for equal starts, the wider span first,
        # so parents precede their children.
        if (a[0] > b[0]):
            return 1
        elif (a[0] == b[0]):
            if (a[1] > b[1]):
                return (- 1)
            else:
                return 1
        else:
            return (- 1)

    def compare2(a, b):
        # Final ordering: disjoint spans by position; a span nested inside
        # another sorts after... containment determines the sign.  Raises
        # ValueError for partially overlapping spans (must not happen).
        if (int(a[0]) >= int(b[1])):
            return 1
        elif (b[0] <= a[0] <= a[1] <= b[1]):
            return (- 1)
        elif (a[0] <= b[0] <= b[1] <= a[1]):
            return 1
        elif (b[0] >= a[1]):
            return (- 1)
        else:
            raise ValueError

    sentence_len = length  # NOTE(review): unused — kept for interface parity?
    results = []
    spans.sort(key=cmp_to_key(compare))
    idx = (- 1)

    def helper(nest=False):
        # Consumes spans[idx+1] and, recursively, all spans nested inside it,
        # appending filler spans for the uncovered stretches to `results`.
        nonlocal idx
        idx += 1
        if (idx > (len(spans) - 1)):
            return
        p = spans[idx]
        (i, j, label) = p
        children = []
        # Collect direct children: spans fully contained in p.
        while (((idx + 1) < len(spans)) and (i <= spans[(idx + 1)][0]) and (spans[(idx + 1)][1] <= j)):
            children.append(spans[(idx + 1)])
            helper(True)
        for c in range(len(children)):
            label = ((p[2].split('+')[(- 1)] + '<>') if use_fine_grained else 'NULL')
            # Gap between the last child and the parent's right edge.
            if ((c == (len(children) - 1)) and (children[(- 1)][1] < p[1])):
                results.append((children[(- 1)][1], p[1], label))
            if (c == 0):
                # Gap between the parent's left edge and the first child.
                if (children[c][0] > p[0]):
                    results.append((p[0], children[c][0], label))
            elif (children[c][0] > children[(c - 1)][1]):
                # Gap between two consecutive children.
                results.append((children[(c - 1)][1], children[c][0], label))
        if (nest is False):
            if (idx < (len(spans) - 1)):
                if (spans[(idx + 1)][0] > j):
                    # NOTE(review): `assert ValueError` is a no-op (a truthy
                    # class); this was presumably meant to be
                    # `raise ValueError` — confirm intended behavior.
                    assert ValueError
            helper(False)
        return

    helper()
    spans.extend(results)
    spans.sort(key=cmp_to_key(compare2))
    return spans
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
    """Return a ``network_fn(images)`` closure for the named slim network.

    The closure applies the network's arg_scope (with the given weight decay)
    and forwards ``num_classes``/``is_training``.  Its ``default_image_size``
    attribute mirrors the wrapped network's, when present.

    Raises:
        ValueError: if *name* is not a known network.
    """
    import functools  # local import keeps this fix self-contained

    if name not in networks_map:
        raise ValueError('Name of network unknown %s' % name)
    arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
    func = networks_map[name]

    # Restored decorator: the source contained a stray `(func)` where
    # `@functools.wraps(func)` belongs, so network_fn kept a generic name.
    @functools.wraps(func)
    def network_fn(images):
        with slim.arg_scope(arg_scope):
            return func(images, num_classes, is_training=is_training)

    if hasattr(func, 'default_image_size'):
        network_fn.default_image_size = func.default_image_size
    return network_fn
# Restored '@options': the source had a bare `(base=10)` residue where the
# decorator belongs; it supplies the default logarithm base for the x-axis.
@options(base=10)
def plot_semilogx(funcs, *args, **kwds):
    """Plot *funcs* with a logarithmic x-axis (base from the ``base`` option,
    default 10); all other arguments are forwarded to ``plot``."""
    return plot(funcs, *args, scale='semilogx', **kwds)
def matrix_from_pose_msg(pose):
    """Convert a geometry_msgs Pose into a 4x4 homogeneous transform by
    composing its translation and rotation matrices."""
    translation = matrix_from_point_msg(pose.position)
    rotation = matrix_from_quaternion_msg(pose.orientation)
    return concatenate_matrices(translation, rotation)
def load_tr_te_data(csv_file_tr, csv_file_te, n_items):
    """Load train/test user-item interactions into aligned binary CSR matrices.

    Both CSV files must have 'uid' and 'sid' columns.  User ids are shifted
    so the smallest uid across both files maps to row 0; both matrices share
    the shape (num_users, n_items) with a 1.0 at every (uid, sid) pair.
    """
    tp_tr = pd.read_csv(csv_file_tr)
    tp_te = pd.read_csv(csv_file_te)

    start_idx = min(tp_tr['uid'].min(), tp_te['uid'].min())
    end_idx = max(tp_tr['uid'].max(), tp_te['uid'].max())
    n_users = (end_idx - start_idx) + 1

    def to_binary_csr(tp):
        # Row index is the shifted uid, column index the item id.
        rows = tp['uid'] - start_idx
        cols = tp['sid']
        return sparse.csr_matrix(
            (np.ones_like(rows), (rows, cols)),
            dtype='float64',
            shape=(n_users, n_items),
        )

    return (to_binary_csr(tp_tr), to_binary_csr(tp_te))
# Restored decorator: the source had a bare `('/start', method='POST')`
# residue (a syntax error); the kwargs match bottle's route() signature.
@route('/start', method='POST')
def start_analyzer():
    """HTTP endpoint: parse the JSON request body and start a measurement."""
    req = json.loads(request.body.read().decode('utf-8'))
    measurer.start(req)
class ClassificationMetric(EvaluateInstancesMetric):
    """Macro/micro F1 over generated classification labels.

    When ``delimiter`` is set the task is multi-label: model completions are
    split on the delimiter and instances may carry several correct
    references.  Only plain generation adapters are supported (no multiple
    choice, no calibration).
    """

    def __init__(self, delimiter: Optional[str] = None):
        # None/'' -> single-label; any non-empty string -> multi-label mode.
        self.delimiter = delimiter

    def is_multi_label(self) -> bool:
        return bool(self.delimiter)

    def evaluate_instances(self, request_states: List[RequestState]) -> List[Stat]:
        """Compute classification_macro_f1 / classification_micro_f1 stats.

        Raises:
            ValueError: for multiple-choice adapters, calibration requests,
                or results without exactly one completion.
        """
        y_pred: List[List[str]] = []
        y_true: List[List[str]] = []
        for request_state in request_states:
            if (request_state.reference_index is not None):
                raise ValueError('ClassificationMetric does not support multiple choice separate adapters')
            if (request_state.request_mode == 'calibration'):
                raise ValueError('ClassificationMetric does not support calibration requests')
            assert (request_state.result is not None)
            if (len(request_state.result.completions) != 1):
                raise ValueError('Result must contain exactly one completion')
            if request_state.output_mapping:
                raise ValueError('ClassificationMetric does not support multiple choice adapters')
            references = request_state.instance.all_correct_references
            # Single-label mode demands exactly one gold reference.
            if (not self.is_multi_label()):
                assert (len(references) == 1)
            correct_ref_texts = [normalize_text(ref.output.text) for ref in references if ref.output.text]
            y_true.append(correct_ref_texts)
            input_text = request_state.result.completions[0].text
            # Multi-label: split the completion into one prediction per label.
            predictions = (input_text.split(self.delimiter) if self.is_multi_label() else [input_text])
            y_pred.append([normalize_text(pred) for pred in predictions if pred])
        # The label set is taken from the gold references only; predictions
        # outside it are ignored by the binarizer.
        labels: List[str] = list(set((y for ys in y_true for y in ys)))
        mlb = MultiLabelBinarizer().fit([labels])
        # NOTE: y_true/y_pred are rebound from List[List[str]] to binary
        # indicator matrices here.
        y_true = mlb.transform(y_true)
        y_pred = mlb.transform(y_pred)
        return [Stat(MetricName('classification_macro_f1')).add(f1_score(y_pred=y_pred, y_true=y_true, average='macro')), Stat(MetricName('classification_micro_f1')).add(f1_score(y_pred=y_pred, y_true=y_true, average='micro'))]
class MaskedLMTrainer(Trainer):
    """Trainer whose per-batch loss is the masked-language-modelling loss
    (optionally joint with next-sentence prediction when the batch carries
    paired labels)."""

    def __init__(self, model: torch.nn.Module, **kwargs):
        super().__init__(model, **kwargs)

    def forward_batch(self, batch):
        """Run the model on one batch and return a scalar loss tensor."""
        # `mlm_att_mask` marks padding, while the model expects 1 for real
        # tokens — hence the inversion.
        inputs = {
            'input_ids': batch.mlm_tok_ids,
            'attention_mask': (~batch.mlm_att_mask).long(),
            'labels': batch.mlm_lab_ids,
        }
        if hasattr(batch, 'paired_lab_ids'):
            # Joint MLM + next-sentence-prediction batch.
            inputs['token_type_ids'] = batch.tok_type_ids
            inputs['next_sentence_label'] = batch.paired_lab_ids
        loss = self.model(**inputs)['loss']
        # Reduce per-device losses (e.g. from DataParallel) to a scalar.
        return loss.mean() if loss.dim() > 0 else loss
class Texfunc(): def __init__(self, ttype=0, center=(0, 0, 0), rotate=(0, 0, 0), scale=(1, 1, 1), imagefile=''): self._ttype = ttype (x, y, z) = center self._center = (float(x), float(y), float(z)) (x, y, z) = rotate self._rotate = (float(x), float(y), float(z)) (x, y, z) = scale self._scale = (float(x), float(y), float(z)) self._imagefile = imagefile def str(self): if (self._ttype == 0): return '0' elif ((self._ttype < 7) and (self._ttype > 0)): return ('%d center %s rotate %s scale %s' % (self._ttype, tostr(self._center), tostr(self._rotate), tostr(self._scale))) elif (self._ttype < 9): return ('%d %s center %s rotate %s scale %s' % (self._ttype, self._imagefile, tostr(self._center), tostr(self._rotate), tostr(self._scale))) elif (self._ttype == 9): return ('%d %s center %s rotate %s scale %s\n uaxis 1.0 0.0 0.0\n vaxis 0.0 1.0 0.0' % (self._ttype, self._imagefile, tostr(self._center), tostr(self._rotate), tostr(self._scale))) else: raise ValueError
def test_ambiguous_schedule():
    """Mixing a GPU-global-storage array with a default-storage array must be
    rejected when resolving default schedules/storage.

    Restored garbling: the source lost its '@' characters — the storage
    annotation operator (`dace.float32[10, 10] @ dace.StorageType.GPU_Global`)
    and the `@dace.program` decorator implied by the `to_sdfg()` call.
    """
    @dace.program
    def add(a: dace.float32[10, 10] @ dace.StorageType.GPU_Global,
            b: dace.float32[10, 10]):
        return a + b

    with pytest.raises(InvalidSDFGNodeError):
        sdfg = add.to_sdfg()
        set_default_schedule_and_storage_types(sdfg, None)
class TestReaderWithLimit(TestCase):
    """Tests for limited readers (count- and time-based) in multi-threaded
    pipelines."""

    def test_runtime_threads(self):
        """Counters created at task/instance init must total correctly when a
        pipeline runs with multiple runtime threads."""
        ws = workspace.C.Workspace()
        session = LocalSession(ws)
        src_ds = make_source_dataset(ws)
        totals = ([None] * 3)

        def proc(rec):
            # Global counters live for the whole task; task_counter is
            # per-thread (instance) and is folded into counter2 at exit.
            with ops.task_init():
                counter1 = ops.CreateCounter([], ['global_counter'])
                counter2 = ops.CreateCounter([], ['global_counter2'])
                counter3 = ops.CreateCounter([], ['global_counter3'])
            with ops.task_instance_init():
                task_counter = ops.CreateCounter([], ['task_counter'])
            ops.CountUp(counter1)
            ops.CountUp(task_counter)
            with ops.task_instance_exit():
                # Drain the per-thread count into counter2; counter3 counts
                # one tick per thread instance.
                with ops.loop(ops.RetrieveCount(task_counter)):
                    ops.CountUp(counter2)
                ops.CountUp(counter3)
            with ops.task_exit():
                totals[0] = final_output(ops.RetrieveCount(counter1))
                totals[1] = final_output(ops.RetrieveCount(counter2))
                totals[2] = final_output(ops.RetrieveCount(counter3))
            return rec

        # Unlimited read: all 100 records pass through 8 threads.
        with TaskGroup() as tg:
            pipe(src_ds.reader(), num_runtime_threads=8, processor=proc)
        session.run(tg)
        self.assertEqual(totals[0].fetch(), 100)
        self.assertEqual(totals[1].fetch(), 100)
        self.assertEqual(totals[2].fetch(), 8)

        # Chained queues with a 25-record limit in the middle; the final
        # stage uses 6 threads.
        with TaskGroup() as tg:
            q1 = pipe(src_ds.reader(), num_runtime_threads=2)
            q2 = pipe(ReaderWithLimit(q1.reader(), num_iter=25), num_runtime_threads=3)
            pipe(q2, processor=proc, num_runtime_threads=6)
        session.run(tg)
        self.assertEqual(totals[0].fetch(), 25)
        self.assertEqual(totals[1].fetch(), 25)
        self.assertEqual(totals[2].fetch(), 6)

    def _test_limit_reader_init_shared(self, size):
        """Create workspace/session plus matching source and destination
        datasets of the given size."""
        ws = workspace.C.Workspace()
        session = LocalSession(ws)
        src_ds = make_source_dataset(ws, size=size)
        dst_ds = make_destination_dataset(ws, src_ds.content().clone_schema())
        return (ws, session, src_ds, dst_ds)

    def _test_limit_reader_shared(self, reader_class, size, expected_read_len, expected_read_len_threshold, expected_finish, num_threads, read_delay, **limiter_args):
        """Run a limited reader end-to-end and check how much was read.

        ``expected_read_len_threshold`` tolerates timing jitter for the
        time-limited reader; ``expected_finish`` asserts whether the source
        was exhausted.
        """
        (ws, session, src_ds, dst_ds) = self._test_limit_reader_init_shared(size)
        with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
            # Optionally slow each read down to make time limits observable.
            if (read_delay > 0):
                reader = reader_class(ReaderWithDelay(src_ds.reader(), read_delay), **limiter_args)
            else:
                reader = reader_class(src_ds.reader(), **limiter_args)
            pipe(reader, dst_ds.writer(), num_runtime_threads=num_threads)
        session.run(tg)
        read_len = len(sorted(ws.blobs[str(dst_ds.content().label())].fetch()))
        self.assertGreaterEqual(read_len, (expected_read_len - expected_read_len_threshold))
        self.assertLessEqual(read_len, (expected_read_len + expected_read_len_threshold))
        # Records must be exactly 0..read_len-1 (no gaps, no duplicates).
        self.assertEqual(sorted(ws.blobs[str(dst_ds.content().label())].fetch()), list(range(read_len)))
        self.assertEqual(ws.blobs[str(reader.data_finished())].fetch(), expected_finish)

    def test_count_limit_reader_without_limit(self):
        """num_iter=None reads everything and reports the source finished."""
        self._test_limit_reader_shared(ReaderWithLimit, size=100, expected_read_len=100, expected_read_len_threshold=0, expected_finish=True, num_threads=8, read_delay=0, num_iter=None)

    def test_count_limit_reader_with_zero_limit(self):
        """num_iter=0 reads nothing and the source is not finished."""
        self._test_limit_reader_shared(ReaderWithLimit, size=100, expected_read_len=0, expected_read_len_threshold=0, expected_finish=False, num_threads=8, read_delay=0, num_iter=0)

    def test_count_limit_reader_with_low_limit(self):
        """A limit below the source size truncates the read."""
        self._test_limit_reader_shared(ReaderWithLimit, size=100, expected_read_len=10, expected_read_len_threshold=0, expected_finish=False, num_threads=8, read_delay=0, num_iter=10)

    def test_count_limit_reader_with_high_limit(self):
        """A limit above the source size reads everything."""
        self._test_limit_reader_shared(ReaderWithLimit, size=100, expected_read_len=100, expected_read_len_threshold=0, expected_finish=True, num_threads=8, read_delay=0.1 if False else 0, duration=0) if False else self._test_limit_reader_shared(ReaderWithLimit, size=100, expected_read_len=100, expected_read_len_threshold=0, expected_finish=True, num_threads=8, read_delay=0, num_iter=110)

    def test_time_limit_reader_without_limit(self):
        """duration=0 disables the time limit: everything is read."""
        self._test_limit_reader_shared(ReaderWithTimeLimit, size=100, expected_read_len=100, expected_read_len_threshold=0, expected_finish=True, num_threads=8, read_delay=0.1, duration=0)

    def test_time_limit_reader_with_short_limit(self):
        """A short duration reads roughly threads*duration/delay records;
        a wide threshold absorbs scheduling jitter."""
        size = 50
        num_threads = 4
        sleep_duration = 0.25
        duration = 1
        expected_read_len = int(round(((num_threads * duration) / sleep_duration)))
        # Shave off a fraction of one sleep so the limit lands mid-read.
        duration = (duration - (0.25 * sleep_duration))
        self._test_limit_reader_shared(ReaderWithTimeLimit, size=size, expected_read_len=(expected_read_len / 2), expected_read_len_threshold=(expected_read_len / 2), expected_finish=False, num_threads=num_threads, read_delay=sleep_duration, duration=duration)

    def test_time_limit_reader_with_long_limit(self):
        """A generous duration reads the whole source."""
        self._test_limit_reader_shared(ReaderWithTimeLimit, size=50, expected_read_len=50, expected_read_len_threshold=0, expected_finish=True, num_threads=4, read_delay=0.2, duration=10)
def _pil_interp(method):
    """Map an interpolation-method name to the PIL resampling constant,
    defaulting to bilinear for unrecognized names."""
    resampling_by_name = {
        'bicubic': Image.BICUBIC,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING,
    }
    return resampling_by_name.get(method, Image.BILINEAR)
class BertGFPBrightness(flexs.Landscape):
    """GFP fluorescence landscape scored by a pretrained TAPE ProteinBert
    value-prediction model (downloaded on first use)."""

    # Wild-type avGFP amino-acid sequence.
    gfp_wt_sequence = 'MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTLVTTLSYGVQCFSRYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSALSKDPNEKRDHMVLLEFVTAAGITHGMDELYK'
    # Alternative starting sequences at edit distances 10/18/31 from WT.
    starts = {'ed_10_wt': 'MSKGEVLFTGVVPILVEMDGDVNGHKFSVSGEGEGDATYGKLTTKFTCTTGKLPVPWPTKVTTLSYRVQCFSRYPDVMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVQFEGDTLVNRIELKGIDFKEDGNILGHKLEYNYNSHNVYIMADKQKNGIKVNFKIRHNIEDGSVQLADHYQQNTPIGDGPVLLPDNHYLSTQSALSKDPNIKRDCMVLLEFVTAAGITHGMDELYK', 'ed_18_wt': 'MSKGEHLFTGVVPILVELDGDVNGKKFSVSGEGQGDATYGKLTLKFICTTAKVHVPWCTLVTTLSYGVQCFSRYPDHMKQHDFFKGAMPEGYVQERTIFFKDIGNYKLRAEVKFEGDTLVNRIELKGIDFKEDGNIHGHKLEYNYNSQNVYIMASKQKNGIKVNFKIRLNIEDGSVQLAEHYQVNTPIGDFPVLLPDNHKLSAQSADSKDPNEKRDHMHLLEFVTAVGITHGMDELYK', 'ed_31_wt': 'MSKGEELFSGVQPILVELDGCVNGHKFSVSGEGEIDATYGKLTLKFICTTWKLPMPWPCLVTFGSYGVQCFSRYRDHPKQHDFFKSAVPEGYVQERTIFMKDDLLYKTRAEVKFEGLTLVNRIELKGKDFKEDGNILGHKLEYNYNSHCVYPMADWNKNWIKVNSKIRLPIEDGSVILADHYQQNTPIGDQPVLLPENHYLSTQSALSKDPEEKGDLMVLLEFVTAAGITHGMDELYK'}

    def __init__(self):
        super().__init__(name='GFP')
        # Download the pretrained model files on first use.
        if (not os.path.exists('fluorescence-model')):
            os.mkdir('fluorescence-model')
        # NOTE(review): the base-URL string literal below was scrubbed from
        # the source (left unterminated) — restore the fluorescence-model
        # download URL before use.
        gfp_model_path = '
        for file_name in ['args.json', 'checkpoint.bin', 'config.json', 'pytorch_model.bin']:
            print('Downloading', file_name)
            response = requests.get((gfp_model_path + file_name))
            with open(f'fluorescence-model/{file_name}', 'wb') as f:
                f.write(response.content)
        self.tokenizer = tape.TAPETokenizer(vocab='iupac')
        self.device = ('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = tape.ProteinBertForValuePrediction.from_pretrained('fluorescence-model').to(self.device)

    def _fitness_function(self, sequences):
        """Score sequences in batches of ~32 and return a flat float array."""
        sequences = np.array(sequences)
        scores = []
        for subset in np.array_split(sequences, max(1, (len(sequences) // 32))):
            encoded_seqs = torch.tensor([self.tokenizer.encode(seq) for seq in subset]).to(self.device)
            # NOTE(review): .numpy() on a CUDA tensor raises — this likely
            # needs .cpu() before .numpy() when self.device == 'cuda'; confirm.
            scores.append(self.model(encoded_seqs)[0].detach().numpy().astype(float).reshape((- 1)))
        return np.concatenate(scores)
def pair_process(item, strict_one=True):
    """Collapse *item* to a single value.

    Scalars are returned unchanged.  Iterables must hold one repeated value,
    whose single copy is returned.  On a mismatch, raise ValueError when
    *strict_one* is True; otherwise print a warning (once) and return the
    first element.
    """
    if hasattr(item, '__iter__'):
        # Materialize so plain iterators/generators work too (the original
        # indexed item[0], which fails for non-indexable iterables).
        items = list(item)
        first = items[0]
        if any(i != first for i in items):
            if strict_one:
                raise ValueError('number in item {} must be the same'.format(item))
            print('IMPORTANT WARNING: number in item {} must be the same'.format(item))
        return first
    return item
class TopicDrivenMaskedLM(RobertaPreTrainedModel):
    """RoBERTa encoder with a masked-language-modelling head.

    Mirrors HuggingFace's RobertaForMaskedLM: encoder (no pooling layer)
    followed by an LM head whose decoder provides the output embeddings.
    """

    def __init__(self, config):
        super().__init__(config)
        # MLM requires bi-directional attention, hence no decoder config.
        if config.is_decoder:
            logger.warning('If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.lm_head = RobertaLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        # Used by HF utilities for weight tying / embedding resizing.
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Encode inputs and score every position over the vocabulary.

        When ``labels`` is given, also computes the cross-entropy MLM loss
        (positions labeled -100 are ignored by CrossEntropyLoss's default).
        Returns a MaskedLMOutput, or a tuple when ``return_dict`` is falsy.
        """
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)
        masked_lm_loss = None
        if (labels is not None):
            loss_fct = CrossEntropyLoss()
            # Flatten (batch, seq, vocab) vs (batch, seq) for the loss.
            masked_lm_loss = loss_fct(prediction_scores.view((- 1), self.config.vocab_size), labels.view((- 1)))
        if (not return_dict):
            # Legacy tuple output: (loss?, scores, hidden_states?, attentions?).
            output = ((prediction_scores,) + outputs[2:])
            return (((masked_lm_loss,) + output) if (masked_lm_loss is not None) else output)
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def test_set_tokens(doc):
    """Token-level NER labels written with set(...) must round-trip
    unchanged through get(...)."""
    labels = ['O', 'ARTIFACT', 'ARTIFACT', 'O', 'CAT']
    doc.set(fields=NER, contents=labels, to_token=True)
    assert doc.get(NER, from_token=True) == labels
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone returning intermediate feature maps.

    Unlike the stock torchvision ResNet, ``forward`` does not classify:
    it collects the stem output, the four stage outputs, and the global
    average pool into a list (self.fc is constructed but unused there).
    """

    def __init__(self, in_channels, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per downsampling stage (layer2..layer4): replace the
            # stride-2 conv with a dilated conv to keep spatial resolution.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve resolution (or dilate instead).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Standard torchvision initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual branch so blocks start as identity.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of *blocks* blocks at width *planes*."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (keeps output resolution).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Project the identity path when shape or channel count changes.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Collect multi-scale features: post-stem (pre-pool), stages 1-4,
        # and the global average pool.
        out = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        out.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        out.append(x)
        x = self.layer2(x)
        out.append(x)
        x = self.layer3(x)
        out.append(x)
        x = self.layer4(x)
        out.append(x)
        x = self.avgpool(x)
        out.append(x)
        return out
class NetParameter(_message.Message):
    """Auto-generated protocol buffer message class for NetParameter.

    The reflection metaclass builds all fields and (de)serialization logic
    from the compiled descriptor; do not edit by hand.
    """
    # Python 2 metaclass idiom used by old protobuf codegen.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER
class Block(nn.Module):
    """Depthwise-separable convolution block (MobileNet style)."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # Depthwise 3x3: one filter per input channel (groups=in_planes).
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Pointwise 1x1: mixes channels and changes width to out_planes.
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        depthwise = F.relu(self.bn1(self.conv1(x)))
        pointwise = F.relu(self.bn2(self.conv2(depthwise)))
        return pointwise
class UnionFind():
    """Union-find (disjoint set) over hashable elements with path compression."""

    def __init__(self, elements) -> None:
        # Every element starts as its own root.
        self.ids = {e: e for e in elements}

    def add_element(self, e):
        """Add *e* as a singleton set; return False if it was already present."""
        if (e in self.ids):
            return False
        self.ids.update({e: e})
        return True

    def find(self, e):
        """Return the root of *e*'s set, compressing *e*'s link to the root."""
        prev = e
        curr = self.ids[e]
        while (prev != curr):
            prev = curr
            curr = self.ids[curr]
        self.ids[e] = curr
        return curr

    def union(self, e, f):
        """Merge the sets of *e* and *f*; *f* becomes (part of) the merged root chain.

        Fix: the original auto-added an unseen *f* but raised KeyError for an
        unseen *e*; both sides are now auto-added for consistency.
        """
        if (e not in self.ids):
            self.add_element(e)
        if (f not in self.ids):
            self.add_element(f)
        self.ids[self.find(e)] = f
def create_einsum(state: dace.SDFGState, map_ranges, code, inputs, outputs=None, wcr_outputs=None):
    """Add a mapped tasklet implementing an einsum-like computation to *state*.

    inputs/outputs/wcr_outputs are lists of (access_node, access_range) pairs;
    wcr_outputs accumulate with addition via write-conflict resolution.
    """
    outputs = outputs or []
    wcr_outputs = wcr_outputs or []
    input_nodes = {}
    in_memlets = {}
    for (node, rng) in inputs:
        input_nodes[node.data] = node
        in_memlets[node.data + '_inp'] = dace.Memlet.simple(node.data, rng)
    output_nodes = {}
    out_memlets = {}
    for (node, rng) in outputs:
        output_nodes[node.data] = node
        out_memlets[node.data + '_out'] = dace.Memlet.simple(node.data, rng)
    for (node, rng) in wcr_outputs:
        output_nodes[node.data] = node
        # Write-conflict resolution: sum-accumulate concurrent writes.
        out_memlets[node.data + '_out'] = dace.Memlet.simple(node.data, rng, wcr_str='lambda x, y: x + y')
    state.add_mapped_tasklet(name='einsum_tasklet', input_nodes=input_nodes, output_nodes=output_nodes, map_ranges=map_ranges, inputs=in_memlets, code=code, outputs=out_memlets, external_edges=True)
class LeanPreprocessedIf(LeanPreprocessedWithAsserts):
    """Preprocessed conditional instruction comparing two expressions."""

    # Left- and right-hand sides of the comparison.
    expr_a: Expression
    expr_b: Expression
    # Presumably: True when the branch condition is equality, False for
    # inequality — TODO confirm against the instruction builder.
    cond_eq: bool
    # The associated jump instruction, when one exists.
    jump_instr: Optional[LeanPreprocessedJumpToLabelInstruction]

    def get_exprs(self) -> List[Expression]:
        """Return the expressions referenced by this instruction."""
        return [self.expr_a, self.expr_b]
class TFModelUtilsTest(unittest.TestCase):
    """Tests for TF model utilities; skipped when TensorFlow is unavailable."""

    # NOTE(review): the original had a syntactically invalid bare
    # `.skipif((...), reason=...)` line — evidently a skip decorator mangled
    # during extraction. Restored with the stdlib equivalent so the test is
    # skipped (not errored) when TensorFlow has not been imported.
    @unittest.skipIf('tensorflow' not in sys.modules, 'requires TensorFlow')
    def test_model_from_pretrained(self):
        # Placeholder body, unchanged from the original.
        pass
def get_halluci(sess):
    """Collect the step indices whose observation flags an invalid action.

    Each step's ``step_id`` is expected to embed exactly two numbers
    (session index, step index); only the step index is recorded.
    """
    flagged = []
    for step in sess:
        if 'step_id' not in step:
            continue
        (_sess_idx, step_idx) = re.findall('\\d+', step['step_id'])
        if 'Invalid action!' in step.get('observation', ''):
            flagged.append(step_idx)
    return flagged
class LegacyMatrixGroupElement(MatrixGroupElement_gap):
    """Unpickling shim for matrix group elements stored under the legacy class layout."""

    def __setstate__(self, state):
        # state = (parent, instance __dict__); the matrix was pickled under the
        # old name-mangled attribute of MatrixGroupElement.
        parent = state[0]
        m = state[1]['_MatrixGroupElement__mat']
        # Coerce into the parent's matrix space, then rebuild the element
        # without re-checking membership.
        m = parent.matrix_space()(m)
        self.__init__(parent, m, check=False)
def start_virtual_display() -> None:
    """Start a pyvirtualdisplay virtual display (headless rendering).

    Raises a helpful ImportError (with install hint) when the optional
    pyvirtualdisplay package is missing.
    """
    try:
        from pyvirtualdisplay.display import Display
        virtual_display = Display()
        virtual_display.start()
    except ImportError as e:
        raise ImportError('pyvirtualdisplay is not installed.\n$ pip install pyvirtualdisplay') from e
def load_table(dataset: str, version: str, overwrite: bool=False) -> Table:
    """Load a cached Table from disk, or build and persist a fresh one.

    With overwrite=True the cache is ignored and rebuilt.
    """
    table_path = DATA_ROOT / dataset / f'{version}.table.pkl'
    if table_path.is_file() and not overwrite:
        L.info('table exists, load...')
        with open(table_path, 'rb') as f:
            cached = pickle.load(f)
        L.info(f'load finished: {cached}')
        return cached
    fresh = Table(dataset, version)
    L.info('dump table to disk...')
    dump_table(fresh)
    return fresh
class DLoss(nn.Module):
    """Hinge loss for the discriminator in GAN training."""

    def __init__(self):
        super(DLoss, self).__init__()

    def forward(self, real_vloss, fake_vloss):
        # Hinge terms: penalize real scores below +1 and fake scores above -1.
        real_term = torch.relu(1.0 - real_vloss).mean()
        fake_term = torch.relu(1.0 + fake_vloss).mean()
        return real_term + fake_term
class SequentialDropout(nn.Module):
    """Dropout that reuses one Bernoulli mask across successive calls.

    The mask is (re)drawn on the first forward after ``end_of_sequence()``;
    later forwards reuse it, so every timestep of a sequence drops the same
    units (variational / "locked" dropout style).
    """

    def __init__(self, p=0.5):
        super(SequentialDropout, self).__init__()
        if ((p < 0) or (p > 1)):
            raise ValueError('dropout probability has to be between 0 and 1, but got {}'.format(p))
        # Drop probability.
        self.p = p
        # When True, the next forward samples a fresh mask.
        self.restart = True

    def _make_noise(self, input):
        # Uninitialized tensor matching input's shape/type (legacy Variable API).
        return Variable(input.data.new().resize_as_(input.data))

    def forward(self, input):
        # Identity in eval mode or when p == 0.
        if ((self.p > 0) and self.training):
            if self.restart:
                self.noise = self._make_noise(input)
                # Inverted dropout: keep with prob (1-p), scale kept units by 1/(1-p).
                self.noise.data.bernoulli_((1 - self.p)).div_((1 - self.p))
                if (self.p == 1):
                    self.noise.data.fill_(0)
                self.noise = self.noise.expand_as(input)
                self.restart = False
            return input.mul(self.noise)
        return input

    def end_of_sequence(self):
        """Mark the sequence finished so the next forward resamples the mask."""
        self.restart = True

    def backward(self, grad_output):
        # Legacy-style manual backward: the gradient passes through the same
        # mask, and the sequence is considered finished.
        self.end_of_sequence()
        if ((self.p > 0) and self.training):
            return grad_output.mul(self.noise)
        else:
            return grad_output

    def __repr__(self):
        return (type(self).__name__ + '({:.4f})'.format(self.p))
def output_as_str(string_like):
    """Decode a bytes-like value to ``str`` (UTF-8); pass through str and None.

    Uses ``isinstance`` instead of the original ``type(...) != str``
    comparison — the idiomatic, subclass-safe type check.
    """
    if string_like is None or isinstance(string_like, str):
        return string_like
    return string_like.decode('utf-8')
class SegmentCorpus(Job):
    """Sisyphus job that splits a corpus' segment names into N segment files."""

    def __init__(self, corpus_path, num_segments, use_fullname=False):
        self.set_vis_name('Segment Corpus')
        self.corpus_path = corpus_path
        self.num_segments = num_segments
        # Write fully-qualified segment names when True, bare names otherwise.
        self.use_fullname = use_fullname
        # One output file per chunk: segments.0 ... segments.<N-1>.
        self.segment_files = [self.output_path(('segments.%d' % i)) for i in range(num_segments)]

    def tasks(self):
        (yield Task('run', resume='run', mini_task=True))

    def run(self):
        c = corpus.Corpus()
        c.load(tk.uncached_path(self.corpus_path))
        all_segments = list(c.segments())
        # Distribute all segments over num_segments roughly equal chunks and
        # write one name per line into the matching output file.
        for (idx, segments) in enumerate(chunks(all_segments, self.num_segments)):
            with open(self.segment_files[idx].get_path(), 'wt') as segment_file:
                for segment in segments:
                    if self.use_fullname:
                        segment_file.write((segment.fullname() + '\n'))
                    else:
                        segment_file.write((segment.name + '\n'))
def motorcycle_data():
    """Load the motorcycle dataset: standardize Y, scale X into [0, 1]."""
    df = pd.read_csv('./data/motor.csv', index_col=0)
    X = df['times'].values.reshape(-1, 1)
    Y = df['accel'].values.reshape(-1, 1)
    Y = (Y - Y.mean()) / Y.std()
    X /= X.max()
    return (X, Y)
def entity_coverage_with_bert_ner(split):
    """Report top-k entity-linking coverage against gold WebQSP s-expressions.

    For each question with at least one high-quality parse, finds the
    smallest k in {1, 3, 5, 10} at which the union of top-k linked entities
    (per mention) covers all entities of some gold parse, then prints the
    cumulative coverage per k.
    """
    dataset = load_json(f'outputs/WebQSP.{split}.expr.json')
    linking_result = load_json(f'stagg/webqsp_{split}-entities.json')
    counted = 0
    all_first_covered = []
    topk_choices = [1, 3, 5, 10]
    for (i, data) in enumerate(dataset):
        # Keep only questions that have at least one 'Good'/'Complete' parse.
        skip = True
        for pidx in range(0, len(data['Parses'])):
            np = data['Parses'][pidx]  # NOTE(review): local `np` shadows any numpy import
            if ((np['AnnotatorComment']['QuestionQuality'] == 'Good') and (np['AnnotatorComment']['ParseQuality'] == 'Complete')):
                skip = False
        if ((len(data['Parses']) == 0) or skip):
            continue
        counted += 1
        gt_s_expr = [parse['SExpr'] for parse in data['Parses']]
        gt_s_expr = [x for x in gt_s_expr if (x != 'null')]
        if (not gt_s_expr):
            continue
        gt_entities_sets = [set(extract_entities(x)) for x in gt_s_expr]
        # NOTE(review): this chained assignment leaves first_coverd bound to
        # the linking-result list when no k covers the gold entities, which
        # would break the later `x <= k` comparison. Presumably a large-int
        # sentinel was intended — confirm against the original script.
        first_coverd = lnk_result = linking_result[data['QuestionId']]
        for k in topk_choices:
            # Union of the top-k candidate ids across all mentions.
            topk_set = set(chain(*[[x['id'] for x in entities_per_mention[:k]] for entities_per_mention in lnk_result]))
            if any([x.issubset(topk_set) for x in gt_entities_sets]):
                first_coverd = k
                break
        all_first_covered.append(first_coverd)
    print('Coverage Table')
    for k in topk_choices:
        print(k, (sum([(x <= k) for x in all_first_covered]) / counted))
class AlexNet(Network):
    """AlexNet defined via the Network fluent-builder DSL."""

    def setup(self):
        # One fluent chain: each call appends a layer and returns the builder.
        # Feeds 'data' through conv1..conv5 (with LRN + max-pool after conv1/2,
        # max-pool after conv5), then fc6/fc7/fc8 and a final softmax 'prob'.
        # Kept on a single line to preserve the original chain verbatim.
        self.feed('data').conv(11, 11, 96, 4, 4, padding='VALID', name='conv1').lrn(2, 2e-05, 0.75, name='norm1').max_pool(3, 3, 2, 2, padding='VALID', name='pool1').conv(5, 5, 256, 1, 1, group=2, name='conv2').lrn(2, 2e-05, 0.75, name='norm2').max_pool(3, 3, 2, 2, padding='VALID', name='pool2').conv(3, 3, 384, 1, 1, name='conv3').conv(3, 3, 384, 1, 1, group=2, name='conv4').conv(3, 3, 256, 1, 1, group=2, name='conv5').max_pool(3, 3, 2, 2, padding='VALID', name='pool5').fc(4096, name='fc6').fc(4096, name='fc7').fc(1000, relu=False, name='fc8').softmax(name='prob')
def conv1d_layer_sentence_representation(sent_wordembeddings):
    """Build a sentence embedding from multi-width 1-D convolutions.

    Runs Conv1D filters of widths 1..max_filter_length over the word
    embeddings, max-pools each feature map, and either sums or concatenates
    the per-width results into a FLAGS.sentembed_size vector.
    (Legacy TF1 / Python 2 code: xrange, variable_scope, concat(axis, values).)
    """
    representation_from_filters = []
    output_channel = 0
    if (FLAGS.handle_filter_output == 'sum'):
        # Summed outputs must each already have the full sentence size.
        output_channel = FLAGS.sentembed_size
    else:
        # Concatenation splits the sentence size evenly across filter widths.
        output_channel = (FLAGS.sentembed_size / FLAGS.max_filter_length)
        if ((output_channel * FLAGS.max_filter_length) != FLAGS.sentembed_size):
            print('Error: Make sure (output_channel * FLAGS.max_filter_length) is equal to FLAGS.sentembed_size.')
            exit(0)
    for filterwidth in xrange(1, (FLAGS.max_filter_length + 1)):
        with tf.variable_scope(('Conv1D_%d' % filterwidth)) as scope:
            # Filter shape: [width, in_channels, out_channels].
            conv_filter = variable_on_cpu(('conv_filter_%d' % filterwidth), [filterwidth, FLAGS.wordembed_size, output_channel], tf.truncated_normal_initializer())
            conv = tf.nn.conv1d(sent_wordembeddings, conv_filter, 1, padding='VALID')
            conv_biases = variable_on_cpu(('conv_biases_%d' % filterwidth), [output_channel], tf.constant_initializer(0.0))
            pre_activation = tf.nn.bias_add(conv, conv_biases)
            conv = tf.nn.relu(pre_activation)
            # Expand to 4-D so max_pool can reduce over the whole feature map.
            conv_reshaped = tf.expand_dims(conv, 1)
            out_height = conv_reshaped.get_shape()[1].value
            out_width = conv_reshaped.get_shape()[2].value
            maxpool = tf.nn.max_pool(conv_reshaped, [1, out_height, out_width, 1], [1, 1, 1, 1], padding='VALID')
            # Local response normalization over the pooled activations.
            maxpool_norm = tf.nn.lrn(maxpool, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75)
            maxpool_sqz = tf.squeeze(maxpool_norm, [1, 2])
            representation_from_filters.append(maxpool_sqz)
    final_representation = []
    with tf.variable_scope('FinalOut') as scope:
        if (FLAGS.handle_filter_output == 'sum'):
            final_representation = tf.add_n(representation_from_filters)
        else:
            # TF1 signature: tf.concat(axis, values).
            final_representation = tf.concat(1, representation_from_filters)
    return final_representation
def add_backward_desc(backward_sdfg: dace.SDFG, forward_sdfg: dace.SDFG, forward_desc: dt.Data, forward_name: str) -> str:
    """Register a gradient data descriptor in *backward_sdfg*; return its name."""
    # Pick a '<name>_grad' identifier that does not clash with forward arrays.
    grad_name = utils.find_str_not_in_set(forward_sdfg.arrays, forward_name + '_grad')
    grad_desc = copy.deepcopy(forward_desc)
    # Gradients are inputs/outputs of the backward SDFG, not transients.
    grad_desc.transient = False
    return backward_sdfg.add_datadesc(grad_name, grad_desc)
_module()  # NOTE(review): looks like the tail of a mangled registry decorator (e.g. @RECOGNIZERS.register_module()) — confirm against upstream
class SegRecognizer(BaseRecognizer):
    """Segmentation-based text recognizer: preprocessor -> backbone -> neck -> head."""

    def __init__(self, preprocessor=None, backbone=None, neck=None, head=None, loss=None, label_convertor=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        assert (label_convertor is not None)
        self.label_convertor = build_convertor(label_convertor)
        self.preprocessor = None
        if (preprocessor is not None):
            self.preprocessor = build_preprocessor(preprocessor)
        assert (backbone is not None)
        self.backbone = build_backbone(backbone)
        assert (neck is not None)
        self.neck = build_neck(neck)
        assert (head is not None)
        # Keep the head's class count in sync with the label convertor.
        head.update(num_classes=self.label_convertor.num_classes())
        self.head = build_head(head)
        assert (loss is not None)
        self.loss = build_loss(loss)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if (pretrained is not None):
            # Legacy `pretrained` key is mapped onto init_cfg.
            warnings.warn('DeprecationWarning: pretrained is a deprecated key, please consider using init_cfg')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

    def extract_feat(self, img):
        """Optional preprocessing followed by the backbone."""
        if (self.preprocessor is not None):
            img = self.preprocessor(img)
        x = self.backbone(img)
        return x

    def forward_train(self, img, img_metas, gt_kernels=None):
        """Compute training losses from ground-truth kernels."""
        feats = self.extract_feat(img)
        out_neck = self.neck(feats)
        out_head = self.head(out_neck)
        loss_inputs = (out_neck, out_head, gt_kernels)
        losses = self.loss(*loss_inputs)
        return losses

    def simple_test(self, img, img_metas, **kwargs):
        """Single-view inference; returns [{'text': ..., 'score': ...}, ...]."""
        feat = self.extract_feat(img)
        out_neck = self.neck(feat)
        out_head = self.head(out_neck)
        for img_meta in img_metas:
            # Fraction of the padded width that holds real image content.
            valid_ratio = ((1.0 * img_meta['resize_shape'][1]) / img.size((- 1)))
            img_meta['valid_ratio'] = valid_ratio
        (texts, scores) = self.label_convertor.tensor2str(out_head, img_metas)
        results = []
        for (text, score) in zip(texts, scores):
            results.append(dict(text=text, score=score))
        return results

    def merge_aug_results(self, aug_results):
        """Keep the augmented view with the highest mean per-character score."""
        (out_text, out_score) = ('', (- 1))
        for result in aug_results:
            text = result[0]['text']
            # max(1, len) guards against division by zero on empty text.
            score = (sum(result[0]['score']) / max(1, len(text)))
            if (score > out_score):
                out_text = text
                out_score = score
        out_results = [dict(text=out_text, score=out_score)]
        return out_results

    def aug_test(self, imgs, img_metas, **kwargs):
        """Test-time augmentation: run simple_test per view and merge."""
        aug_results = []
        for (img, img_meta) in zip(imgs, img_metas):
            result = self.simple_test(img, img_meta, **kwargs)
            aug_results.append(result)
        return self.merge_aug_results(aug_results)
class HallLittlewood_p(HallLittlewood_generic):
    """Hall-Littlewood symmetric functions in the P basis."""

    class Element(HallLittlewood_generic.Element):
        pass

    def __init__(self, hall_littlewood):
        HallLittlewood_generic.__init__(self, hall_littlewood)
        # Module-level caches for the triangular change of basis with Schur functions.
        self._self_to_s_cache = p_to_s_cache
        self._s_to_self_cache = s_to_p_cache

    def _q_to_p_normalization(self, m):
        """Coefficient relating Q and P on partition *m* (a polynomial in t)."""
        t = self.t
        coeff = ((1 - t) ** len(m))
        # to_exp() yields the multiplicity of each part of m.
        for i in m.to_exp():
            for j in range(1, (i + 1)):
                coeff *= ((1 - (t ** j)) / (1 - t))
        return coeff

    def _s_to_self_base(self, part):
        """Return a coefficient function for expanding s(part) in the P basis.

        The coefficients come from Kostka-Foulkes polynomials via schur_to_hl;
        unknown partitions map to zero.
        """
        from sage.combinat.sf.kfpoly import schur_to_hl
        t = QQt.gen()
        zero = self.base_ring().zero()
        res_dict = schur_to_hl(part, t)
        f = (lambda part2: res_dict.get(part2, zero))
        return f

    def _s_cache(self, n):
        """Populate both change-of-basis caches for partitions of size *n*."""
        # The s <-> P transition matrix is upper triangular with unit diagonal,
        # so the inverse direction is obtained by _invert_morphism.
        self._invert_morphism(n, QQt, self._self_to_s_cache, self._s_to_self_cache, to_self_function=self._s_to_self_base, upper_triangular=True, ones_on_diagonal=True)
def _export_to_json(json_name, xs, xlabel, ys, ylabel, ys_std):
    """Dump a curve (mean plus +/- std band) to <log_dir>/auto/<json_name>.json."""
    payload = dict(
        x=xs,
        y=ys.tolist(),
        y_min=(ys - ys_std).tolist(),
        y_max=(ys + ys_std).tolist(),
        xlabel=xlabel,
        ylabel=ylabel,
    )
    json_path = os.path.join(_log_dir, 'auto', json_name + '.json')
    with open(json_path, 'w') as json_file:
        json.dump(payload, json_file)
class BernoulliRBM(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Bernoulli Restricted Boltzmann Machine trained with persistent
    contrastive divergence (PCD-1), following the scikit-learn estimator API."""

    # scikit-learn parameter validation spec.
    _parameter_constraints: dict = {'n_components': [Interval(Integral, 1, None, closed='left')], 'learning_rate': [Interval(Real, 0, None, closed='neither')], 'batch_size': [Interval(Integral, 1, None, closed='left')], 'n_iter': [Interval(Integral, 0, None, closed='left')], 'verbose': ['verbose'], 'random_state': ['random_state']}

    def __init__(self, n_components=256, *, learning_rate=0.1, batch_size=10, n_iter=10, verbose=0, random_state=None):
        self.n_components = n_components
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_iter = n_iter
        self.verbose = verbose
        self.random_state = random_state

    def transform(self, X):
        """Map visible units to hidden-unit activation probabilities."""
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse='csr', reset=False, dtype=(np.float64, np.float32))
        return self._mean_hiddens(X)

    def _mean_hiddens(self, v):
        # P(h=1|v) = sigmoid(v W^T + b_h); expit writes in place into p.
        p = safe_sparse_dot(v, self.components_.T)
        p += self.intercept_hidden_
        return expit(p, out=p)

    def _sample_hiddens(self, v, rng):
        # Bernoulli sample of the hidden layer given visibles.
        p = self._mean_hiddens(v)
        return (rng.uniform(size=p.shape) < p)

    def _sample_visibles(self, h, rng):
        # Bernoulli sample of the visible layer given hiddens.
        p = np.dot(h, self.components_)
        p += self.intercept_visible_
        expit(p, out=p)
        return (rng.uniform(size=p.shape) < p)

    def _free_energy(self, v):
        # F(v) = -v.b_v - sum_j log(1 + exp(v W_j^T + b_h_j)).
        return ((- safe_sparse_dot(v, self.intercept_visible_)) - np.logaddexp(0, (safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_)).sum(axis=1))

    def gibbs(self, v):
        """Perform one Gibbs sampling step: v -> h -> v'."""
        check_is_fitted(self)
        if (not hasattr(self, 'random_state_')):
            self.random_state_ = check_random_state(self.random_state)
        h_ = self._sample_hiddens(v, self.random_state_)
        v_ = self._sample_visibles(h_, self.random_state_)
        return v_

    # NOTE(review): bare call — in upstream scikit-learn this line is the
    # decorator `@_fit_context(prefer_skip_nested_validation=True)`; the '@'
    # appears to have been lost in extraction. Confirm against upstream.
    _fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y=None):
        """Fit one mini-batch, lazily initializing parameters on first call."""
        first_pass = (not hasattr(self, 'components_'))
        X = self._validate_data(X, accept_sparse='csr', dtype=np.float64, reset=first_pass)
        if (not hasattr(self, 'random_state_')):
            self.random_state_ = check_random_state(self.random_state)
        if (not hasattr(self, 'components_')):
            # Small random weights; Fortran order to speed the dot products.
            self.components_ = np.asarray(self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), order='F')
            self._n_features_out = self.components_.shape[0]
        if (not hasattr(self, 'intercept_hidden_')):
            self.intercept_hidden_ = np.zeros(self.n_components)
        if (not hasattr(self, 'intercept_visible_')):
            self.intercept_visible_ = np.zeros(X.shape[1])
        if (not hasattr(self, 'h_samples_')):
            # Persistent fantasy particles for PCD.
            self.h_samples_ = np.zeros((self.batch_size, self.n_components))
        self._fit(X, self.random_state_)

    def _fit(self, v_pos, rng):
        """Apply one PCD-1 gradient update on the mini-batch *v_pos*."""
        h_pos = self._mean_hiddens(v_pos)
        # Negative phase reconstructed from the persistent hidden samples.
        v_neg = self._sample_visibles(self.h_samples_, rng)
        h_neg = self._mean_hiddens(v_neg)
        lr = (float(self.learning_rate) / v_pos.shape[0])
        update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
        update -= np.dot(h_neg.T, v_neg)
        self.components_ += (lr * update)
        self.intercept_hidden_ += (lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)))
        self.intercept_visible_ += (lr * (np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)))
        # Resample the persistent hiddens in place (binomial via floor trick).
        h_neg[(rng.uniform(size=h_neg.shape) < h_neg)] = 1.0
        self.h_samples_ = np.floor(h_neg, h_neg)

    def score_samples(self, X):
        """Pseudo-likelihood proxy: free-energy gap from flipping one random
        visible unit per sample."""
        check_is_fitted(self)
        v = self._validate_data(X, accept_sparse='csr', reset=False)
        rng = check_random_state(self.random_state)
        # One random visible index per row is flipped to form v_.
        ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
        if sp.issparse(v):
            # data holds +1 where v was 0 and -1 where v was 1, i.e. the flip delta.
            data = (((- 2) * v[ind]) + 1)
            if isinstance(data, np.matrix):
                v_ = (v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape))
            else:
                v_ = (v + sp.csr_array((data.ravel(), ind), shape=v.shape))
        else:
            v_ = v.copy()
            v_[ind] = (1 - v_[ind])
        fe = self._free_energy(v)
        fe_ = self._free_energy(v_)
        # log pseudo-likelihood ~ n_features * log(sigmoid(fe_ - fe)).
        return ((- v.shape[1]) * np.logaddexp(0, (- (fe_ - fe))))

    # NOTE(review): bare call — upstream this is the decorator
    # `@_fit_context(prefer_skip_nested_validation=True)`.
    _fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Train the RBM for n_iter epochs of mini-batch PCD."""
        X = self._validate_data(X, accept_sparse='csr', dtype=(np.float64, np.float32))
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)
        self.components_ = np.asarray(rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F', dtype=X.dtype)
        self._n_features_out = self.components_.shape[0]
        self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
        self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
        self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)
        n_batches = int(np.ceil((float(n_samples) / self.batch_size)))
        batch_slices = list(gen_even_slices((n_batches * self.batch_size), n_batches, n_samples=n_samples))
        verbose = self.verbose
        begin = time.time()
        for iteration in range(1, (self.n_iter + 1)):
            for batch_slice in batch_slices:
                self._fit(X[batch_slice], rng)
            if verbose:
                end = time.time()
                print(('[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs' % (type(self).__name__, iteration, self.score_samples(X).mean(), (end - begin))))
                begin = end
        return self

    def _more_tags(self):
        # Known estimator-check exemptions and supported dtypes.
        return {'_xfail_checks': {'check_methods_subset_invariance': 'fails for the decision_function method', 'check_methods_sample_order_invariance': 'fails for the score_samples method'}, 'preserves_dtype': [np.float64, np.float32]}
def best_linear_code_in_guava(n, k, F):
    """Return the best known [n, k] linear code over F from GAP's GUAVA tables."""
    from .linear_code import LinearCode
    # GUAVA is an optional GAP package; require() raises with an install hint.
    GapPackage('guava', spkg='gap_packages').require()
    libgap.load_package('guava')
    best = libgap.BestKnownLinearCode(n, k, F)
    generator_matrix = best.GeneratorMat()._matrix_(F)
    return LinearCode(generator_matrix)
def parse_args():
    """Parse the translator's command-line options (domain/task plus tuning flags)."""
    argparser = argparse.ArgumentParser()
    # Required positional inputs.
    argparser.add_argument('domain', help='path to domain pddl file')
    argparser.add_argument('task', help='path to task pddl file')
    argparser.add_argument('--relaxed', dest='generate_relaxed_task', action='store_true', help='output relaxed task (no delete effects)')
    argparser.add_argument('--full-encoding', dest='use_partial_encoding', action='store_false', help='By default we represent facts that occur in multiple mutex groups only in one variable. Using this parameter adds these facts to multiple variables. This can make the meaning of the variables clearer, but increases the number of facts.')
    argparser.add_argument('--invariant-generation-max-candidates', default=100000, type=int, help='max number of candidates for invariant generation (default: %(default)d). Set to 0 to disable invariant generation and obtain only binary variables. The limit is needed for grounded input files that would otherwise produce too many candidates.')
    argparser.add_argument('--sas-file', default='output.sas', help='path to the SAS output file (default: %(default)s)')
    argparser.add_argument('--invariant-generation-max-time', default=300, type=int, help='max time for invariant generation (default: %(default)ds)')
    argparser.add_argument('--add-implied-preconditions', action='store_true', help='infer additional preconditions. This setting can cause a severe performance penalty due to weaker relevance analysis (see issue7).')
    argparser.add_argument('--keep-unreachable-facts', dest='filter_unreachable_facts', action='store_false', help="keep facts that can't be reached from the initial state")
    argparser.add_argument('--skip-variable-reordering', dest='reorder_variables', action='store_false', help='do not reorder variables based on the causal graph. Do not use this option with the causal graph heuristic!')
    argparser.add_argument('--keep-unimportant-variables', dest='filter_unimportant_vars', action='store_false', help='keep variables that do not influence the goal in the causal graph')
    argparser.add_argument('--dump-task', action='store_true', help='dump human-readable SAS+ representation of the task')
    argparser.add_argument('--layer-strategy', default='min', choices=['min', 'max'], help="How to assign layers to derived variables. 'min' attempts to put as many variables into the same layer as possible, while 'max' puts each variable into its own layer unless it is part of a cycle.")
    argparser.add_argument('--add-negative-axioms', action='store_true', help='add negative axioms. This setting introduced axioms that can help to be more informative.')
    return argparser.parse_args()
class DistributedTestDataSampler(Sampler):
    """Deterministic per-rank batch sampler for distributed evaluation."""

    def __init__(self, data_source, batch_size, rank, world_size):
        total = len(data_source)
        # Contiguous, near-equal shards; rank r evaluates shard r.
        shards = np.array_split(np.arange(total, dtype=int), world_size)
        # Derive the batch count from the largest shard (shard 0) so every
        # rank yields the same number of batches.
        num_batches = (len(shards[0]) + batch_size - 1) // batch_size
        self.batch_indices = [b.tolist() for b in np.array_split(shards[rank], num_batches)]

    def __iter__(self):
        return iter(self.batch_indices)

    def __len__(self):
        return len(self.batch_indices)
class Configurable():
    """Mixin giving classes config-driven construction and config hashing.

    NOTE(review): ``from_config`` / ``_from_config`` take ``cls`` as their
    first parameter and are clearly meant to be invoked on the class; without
    ``@classmethod`` a call like ``SomeClass.from_config(cfg)`` would bind the
    config dict to ``cls``. The decorators were evidently lost in extraction
    and are restored here.
    """

    @classmethod
    def from_config(cls, config, **kwargs):
        """Build an instance from a config mapping."""
        return cls._from_config(config, **kwargs)

    @classmethod
    def _from_config(cls, config, **kwargs):
        # Default strategy: config keys map directly onto constructor keywords.
        return cls(**config, **kwargs)

    def get_config(self):
        """Return ``{kind: config}`` suitable for serialization."""
        return {self.get_kind(): self._get_config()}

    def get_kind(self):
        # Subclasses may declare a `kind` attribute; otherwise fall back to
        # the lowercased class name.
        return getattr(self.__class__, 'kind', self.__class__.__name__.lower())

    def _get_config(self):
        raise NotImplementedError('Configurables must implement a get_config method')

    def get_config_hash(self):
        """Stable hash of this object's full config."""
        return compute_hash(self.get_config())
class _Merge(Layer):
    """Abstract base for Keras merge layers (Add, Multiply, Concatenate, ...).

    Subclasses implement ``_merge_function``; this base handles broadcasting
    shape inference, input reshaping/transposition, and mask merging.
    """

    def __init__(self, **kwargs):
        super(_Merge, self).__init__(**kwargs)
        self.supports_masking = True

    def _merge_function(self, inputs):
        raise NotImplementedError

    def _compute_elemwise_op_output_shape(self, shape1, shape2):
        """Broadcast shape1 and shape2 for an element-wise op.

        Returns None when either shape is unknown; raises ValueError when the
        shapes cannot be broadcast together.
        """
        if (None in [shape1, shape2]):
            return None
        elif (len(shape1) < len(shape2)):
            # Normalize so shape1 is the longer shape.
            return self._compute_elemwise_op_output_shape(shape2, shape1)
        elif (len(shape2) == 0):
            return shape1
        output_shape = list(shape1[:(- len(shape2))])
        for (i, j) in zip(shape1[(- len(shape2)):], shape2):
            if ((i is None) or (j is None)):
                output_shape.append(None)
            elif (i == 1):
                output_shape.append(j)
            elif (j == 1):
                output_shape.append(i)
            else:
                if (i != j):
                    raise ValueError(((('Operands could not be broadcast together with shapes ' + str(shape1)) + ' ') + str(shape2)))
                output_shape.append(i)
        return tuple(output_shape)

    def build(self, input_shape):
        # Validate inputs and decide whether call() must reshape for broadcasting.
        if (not isinstance(input_shape, list)):
            raise ValueError('A merge layer should be called on a list of inputs.')
        if (len(input_shape) < 2):
            raise ValueError((('A merge layer should be called on a list of at least 2 inputs. Got ' + str(len(input_shape))) + ' inputs.'))
        batch_sizes = [s[0] for s in input_shape if (s is not None)]
        batch_sizes = set(batch_sizes)
        batch_sizes -= set([None])
        if (len(batch_sizes) > 1):
            raise ValueError(('Can not merge tensors with different batch sizes. Got tensors with shapes : ' + str(input_shape)))
        if (input_shape[0] is None):
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        for i in range(1, len(input_shape)):
            if (input_shape[i] is None):
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
        # Reshaping in call() is only needed when ranks differ or are unknown.
        if ((None not in input_shape) and (len(set(map(len, input_shape))) == 1)):
            self._reshape_required = False
        else:
            self._reshape_required = True

    def call(self, inputs):
        if self._reshape_required:
            reshaped_inputs = []
            input_ndims = list(map(K.ndim, inputs))
            if (None not in input_ndims):
                # All ranks known: left-pad with singleton dims to a common rank.
                max_ndim = max(input_ndims)
                for x in inputs:
                    x_ndim = K.ndim(x)
                    for _ in range((max_ndim - x_ndim)):
                        x = K.expand_dims(x, 1)
                    reshaped_inputs.append(x)
                return self._merge_function(reshaped_inputs)
            else:
                # Some ranks unknown: rotate the batch axis to the end so the
                # merge broadcasts over it, then rotate back afterwards.
                transposed = False
                for x in inputs:
                    x_ndim = K.ndim(x)
                    if (x_ndim is None):
                        x_shape = K.shape(x)
                        batch_size = x_shape[0]
                        new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
                        x_transposed = K.reshape(x, K.stack([batch_size, K.prod(x_shape[1:])]))
                        x_transposed = K.permute_dimensions(x_transposed, (1, 0))
                        x_transposed = K.reshape(x_transposed, new_shape)
                        reshaped_inputs.append(x_transposed)
                        transposed = True
                    elif (x_ndim > 1):
                        dims = (list(range(1, x_ndim)) + [0])
                        reshaped_inputs.append(K.permute_dimensions(x, dims))
                        transposed = True
                    else:
                        # Rank <= 1: nothing to rotate.
                        reshaped_inputs.append(x)
                y = self._merge_function(reshaped_inputs)
                y_ndim = K.ndim(y)
                if transposed:
                    # Undo the batch-axis rotation on the merged result.
                    if (y_ndim is None):
                        y_shape = K.shape(y)
                        y_ndim = K.shape(y_shape)[0]
                        batch_size = y_shape[(y_ndim - 1)]
                        new_shape = K.concatenate([K.expand_dims(batch_size), y_shape[:(y_ndim - 1)]])
                        y = K.reshape(y, ((- 1), batch_size))
                        y = K.permute_dimensions(y, (1, 0))
                        y = K.reshape(y, new_shape)
                    elif (y_ndim > 1):
                        dims = ([(y_ndim - 1)] + list(range((y_ndim - 1))))
                        y = K.permute_dimensions(y, dims)
                return y
        else:
            return self._merge_function(inputs)

    def compute_output_shape(self, input_shape):
        # Broadcast the per-sample shapes, then re-attach the batch dimension.
        if (input_shape[0] is None):
            output_shape = None
        else:
            output_shape = input_shape[0][1:]
        for i in range(1, len(input_shape)):
            if (input_shape[i] is None):
                shape = None
            else:
                shape = input_shape[i][1:]
            output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
        batch_sizes = [s[0] for s in input_shape if (s is not None)]
        batch_sizes = set(batch_sizes)
        batch_sizes -= set([None])
        if (len(batch_sizes) == 1):
            output_shape = ((list(batch_sizes)[0],) + output_shape)
        else:
            output_shape = ((None,) + output_shape)
        return output_shape

    def compute_mask(self, inputs, mask=None):
        # Merge per-input masks with an element-wise AND; None means "no mask".
        if (mask is None):
            return None
        if (not isinstance(mask, list)):
            raise ValueError('`mask` should be a list.')
        if (not isinstance(inputs, list)):
            raise ValueError('`inputs` should be a list.')
        if (len(mask) != len(inputs)):
            raise ValueError('The lists `inputs` and `mask` should have the same length.')
        if all([(m is None) for m in mask]):
            return None
        masks = [K.expand_dims(m, 0) for m in mask if (m is not None)]
        return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
def get_repeats(csv_file):
    """Return the 'file2' and 'file3' values of every CSV row, in row order."""
    repeats = []
    with open(csv_file, 'r') as fp:
        for row in csv.DictReader(fp):
            repeats.extend((row['file2'], row['file3']))
    return repeats
def argmax(vals: T.Iterable[Scalar]) -> Scalar:
    """Index of the maximum element, derived from the one-hot argmax encoding."""
    total = 0
    for (idx, flag) in enumerate(argmax_onehot(vals)):
        # Exactly the one-hot position contributes its index.
        total += idx * flag
    return total
class RandomActorPolicy(BaseActorPolicy):
    """Actor that ignores observations and samples uniformly from the action box."""

    def __init__(self, low_bound, upper_bound):
        super(RandomActorPolicy, self).__init__(identifier='random_policy')
        self._low_bound = low_bound
        self._upper_bound = upper_bound

    def act(self, obs):
        # The observation is intentionally unused.
        return np.random.uniform(self._low_bound, self._upper_bound)
def get_clientid():
    """Return a stable anonymous client id, persisting it on first use.

    Reads ``[client] anon_clientid`` from the config file at *host_uuid_path*;
    when missing, derives an id from the hardware address and writes it back.
    """
    config = configparser.ConfigParser()
    if host_uuid_path.exists():
        config.read(os.path.expanduser(host_uuid_path))
    # Guard clause: an existing id wins and the file is left untouched.
    if ('client' in config) and ('anon_clientid' in config['client']):
        return config.get('client', 'anon_clientid')
    # Deterministic id from the MAC address (uuid.getnode()).
    # Renamed from `id`, which shadowed the builtin.
    client_id = uuid.UUID(int=uuid.getnode()).hex
    if 'client' not in config:
        config.add_section('client')
    config.set('client', 'anon_clientid', client_id)
    with host_uuid_path.open('w') as f:
        config.write(f)
    return client_id
_module  # NOTE(review): stray token — presumably the tail of a mangled registry decorator; confirm against upstream
class SoftmaxFocalClassificationLoss(Loss):
    """Softmax focal loss (Lin et al., "Focal Loss for Dense Object Detection")
    for one-hot classification targets."""

    def __init__(self, gamma=2.0, alpha=0.25):
        # gamma: focusing parameter; alpha: class-balance weight.
        self._alpha = alpha
        self._gamma = gamma

    def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None):
        """Per-entry focal cross-entropy, scaled by *weights*.

        Assumes prediction/target tensors are [batch, num_anchors, num_classes]
        (inferred from the `.shape[2]` / unsqueeze(2) usage — TODO confirm).
        """
        weights = weights.unsqueeze(2)
        if (class_indices is not None):
            # Zero the weight of classes not listed in class_indices.
            weights *= indices_to_dense_vector(class_indices, prediction_tensor.shape[2]).view(1, 1, (- 1)).type_as(prediction_tensor)
        per_entry_cross_ent = _softmax_cross_entropy_with_logits(labels=target_tensor, logits=prediction_tensor)
        # Spread the per-anchor CE onto the one-hot target channel.
        per_entry_cross_ent = (per_entry_cross_ent.unsqueeze((- 1)) * target_tensor)
        prediction_probabilities = F.softmax(prediction_tensor, dim=(- 1))
        # p_t: model probability assigned to the true state of each entry.
        p_t = ((target_tensor * prediction_probabilities) + ((1 - target_tensor) * (1 - prediction_probabilities)))
        modulating_factor = 1.0
        if self._gamma:
            # Down-weight easy examples: (1 - p_t)^gamma.
            modulating_factor = torch.pow((1.0 - p_t), self._gamma)
        alpha_weight_factor = 1.0
        if (self._alpha is not None):
            # Entries whose class-0 target is 1 get weight (1 - alpha),
            # all others get alpha.
            alpha_weight_factor = torch.where((target_tensor[(..., 0)] == 1), torch.tensor((1 - self._alpha)).type_as(per_entry_cross_ent), torch.tensor(self._alpha).type_as(per_entry_cross_ent))
        focal_cross_entropy_loss = ((modulating_factor * alpha_weight_factor) * per_entry_cross_ent)
        return (focal_cross_entropy_loss * weights)
def get_region_score(features, feature_columns, region_number, l2_reg, seed, prefix='region_', seq_mask_zero=True):
    """Softmax over per-region linear logits (region weights of an MLR model)."""
    region_logits = []
    for region_idx in range(region_number):
        # Each region gets its own seed and name prefix (1-based).
        logit = get_linear_logit(features, feature_columns, seed=seed + region_idx, prefix=prefix + str(region_idx + 1), l2_reg=l2_reg)
        region_logits.append(logit)
    return Activation('softmax')(concat_func(region_logits))
def init_logger(args):
    """Configure root logging: append to ``<args.log_dir>/<args.name>.log``
    and mirror INFO-level records to the console."""
    fmt = '%(asctime)s | %(message)s'
    log_path = os.path.join(args.log_dir, args.name + '.log')
    logging.basicConfig(format=fmt,
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.INFO,
                        filename=log_path,
                        filemode='a+')
    # Echo records to the console in the same format as the log file.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger('').addHandler(console_handler)
def to_pretty_midi(music: 'Music') -> PrettyMIDI:
    """Convert a ``Music`` object (tick-based times) into a ``PrettyMIDI``
    object (real-time seconds).

    Builds a tick→seconds mapping from the tempo changes, then converts key
    signatures, time signatures, lyrics and tracks through that mapping.
    """
    midi = PrettyMIDI()
    # Seed the tempo list with the default tempo at tick 0.
    (tempo_times, tempi) = ([0], [float(DEFAULT_TEMPO)])
    for tempo in music.tempos:
        tempo_times.append(tempo.time)
        tempi.append(tempo.qpm)
    if (len(tempi) > 1):
        # Deduplicate in place: drop a change that repeats the previous tempo,
        # and drop the earlier of two changes that share the same tick.
        last_tempo = tempi[0]
        last_time = tempo_times[0]
        i = 1
        while (i < len(tempo_times)):
            if (tempi[i] == last_tempo):
                # Same tempo as the previous entry — redundant change.
                del tempo_times[i]
                del tempi[i]
            elif (tempo_times[i] == last_time):
                # Two changes at the same tick — the later one wins.
                del tempo_times[(i - 1)]
                del tempi[(i - 1)]
            else:
                # NOTE(review): only last_tempo is advanced here; last_time is
                # never updated past index 0 — confirm whether same-tick
                # changes later in the list are meant to be caught too.
                last_tempo = tempi[i]
                i += 1
    if (len(tempi) == 1):
        # Constant tempo: linear tick→seconds conversion.
        def map_time(time):
            return ((time * 60.0) / (music.resolution * tempi[0]))
    else:
        # Variable tempo: precompute the real-time offset of every tempo
        # change, then interpolate within the active tempo segment.
        tempo_times_np = np.array(tempo_times)
        tempi_np = np.array(tempi)
        tempo_realtimes = np.cumsum(((np.diff(tempo_times_np) * 60.0) / (music.resolution * tempi_np[:(- 1)]))).tolist()
        tempo_realtimes.insert(0, 0.0)
        def map_time(time):
            # Segment whose start tick is <= time (searchsorted 'right' - 1).
            idx = (np.searchsorted(tempo_times_np, time, side='right') - 1)
            residual = (time - tempo_times_np[idx])
            factor = (60.0 / (music.resolution * tempi_np[idx]))
            return (tempo_realtimes[idx] + (residual * factor))
    for key_signature in music.key_signatures:
        pm_key_signature = to_pretty_midi_key_signature(key_signature, map_time)
        # Helper may return None for unconvertible key signatures; skip those.
        if (pm_key_signature is not None):
            midi.key_signature_changes.append(pm_key_signature)
    for time_signature in music.time_signatures:
        midi.time_signature_changes.append(to_pretty_midi_time_signature(time_signature, map_time))
    for lyric in music.lyrics:
        midi.lyrics.append(to_pretty_midi_lyric(lyric, map_time))
    for track in music.tracks:
        midi.instruments.append(to_pretty_midi_instrument(track, map_time))
    return midi
def get_human_normalized_score(entry):
    """Normalize ``entry['score']`` so the best Random baseline maps to 0.0
    and the best Human baseline maps to 1.0 for the same env/variant."""
    base_query = {'env-title': entry['env-title'], 'env-variant': entry['env-variant']}
    human_entries = find_all(dict(base_query, **{'algo-title': 'Human'}))
    random_entries = find_all(dict(base_query, **{'algo-title': 'Random'}))
    # When multiple baseline entries exist, warn and use the highest score.
    if len(human_entries) > 1:
        print("[WARNING] More than one human entries were found for environment '{}'.".format(entry['env-title']))
        human_entries.sort(key=lambda e: e['score'], reverse=True)
    if len(random_entries) > 1:
        print("[WARNING] More than one random entries were found for environment '{}'.".format(entry['env-title']))
        random_entries.sort(key=lambda e: e['score'], reverse=True)
    best_random = random_entries[0]['score']
    best_human = human_entries[0]['score']
    return (entry['score'] - best_random) / (best_human - best_random)
class Quaternionr(MsgpackMixin):
    """Quaternion value object with (w, x, y, z) float32 components."""

    # Class-level defaults. NOTE(review): these default w_val to 0 while
    # __init__ defaults it to 1 (identity rotation) — kept as-is since
    # instances always overwrite them; confirm the intent upstream.
    w_val = np.float32(0)
    x_val = np.float32(0)
    y_val = np.float32(0)
    z_val = np.float32(0)

    def __init__(self, x_val=np.float32(0), y_val=np.float32(0), z_val=np.float32(0), w_val=np.float32(1)):
        """Store the components; with no arguments this is the identity
        quaternion (w=1, x=y=z=0)."""
        self.x_val, self.y_val, self.z_val, self.w_val = x_val, y_val, z_val, w_val
def validate_network(val_loader, model):
    """Run one evaluation pass of ``model`` over ``val_loader``.

    Computes cross-entropy loss and top-1/top-5 accuracy on GPU, updates the
    module-level ``best_acc`` pair when top-1 improves, logs a summary on
    rank 0, and returns ``(avg_loss, top1_acc, top5_acc)``.

    NOTE(review): depends on module-level globals ``best_acc``, ``args`` and
    ``logger``, and assumes CUDA is available.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    global best_acc
    # Put layers such as dropout/batch-norm into evaluation mode.
    model.eval()
    criterion = nn.CrossEntropyLoss().cuda()
    with torch.no_grad():
        end = time.perf_counter()
        for (i, (inp, target)) in enumerate(val_loader):
            inp = inp.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            output = model(inp)
            loss = criterion(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            # Meters are weighted by the batch size.
            losses.update(loss.item(), inp.size(0))
            top1.update(acc1[0], inp.size(0))
            top5.update(acc5[0], inp.size(0))
            batch_time.update((time.perf_counter() - end))
            end = time.perf_counter()
    # Track the best (top1, top5) pair seen across validation calls.
    if (top1.avg.item() > best_acc[0]):
        best_acc = (top1.avg.item(), top5.avg.item())
    if (args.rank == 0):
        # NOTE(review): the "\ " inside this format string looks like a
        # collapsed line-continuation artifact from the original source;
        # left byte-identical here — confirm the intended message format.
        logger.info('Test:\tTime {batch_time.avg:.3f}\tLoss {loss.avg:.4f}\ {top1.avg:.3f}\tBest so far {acc:.1f}'.format(batch_time=batch_time, loss=losses, top1=top1, acc=best_acc[0]))
    return (losses.avg, top1.avg.item(), top5.avg.item())
class UnalignedDataset(BaseDataset):
    """Dataset of unpaired images drawn from two directories
    ``<dataroot>/<phase>A`` and ``<dataroot>/<phase>B``."""

    def modify_commandline_options(parser, is_train):
        # This dataset adds no extra command-line options.
        return parser

    def initialize(self, opt):
        """Index both image directories and build the shared transform."""
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
        self.A_paths = sorted(make_dataset(self.dir_A))
        self.B_paths = sorted(make_dataset(self.dir_B))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return an (unpaired) A/B sample; B is drawn randomly unless
        ``serial_batches`` is set."""
        A_path = self.A_paths[index % self.A_size]
        if self.opt.serial_batches:
            index_B = index % self.B_size
        else:
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        A = self.transform(Image.open(A_path).convert('RGB'))
        B = self.transform(Image.open(B_path).convert('RGB'))
        # Direction BtoA swaps which side uses input_nc vs output_nc.
        if self.opt.direction == 'BtoA':
            input_nc, output_nc = self.opt.output_nc, self.opt.input_nc
        else:
            input_nc, output_nc = self.opt.input_nc, self.opt.output_nc
        # Collapse RGB to a single luma channel when one channel is requested.
        if input_nc == 1:
            A = (A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114).unsqueeze(0)
        if output_nc == 1:
            B = (B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114).unsqueeze(0)
        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        # Length is governed by the larger side so every image is reachable.
        return max(self.A_size, self.B_size)

    def name(self):
        return 'UnalignedDataset'