code
stringlengths
101
5.91M
class ResnetCompleteNetworkTest(tf.test.TestCase):
    """Tests for the full resnet_v2 network built from small test blocks."""

    def _resnet_small(self, inputs, num_classes=None, is_training=True,
                      global_pool=True, output_stride=None,
                      include_root_block=True, spatial_squeeze=True,
                      reuse=None, scope='resnet_v2_small'):
        """A shallow and thin ResNet v2 used to keep the tests fast."""
        block = resnet_v2.resnet_v2_block
        # Four stages with tiny base depths; bottleneck output depth is
        # 4 * base_depth, so block4 ends at depth 32.
        blocks = [block('block1', base_depth=1, num_units=3, stride=2),
                  block('block2', base_depth=2, num_units=3, stride=2),
                  block('block3', base_depth=4, num_units=3, stride=2),
                  block('block4', base_depth=8, num_units=2, stride=1)]
        return resnet_v2.resnet_v2(inputs, blocks, num_classes,
                                   is_training=is_training,
                                   global_pool=global_pool,
                                   output_stride=output_stride,
                                   include_root_block=include_root_block,
                                   spatial_squeeze=spatial_squeeze,
                                   reuse=reuse, scope=scope)

    def testClassificationEndPoints(self):
        """Classification head exposes logits/predictions/global_pool endpoints."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes,
                                                      global_pool=global_pool,
                                                      spatial_squeeze=False,
                                                      scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        # spatial_squeeze=False keeps the 1x1 spatial dims on the logits.
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                             [2, 1, 1, num_classes])
        self.assertTrue(('global_pool' in end_points))
        # 32 = 4 * base_depth of block4.
        self.assertListEqual(end_points['global_pool'].get_shape().as_list(),
                             [2, 1, 1, 32])

    def testEndpointNames(self):
        """End point dict contains exactly the expected scoped names."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes,
                                                 global_pool=global_pool,
                                                 scope='resnet')
        expected = ['resnet/conv1']
        # Blocks 1-3 have 3 units each, block 4 has 2 (see _resnet_small).
        for block in range(1, 5):
            for unit in range(1, (4 if (block < 4) else 3)):
                for conv in range(1, 4):
                    expected.append(('resnet/block%d/unit_%d/bottleneck_v2/conv%d'
                                     % (block, unit, conv)))
                expected.append(('resnet/block%d/unit_%d/bottleneck_v2'
                                 % (block, unit)))
            # Only the first unit of each block gets a projection shortcut.
            expected.append(('resnet/block%d/unit_1/bottleneck_v2/shortcut' % block))
            expected.append(('resnet/block%d' % block))
        expected.extend(['global_pool', 'resnet/logits',
                         'resnet/spatial_squeeze', 'predictions'])
        self.assertItemsEqual(end_points.keys(), expected)

    def testClassificationShapes(self):
        """Block output shapes for a 224x224 input with the root block."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes,
                                                 global_pool=global_pool,
                                                 scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4],
                                 'resnet/block2': [2, 14, 14, 8],
                                 'resnet/block3': [2, 7, 7, 16],
                                 'resnet/block4': [2, 7, 7, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testFullyConvolutionalEndpointShapes(self):
        """Block shapes without global pooling on an odd-sized (321) input."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes,
                                                 global_pool=global_pool,
                                                 spatial_squeeze=False,
                                                 scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4],
                                 'resnet/block2': [2, 21, 21, 8],
                                 'resnet/block3': [2, 11, 11, 16],
                                 'resnet/block4': [2, 11, 11, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testRootlessFullyConvolutionalEndpointShapes(self):
        """Block shapes when the root conv/pool block is skipped."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 128, 128, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes,
                                                 global_pool=global_pool,
                                                 include_root_block=False,
                                                 spatial_squeeze=False,
                                                 scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 64, 64, 4],
                                 'resnet/block2': [2, 32, 32, 8],
                                 'resnet/block3': [2, 16, 16, 16],
                                 'resnet/block4': [2, 16, 16, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalEndpointShapes(self):
        """With output_stride=8 all blocks keep the 41x41 resolution (atrous)."""
        global_pool = False
        num_classes = 10
        output_stride = 8
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes,
                                                 global_pool=global_pool,
                                                 output_stride=output_stride,
                                                 spatial_squeeze=False,
                                                 scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4],
                                 'resnet/block2': [2, 41, 41, 8],
                                 'resnet/block3': [2, 41, 41, 16],
                                 'resnet/block4': [2, 41, 41, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalValues(self):
        """Atrous output, subsampled back, must equal the dense-stride output."""
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        (output, _) = self._resnet_small(inputs, None,
                                                         is_training=False,
                                                         global_pool=False,
                                                         output_stride=output_stride)
                        if (output_stride is None):
                            factor = 1
                        else:
                            factor = (nominal_stride // output_stride)
                        # Downsample the atrous output to the nominal stride.
                        output = resnet_utils.subsample(output, factor)
                        # Reuse the same weights for the reference network.
                        tf.get_variable_scope().reuse_variables()
                        (expected, _) = self._resnet_small(inputs, None,
                                                           is_training=False,
                                                           global_pool=False)
                        sess.run(tf.global_variables_initializer())
                        self.assertAllClose(output.eval(), expected.eval(),
                                            atol=0.0001, rtol=0.0001)

    def testUnknownBatchSize(self):
        """Graph builds with batch dim None and evaluates at a concrete batch."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = True
        num_classes = 10
        inputs = create_test_input(None, height, width, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, _) = self._resnet_small(inputs, num_classes,
                                             global_pool=global_pool,
                                             spatial_squeeze=False,
                                             scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [None, 1, 1, num_classes])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 1, 1, num_classes))

    def testFullyConvolutionalUnknownHeightWidth(self):
        """Graph builds with spatial dims None; 65x65 input yields 3x3 output."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None,
                                             global_pool=global_pool)
        self.assertListEqual(output.get_shape().as_list(),
                             [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 3, 3, 32))

    def testAtrousFullyConvolutionalUnknownHeightWidth(self):
        """Same as above but with output_stride=8: 65x65 input yields 9x9."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        output_stride = 8
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None,
                                             global_pool=global_pool,
                                             output_stride=output_stride)
        self.assertListEqual(output.get_shape().as_list(),
                             [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 9, 9, 32))
def compute_BERT_CLS_feature(model, input_ids=None, attention_mask=None,
                             token_type_ids=None, position_ids=None,
                             head_mask=None, inputs_embeds=None, labels=None,
                             output_attentions=None,
                             output_hidden_states=None) -> torch.FloatTensor:
    """Return the dropout-regularized pooled [CLS] feature from a BERT classifier.

    Runs the wrapped ``model.bert`` encoder and applies the classifier's own
    dropout to the pooled output (element 1 of the encoder's return tuple).

    Args:
        model: A BERT sequence-classification model exposing ``.bert``,
            ``.dropout`` and ``.training``; must be in eval mode.
        labels: Accepted for signature compatibility with the model's
            ``forward``; not used here.
        (remaining args): Forwarded unchanged to ``model.bert``.

    Raises:
        ValueError: If the model is in training mode — dropout would then be
            stochastic and the extracted feature non-deterministic.
    """
    if model.training:
        # Fix: the original raised a bare ValueError with no message.
        raise ValueError(
            'compute_BERT_CLS_feature requires the model to be in eval mode; '
            'call model.eval() first')
    outputs = model.bert(input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids,
                         position_ids=position_ids,
                         head_mask=head_mask,
                         inputs_embeds=inputs_embeds,
                         output_attentions=output_attentions,
                         output_hidden_states=output_hidden_states)
    # outputs[1] is the pooled [CLS] representation.
    pooled_output = outputs[1]
    return model.dropout(pooled_output)
class FrameworkInfo():
    """Container of per-framework quantization lookup tables.

    Bundles the mappings a quantization pipeline needs to answer questions
    such as "how do I quantize this activation?", "which attributes of this
    layer type hold kernels?", or "what are the known min/max bounds?".
    """

    def __init__(self, activation_quantizer_mapping: Dict[(QuantizationMethod, Callable)], kernel_channels_mapping: DefaultDict, activation_min_max_mapping: Dict[(str, tuple)], layer_min_max_mapping: Dict[(Any, tuple)], kernel_ops_attributes_mapping: DefaultDict, out_channel_axis_mapping: DefaultDict):
        # Simply record every table; all query methods below are lookups.
        self.activation_quantizer_mapping = activation_quantizer_mapping
        self.kernel_channels_mapping = kernel_channels_mapping
        self.activation_min_max_mapping = activation_min_max_mapping
        self.layer_min_max_mapping = layer_min_max_mapping
        self.kernel_ops_attributes_mapping = kernel_ops_attributes_mapping
        self.out_channel_axis_mapping = out_channel_axis_mapping

    def get_kernel_op_attributes(self, node_type: Any) -> List[str]:
        """Return the kernel attribute names for node_type, or None if unknown."""
        return self.kernel_ops_attributes_mapping.get(node_type)

    def is_kernel_op(self, node_type: Any) -> bool:
        """True iff node_type has an entry in the kernel-attributes table."""
        return node_type in self.kernel_ops_attributes_mapping.keys()

    def layers_has_min_max(self, layer: Any) -> bool:
        """True iff known (min, max) output bounds exist for this layer."""
        return layer in self.layer_min_max_mapping

    def activation_has_min_max(self, activation_name: str) -> bool:
        """True iff known (min, max) bounds exist for this activation name."""
        return activation_name in self.activation_min_max_mapping
def convert_space_to_rllab_space(space):
    """Translate a gym / rllab-tf space into the equivalent plain rllab space.

    Already-rllab spaces pass through unchanged; Box-like spaces map to
    RllabBox and Discrete-like spaces to RllabDiscrete.

    Raises:
        TypeError: for any unrecognized space type.
    """
    from rllab.spaces.box import Box as RllabBox
    from rllab.spaces.discrete import Discrete as RllabDiscrete
    from sandbox.rocky.tf.spaces import Box as TfBox
    from sandbox.rocky.tf.spaces import Discrete as TfDiscrete

    # Nothing to convert if the space is already an rllab space.
    if isinstance(space, (RllabBox, RllabDiscrete)):
        return space
    if isinstance(space, (TfBox, gym.spaces.Box)):
        return RllabBox(space.low, space.high)
    if isinstance(space, (TfDiscrete, gym.spaces.Discrete)):
        return RllabDiscrete(space.n)
    raise TypeError()
class _dispatch_dtypes(tuple): def __add__(self, other): assert isinstance(other, tuple) return _dispatch_dtypes(tuple.__add__(self, other))
def _simplify_atans(exprn):
    """Simplify cos/sin of integer multiples of atan2 in a sympy expression.

    If the expression contains no atan2, only factor_terms is applied.
    Otherwise cos(n*atan2(y, x)) and sin(n*atan2(y, x)) are matched for
    integer n and rewritten via the helpers _cos_n_atan / _sin_n_atan.
    """
    if (not _exprn_contains_funcs(exprn, [atan2])):
        return factor_terms(exprn)
    # Wild symbols used as pattern placeholders in the cos/sin templates.
    pnames = ('a', 'x', 'y')
    (a_w, x_w, y_w) = symbols(','.join(((n_ + '_w') for n_ in pnames)), cls=Wild, real=True)
    # n_w only matches integer multipliers.
    n_w = Wild('n_w', properties=((lambda x: x.is_integer),))
    # Temporary placeholder functions: first capture the matched arguments,
    # then substitute the real closed-form helpers in a second pass.
    c = Function('c', real=True)
    s = Function('s', real=True)
    cos_patt1 = cos((n_w * atan2(y_w, x_w)))
    sin_patt1 = sin((n_w * atan2(y_w, x_w)))
    # TR10: expand sums inside trig functions so the patterns can match.
    res = TR10(exprn)
    res = res.replace(cos_patt1, c(x_w, y_w, n_w))
    res = res.replace(sin_patt1, s(x_w, y_w, n_w))
    res = res.replace(c, _cos_n_atan)
    res = res.replace(s, _sin_n_atan)
    # TR10i: re-contract the expanded trig sums before factoring.
    res = factor_terms(TR10i(res))
    return res
def get_preprocessing_model(input_size=224):
    """Build the standard ImageNet preprocessing pipeline.

    Center-crops to input_size x input_size and normalizes with the usual
    ImageNet channel statistics scaled to the 0-255 pixel range.
    """
    # ImageNet per-channel mean and variance, expressed on the 0-255 scale.
    channel_mean = [(0.485 * 255), (0.456 * 255), (0.406 * 255)]
    channel_variance = [((0.229 * 255) ** 2), ((0.224 * 255) ** 2), ((0.225 * 255) ** 2)]
    pipeline = keras.Sequential()
    pipeline.add(layers.CenterCrop(input_size, input_size))
    pipeline.add(layers.Normalization(mean=channel_mean, variance=channel_variance))
    return pipeline
class ECM(SageObject):
    """Interface to the GMP-ECM elliptic-curve factorization program.

    Builds command lines for the ``ecm`` binary, runs it as a subprocess,
    and parses its output into ``(factor, is_probably_prime)`` pairs.
    """

    def __init__(self, B1=10, B2=None, **kwds):
        """Store the default command built from stage-1 bound ``B1``,
        optional stage-2 bound ``B2``, and extra ecm switches ``kwds``."""
        self._cmd = self._make_cmd(B1, B2, kwds)

    def _make_cmd(self, B1, B2, kwds):
        """Return the argv list for ecm.

        A keyword value of ``False`` drops the switch entirely; ``True`` or
        the empty string emits a bare flag; any other value is appended as
        the flag's argument.
        """
        ecm = ['ecm']
        options = []
        for (x, v) in kwds.items():
            if (v is False):
                continue
            options.append('-{0}'.format(x))
            if ((v is not True) and (v != '')):
                options.append(str(v))
        if (B2 is None):
            args = [str(B1)]
        else:
            args = [str(B1), str(B2)]
        return ((ecm + options) + args)

    def _run_ecm(self, cmd, n):
        """Run ecm on ``n`` and return its stdout.

        Raises:
            ValueError: if ecm wrote anything to stderr.
        """
        p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE, encoding='latin-1')
        (out, err) = p.communicate(input=str(n))
        if (err != ''):
            raise ValueError(err)
        return out

    def __call__(self, n):
        """Run ecm with the default command line on ``n``; return raw output."""
        n = self._validate(n)
        return self._run_ecm(self._cmd, n)

    def interact(self):
        """Run ecm interactively, reading numbers from the terminal."""
        print('Enter numbers to run ECM on them.')
        print('Press control-D to exit.')
        call(self._cmd)

    # Recommended stage-1 bound B1 indexed by the expected factor size in
    # decimal digits, following GMP-ECM's optimal-B1 table.
    # FIX: the entries for 45-70 digits had their values stripped (leaving a
    # syntax error); restored from the GMP-ECM documentation.
    _recommended_B1_list = {15: 2000,
                            20: 11000,
                            25: 50000,
                            30: 250000,
                            35: 1000000,
                            40: 3000000,
                            45: 11000000,
                            50: 43000000,
                            55: 110000000,
                            60: 260000000,
                            65: 850000000,
                            70: 2900000000}

    def _B1_table_value(self, factor_digits, min=15, max=70):
        """Round ``factor_digits`` up to the next multiple-of-5 table key.

        Values below ``min`` are clamped up; values above ``max`` raise.
        """
        if (factor_digits < min):
            factor_digits = min
        if (factor_digits > max):
            raise ValueError('too many digits')
        step = 5
        return ((((factor_digits + step) - 1) // step) * step)

    def recommended_B1(self, factor_digits):
        """Return the recommended B1 bound for a factor of the given size."""
        return self._recommended_B1_list[self._B1_table_value(factor_digits)]

    # Regexes for the lines of interest in GMP-ECM's output.
    _parse_status_re = re.compile('Using B1=(\\d+), B2=(\\d+), polynomial ([^,]+), sigma=(\\d+)')
    _found_input_re = re.compile('Found input number N')
    _found_factor_re = re.compile('Found (?P<primality>.*) factor of [\\s]*(?P<digits>\\d+) digits: (?P<factor>\\d+)')
    _found_cofactor_re = re.compile('(?P<primality>.*) cofactor (?P<cofactor>\\d+) has [\\s]*(?P<digits>\\d+) digits')

    def _parse_output(self, n, out):
        """Parse ecm output into a list of (factor, probably_prime) pairs.

        Also records the last-used parameters in ``self._last_params``.

        Raises:
            ValueError: if the output is not recognizable GMP-ECM output or
                no cofactor line was found.
        """
        out_lines = out.lstrip().splitlines()
        if (not out_lines[0].startswith('GMP-ECM')):
            raise ValueError('invalid output')
        result = []
        for line in out_lines:
            m = self._parse_status_re.match(line)
            if (m is not None):
                group = m.groups()
                self._last_params = {'B1': group[0], 'B2': group[1],
                                     'poly': group[2], 'sigma': group[3]}
                continue
            m = self._found_input_re.match(line)
            if (m is not None):
                # ecm "found" n itself: report it as probably prime.
                return [(n, True)]
            m = self._found_factor_re.match(line)
            if (m is not None):
                factor = m.group('factor')
                primality = m.group('primality')
                assert (primality in ['prime', 'composite', 'probable prime'])
                result += [(ZZ(factor), (primality != 'composite'))]
                continue
            m = self._found_cofactor_re.match(line)
            if (m is not None):
                cofactor = m.group('cofactor')
                primality = m.group('primality')
                assert (primality in ['Prime', 'Composite', 'Probable prime'])
                result += [(ZZ(cofactor), (primality != 'Composite'))]
                # The cofactor line is the last one we need.
                return result
        raise ValueError('failed to parse ECM output')

    def one_curve(self, n, factor_digits=None, B1=2000, algorithm='ECM', **kwds):
        """Try a single curve (or P-1 / P+1 run); return [factor, cofactor].

        Returns [1, n] if no factor was found.
        """
        n = self._validate(n)
        if (factor_digits is not None):
            B1 = self.recommended_B1(factor_digits)
        if (algorithm == 'P-1'):
            kwds['pm1'] = ''
        elif (algorithm == 'P+1'):
            kwds['pp1'] = ''
        elif (algorithm == 'ECM'):
            pass
        else:
            raise ValueError('unknown algorithm')
        cmd = self._make_cmd(B1, None, kwds)
        out = self._run_ecm(cmd, n)
        try:
            factors = self._parse_output(n, out)
            return [factors[0][0], factors[1][0]]
        except (ValueError, IndexError):
            # No factor found on this curve.
            return [ZZ(1), n]

    def _find_factor(self, n, factor_digits, B1, **kwds):
        """Run many curves until one factor is found; return parsed pairs."""
        n = self._validate(n)
        # FIX: the curve-count default had its value stripped; GMP-ECM's
        # "run until done" convention is a very large -c count.
        kwds.setdefault('c', 1000000000)
        kwds.setdefault('I', 1)
        if (factor_digits is not None):
            B1 = self.recommended_B1(factor_digits)
        # Stop as soon as one factor is found.
        kwds['one'] = True
        cmd = self._make_cmd(B1, None, kwds)
        out = self._run_ecm(cmd, n)
        return self._parse_output(n, out)

    def find_factor(self, n, factor_digits=None, B1=2000, **kwds):
        """Return the factors (without primality flags) found by one split."""
        factors = self._find_factor(n, factor_digits, B1, **kwds)
        return [factor[0] for factor in factors]

    def factor(self, n, factor_digits=None, B1=2000, proof=False, **kwds):
        """Fully factor ``n``; return the sorted list of probable primes."""
        n = self._validate(n)
        factors = [n]                  # pieces still to be factored
        probable_prime_factors = []    # finished (probable) prime factors
        while factors:
            n = factors.pop()
            if n.is_prime(proof=proof):
                probable_prime_factors.append(n)
                continue
            if (n.ndigits() < 15):
                # Small inputs factor faster with PARI's general machinery.
                for (p, e) in n.factor(algorithm='pari'):
                    probable_prime_factors.extend(([p] * e))
                continue
            if n.is_perfect_power():
                (base, exp) = n.perfect_power()
                factors.extend(([base] * exp))
                continue
            # Repeat ECM until it actually splits n into >= 2 pieces.
            n_factorization = [n]
            while (len(n_factorization) == 1):
                n_factorization = self.find_factor(n, B1=B1)
            factors.extend(n_factorization)
        return sorted(probable_prime_factors)

    def get_last_params(self):
        """Return the B1/B2/poly/sigma of the most recent successful run."""
        return self._last_params

    def time(self, n, factor_digits, verbose=False):
        """Print ecm's own estimate of curves/time to find a factor."""
        title_curves = 'Expected number of curves to find a factor of n digits'
        title_time = 'Expected time to find a factor of n digits:'
        n = self._validate(n)
        B1 = self.recommended_B1(factor_digits)
        cmd = self._make_cmd(B1, None, {'v': True})
        out = self._run_ecm(cmd, n)
        if verbose:
            print(out)
        if (title_time not in out):
            print('Unable to compute timing, factorized immediately')
            return
        # Scan forward to the curve-count table, then to the time table.
        out_lines = iter(out.splitlines())
        while (next(out_lines)[:len(title_curves)] != title_curves):
            pass
        header_curves = next(out_lines)
        curve_count_table = next(out_lines)
        while (next(out_lines) != title_time):
            pass
        header_time = next(out_lines)
        time_table = next(out_lines)
        assert (header_curves == header_time)
        assert (header_curves.split() == ['35', '40', '45', '50', '55', '60', '65', '70', '75', '80'])
        h_min = 35
        h_max = 80
        offset = ((self._B1_table_value(factor_digits, h_min, h_max) - h_min) // 5)
        print('offset', offset)
        curve_count = curve_count_table.split()[offset]
        time = time_table.split()[offset]
        print('Expected curves: {0}, Expected time: {1}'.format(curve_count, time))

    def _validate(self, n):
        """Coerce ``n`` to ZZ and check it is positive with <= 4095 digits."""
        n = ZZ(n)
        if (n <= 0):
            raise ValueError('n must be positive')
        if (n.ndigits() > 4095):
            raise ValueError('n must have at most 4095 digits')
        return n
def test_RFE_fit_score_params():
    """RFE must forward extra fit/score parameters to the wrapped estimator."""

    class TestEstimator(BaseEstimator, ClassifierMixin):
        # Refuses to fit or score unless `prop` is explicitly forwarded.
        def fit(self, X, y, prop=None):
            if prop is None:
                raise ValueError('fit: prop cannot be None')
            self.svc_ = SVC(kernel='linear').fit(X, y)
            self.coef_ = self.svc_.coef_
            return self

        def score(self, X, y, prop=None):
            if prop is None:
                raise ValueError('score: prop cannot be None')
            return self.svc_.score(X, y)

    X, y = load_iris(return_X_y=True)

    # Without forwarding, the inner estimator must reject fit...
    with pytest.raises(ValueError, match='fit: prop cannot be None'):
        RFE(estimator=TestEstimator()).fit(X, y)
    # ...and score.
    with pytest.raises(ValueError, match='score: prop cannot be None'):
        RFE(estimator=TestEstimator()).fit(X, y, prop='foo').score(X, y)
    # With prop forwarded to both calls, everything succeeds.
    RFE(estimator=TestEstimator()).fit(X, y, prop='foo').score(X, y, prop='foo')
def frontalcortex_dropseq(save_path: str='data/') -> anndata.AnnData:
    """Load the frontal-cortex Drop-seq dataset as an AnnData object.

    Thin public wrapper around the private loader; ``save_path`` is the
    directory the data is downloaded to / read from.
    """
    return _load_frontalcortex_dropseq(save_path=save_path)
class NCISPrecision(NCISMetric):
    """Weighted (NCIS) precision@k metric."""

    # Name of the Scala UDF used when the metric is computed on the JVM side.
    _scala_udf_name = 'getNCISPrecisionMetricValue'

    def _get_metric_value_by_user(k, *args):
        """Weighted precision at cutoff ``k`` for one user.

        args unpacks to (pred, ground_truth, pred_weights); the result is the
        weight mass of the top-k predictions that hit the ground truth,
        divided by the total weight mass of the top-k predictions.
        Returns 0 when either predictions or ground truth are empty.

        NOTE(review): defined without ``self`` and without a @staticmethod
        decorator — presumably a @staticmethod was lost upstream; confirm
        against how the base class invokes this hook.
        """
        (pred, ground_truth, pred_weights) = args
        if ((len(pred) == 0) or (len(ground_truth) == 0)):
            return 0
        # Boolean mask over the top-k predictions marking ground-truth hits.
        mask = np.isin(pred[:k], ground_truth)
        return (sum(np.array(pred_weights)[mask]) / sum(pred_weights[:k]))
class CustomEntityParserUsage(Enum):
    """How the custom entity parser should treat stemming."""

    WITH_STEMS = 0
    WITHOUT_STEMS = 1
    WITH_AND_WITHOUT_STEMS = 2

    @classmethod
    # FIX: the method takes `cls` but the @classmethod decorator was missing,
    # so the conventional two-argument call raised TypeError; restored.
    def merge_usages(cls, lhs_usage, rhs_usage):
        """Merge two usages into the least restrictive one covering both.

        None acts as the identity; differing non-None usages merge to
        WITH_AND_WITHOUT_STEMS.
        """
        if (lhs_usage is None):
            return rhs_usage
        if (rhs_usage is None):
            return lhs_usage
        if (lhs_usage == rhs_usage):
            return lhs_usage
        return cls.WITH_AND_WITHOUT_STEMS
def cut_J_based_on_mean_func(J, e_mean):
    """Split the interval J at e_mean.

    Returns (J_before, J_after): the sub-interval up to e_mean and the one
    from e_mean on.  A side is None when e_mean lies at or beyond that end,
    and both sides are None when J is None.
    """
    if J is None:
        return (None, None)
    lo = min(J)
    hi = max(J)
    if e_mean >= hi:
        # Mean at or above the interval: everything is "before".
        return (J, None)
    if e_mean <= lo:
        # Mean at or below the interval: everything is "after".
        return (None, J)
    # Mean strictly inside: cut into two touching sub-intervals.
    return ((lo, e_mean), (e_mean, hi))
class HalfCheetahVelEnv(HalfCheetahEnvMetaBase):
    """Half-cheetah meta-RL task: track a per-task target forward velocity."""

    def __init__(self, task=None):
        # An empty/None task falls back to a zero target velocity.
        if not task:
            task = {'velocity': 0.0}
        self._task = task
        self._goal_vel = task['velocity']
        super().__init__()

    def step(self, action):
        """Advance the simulation one step; reward tracks the goal velocity."""
        pos_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        pos_after = self.sim.data.qpos[0]
        forward_vel = (pos_after - pos_before) / self.dt
        # Negative absolute deviation from the target velocity.
        forward_reward = (- 1.0) * abs(forward_vel - self._goal_vel)
        # Quadratic control penalty.
        ctrl_cost = 0.5 * 0.1 * np.sum(np.square(action))
        observation = self._get_obs()
        reward = forward_reward - ctrl_cost
        infos = dict(reward_forward=forward_reward,
                     reward_ctrl=(- ctrl_cost),
                     task=self._task)
        # This env never terminates on its own.
        return (observation, reward, False, infos)

    def sample_tasks(self, num_tasks):
        """Draw num_tasks target velocities uniformly from [0, 2)."""
        drawn = self.np_random.uniform(0.0, 2.0, size=(num_tasks,))
        return [{'velocity': v} for v in drawn]

    def set_task(self, task):
        """Install a new task dict and cache its target velocity."""
        self._task = task
        self._goal_vel = task['velocity']
class MultiPassModelTests(tf.test.TestCase):
    """MultiPassModel must reproduce the multi-GPU reference model's weights.

    Both tests train a 2-replica reference model and a 2-pass MultiPassModel
    from the same checkpoint, then compare all trainable variables.
    """

    def _test_single_pass(self, method):
        """One training step with aggregate_method=method must match."""
        config = get_config('resnet-test')
        config.momentum = 0.0
        config.base_learn_rate = 0.1
        np.random.seed(0)
        BSIZE = config.batch_size
        xval = np.random.uniform((- 1.0), 1.0, [BSIZE, config.height, config.width, config.num_channel]).astype(np.float32)
        yval = np.floor(np.random.uniform(0, 9.9, [BSIZE])).astype(np.int32)
        with tf.Graph().as_default(), self.test_session() as sess, log.verbose_level(2):
            x = tf.constant(xval)
            y = tf.constant(yval)
            with tf.variable_scope('Model', reuse=None):
                m1 = get_multi_gpu_model('resnet', config, num_replica=2, inp=x, label=y)
            sess.run(tf.global_variables_initializer())
            m1.assign_lr(sess, config.base_learn_rate)
            tvars = tf.trainable_variables()
            # BUG FIX: `map(...)` returns a one-shot iterator in Python 3; it
            # was exhausted by the first dict(zip(...)) below, leaving
            # tvars_d2 empty and making the comparison loop vacuous.
            # Materialize the variable names once.
            tvars_str = list(map((lambda v: v.name), tvars))
            saver = tf.train.Saver(tvars)
            if (not os.path.exists(FOLDER)):
                os.makedirs(FOLDER)
            # Snapshot the initial weights so both models start identically.
            saver.save(sess, CKPT_FNAME)
            m1.train_step(sess)
            tvars_v1 = sess.run(tvars)
            tvars_d1 = dict(zip(tvars_str, tvars_v1))
        with tf.Graph().as_default(), self.test_session() as sess, log.verbose_level(2):
            # NOTE(review): x and y are tensors from the previous graph —
            # presumably MultiPassModel only uses their shapes/specs here;
            # confirm against its constructor.
            with tf.variable_scope('Model', reuse=True):
                m2 = MultiPassModel(config, ResNetModel, num_passes=2, debug=True, inp=x, label=y, aggregate_method=method)
            tvars = tf.trainable_variables()
            saver = tf.train.Saver(tvars)
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, CKPT_FNAME)
            m2.assign_lr(sess, config.base_learn_rate)
            m2.train_step(sess)
            tvars_v2 = sess.run(tvars)
            tvars_d2 = dict(zip(tvars_str, tvars_v2))
            for vv in tvars_str:
                log.info(vv, verbose=2)
                np.testing.assert_allclose(tvars_d1[vv], tvars_d2[vv], rtol=0.0001, atol=1e-06)
                log.info('...ok', verbose=2)

    def test_single_pass_cumsum(self):
        self._test_single_pass('cumsum')

    def test_single_pass_storage(self):
        self._test_single_pass('storage')

    def _test_multi_pass(self, method):
        """Three training steps must also stay in lockstep."""
        config = get_config('resnet-test')
        config.momentum = 0.0
        config.base_learn_rate = 0.1
        np.random.seed(0)
        BSIZE = config.batch_size
        xval = np.random.uniform((- 1.0), 1.0, [BSIZE, config.height, config.width, config.num_channel]).astype(np.float32)
        yval = np.floor(np.random.uniform(0, 9.9, [BSIZE])).astype(np.int32)
        with tf.Graph().as_default(), self.test_session() as sess, log.verbose_level(2):
            x = tf.constant(xval)
            y = tf.constant(yval)
            with tf.variable_scope('Model', reuse=None):
                m1 = get_multi_gpu_model('resnet', config, num_replica=2, inp=x, label=y)
            sess.run(tf.global_variables_initializer())
            m1.assign_lr(sess, config.base_learn_rate)
            tvars = tf.trainable_variables()
            # Same Python 3 fix as in _test_single_pass (see above).
            tvars_str = list(map((lambda v: v.name), tvars))
            saver = tf.train.Saver(tvars)
            if (not os.path.exists(FOLDER)):
                os.makedirs(FOLDER)
            saver.save(sess, CKPT_FNAME)
            for ii in range(3):
                m1.train_step(sess)
            tvars_v1 = sess.run(tvars)
            tvars_d1 = dict(zip(tvars_str, tvars_v1))
        with tf.Graph().as_default(), self.test_session() as sess, log.verbose_level(2):
            with tf.variable_scope('Model', reuse=True):
                m2 = MultiPassModel(config, ResNetModel, num_passes=2, inp=x, label=y, aggregate_method=method)
            tvars = tf.trainable_variables()
            saver = tf.train.Saver(tvars)
            sess.run(tf.global_variables_initializer())
            m2.assign_lr(sess, config.base_learn_rate)
            saver.restore(sess, CKPT_FNAME)
            for ii in range(3):
                m2.train_step(sess)
            tvars_v2 = sess.run(tvars)
            tvars_d2 = dict(zip(tvars_str, tvars_v2))
            for vv in tvars_str:
                np.testing.assert_allclose(tvars_d1[vv], tvars_d2[vv], rtol=0.0001, atol=1e-06)

    def test_multi_pass_cumsum(self):
        self._test_multi_pass('cumsum')

    def test_multi_pass_storage(self):
        self._test_multi_pass('storage')
def parse_args():
    """Parse command-line arguments for the COCO panoptic info generator."""
    parser = argparse.ArgumentParser(
        description='Generate COCO test image information for COCO panoptic segmentation.')
    # Positional: directory containing the COCO annotation files.
    parser.add_argument('data_root', help='Path to COCO annotation directory.')
    return parser.parse_args()
def main():
    """Pre-train biomed RoBERTa with masked-LM and save the final model.

    FAST_DEV_RUN selects a one-step smoke-test configuration; otherwise a
    full 5000-step run is configured.  Loads the base model/tokenizer from
    the HuggingFace hub, pre-trains, and saves under MODEL_OUT_DIR.
    """
    # FIX: the original used `if FAST_DEV_RUN == True: ... elif FAST_DEV_RUN
    # == False: ...`, which left `training_args` unbound (NameError) for any
    # other value of FAST_DEV_RUN (e.g. None).  A plain if/else covers all
    # values while preserving behavior for True/False.
    if FAST_DEV_RUN:
        # Minimal single-step configuration for smoke-testing the pipeline.
        training_args = TrainingArguments(output_dir='./roberta_gen/checkpoints', overwrite_output_dir=True, max_steps=1, warmup_steps=0, logging_steps=1, save_steps=1, max_grad_norm=5.0, per_device_eval_batch_size=8, per_device_train_batch_size=8, gradient_accumulation_steps=32, learning_rate=3e-05, adam_epsilon=1e-06, weight_decay=0.01, do_eval=True, do_train=True, fp16=True)
    else:
        # Full pre-training configuration.
        training_args = TrainingArguments(output_dir='./roberta_gen/checkpoints', overwrite_output_dir=True, warmup_steps=500, logging_steps=500, max_steps=5000, save_steps=500, max_grad_norm=5.0, per_device_eval_batch_size=8, per_device_train_batch_size=8, gradient_accumulation_steps=32, learning_rate=3e-05, adam_epsilon=1e-06, weight_decay=0.01, do_eval=True, do_train=True, fp16=True)
    base_model_name_HF = 'allenai/biomed_roberta_base'
    model_path = f'{MODEL_OUT_DIR}/bioclinical-roberta'
    unpretrained_model_path = base_model_name_HF
    logger.info(f'Loading the model from {unpretrained_model_path}')
    tokenizer = RobertaTokenizer.from_pretrained(unpretrained_model_path)
    model = RobertaForMaskedLM.from_pretrained(unpretrained_model_path, gradient_checkpointing=True)
    logger.warning(f'Tokenizer {tokenizer} parameterized with model_max_len as {tokenizer.model_max_length}')
    model.config.gradient_checkpointing = True
    logger.critical(f'Pre-Training {model.num_parameters()}-parameter model. This could take days!!!!')
    pretrain_and_evaluate(training_args, model, tokenizer, eval_only=False, model_path_out=training_args.output_dir)
    logger.warning(f'Saving model to {model_path}/final')
    model.save_pretrained(f'{model_path}/final')
    logger.critical('Final pre-trained model, tokenizer,and config saved!')
def test_alpha_exponent_insertion_none():
    """If the element factory yields None, nothing gets inserted."""
    target = []
    with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
        # Keep the randomness below the insertion threshold so the factory
        # is actually consulted.
        float_mock.return_value = 0.2
        factory = MagicMock()
        factory.return_value = None
        alpha_exponent_insertion(target, factory)
    assert target == []
class OutliersFilter():
    """Finds DataFrame rows whose numeric values fall outside allowed bounds.

    Bounds per column come from one of three strategies: explicit
    percentiles, an interquartile-range rule, or a 3-sigma rule.
    """

    def __init__(self, interquartile_coeff, mode_percentile, min_percentile, max_percentile):
        self.interquartile_coeff = interquartile_coeff
        self.mode_percentile = mode_percentile
        self.min_percentile = min_percentile
        self.max_percentile = max_percentile

    def _column_bounds(self, series, interquartile):
        # Compute the (min_value, max_value) pair for one numeric column.
        if self.mode_percentile:
            return (series.quantile(self.min_percentile),
                    series.quantile(self.max_percentile))
        if interquartile:
            upper_q = series.quantile(0.8)
            lower_q = series.quantile(0.2)
            spread = upper_q - lower_q
            return (lower_q - (self.interquartile_coeff * spread),
                    upper_q + (self.interquartile_coeff * spread))
        # Fallback: symmetric 3-standard-deviation band around the mean.
        center = series.mean()
        deviation = series.std()
        return (center - (3 * deviation), center + (3 * deviation))

    def perform_filter(self, df: pd.DataFrame, interquartile: bool=True) -> set:
        """Return the set of row indices that are outliers in any numeric column."""
        outlier_rows = []
        for column in df.select_dtypes(include='number').columns:
            (min_value, max_value) = self._column_bounds(df[column], interquartile)
            out_of_range = ((df[column] < min_value) | (df[column] > max_value))
            outlier_rows.extend(df.index[out_of_range].tolist())
        outlier_rows = set(outlier_rows)
        logger.info(f'Drop {len(outlier_rows)} rows')
        return outlier_rows
def dump_torchscript_IR(model, dir):
    """Dump a TorchScript model's code, IR graphs and repr into `dir`.

    Writes model_ts_code.txt (per-submodule code), model_ts_IR.txt,
    model_ts_IR_inlined.txt and, for modules, model.txt.
    """
    PathManager.mkdirs(dir)

    def _get_script_mod(mod):
        # Traced modules wrap the real ScriptModule; unwrap it if present.
        if isinstance(mod, torch.jit.TracedModule):
            return mod._actual_script_module
        return mod

    with PathManager.open(os.path.join(dir, 'model_ts_code.txt'), 'w') as f:

        def get_code(mod):
            # Prefer the internal C++ module's code; fall back to .code,
            # and to None when neither is available.
            try:
                return _get_script_mod(mod)._c.code
            except AttributeError:
                pass
            try:
                return mod.code
            except AttributeError:
                return None

        def dump_code(prefix, mod):
            # Recursively write the code of mod and all named children.
            code = get_code(mod)
            name = (prefix or 'root model')
            if (code is None):
                f.write(f'''Could not found code for {name} (type={mod.original_name})
''')
                f.write('\n')
            else:
                f.write(f'''
Code for {name}, type={mod.original_name}:
''')
                f.write(code)
                f.write('\n')
                f.write(('-' * 80))
            for (name, m) in mod.named_children():
                dump_code(((prefix + '.') + name), m)

        if isinstance(model, torch.jit.ScriptFunction):
            # Script functions have no children; just write their code.
            f.write(get_code(model))
        else:
            dump_code('', model)

    def _get_graph(model):
        # Full dump from the C++ module when available, else the graph repr.
        try:
            return _get_script_mod(model)._c.dump_to_str(True, False, False)
        except AttributeError:
            return model.graph.str()

    with PathManager.open(os.path.join(dir, 'model_ts_IR.txt'), 'w') as f:
        f.write(_get_graph(model))
    with PathManager.open(os.path.join(dir, 'model_ts_IR_inlined.txt'), 'w') as f:
        f.write(str(model.inlined_graph))
    if (not isinstance(model, torch.jit.ScriptFunction)):
        with PathManager.open(os.path.join(dir, 'model.txt'), 'w') as f:
            f.write(str(model))
def evaluate(algo, make_env_test, num_eval_episodes: int=5, seed: int=0):
    """Roll out the greedy policy and return the mean episode return.

    A fresh environment is created per episode with a distinct seed derived
    from `seed` so evaluation episodes are independent.
    """
    episode_returns = []
    for episode in range(num_eval_episodes):
        env = make_env_test((seed * 100) + episode)
        (obs, _) = env.reset()
        total = 0.0
        terminated = truncated = False
        while not (terminated or truncated):
            act = algo.exploit(obs)
            (obs, reward, terminated, truncated, _) = env.step(act)
            total += reward
        episode_returns.append(total)
    return np.mean(episode_returns)
class domain_classifier(nn.Module):
    """Binary domain discriminator: hidden feature -> probability in [0, 1]."""

    def __init__(self, hidden_size, device):
        super(domain_classifier, self).__init__()
        # hidden_size -> 512 -> 1, squashed by a sigmoid.
        self.classify = nn.Sequential(
            nn.Linear(hidden_size, 512),
            nn.LeakyReLU(0.2, True),
            nn.Linear(512, 1),
            nn.Sigmoid(),
        )
        self.to(device)

    def forward(self, x):
        # Flatten the trailing singleton dimension: (N, 1) -> (N,).
        return self.classify(x).view((- 1))
def test_n_features_in_validation():
    """_check_n_features records the feature count on reset and rejects
    feature-less input afterwards."""
    est = MyEstimator()
    X_train = [[1, 2, 3], [4, 5, 6]]
    # reset=True stores n_features_in_ from the training data.
    est._check_n_features(X_train, reset=True)
    assert (est.n_features_in_ == 3)
    msg = 'X does not contain any features, but MyEstimator is expecting 3 features'
    # reset=False validates against the stored count; a plain string has no
    # feature axis and must be rejected.
    with pytest.raises(ValueError, match=msg):
        est._check_n_features('invalid X', reset=False)
def test_broadcast_float_int_2d_regular():
    """Broadcasting RegularArrays must preserve outer and inner parameters."""
    # 2x2 float array carrying a 'name' parameter on the outer RegularArray.
    this = ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1.0, 2.0, 3.0, 4.0], dtype='float64')), size=2, parameters={'name': 'this'})
    # 2x1 int array; size=1 broadcasts against `this` along the inner axis.
    that = ak.contents.RegularArray(ak.contents.NumpyArray(np.array([1, 9], dtype='int64')), size=1, parameters={'name': 'that'})
    (this_next, that_next) = ak.operations.ak_broadcast_arrays.broadcast_arrays(this, that, highlevel=False)
    # Outer (RegularArray) parameters survive broadcasting...
    assert (this.parameters == this_next.parameters)
    assert (that.parameters == that_next.parameters)
    # ...and so do the inner (NumpyArray) content parameters.
    assert (this.content.parameters == this_next.content.parameters)
    assert (that.content.parameters == that_next.content.parameters)
def get_tf_metrics(m):
    """Resolve a metric spec into a TF-compatible metric callable.

    Accepts either a callable (returned unchanged) or one of the
    case-insensitive names 'mae', 'mse', 'acc', 'auc'.

    Raises:
        Exception: if the string is not a recognized metric name.
    """
    if callable(m):
        return m
    elif (m.lower() == 'mae'):
        return tf.keras.metrics.MAE
    elif (m.lower() == 'mse'):
        return tf.keras.metrics.MSE
    elif (m.lower() == 'acc'):
        def acc(y_true, y_pred):
            # NOTE(review): this reduces y_true only and ignores y_pred, so
            # it is not a real accuracy metric — presumably a placeholder;
            # confirm the intended definition.
            return tf.reduce_mean(y_true)
        return acc
    elif (m.lower() == 'auc'):
        return tf.keras.metrics.AUC
    else:
        raise Exception(('cannot understand metric type: %s' % m))
def test_MLR():
    """Smoke-test: build an MLR model on synthetic data, compile and check it."""
    model_name = 'MLR'
    # Three synthetic feature sets are generated, but only the region
    # features are actually fed to MLR in this variant; base/bias data are
    # unused here (presumably kept for other MLR configurations — confirm).
    (region_x, y, region_feature_columns) = get_test_data(SAMPLE_SIZE, sparse_feature_num=3, dense_feature_num=3, prefix='region')
    (base_x, y, base_feature_columns) = get_test_data(SAMPLE_SIZE, sparse_feature_num=3, dense_feature_num=3, prefix='base')
    (bias_x, y, bias_feature_columns) = get_test_data(SAMPLE_SIZE, sparse_feature_num=3, dense_feature_num=3, prefix='bias')
    model = MLR(region_feature_columns)
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    check_model(model, model_name, region_x, y)
    print((model_name + ' test pass!'))
def register_functions_ns3_Config(module, root_module):
    """Register the free functions of the ns3::Config namespace with PyBindGen.

    Auto-generated binding glue: each add_function call mirrors one C++
    function signature from ns3's Config API.  Do not edit by hand.
    """
    module.add_function('Connect', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('ConnectWithoutContext', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('Disconnect', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('DisconnectWithoutContext', 'void', [param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
    module.add_function('GetRootNamespaceObject', 'ns3::Ptr< ns3::Object >', [param('uint32_t', 'i')])
    module.add_function('GetRootNamespaceObjectN', 'uint32_t', [])
    module.add_function('LookupMatches', 'ns3::Config::MatchContainer', [param('std::string', 'path')])
    module.add_function('RegisterRootNamespaceObject', 'void', [param('ns3::Ptr< ns3::Object >', 'obj')])
    module.add_function('Reset', 'void', [])
    module.add_function('Set', 'void', [param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetDefault', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetDefaultFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetGlobal', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('SetGlobalFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    module.add_function('UnregisterRootNamespaceObject', 'void', [param('ns3::Ptr< ns3::Object >', 'obj')])
    return
def torch_persistent_save(obj, filename, async_write: bool = False):
    """Serialize *obj* to *filename* via ``_torch_persistent_save``.

    When *async_write* is True the file is opened with ``PathManager.opena``
    (asynchronous write path); otherwise a regular blocking open is used.
    """
    opener = PathManager.opena if async_write else PathManager.open
    with opener(filename, 'wb') as fh:
        _torch_persistent_save(obj, fh)
def decoder_with_encoder_attention_backward(x, encoder_out, dy, sattn_concat, sattn_proj_q, sattn_proj_k, sattn_proj_v, sattn_scaled_scores, sattn_dropout_mask, norm1_mean, norm1_std, norm1_normed, edattn_concat, edattn_proj_q, edattn_proj_k, edattn_proj_v, edattn_scaled_scores, edattn_dropout_mask, norm2_mean, norm2_std, norm2_normed, linear1_dropout_mask, ff_dropout_mask, norm3_mean, norm3_std, norm3_normed, ff_resid, ff1, ff1_linear, normed2, edattn_resid, normed1, sattn_resid, sattn_wq, sattn_wk, sattn_wv, sattn_wo, sattn_scale, sattn_mask, edattn_wq, edattn_wk, edattn_wv, edattn_wo, edattn_scale, norm1_scale, norm1_bias, norm2_scale, norm2_bias, norm3_scale, norm3_bias, linear1_w, linear1_b, linear2_w, linear2_b, sattn_dropout_p, edattn_dropout_p, linear1_dropout_p, ff_dropout_p, activation='gelu'):
    """Reverse-mode gradients for one transformer decoder layer.

    The forward order implied by the saved activations is post-norm:
    self-attention(+residual) -> norm1 -> encoder-decoder attention(+residual)
    -> norm2 -> feed-forward(+residual) -> norm3.  This function walks that
    chain backwards from *dy* and returns gradients w.r.t. the layer input
    *x*, the encoder output, and every parameter (attention projections,
    layer-norm scales/biases, and the two feed-forward linear layers).

    All ``*_mean`` / ``*_std`` / ``*_normed`` / ``*_mask`` / ``*_concat`` /
    ``*_proj_*`` / ``*_scaled_scores`` arguments are activations saved by the
    corresponding forward pass; ``*_p`` arguments are dropout probabilities.
    """
    # Derivative of the pointwise activation used in the feed-forward block.
    act_backward_data = _get_activation_backward(activation)
    # --- norm3: gradient into the feed-forward residual sum ---
    dff_resid = layer_norm_backward_data(ff_resid, dy, norm3_mean, norm3_std, norm3_scale, norm3_bias)
    (dnorm3_scale, dnorm3_bias) = layer_norm_backward_weights(dy, norm3_normed, norm3_scale, norm3_bias)
    # The residual sum fans the same gradient into both branches.
    dff_dropout = dff_resid
    dnormed2_resid = dff_resid
    # --- feed-forward block: linear2 <- dropout <- activation <- linear1 ---
    dff = dropout_backward_data(dff_dropout, ff_dropout_p, ff_dropout_mask)
    dff1 = linear_backward_data(ff1, linear2_w, dff)
    (dlinear2_w, dlinear2_b) = linear_backward_weights(ff1, linear2_w, dff, bias=linear2_b)
    dff1_act = dropout_backward_data(dff1, linear1_dropout_p, linear1_dropout_mask)
    dff1_linear = act_backward_data(ff1_linear, dff1_act)
    dnormed2_linear = linear_backward_data(normed2, linear1_w, dff1_linear)
    (dlinear1_w, dlinear1_b) = linear_backward_weights(normed2, linear1_w, dff1_linear, bias=linear1_b)
    dnormed2 = (dnormed2_resid + dnormed2_linear)
    # --- norm2: gradient into the encoder-decoder attention residual sum ---
    dedattn_resid = layer_norm_backward_data(edattn_resid, dnormed2, norm2_mean, norm2_std, norm2_scale, norm2_bias)
    (dnorm2_scale, dnorm2_bias) = layer_norm_backward_weights(dnormed2, norm2_normed, norm2_scale, norm2_bias)
    dedattn_dropout = dedattn_resid
    dnormed1_resid = dedattn_resid
    # --- encoder-decoder attention (Q from decoder, K=V from encoder) ---
    dedattn = dropout_backward_data(dedattn_dropout, edattn_dropout_p, edattn_dropout_mask)
    # NOTE(review): K and V are both encoder_out; only the second output
    # (dencoder_out) is kept and dencoder_out_unused is discarded — confirm
    # attn_backward_numpy already accumulates the K- and V-path gradients.
    (dnormed1_attn, dencoder_out, dencoder_out_unused, dedattn_wq, dedattn_wk, dedattn_wv, dedattn_wo, dedattn_in_b, dedattn_out_b) = attn_backward_numpy(normed1, encoder_out, encoder_out, edattn_wq, edattn_wk, edattn_wv, edattn_wo, edattn_scale, dedattn, edattn_concat, edattn_proj_q, edattn_proj_k, edattn_proj_v, edattn_scaled_scores)
    dnormed1 = (dnormed1_resid + dnormed1_attn)
    # --- norm1: gradient into the self-attention residual sum ---
    dsattn_resid = layer_norm_backward_data(sattn_resid, dnormed1, norm1_mean, norm1_std, norm1_scale, norm1_bias)
    (dnorm1_scale, dnorm1_bias) = layer_norm_backward_weights(dnormed1, norm1_normed, norm1_scale, norm1_bias)
    dsattn_dropout = dsattn_resid
    dx_resid = dsattn_resid
    # --- masked self-attention (Q=K=V=x) ---
    dsattn = dropout_backward_data(dsattn_dropout, sattn_dropout_p, sattn_dropout_mask)
    (dx_attn, dsk_unused, dsv_unused, dsattn_wq, dsattn_wk, dsattn_wv, dsattn_wo, dsattn_in_b, dsattn_out_b) = attn_backward_numpy(x, x, x, sattn_wq, sattn_wk, sattn_wv, sattn_wo, sattn_scale, dsattn, sattn_concat, sattn_proj_q, sattn_proj_k, sattn_proj_v, sattn_scaled_scores, mask=sattn_mask)
    dx = (dx_resid + dx_attn)
    return (dx, dencoder_out, dsattn_wq, dsattn_wk, dsattn_wv, dsattn_wo, dsattn_in_b, dsattn_out_b, dedattn_wq, dedattn_wk, dedattn_wv, dedattn_wo, dedattn_in_b, dedattn_out_b, dnorm1_scale, dnorm1_bias, dnorm2_scale, dnorm2_bias, dnorm3_scale, dnorm3_bias, dlinear1_w, dlinear1_b, dlinear2_w, dlinear2_b)
def construct_beta_hats(Sigma, R, eps_list, max_norm):
    """Differentially-private ridge solutions for a list of epsilons.

    Each epsilon is split evenly between the noisy releases of Sigma and R;
    both releases use sensitivity 2.  A projected exact ridge solve is run on
    each noisy (Sigma, R) pair and the stacked solutions are returned.
    """
    split_eps = [eps / 2.0 for eps in eps_list]
    # Both statistics are released with (assumed) sensitivity 2 — TODO confirm.
    sigma_sensitivity = 2.0
    r_sensitivity = 2.0
    Sigma_hats = noise_reduc.gen_list(Sigma, sigma_sensitivity, split_eps)
    R_hats = noise_reduc.gen_list(R, r_sensitivity, split_eps)
    solutions = [
        ridge.exact_solve_and_project(S_hat, R_hat, max_norm)
        for S_hat, R_hat in zip(Sigma_hats, R_hats)
    ]
    return np.array(solutions)
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False):
    """Decorator that binds ``batch_size=starting_batch_size`` into *function*.

    Usable bare or with arguments.  When *auto_find_batch_size* is set, the
    accelerate backend's decorator is used instead, which halves the batch
    size on CUDA OOM until the call succeeds.
    """
    if function is None:
        # Called as @find_executable_batch_size(...): return the configured decorator.
        return functools.partial(
            find_executable_batch_size,
            starting_batch_size=starting_batch_size,
            auto_find_batch_size=auto_find_batch_size,
        )
    if not auto_find_batch_size:
        # Plain path: just pre-bind the starting batch size.
        return functools.partial(function, batch_size=starting_batch_size)
    requires_backends(find_executable_batch_size, 'accelerate')
    from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size
    return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)
def test_waveform_id_to_network_station_location():
    """NET.STA.LOC.CHA seed ids collapse to NET.STA.LOC; malformed ids pass through."""
    cases = [
        ('NET.STA.LOC.CHA', 'NET.STA.LOC'),
        ('NET.STA..CHA', 'NET.STA.'),
        ('invalid', 'invalid'),
    ]
    for seed_id, expected in cases:
        assert waveform_id_to_network_station_location(seed_id) == expected
class B100(base.SRBase):
    """B100 super-resolution benchmark dataset.

    All behaviour is inherited from :class:`base.SRBase`; this subclass only
    points the loader at the ``benchmark/b100`` directory.  The original
    ``__init__`` merely forwarded to ``super().__init__`` and was therefore
    removed (the inherited constructor is used implicitly).
    """

    def get_path(self) -> str:
        """Return the directory holding the B100 benchmark images."""
        return path.join(self.dpath, 'benchmark', 'b100')
def logical_xor_scalar_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, val):
    """Backward pass of scalar logical_xor.

    Boolean operations carry no gradient, so every slot — one per incoming
    gradient plus one per input — receives ``None``.
    """
    n_slots = len(grad_inputs) + len(inputs)
    return [None for _ in range(n_slots)]
def maybe_decode_diffs(diff_decoders, h_t: _Array, edge_fts: _Array, graph_fts: _Array, decode_diffs: bool) -> Optional[Dict[(str, _Array)]]:
    """Decode node/edge/graph diff predictions, or return None when disabled.

    ``diff_decoders`` is keyed by location (NODE/EDGE/GRAPH); each entry is
    decoded with its matching feature tensors.
    """
    if not decode_diffs:
        return None
    return {
        _Location.NODE: _decode_node_diffs(diff_decoders[_Location.NODE], h_t),
        _Location.EDGE: _decode_edge_diffs(diff_decoders[_Location.EDGE], h_t, edge_fts),
        _Location.GRAPH: _decode_graph_diffs(diff_decoders[_Location.GRAPH], h_t, graph_fts),
    }
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LED tokenizer, derived from the RoBERTa/BART BPE tokenizer.

    BUG FIX: ``mask_token`` lost its ``@property`` decorator and its setter was
    garbled to ``_token.setter``; both decorators are restored here to match
    the upstream transformers implementation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace',
                 bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>',
                 unk_token='<unk>', pad_token='<pad>', mask_token='<mask>',
                 add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
                         bos_token=bos_token, eos_token=eos_token, sep_token=sep_token,
                         cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
                         mask_token=mask_token, add_prefix_space=add_prefix_space,
                         trim_offsets=trim_offsets, **kwargs)
        # Rebuild the backend pre-tokenizer when its stored add_prefix_space
        # flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # Keep the post-processor consistent with add_prefix_space / trim_offsets too.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # `sep`/`cls` serialize as lists, but the constructors expect tuples.
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word: include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence; <s> A </s> </s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """LED does not use token type ids: return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        """Pad as usual, then pad ``global_attention_mask`` with -1 to match."""
        encoded_inputs = super()._pad(encoded_inputs=encoded_inputs, max_length=max_length,
                                      padding_strategy=padding_strategy,
                                      pad_to_multiple_of=pad_to_multiple_of,
                                      return_attention_mask=return_attention_mask)
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and 'global_attention_mask' in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == 'right':
                    # -1 marks "local attention only" for padded positions.
                    encoded_inputs['global_attention_mask'] = encoded_inputs['global_attention_mask'] + [-1] * difference
                elif self.padding_side == 'left':
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs['global_attention_mask']
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
def prepare_data_for_parallel(tokenizer, train_data, test_data, max_length, max_length_per_example, method_type, n_classes, test_inputs, prefixes, idx, prefixes_with_space, bos_token_id, eos_token_id):
    """Build one input tensor per class for in-context (demonstration-based) scoring.

    Demonstrations are tokenized from *train_data* (shuffled in place) and
    combined with class *prefixes* according to *method_type*:
    'channel' prepends the label prefix, 'direct' appends it.  For each class
    a prompt batch is built and run through ``prepro_sentence_pair``.

    NOTE(review): np.random.shuffle mutates the caller's train_data list.
    """
    assert (train_data is not None)
    demonstrations_list = []
    np.random.shuffle(train_data)
    # Tokenize each (sentence, label) demonstration, truncated per example.
    for (sent, label) in train_data:
        tokens = tokenizer(sent)['input_ids'][:max_length_per_example]
        prefix = prefixes[int(label)]
        if (method_type == 'channel'):
            tokens = (prefix + tokens)
        elif (method_type == 'direct'):
            tokens = (tokens + prefix)
        else:
            raise NotImplementedError()
        demonstrations_list.append(tokens)
    # Sanity check: all class prefixes share the first `idx` tokens and
    # differ exactly at position `idx` (so class identity is one token).
    for i in range(n_classes):
        for j in range((i + 1), n_classes):
            assert (prefixes[i][:idx] == prefixes[j][:idx])
            assert (prefixes[i][idx] != prefixes[j][idx])
    input_tensors = []
    for i in range(n_classes):
        if (method_type == 'channel'):
            # Channel: prompt = demonstrations + class prefix, paired with each test input.
            prefix = prefixes_with_space[i].copy()
            prompt = [(demonstrations + prefix) for demonstrations in demonstrations_list]
            tensor = prepro_sentence_pair(prompt, test_inputs, max_length, bos_token_id, eos_token_id, allow_truncation=True)
        elif (method_type == 'direct'):
            # Direct: prompt = demonstrations + test input + shared prefix head;
            # the class-distinguishing tail (prefix[idx:]) is the scored target.
            prefix = prefixes[i].copy()
            prompt = [((demonstrations.copy() + test_input) + prefix[:idx]) for test_input in test_inputs for demonstrations in demonstrations_list]
            tensor = prepro_sentence_pair(prompt, [prefix[idx:]], max_length, bos_token_id, eos_token_id, allow_truncation=True)
        else:
            raise NotImplementedError()
        input_tensors.append(tensor)
    return input_tensors
def hard_osimertinib(mean_cls=GeometricMeanScoringFunction) -> GoalDirectedBenchmark:
    """Osimertinib MPO benchmark.

    Objective: similar to osimertinib on FCFP4 (clipped at 0.8) but not too
    similar on ECFP6, with TPSA pushed above ~100 and logP pushed below ~1,
    aggregated by *mean_cls*.
    """
    osimertinib_smiles = 'COc1cc(N(C)CCN(C)C)c(NC(=O)C=C)cc1Nc2nccc(n2)c3cn(C)c4ccccc34'
    scoring_functions = [
        TanimotoScoringFunction(osimertinib_smiles, fp_type='FCFP4',
                                score_modifier=ClippedScoreModifier(upper_x=0.8)),
        TanimotoScoringFunction(osimertinib_smiles, fp_type='ECFP6',
                                score_modifier=MinGaussianModifier(mu=0.85, sigma=0.1)),
        RdkitScoringFunction(descriptor=tpsa,
                             score_modifier=MaxGaussianModifier(mu=100, sigma=10)),
        RdkitScoringFunction(descriptor=logP,
                             score_modifier=MinGaussianModifier(mu=1, sigma=1)),
    ]
    objective = mean_cls(scoring_functions)
    return GoalDirectedBenchmark(name='Osimertinib MPO',
                                 objective=objective,
                                 contribution_specification=uniform_specification(1, 10, 100))
def _write(out_path, text):
    """Write *text* to *out_path*, skipping the write when content is unchanged.

    Avoids touching the file's mtime (and triggering downstream rebuilds)
    when the generated text is identical to what is already on disk.
    """
    try:
        with open(out_path, 'r') as fh:
            current = fh.read()
    except IOError:
        # Missing/unreadable file: treat as "different" so we write.
        current = None
    if current == text:
        logger.info('Skipped writing {}'.format(out_path))
        return
    with open(out_path, 'w') as fh:
        logger.info('Writing {}'.format(out_path))
        fh.write(text)
def parse_args():
    """Build and parse the person-search demo command line."""
    parser = argparse.ArgumentParser(description='A demo of person search')
    parser.add_argument('--gpu', type=int, default=-1,
                        help='GPU device id to use. Default: -1, means using CPU')
    parser.add_argument('--checkpoint',
                        help='The checkpoint to be used. Default: None')
    parser.add_argument('--threshold', type=float, default=0.75,
                        help='The threshold used to remove those bounding boxes with low scores. Default: 0.75')
    parser.add_argument('--cfg',
                        help='Optional config file. Default: None')
    return parser.parse_args()
def _singular_func(self, singular=singular):
    """Return the Singular-interface representation of ``self``, with caching.

    The parent ring is (re)activated in the Singular session first; the
    cached ``self.__singular`` element is reused only if it is still valid
    and belongs to the same interface instance, otherwise it is rebuilt via
    ``_singular_init_func``.

    NOTE(review): the default ``singular=singular`` captures the module-level
    interface object at definition time (standard default-argument binding) —
    confirm that is the intended session object.
    """
    # Make self's parent ring the active ring in the Singular interpreter.
    self.parent()._singular_(singular).set_ring()
    try:
        # Reuse the cached element when it is still alive in this session
        # and belongs to the same interface instance.
        self.__singular._check_valid()
        if (self.__singular.parent() is singular):
            return self.__singular
    except (AttributeError, ValueError):
        # No cache yet, or the cached element went stale — rebuild below.
        pass
    return _singular_init_func(self, singular)
def get_reward_targets(env: Union[(offline_env.OfflineEnv, gym.wrappers.TimeLimit)], env_name: str, reward_fractions: List[float], targets: str='of expert', average_reward_to_go: bool=True) -> List[float]:
    """Interpolate reward targets between a min and max reference return.

    'of demos' derives the range from the dataset's empirical reward-to-go;
    'of expert' uses the d4rl reference scores (antmaze is already in [0, 1]).
    """
    if targets == 'of demos':
        rtg = dataset.reward_to_go(env.get_dataset(), average=average_reward_to_go)
        reward_min, reward_max = np.min(rtg), np.max(rtg)
    elif targets == 'of expert':
        if 'antmaze' in env_name:
            # Sparse antmaze returns are already normalized to success/failure.
            reward_min, reward_max = 0, 1
        else:
            reward_min = infos.REF_MIN_SCORE[env_name]
            reward_max = infos.REF_MAX_SCORE[env_name]
            # NOTE(review): per-step averaging applied to the reference-score
            # branch only (antmaze excluded) — confirm against the caller.
            if average_reward_to_go:
                reward_min /= env._max_episode_steps
                reward_max /= env._max_episode_steps
    else:
        raise ValueError("targets must be 'of demos' or 'of expert'")
    span = reward_max - reward_min
    return [reward_min + span * frac for frac in reward_fractions]
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, her_config, total_time_steps, validate_every_timesteps, task_name):
    """Train HER+SAC on a CausalWorld task with a goal-intervention curriculum.

    Checkpoints are written to *log_relative_path* every
    ``validate_every_timesteps / num_of_envs`` steps.
    """
    task = generate_task(task_generator_id=task_name,
                         dense_reward_weights=np.array([100000, 0, 0, 0]),
                         fractional_reward_weight=0)
    env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False,
                      seed=seed_num, max_episode_length=maximum_episode_length)
    env = HERGoalEnvWrapper(env)
    # BUG FIX: the `actives` entry was corrupted ("(0, , 1, 0)").  Restored to
    # the CausalWorld (start_episode, end_episode, episode_interval,
    # intervention_timestep) convention with an effectively unbounded end
    # episode, as in the CausalWorld curriculum tutorials — confirm the exact
    # value against the original script.
    env = CurriculumWrapper(env,
                            intervention_actors=[GoalInterventionActorPolicy()],
                            actives=[(0, 1000000000, 1, 0)])
    set_global_seeds(seed_num)
    checkpoint_callback = CheckpointCallback(
        save_freq=int(validate_every_timesteps / num_of_envs),
        save_path=log_relative_path,
        name_prefix='model')
    model = HER(MlpPolicy, env, SAC, verbose=1,
                policy_kwargs=dict(layers=[256, 256, 256]),
                **her_config, seed=seed_num)
    model.learn(total_timesteps=total_time_steps, tb_log_name='her_sac',
                callback=checkpoint_callback)
    return
def cw_trans_reg_format(reg: sTRANS_sBC_reg):
    """Decode a cw/transpose register: the operand is read with its C and W
    axes swapped (n, w, h, c) and the result is written as (n, c, h, w).

    Returns the (results, attributes, operands) triple used by the decoder.
    """
    n, c, h, w = (reg[f'res0_{d}'] for d in 'nchw')
    opd0 = {
        'address': reg.opd0_addr,
        'dtype': DType(reg.res0_prec),
        'shape': (n, w, h, c),
        'layout': Layout.alignEU,
    }
    res0 = {
        'address': reg.res0_addr,
        'dtype': DType(reg.res0_prec),
        'shape': (n, c, h, w),
        'layout': Layout.alignEU,
    }
    # EU task type 0 reads the source operand in T3 layout instead.
    if reg.tsk_eu_typ == 0:
        opd0['layout'] = Layout.T3
    operands = [get_value(**opd0)]
    results = [get_value(**res0)]
    return (results, {}, operands)
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OWL-ViT.

    BUG FIX: ``inputs``, ``outputs``, ``atol_for_validation`` and
    ``default_onnx_opset`` are properties in the OnnxConfig API; their
    ``@property`` decorators were missing and are restored here to match
    the upstream transformers implementation.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named ONNX inputs with their dynamic axes."""
        return OrderedDict([
            ('input_ids', {0: 'batch', 1: 'sequence'}),
            ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ('attention_mask', {0: 'batch', 1: 'sequence'}),
        ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named ONNX outputs with their dynamic axes."""
        return OrderedDict([
            ('logits_per_image', {0: 'batch'}),
            ('logits_per_text', {0: 'batch'}),
            ('text_embeds', {0: 'batch'}),
            ('image_embeds', {0: 'batch'}),
        ])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model."""
        return 0.0001

    def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int = -1, seq_length: int = -1, framework: Optional['TensorType'] = None) -> Mapping[str, Any]:
        """Merge dummy text inputs (tokenizer) and image inputs (feature extractor)."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.feature_extractor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required by the model's operators."""
        return 14
def register_Ns3UanPhyPerUmodem_methods(root_module, cls):
    """Register constructors and methods of ns3::UanPhyPerUmodem on *cls*."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::UanPhyPerUmodem const &', 'arg0')])
    cls.add_constructor([])
    # double CalcPer(Ptr<Packet> pkt, double sinrDb, UanTxMode mode) [virtual]
    calc_per_params = [
        param('ns3::Ptr< ns3::Packet >', 'pkt'),
        param('double', 'sinrDb'),
        param('ns3::UanTxMode', 'mode'),
    ]
    cls.add_method('CalcPer', 'double', calc_per_params, is_virtual=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def readTSVFile(path, verbose=False):
    """Read a TSV file into a list of rows, each row a list of column strings.

    Lines are stripped of surrounding whitespace before splitting on tabs.
    """
    rows = []
    with open(path, 'r') as handle:
        for line in handle:
            rows.append(line.strip().split('\t'))
    if verbose:
        print('[I] file read complete with length', len(rows))
    return rows
def entrygen(execute_path):
    """Generate one console-entry wrapper function per file in *execute_path*.

    Appends generated source snippets to the module-level ``entrygen_functions``
    list and records their names in ``function_names``.  ``.py`` files get a
    ``run_subprocess_py`` wrapper, everything else ``run_subprocess_c``.
    Directories are skipped.

    NOTE(review): the triple-quoted codegen templates below were reconstructed
    from a whitespace-collapsed source — confirm the exact newline placement
    against the original file.
    """
    abspath = os.path.join(os.path.dirname(__file__), execute_path)
    files = os.listdir(abspath)
    # Header comment marking which package subdirectory this batch came from.
    entrygen_functions.append(f'''# f"{{package_path}}/{execute_path}
''')
    entrygen_count = 0
    for file in files:
        file_abspath = os.path.join(os.path.dirname(execute_path), file)
        # Only plain files become entry points.
        if os.path.isdir(file_abspath):
            continue
        file_name = os.path.splitext(file)[0]
        ext_name = os.path.splitext(file)[(- 1)]
        if (ext_name == '.py'):
            # Python tools run through the Python subprocess helper.
            codegen = f'''def {file_name.replace('-', '_')}():
    file_name = f"{{os.getenv('TPUC_ROOT')}}/{os.path.join(execute_path, file)}"
    run_subprocess_py(file_name)
'''
        else:
            # Anything else is treated as a native executable.
            codegen = f'''def {file_name.replace('-', '_')}():
    file_name = f"{{os.getenv('TPUC_ROOT')}}/{os.path.join(execute_path, file)}"
    run_subprocess_c(file_name)
'''
        function_names.append(file_name)
        entrygen_functions.append(codegen)
        entrygen_count += 1
    # Drop the header again when the directory produced no entries.
    if (entrygen_count == 0):
        entrygen_functions.pop()
    else:
        entrygen_functions.append(f'''### total {entrygen_count} entry generated for f"{{package_path}}/{execute_path}
''')
def train_mixup(epoch, args):
    """Train the module-level ``net`` for one epoch with mixup augmentation.

    Relies on module globals: ``net``, ``trainloader``, ``device``,
    ``optimizer``, ``criterion``, ``mixup_data`` and ``Variable``.
    ``args.mixup_alpha`` controls the Beta distribution of the mixing
    coefficient.

    NOTE(review): the summary print was placed after the loop during
    reconstruction of the collapsed source — it may originally have been a
    per-batch progress line; confirm against the original file.
    """
    print(('\nEpoch: %d' % epoch))
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for (batch_idx, (inputs, targets)) in enumerate(trainloader):
        (inputs, targets) = (inputs.to(device), targets.to(device))
        # Mix random sample pairs; lam is the convex mixing coefficient.
        (inputs, targets_a, targets_b, lam) = mixup_data(inputs, targets, args.mixup_alpha, True)
        (inputs, targets_a, targets_b) = map(Variable, (inputs, targets_a, targets_b))
        optimizer.zero_grad()
        outputs = net(inputs)
        # Mixup loss: criterion blends the losses against both target sets by lam.
        loss = criterion(None, outputs, targets_a, targets_b, lam)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # Accuracy is measured against the unmixed targets.
        (_, predicted) = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    print(('Train Loss: %.3f | Acc: %.3f%% (%d/%d)' % ((train_loss / (batch_idx + 1)), ((100.0 * correct) / total), correct, total)))
def test_scalar_output_ptr_access():
    """GPU test: a CPP tasklet fills a GPU-global scalar through a raw pointer,
    which is then copied out through the ``__return`` array.

    BUG FIX: the definition was preceded by a stray ``.gpu`` token — the
    remnant of a stripped decorator, most likely ``@pytest.mark.gpu``.  The
    invalid token is removed here; restore the marker if pytest is imported
    at the top of the file.
    """
    sdfg = dace.SDFG('scalptrtest')
    state = sdfg.add_state()
    # Transient scalar living in GPU global memory, plus the host-visible result.
    sdfg.add_scalar('scal', dace.float64, transient=True, storage=dace.dtypes.StorageType.GPU_Global)
    sdfg.add_array('__return', [1], dace.float64)
    # The tasklet receives `outp` as a device pointer and writes 5.0 into it
    # via cudaMemcpyAsync on the SDFG's first stream.
    tasklet = state.add_tasklet('write', {}, {'outp': dace.pointer(dace.float64)}, '\n double a = 5;\n cudaMemcpyAsync(outp, &a, 1 * sizeof(double), cudaMemcpyHostToDevice,\n __state->gpu_context->streams[0]);\n ', language=dace.dtypes.Language.CPP)
    access_scal = state.add_access('scal')
    write_unsqueezed = state.add_write('__return')
    state.add_edge(tasklet, 'outp', access_scal, None, sdfg.make_array_memlet('scal'))
    state.add_edge(access_scal, None, write_unsqueezed, None, sdfg.make_array_memlet('scal'))
    ret = sdfg()
    assert np.allclose(ret, 5)
def soft_rounding_uniform_quantizer(input_tensor: tf.Tensor, auxvar_tensor: tf.Variable, min_tensor: tf.Tensor, max_tensor: tf.Tensor, num_bits: int) -> tf.Tensor:
    """AdaRound-style uniform quantizer: STE floor plus a learned rounding offset.

    The range is first adjusted so zero is exactly representable; the input is
    floored onto the integer grid (straight-through estimator), the trainable
    *auxvar_tensor* supplies the soft rounding decision, and the result is
    clipped to the representable levels before dequantization.
    """
    min_range, max_range = qutils.fix_range_to_include_zero(min_tensor, max_tensor, num_bits)
    delta = qutils.calculate_delta_uniform(min_range, max_range, num_bits)
    # Floor onto the integer grid with a straight-through gradient.
    grid_idx = qutils.ste_floor((input_tensor - min_range) / delta)
    soft_idx = grid_idx + auxvar_tensor
    max_level = (2 ** num_bits) - 1
    clipped_idx = qutils.ste_clip(soft_idx, min_val=0, max_val=max_level)
    return delta * clipped_idx + min_range
def test_read_tag():
    """read_tag must fail on a truncated stream and on a bad SDE byte count.

    An empty stream raises IOError; a small-data-element tag whose declared
    byte_count (5) exceeds the 4-byte SDE payload raises ValueError.
    """
    str_io = BytesIO()
    r = _make_readerlike(str_io)
    c_reader = m5u.VarReader5(r)
    # Empty stream -> IOError from the raw tag read.
    assert_raises(IOError, c_reader.read_tag)
    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
    tag['byte_count'] = 5
    # BUG FIX: ndarray.tostring() is deprecated (and removed in NumPy 2.0);
    # tobytes() is the byte-identical modern replacement.
    _write_stream(str_io, tag.tobytes())
    assert_raises(ValueError, c_reader.read_tag)
class MetaSimulation(BaseSimulation):
    """Composite simulation delegating to parallel (mapping, simulation) pairs.

    Each child simulation sees the global model through its own mapping; data,
    sensitivities, and JtJ diagonals are concatenated/accumulated across
    children.  BUG FIXES applied to the garbled source: the ``@property`` /
    setter decorators on ``simulations``, ``mappings``, ``model``,
    ``_act_map_names``, ``_model_map`` and ``deleteTheseOnModelUpdate`` were
    restored, and the missing ``@`` matrix-multiplication operators in
    ``Jvec``/``Jtvec``/``getJtJdiag`` were reinstated (matching the upstream
    SimPEG implementation).
    """

    # Subclasses set this True when one simulation is reused for every mapping.
    _repeat_sim = False

    def __init__(self, simulations, mappings):
        warnings.warn('The MetaSimulation class is a work in progress and might change in the future', stacklevel=2)
        self.simulations = simulations
        self.mappings = mappings
        self.model = None
        # Build a survey whose data count is the concatenation of the children's.
        survey = BaseSurvey([])
        vnD = [sim.survey.nD for sim in self.simulations]
        survey._vnD = vnD
        self.survey = survey
        # Offsets used to slice the concatenated data vector per child.
        self._data_offsets = np.cumsum(np.r_[(0, vnD)])

    @property
    def simulations(self):
        """The list of child simulations."""
        return self._simulations

    @simulations.setter
    def simulations(self, value):
        self._simulations = validate_list_of_types('simulations', value, BaseSimulation, ensure_unique=True)

    @property
    def mappings(self):
        """Model-to-child mappings, one per child simulation."""
        return self._mappings

    @mappings.setter
    def mappings(self, value):
        value = validate_list_of_types('mappings', value, IdentityMap)
        if (not self._repeat_sim) and (len(value) != len(self.simulations)):
            raise ValueError('Must provide the same number of mappings and simulations.')
        # All mappings must accept the same global model vector, and each
        # mapping's output must be compatible with its simulation's inputs.
        model_len = value[0].shape[1]
        for i, (mapping, sim) in enumerate(zip(value, self.simulations)):
            if mapping.shape[1] != model_len:
                raise ValueError('All mappings must have the same input length')
            map_out_shape = mapping.shape[0]
            for name in sim._act_map_names:
                sim_mapping = getattr(sim, name)
                sim_in_shape = sim_mapping.shape[1]
                if map_out_shape != '*' and sim_in_shape != '*' and sim_in_shape != map_out_shape:
                    raise ValueError(
                        f'Simulation and mapping at index {i} inconsistent. '
                        f'Simulation mapping shape {sim_in_shape} incompatible with '
                        f'input mapping shape {map_out_shape}.'
                    )
        self._mappings = value

    @property
    def _act_map_names(self):
        # The single active mapping exposed by this composite simulation.
        return ['_model_map']

    @property
    def _model_map(self):
        return self.mappings[0]

    @property
    def model(self):
        """The global model; setting it pushes mapped models to the children."""
        return self._model

    @model.setter
    def model(self, value):
        updated = HasModel.model.fset(self, value)
        if (not self._repeat_sim) and updated:
            for mapping, sim in zip(self.mappings, self.simulations):
                if value is not None:
                    sim.model = mapping * self._model
                else:
                    sim.model = value

    def fields(self, m):
        """Compute and return the list of child field solutions for model *m*."""
        self.model = m
        f = []
        for mapping, sim in zip(self.mappings, self.simulations):
            if self._repeat_sim and (self.model is not None):
                sim.model = mapping * self.model
            f.append(sim.fields(sim.model))
        return f

    def dpred(self, m=None, f=None):
        """Concatenated predicted data across all child simulations."""
        if f is None:
            if m is None:
                m = self.model
            f = self.fields(m)
        d_pred = []
        for mapping, sim, field in zip(self.mappings, self.simulations, f):
            if self._repeat_sim:
                sim.model = mapping * self.model
            d_pred.append(sim.dpred(m=sim.model, f=field))
        return np.concatenate(d_pred)

    def Jvec(self, m, v, f=None):
        """Sensitivity times a vector, concatenated across children."""
        self.model = m
        if f is None:
            f = self.fields(m)
        j_vec = []
        for mapping, sim, field in zip(self.mappings, self.simulations, f):
            if self._repeat_sim:
                sim.model = mapping * self.model
            # Chain rule: project v through the mapping derivative first.
            sim_v = mapping.deriv(self.model) @ v
            j_vec.append(sim.Jvec(sim.model, sim_v, f=field))
        return np.concatenate(j_vec)

    def Jtvec(self, m, v, f=None):
        """Transpose sensitivity times a vector, summed across children."""
        self.model = m
        if f is None:
            f = self.fields(m)
        jt_vec = 0
        for i, (mapping, sim, field) in enumerate(zip(self.mappings, self.simulations, f)):
            if self._repeat_sim:
                sim.model = mapping * self.model
            # Slice this child's portion of the concatenated data vector.
            sim_v = v[self._data_offsets[i]:self._data_offsets[i + 1]]
            jt_vec += mapping.deriv(self.model).T @ sim.Jtvec(sim.model, sim_v, f=field)
        return jt_vec

    def getJtJdiag(self, m, W=None, f=None):
        """(Cached) diagonal of J^T W^T W J, accumulated across children."""
        self.model = m
        if getattr(self, '_jtjdiag', None) is None:
            if W is None:
                W = np.ones(self.survey.nD)
            else:
                try:
                    W = W.diagonal()
                except (AttributeError, TypeError, ValueError):
                    pass
            jtj_diag = 0.0
            if f is None:
                f = self.fields(m)
            for i, (mapping, sim, field) in enumerate(zip(self.mappings, self.simulations, f)):
                if self._repeat_sim:
                    sim.model = mapping * self.model
                sim_w = sp.diags(W[self._data_offsets[i]:self._data_offsets[i + 1]])
                sim_jtj = sp.diags(np.sqrt(sim.getJtJdiag(sim.model, sim_w, f=field)))
                m_deriv = mapping.deriv(self.model)
                jtj_diag += np.asarray((sim_jtj @ m_deriv).power(2).sum(axis=0)).flatten()
            self._jtjdiag = jtj_diag
        return self._jtjdiag

    @property
    def deleteTheseOnModelUpdate(self):
        # Invalidate the cached JtJ diagonal whenever the model changes.
        return super().deleteTheseOnModelUpdate + ['_jtjdiag']
# NOTE(review): the bare string expression below is almost certainly the
# remnant of a stripped replacement-registration decorator (likely
# `@oprepo.replaces('dace.comm.BCGather')`) — confirm against the original
# dace frontend source and restore the decorator.
('dace.comm.BCGather')
def _bcgather(pv: ProgramVisitor, sdfg: SDFG, state: SDFGState, in_buffer: str, out_buffer: str, block_sizes: Union[(str, Sequence[Union[(sp.Expr, Number)]])]):
    """Insert a BlockCyclicGather library node into *state*.

    ``in_buffer`` / ``out_buffer`` may be a plain array name or a
    (name, subset-range) pair; ``block_sizes`` may be a literal sequence
    (materialized into a transient via a tasklet), a (name, range) pair,
    or an array name.
    """
    from dace.libraries.pblas.nodes.pgeadd import BlockCyclicGather
    libnode = BlockCyclicGather('_BCGather_')
    # --- input buffer: unpack optional subset range ---
    inbuf_range = None
    if isinstance(in_buffer, tuple):
        (inbuf_name, inbuf_range) = in_buffer
    else:
        inbuf_name = in_buffer
    in_desc = sdfg.arrays[inbuf_name]
    inbuf_node = state.add_read(inbuf_name)
    # --- block sizes: three accepted forms ---
    bsizes_range = None
    if isinstance(block_sizes, (list, tuple)):
        if isinstance(block_sizes[0], str):
            # (array name, subset range) pair.
            (bsizes_name, bsizes_range) = block_sizes
            bsizes_desc = sdfg.arrays[bsizes_name]
            bsizes_node = state.add_read(bsizes_name)
        else:
            # Literal sizes: materialize them into a transient via a tasklet.
            (bsizes_name, bsizes_desc) = sdfg.add_temp_transient((len(block_sizes),), dtype=dace.int32)
            bsizes_node = state.add_access(bsizes_name)
            bsizes_tasklet = state.add_tasklet('_set_bsizes_', {}, {'__out'}, ';'.join(['__out[{}] = {}'.format(i, sz) for (i, sz) in enumerate(block_sizes)]))
            state.add_edge(bsizes_tasklet, '__out', bsizes_node, None, Memlet.from_array(bsizes_name, bsizes_desc))
    else:
        # Plain array name.
        bsizes_name = block_sizes
        bsizes_desc = sdfg.arrays[bsizes_name]
        bsizes_node = state.add_read(bsizes_name)
    # --- output buffer: mirrors the input handling ---
    outbuf_range = None
    if isinstance(out_buffer, tuple):
        (outbuf_name, outbuf_range) = out_buffer
    else:
        outbuf_name = out_buffer
    out_desc = sdfg.arrays[outbuf_name]
    outbuf_node = state.add_write(outbuf_name)
    # --- build memlets, honoring explicit ranges when given ---
    if inbuf_range:
        inbuf_mem = Memlet.simple(inbuf_name, inbuf_range)
    else:
        inbuf_mem = Memlet.from_array(inbuf_name, in_desc)
    if bsizes_range:
        bsizes_mem = Memlet.simple(bsizes_name, bsizes_range)
    else:
        bsizes_mem = Memlet.from_array(bsizes_name, bsizes_desc)
    if outbuf_range:
        outbuf_mem = Memlet.simple(outbuf_name, outbuf_range)
    else:
        outbuf_mem = Memlet.from_array(outbuf_name, out_desc)
    # --- wire the library node into the state ---
    state.add_edge(inbuf_node, None, libnode, '_inbuffer', inbuf_mem)
    state.add_edge(bsizes_node, None, libnode, '_block_sizes', bsizes_mem)
    state.add_edge(libnode, '_outbuffer', outbuf_node, None, outbuf_mem)
    return None
class TestRuntime(unittest.TestCase):
    """Tests for the rekall Runtime execution strategies (inline/forked/spawned).

    BUG FIX: the helper constructors below take no ``self`` and are always
    invoked as ``TestRuntime.helper(...)``; the missing ``@staticmethod``
    decorators are restored (required for the spawned/forked workers to
    pickle and call them).
    """

    @staticmethod
    def dummy_intervalset():
        """An IntervalSet holding a single dummy interval."""
        return IntervalSet([TestRuntime.dummy_interval()])

    @staticmethod
    def query(vids):
        """Map every video id to a fresh dummy IntervalSet."""
        return IntervalSetMapping({vid: TestRuntime.dummy_intervalset() for vid in vids})

    @staticmethod
    def dummy_interval(payload=None):
        """A [1, 10) interval with an optional payload."""
        return Interval(Bounds3D(1, 10), payload=payload)

    @staticmethod
    def query_that_throws_at_0(vids):
        """Like query(), but raises when video id 0 is present."""
        output = []
        for vid in vids:
            if vid == 0:
                raise RuntimeError()
            output.append(TestRuntime.dummy_interval(vid))
        return IntervalSet(output)

    # --- equality helpers ---

    def assertCollectionEq(self, c1, c2):
        map1 = c1.get_grouped_intervals()
        map2 = c2.get_grouped_intervals()
        self.assertEqual(map1.keys(), map2.keys())
        for key in map1:
            self.assertIntervalSetEq(map1[key], map2[key])

    def assertIntervalsEq(self, intrvl1, intrvl2, payload_cmp=None):
        self.assertEqual(intrvl1['bounds'].data, intrvl2['bounds'].data)
        self.assertTrue(payload_cmp is None or payload_cmp(intrvl1['payload'], intrvl2['payload']))

    def assertIntervalSetEq(self, is1, is2, payload_cmp=None):
        self.assertEqual(is1.size(), is2.size())
        for i, j in zip(is1.get_intervals(), is2.get_intervals()):
            self.assertIntervalsEq(i, j)

    # --- tests ---

    def test_single_process_runtime(self):
        vids = list(range(1000))
        rt = Runtime.inline()
        self.assertCollectionEq(rt.run(TestRuntime.query, vids)[0], TestRuntime.query(vids))

    def test_exception_inline(self):
        vids = list(range(2))
        rt = Runtime.inline()
        (_, vids_with_err) = rt.run(TestRuntime.query_that_throws_at_0, vids, print_error=False)
        self.assertEqual([0], vids_with_err)

    def test_forked_children(self):
        vids = list(range(10))
        rt = Runtime(get_forked_process_pool_factory())
        self.assertCollectionEq(rt.run(TestRuntime.query, vids, chunksize=3)[0], TestRuntime.query(vids))

    def test_forked_children_exception(self):
        vids = list(range(2))
        rt = Runtime(get_forked_process_pool_factory(1))
        (_, vids_with_err) = rt.run(TestRuntime.query_that_throws_at_0, vids, print_error=False)
        self.assertEqual([0], vids_with_err)

    def test_spawned_children(self):
        vids = list(range(10))
        rt = Runtime(get_spawned_process_pool_factory())
        self.assertCollectionEq(rt.run(TestRuntime.query, vids, chunksize=3)[0], TestRuntime.query(vids))

    def test_spawned_children_exception(self):
        vids = list(range(2))
        rt = Runtime(get_spawned_process_pool_factory())
        (_, vids_with_err) = rt.run(TestRuntime.query_that_throws_at_0, vids, print_error=False)
        self.assertEqual([0], vids_with_err)

    def test_returning_intervalset(self):
        # No vid 0 here, so the "throwing" query succeeds everywhere.
        vids = list(range(1, 101))
        rt = Runtime(get_spawned_process_pool_factory())
        (answer, _) = rt.run(TestRuntime.query_that_throws_at_0, vids)
        self.assertIntervalSetEq(answer, TestRuntime.query_that_throws_at_0(vids))

    def test_iterator(self):
        vids = list(range(1000))
        rt = Runtime(get_forked_process_pool_factory(5))
        gen = rt.get_result_iterator(TestRuntime.query, vids, randomize=False)
        for vid, result in zip(vids, gen):
            self.assertCollectionEq(result, TestRuntime.query([vid]))

    def test_inline_iterator(self):
        vids = list(range(1000))
        rt = Runtime.inline()
        gen = rt.get_result_iterator(TestRuntime.query, vids, randomize=True)
        for result in gen:
            self.assertCollectionEq(result, TestRuntime.query(result.keys()))

    def test_iterator_error(self):
        vids = list(range(2))
        rt = Runtime(get_spawned_process_pool_factory())
        gen = rt.get_result_iterator(TestRuntime.query_that_throws_at_0, vids, print_error=False)
        # First result (vid 1) succeeds; exhausting the iterator surfaces vid 0's error.
        result = next(gen)
        self.assertIntervalSetEq(result, TestRuntime.query_that_throws_at_0([1]))
        with self.assertRaises(RekallRuntimeException):
            next(gen)

    def test_all_tasks_fail(self):
        vids = list(range(1))
        rt = Runtime.inline()
        with self.assertRaises(RekallRuntimeException):
            rt.run(TestRuntime.query_that_throws_at_0, vids, print_error=False)
class MultiVAE(RecMixin, BaseRecommenderModel):
    """Variational Autoencoder for Collaborative Filtering (Mult-VAE)
    with KL-annealing, following Liang et al. (2018).
    """

    # BUG FIX: the original contained a bare `_charger` expression before
    # __init__ — a garbled decorator, restored as @init_charger (this
    # framework's standard recommender-constructor decorator).  Confirm the
    # name against the file's imports.
    @init_charger
    def __init__(self, data, config, params, *args, **kwargs):
        self._random = np.random
        self._random_p = random
        self._ratings = self._data.train_dict
        self._sampler = sp.Sampler(self._data.sp_i_train)
        # Fall back to full-batch over users when batch size is unset.
        if self._batch_size < 1:
            self._batch_size = self._num_users
        # (attribute, config key, shortcut, default, cast, transform)
        self._params_list = [
            ('_intermediate_dim', 'intermediate_dim', 'intermediate_dim', 600, int, None),
            ('_latent_dim', 'latent_dim', 'latent_dim', 200, int, None),
            ('_lambda', 'reg_lambda', 'reg_lambda', 0.01, None, None),
            ('_learning_rate', 'lr', 'lr', 0.001, None, None),
            ('_dropout_rate', 'dropout_pkeep', 'dropout_pkeep', 1, None, None),
        ]
        self.autoset_params()
        # Config stores a keep probability; the model expects a drop probability.
        self._dropout_rate = 1.0 - self._dropout_rate
        self._model = VariationalAutoEncoder(self._num_items, self._intermediate_dim,
                                             self._latent_dim, self._learning_rate,
                                             self._dropout_rate, self._lambda)
        # KL annealing schedule: weight grows linearly up to _anneal_cap.
        self._total_anneal_steps = 200000
        self._anneal_cap = 0.2

    def name(self):
        # NOTE(review): in this framework `name` is usually a @property; the
        # decorator may have been stripped — confirm against sibling models.
        return 'MultiVAE' + '_e:' + str(self._epochs) + '_bs:' + str(self._batch_size) + f'_{self.get_params_shortcut()}'

    def train(self):
        """Run the training loop with per-epoch evaluation."""
        if self._restore:
            return self.restore_weights()
        self._update_count = 0
        for it in range(self._epochs):
            loss = 0
            steps = 0
            with tqdm(total=int(self._num_users // self._batch_size), disable=not self._verbose) as t:
                for batch in self._sampler.step(self._num_users, self._batch_size):
                    steps += 1
                    # Linearly anneal the KL weight up to the cap.
                    if self._total_anneal_steps > 0:
                        anneal = min(self._anneal_cap, (1.0 * self._update_count) / self._total_anneal_steps)
                    else:
                        anneal = self._anneal_cap
                    loss += self._model.train_step(batch, anneal).numpy()
                    t.set_postfix({'loss': f'{(loss / steps):.5f}'})
                    t.update()
                    self._update_count += 1
            self.evaluate(it, loss)
class FrozenRequirement(object):
    """A single requirement as rendered by `pip freeze`.

    Holds the distribution name, the requirement spec (or direct-URL
    reference), the editable flag, and any comment lines to emit above it.
    """

    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.canonical_name = canonicalize_name(name)
        self.req = req
        self.editable = editable
        self.comments = comments

    @classmethod
    def from_dist(cls, dist):
        """Build a FrozenRequirement from an installed distribution.

        BUG FIX: this is an alternate constructor taking `cls` first and is
        called as FrozenRequirement.from_dist(dist); the @classmethod
        decorator was missing, which would make that call fail.
        """
        (req, editable, comments) = get_requirement_info(dist)
        if ((req is None) and (not editable)):
            # Prefer a PEP 610 direct-URL reference when one is recorded.
            direct_url = dist_get_direct_url(dist)
            if direct_url:
                req = direct_url_as_pep440_direct_reference(direct_url, dist.project_name)
                comments = []
        if (req is None):
            # Fall back to the name==version requirement.
            req = dist.as_requirement()
        return cls(dist.project_name, req, editable, comments=comments)

    def __str__(self):
        """Render the freeze entry, '-e'-prefixed when editable."""
        req = self.req
        if self.editable:
            req = '-e {}'.format(req)
        return ('\n'.join((list(self.comments) + [str(req)])) + '\n')
def register_Ns3MmWaveSpectrumSignalParametersDlCtrlFrame_methods(root_module, cls):
    """PyBindGen registration for ns3::MmWaveSpectrumSignalParametersDlCtrlFrame:
    default/copy constructors, virtual Copy(), and its public attributes."""
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param('ns3::MmWaveSpectrumSignalParametersDlCtrlFrame const &', 'p')])  # copy constructor
    cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [], is_virtual=True)
    cls.add_instance_attribute('cellId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('ctrlMsgList', 'std::list< ns3::Ptr< ns3::MmWaveControlMessage > >', is_const=False)
    cls.add_instance_attribute('pss', 'bool', is_const=False)
    return
def load_shard(meta_path):
    """Load one shard-metadata JSON file.

    `meta_path` is an (index, path) pair.  Each JSON entry is keyed by the
    stem of its 'filename' field and annotated with the shard's name and
    size.  Returns (index, (metadata_dict, (shard_name, entry_count))).
    """
    index, path = meta_path
    shard_name = Path(path).stem
    with open(path, 'r') as fh:
        entries = json.load(fh)
    shard_size = len(entries)
    metadata = {}
    for entry in entries:
        key = entry['filename'].split('.')[0]
        entry['shard_size'] = shard_size
        entry['shard_name'] = shard_name
        metadata[key] = entry
    return (index, (metadata, (shard_name, shard_size)))
def test_enc_head():
    """EncHead forward: (seg, se_loss) tuple by default, plain seg map when
    use_se_loss=False, and lateral-input variant with two feature levels."""
    # default: returns (segmentation logits, SE-loss class logits)
    inputs = [torch.randn(1, 32, 21, 21)]
    head = EncHead(in_channels=[32], channels=16, num_classes=19, in_index=[(- 1)])
    if torch.cuda.is_available():
        (head, inputs) = to_cuda(head, inputs)
    outputs = head(inputs)
    assert (isinstance(outputs, tuple) and (len(outputs) == 2))
    assert (outputs[0].shape == (1, head.num_classes, 21, 21))
    assert (outputs[1].shape == (1, head.num_classes))
    # without SE loss: a single segmentation output
    inputs = [torch.randn(1, 32, 21, 21)]
    head = EncHead(in_channels=[32], channels=16, use_se_loss=False, num_classes=19, in_index=[(- 1)])
    if torch.cuda.is_available():
        (head, inputs) = to_cuda(head, inputs)
    outputs = head(inputs)
    assert (outputs.shape == (1, head.num_classes, 21, 21))
    # with lateral connections: two input feature maps
    inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
    head = EncHead(in_channels=[16, 32], channels=16, add_lateral=True, num_classes=19, in_index=[(- 2), (- 1)])
    if torch.cuda.is_available():
        (head, inputs) = to_cuda(head, inputs)
    outputs = head(inputs)
    assert (isinstance(outputs, tuple) and (len(outputs) == 2))
    assert (outputs[0].shape == (1, head.num_classes, 21, 21))
    assert (outputs[1].shape == (1, head.num_classes))
    # forward_test drops the SE branch and returns only the seg map
    test_output = head.forward_test(inputs, None, None)
    assert (test_output.shape == (1, head.num_classes, 21, 21))
def main():
    """Run a blocking scheduler that calls ``tick`` every 10 seconds (UTC)
    until interrupted by Ctrl-C or SystemExit."""
    sched = BlockingScheduler(timezone=utc)
    sched.add_job(tick, 'interval', seconds=10)
    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        # graceful shutdown on interrupt
        pass
def collect_core_entities_simple(x):
    """Collect entity ids linked from the first column of a table record.

    Walks the non-zero cells of x['entityCell']; for each cell in column 0
    (provided column 0 is listed in x['entityColumn']) it extracts the first
    surface link's target id from x['tableData'] as a string.
    """
    entities = []
    rows, cols = x['entityCell'].nonzero()
    for row, col in zip(rows, cols):
        if col != 0 or col not in x['entityColumn']:
            continue
        target = x['tableData'][row][col]['surfaceLinks'][0]['target']
        entities.append(str(target['id']))
    return entities
class Camera():
    """Fixed PyBullet camera: precomputes view/projection matrices once and
    renders RGB + depth images on demand."""

    def __init__(self, cameraResolution=[320, 240]):
        # NOTE(review): mutable default argument — harmless here because it is
        # never mutated, but a tuple would be safer upstream.
        self.cameraResolution = cameraResolution
        camTargetPos = [0.5, 0, 0.05]
        camDistance = 0.4
        upAxisIndex = 2  # z is "up"
        yaw = 90
        pitch = (- 30.0)
        roll = 0
        fov = 60
        nearPlane = 0.01
        farPlane = 100
        self.viewMatrix = pb.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = (cameraResolution[0] / cameraResolution[1])
        self.projectionMatrix = pb.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)

    def get_image(self):
        """Render and return (rgb, depth) buffers at the configured resolution."""
        img_arr = pb.getCameraImage(self.cameraResolution[0], self.cameraResolution[1], self.viewMatrix, self.projectionMatrix, shadow=1, lightDirection=[1, 1, 1], renderer=pb.ER_BULLET_HARDWARE_OPENGL)
        rgb = img_arr[2]   # pixel buffer
        dep = img_arr[3]   # depth buffer
        return (rgb, dep)
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
    """Connect to (host, port), trying each resolved address in turn.

    Backport-style equivalent of socket.create_connection: returns the first
    successfully connected socket; if every candidate fails, re-raises the
    last error, or raises socket.error when resolution yields nothing.
    """
    (host, port) = address
    err = None
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        (af, socktype, proto, canonname, sa) = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            if (timeout is not _GLOBAL_DEFAULT_TIMEOUT):
                sock.settimeout(float(timeout))
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except socket.error:
            # remember the failure and clean up before trying the next address
            err = get_exception()  # project helper; presumably sys.exc_info()[1] — confirm
            if (sock is not None):
                sock.close()
    if (err is not None):
        raise err
    else:
        raise socket.error('getaddrinfo returns an empty list')
class LatentTransformerDecoderLayer(TransformerDecoderLayer):
    """Transformer decoder layer whose residual branch is gated by a learned
    per-layer selection weight (layer_select(idx))."""

    def __init__(self, args, idx, layer_select=None, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
        super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
        self.idx = idx  # this layer's index into the selection module
        self.layer_select = layer_select

    def residual_connection(self, x, residual):
        # Scale the sublayer output by the (soft) layer-selection gate before
        # adding it back to the residual stream.
        return (residual + (x * self.layer_select(self.idx)))
def clean_input(prompt: str='', talk=False):
    """Prompt the user for input, letting registered plugins answer first.

    Plugin responses that look like yes/no are mapped to the configured
    authorise/exit keys; any other non-empty plugin response is returned
    verbatim.  Falls back to an interactive keyboard prompt.  Ctrl-C exits
    the process.  NOTE(review): the `talk` parameter is unused here — confirm
    whether callers rely on it.
    """
    try:
        cfg = Config()
        if cfg.chat_messages_enabled:
            for plugin in cfg.plugins:
                if (not hasattr(plugin, 'can_handle_user_input')):
                    continue
                if (not plugin.can_handle_user_input(user_input=prompt)):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if (not plugin_response):
                    continue
                # normalize affirmative/negative plugin answers to config keys
                if (plugin_response.lower() in ['yes', 'yeah', 'y', 'ok', 'okay', 'sure', 'alright']):
                    return cfg.authorise_key
                elif (plugin_response.lower() in ['no', 'nope', 'n', 'negative']):
                    return cfg.exit_key
                return plugin_response
        logger.info('Asking user via keyboard...')
        answer = session.prompt(ANSI(prompt))
        return answer
    except KeyboardInterrupt:
        logger.info('You interrupted Auto-GPT')
        logger.info('Quitting...')
        exit(0)
def run_deep_graph_infomax(base_model, generator, epochs, reorder=(lambda sequence, subjects: subjects)):
    """Train Deep Graph Infomax embeddings, then score them with a logistic
    regression over the train/test node splits; returns test accuracy.

    NOTE(review): relies on globals G, es, train_subjects and test_subjects
    defined elsewhere in the notebook/script — confirm they are in scope.
    """
    corrupted_generator = CorruptedGenerator(generator)
    gen = corrupted_generator.flow(G.nodes())
    infomax = DeepGraphInfomax(base_model, corrupted_generator)
    (x_in, x_out) = infomax.in_out_tensors()
    model = Model(inputs=x_in, outputs=x_out)
    model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=0.001))
    history = model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])
    # Reuse the trained base model as a pure embedding model.
    (x_emb_in, x_emb_out) = base_model.in_out_tensors()
    if (generator.num_batch_dims() == 2):
        # drop the dummy batch axis produced by full-batch generators
        x_emb_out = tf.squeeze(x_emb_out, axis=0)
    emb_model = Model(inputs=x_emb_in, outputs=x_emb_out)
    test_gen = generator.flow(test_subjects.index)
    train_gen = generator.flow(train_subjects.index)
    test_embeddings = emb_model.predict(test_gen)
    train_embeddings = emb_model.predict(train_gen)
    # some generators reorder nodes; realign labels with the embeddings
    ordered_test_subjects = reorder(test_gen, test_subjects)
    ordered_train_subjects = reorder(train_gen, train_subjects)
    lr = LogisticRegression(multi_class='auto', solver='lbfgs')
    lr.fit(train_embeddings, ordered_train_subjects)
    y_pred = lr.predict(test_embeddings)
    acc = (y_pred == ordered_test_subjects).mean()
    return acc
def train(net, optimizer, data, target, NUM_BATCHES):
    """Run NUM_BATCHES optimization steps using the network's BBB loss.

    data[i] and target[i] are reshaped to column vectors before each step.
    """
    for step in range(NUM_BATCHES):
        net.zero_grad()
        inputs = data[step].reshape((-1, 1))
        labels = target[step].reshape((-1, 1))
        net.BBB_loss(inputs, labels).backward()
        optimizer.step()
class AlbertConfig(PretrainedConfig):
    """Configuration class for ALBERT models.

    Stores model hyperparameters; unrecognized kwargs are forwarded to
    PretrainedConfig along with the special token ids.
    """
    model_type = 'albert'

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act='gelu_new', hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type='absolute', pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size  # ALBERT factorizes embeddings separately from hidden_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups  # layers within a group share parameters
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class Bilinear(Module):
    """Bilinear layer supporting complex weights.

    With complex_weights=True the weight/bias are single complex tensors;
    otherwise they are (real, imag) ParameterList pairs emulating complex
    arithmetic with real tensors.  The product itself is delegated to
    cF.bilinear.
    """
    __constants__ = ['in1_features', 'in2_features', 'out_features']
    in1_features: int
    in2_features: int
    out_features: int
    complex_weights: bool
    weight: Union[(Tensor, Tuple[(Tensor, Tensor)])]
    bias: Optional[Union[(Tensor, Tuple[(Tensor, Tensor)])]]

    def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool=True, complex_weights: bool=True) -> None:
        super(Bilinear, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.complex_weights = complex_weights
        if complex_weights:
            # single complex-valued parameter tensor
            self.weight = Parameter(torch.Tensor(out_features, in1_features, in2_features).to(torch.cfloat))
        else:
            # split real/imag parts stored as a ParameterList pair
            weight_real = Parameter(torch.Tensor(out_features, in1_features, in2_features))
            weight_imag = Parameter(torch.Tensor(out_features, in1_features, in2_features))
            self.weight = ParameterList([weight_real, weight_imag])
        if bias:
            if complex_weights:
                self.bias = Parameter(torch.Tensor(out_features).to(torch.cfloat))
            else:
                bias_real = Parameter(torch.Tensor(out_features))
                bias_imag = Parameter(torch.Tensor(out_features))
                self.bias = ParameterList([bias_real, bias_imag])
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def _reset_parameters(self, weight, bias) -> None:
        # uniform init bounded by 1/sqrt(fan_in) (fan_in = in1_features)
        bound = (1 / math.sqrt(weight.size(1)))
        init.uniform_(weight, (- bound), bound)
        if (bias is not None):
            init.uniform_(bias, (- bound), bound)

    def reset_parameters(self) -> None:
        """Initialize weights/biases; handles both storage layouts."""
        if (type(self.weight) is ParameterList):
            self._reset_parameters(self.weight[0], (None if (self.bias is None) else self.bias[0]))
            self._reset_parameters(self.weight[1], (None if (self.bias is None) else self.bias[1]))
        else:
            self._reset_parameters(self.weight, self.bias)

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        return cF.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self) -> str:
        return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(self.in1_features, self.in2_features, self.out_features, (self.bias is not None))
class StandardPermutations_avoiding_12(StandardPermutations_avoiding_generic):
    """Permutations of n avoiding the pattern 12.

    Only the strictly decreasing permutation n, n-1, ..., 1 avoids 12, so
    the set always has exactly one element.
    """

    def __init__(self, n):
        super().__init__(n, (Permutations()([1, 2]),))

    def __iter__(self):
        # the unique 12-avoiding permutation is the decreasing one
        (yield self.element_class(self, range(self.n, 0, (- 1)), check=False))

    def cardinality(self):
        """Always 1 (see class docstring)."""
        return ZZ.one()
class RealTrafficMatrix(TrafficMatrix):
    """Traffic matrix loaded from real measurements, tagged with date/time.

    Supports in-place perturbation via `_update` with either uniform
    multiplicative noise ('uniform', needs alpha) or global scaling
    ('scale').
    """

    def __init__(self, problem, tm, date, time, seed=0, scale_factor=1.0):
        # BUG FIX: previously the parent was always given the literal 1.0,
        # silently discarding a caller-supplied scale_factor.
        super().__init__(problem, tm, seed, scale_factor=scale_factor)
        self._date = date
        self._time = time

    def model(self):
        """Model identifier used in filenames/logs."""
        return 'real'

    def date(self):
        return self._date

    def time(self):
        return self._time

    def copy(self):
        # NOTE(review): self.date / self.time are methods here, so bound
        # methods (not values) are forwarded; upstream these look like
        # @property accessors whose decorators were lost — confirm.
        return RealTrafficMatrix(self.problem, self._tm.copy(), self.date, self.time, self.seed, self.scale_factor)

    def _init_traffic_matrix(self):
        # The measured matrix is supplied directly; nothing to synthesize.
        pass

    def _update(self, scale_factor, type, **kwargs):
        """Rebuild self._tm element-wise per the chosen perturbation type."""
        if (type == 'uniform'):
            alpha = kwargs['alpha']
            assert (alpha > 0.0)

            def new_val(val):
                # multiply by 1 + alpha*U(-1, 1), clipped at zero
                return max(0, (val * (1 + (alpha * uni_rand((- 1), 1)))))
        elif (type == 'scale'):
            assert (scale_factor > 0.0)

            def new_val(val):
                return (val * scale_factor)
        else:
            raise Exception('"{}" not a valid perturbation type for the traffic matrix'.format(type))
        mat = np.zeros_like(self.tm)
        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                mat[(i, j)] = new_val(self._tm[(i, j)])
        self._tm = mat

    def _fname_suffix(self):
        return '{}_{}'.format(self.date, self.time)
class HParams(object):
    """Minimal hyper-parameter container with string parsing.

    Constructor kwargs become both dict entries and attributes; reading an
    unknown attribute yields None.  `parse` returns a new HParams with
    comma-separated 'key=value' overrides applied, each value cast to the
    type of the existing default.
    """

    def __init__(self, **kwargs):
        self._items = {}
        for (k, v) in kwargs.items():
            self._set(k, v)

    def _set(self, k, v):
        # keep the dict and attribute views in sync
        self._items[k] = v
        setattr(self, k, v)

    def __getattr__(self, k):
        # Invoked only when normal attribute lookup fails.
        # BUG FIX: the previous body called hasattr(self, k), which re-enters
        # __getattr__ and recursed without bound (RecursionError) for every
        # unknown attribute.  Unknown hyperparameters now resolve to None.
        items = self.__dict__.get('_items')
        if (items is None) or (k not in items):
            return None
        return items[k]

    def parse(self, str_value):
        """Return a copy with overrides from 'k1=v1,k2=v2,...' applied.

        Raises ValueError for a malformed entry and KeyError for a key with
        no existing default (the default's type drives the cast; bool is
        checked before int on purpose, since bool is an int subclass).
        """
        hps = HParams(**self._items)
        for entry in str_value.strip().split(','):
            entry = entry.strip()
            if (not entry):
                continue
            (key, sep, value) = entry.partition('=')
            if (not sep):
                raise ValueError(('Unable to parse: %s' % entry))
            default_value = hps._items[key]
            if isinstance(default_value, bool):
                hps._set(key, (value.lower() == 'true'))
            elif isinstance(default_value, int):
                hps._set(key, int(value))
            elif isinstance(default_value, float):
                hps._set(key, float(value))
            else:
                hps._set(key, value)
        return hps

    def __str__(self):
        return str(self._items)
def _generate_md_atari_batch(atari_envs):
    """Generate one markdown doc page per Atari environment name.

    Each name is expected to carry a 6-character prefix (presumably
    'atari-'); the remainder selects the output file and page title.
    """
    for env_name in atari_envs:
        short = env_name[6:]  # drop the 6-char prefix
        _generate_md_atari_single_env(
            filepath='envs/gym/atari/{}.md'.format(short),
            env_title='atari-{}'.format(short))
def inputs_to_tree_reps(args, dep_heads, l, prune, subj_pos=None, obj_pos=None):
    """Convert batched dependency heads into pruned adjacency matrices.

    Returns a (batch, maxlen, maxlen) tensor of undirected, no-self-loop
    adjacency matrices, one per sentence, built from each head list.
    """
    maxlen = max(l)
    dep_heads = dep_heads.cpu().numpy()
    # BUG FIX: `if subj_pos:` on a multi-element tensor raises
    # "Boolean value of Tensor ... is ambiguous"; compare against None.
    if subj_pos is not None:
        subj_pos = subj_pos.cpu().numpy()
    if obj_pos is not None:
        obj_pos = obj_pos.cpu().numpy()
    # NOTE(review): the converted subj_pos/obj_pos are currently not
    # forwarded (head_to_tree receives None, None) — confirm whether pruning
    # around the subject/object spans was intended here.
    trees = [head_to_tree(dep_heads[i], l[i], prune, None, None) for i in range(len(l))]
    adj = [tree_to_adj(maxlen, tree, directed=False, self_loop=False).reshape(1, maxlen, maxlen) for tree in trees]
    adj = np.concatenate(adj, axis=0)
    adj = torch.from_numpy(adj)
    return adj
def main(args):
    """Launch one spawned training process per visible GPU and wait for all.

    A random localhost TCP port seeds the distributed init method; child
    errors are funneled through a shared queue to ErrorHandler.
    """
    args.distributed_world_size = torch.cuda.device_count()
    port = random.randint(10000, 20000)
    args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
    args.distributed_init_host = 'localhost'
    args.distributed_port = (port + 1)
    mp = torch.multiprocessing.get_context('spawn')
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    procs = []
    for i in range(args.distributed_world_size):
        # mutate args per child; spawn pickles args, so each child sees
        # the rank/device assigned before its start()
        args.distributed_rank = i
        args.device_id = i
        procs.append(mp.Process(target=run, args=(args, error_queue), daemon=True))
        procs[i].start()
        error_handler.add_child(procs[i].pid)
    for p in procs:
        p.join()
def _depthwise_conv2d(inputs, kernel, strides, padding):
    """Depthwise 2-D convolution in NHWC layout.

    Setting feature_group_count to the input channel count makes each input
    channel convolve only with its own kernel slice (depthwise behavior).
    """
    return jax.lax.conv_general_dilated(inputs, kernel, strides, padding, feature_group_count=inputs.shape[(- 1)], dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
class SubWithTorchFunction(torch.Tensor):
    """Tensor subclass whose __torch_function__ simply defers to the base
    implementation (normalizing kwargs to a dict first)."""

    def __torch_function__(self, func, types, args=(), kwargs=None):
        if (kwargs is None):
            kwargs = {}
        return super().__torch_function__(func, types, args, kwargs)
def res_block(x):
    """Residual block: two 3x3 stride-1 conv blocks (16 filters), the second
    without activation, added back to the input."""
    shortcut = x
    out = conv_block(x, 16, 3, 1)
    out = conv_block(out, 16, 3, 1, activation=None)
    return layers.Add()([shortcut, out])
def aa_to_rotmat(axis_angle: Union[(torch.Tensor, numpy.ndarray)]) -> Union[(torch.Tensor, numpy.ndarray)]:
    """Convert axis-angle vectors (..., 3) to rotation matrices.

    Raises ValueError when the trailing dimension is not 3.
    """
    if (axis_angle.shape[(- 1)] != 3):
        # BUG FIX: the message previously rendered a stray 'f' before the
        # shape ("shape ftorch.Size(...)"); the inner f- prefix was a typo.
        raise ValueError(f'Invalid input axis angles shape {axis_angle.shape}.')
    t = Compose([axis_angle_to_matrix])
    return t(axis_angle)
def size(g, self, dim):
    """ONNX symbolic for Tensor.size(dim): emit Shape(self) and statically
    select the requested dimension; dynamic dims are not exportable."""
    if _is_value(dim):
        raise RuntimeError('ONNX export only supports constant dim values in .size()')
    full_shape = g.op('Shape', self)
    return select(g, full_shape, dim=0, index=dim)
def filter_errors(errors, method, Estimator=None):
    """Yield (code, message) pairs, dropping docstring-check codes that are
    deliberately ignored.

    RT02/GL01/GL02 and SA01/EX01 are always skipped; PR02 is skipped for
    properties when both Estimator and method are given; ES01 is skipped for
    any method docstring.
    """
    always_ignored = {'RT02', 'GL01', 'GL02', 'SA01', 'EX01'}
    for code, message in errors:
        if code in always_ignored:
            continue
        # properties legitimately document no parameters
        if code == 'PR02' and Estimator is not None and method is not None:
            if isinstance(getattr(Estimator, method), property):
                continue
        if method is not None and code == 'ES01':
            continue
        yield (code, message)
('Concat')  # NOTE(review): stray expression — almost certainly the argument of a stripped registration decorator (e.g. @Registry.Register('Concat')); confirm upstream
def TranslateConcat(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe Concat layer into a Caffe2 Concat op (NCHW)."""
    caffe_op = BaseTranslate(layer, 'Concat')
    # Caffe2's Concat emits a second output holding the per-input split dims
    caffe_op.output.extend([(('_' + caffe_op.output[0]) + '_dims')])
    AddArgument(caffe_op, 'order', 'NCHW')
    return (caffe_op, [])
class DetectionCheckpointer(Checkpointer):
    """Checkpointer that additionally understands Detectron/Caffe2 .pkl files.

    Only the main process writes checkpoints to disk unless save_to_disk is
    given explicitly.
    """

    def __init__(self, model, save_dir='', *, save_to_disk=None, **checkpointables):
        is_main_process = comm.is_main_process()
        super().__init__(model, save_dir, save_to_disk=(is_main_process if (save_to_disk is None) else save_to_disk), **checkpointables)

    def _load_file(self, filename):
        """Load a checkpoint file, normalizing it to {'model': state_dict, ...}."""
        if filename.endswith('.pkl'):
            # .pkl files are assumed to be Detectron-style latin1 pickles
            with PathManager.open(filename, 'rb') as f:
                data = pickle.load(f, encoding='latin1')
            if (('model' in data) and ('__author__' in data)):
                # file produced by this tool; already in our format
                self.logger.info("Reading a file from '{}'".format(data['__author__']))
                return data
            else:
                # assume raw Caffe2 weight blobs: drop momentum buffers and
                # request name-matching heuristics at load time
                if ('blobs' in data):
                    data = data['blobs']
                data = {k: v for (k, v) in data.items() if (not k.endswith('_momentum'))}
                return {'model': data, '__author__': 'Caffe2', 'matching_heuristics': True}
        loaded = super()._load_file(filename)
        if ('model' not in loaded):
            loaded = {'model': loaded}
        return loaded

    def _load_model(self, checkpoint):
        if checkpoint.get('matching_heuristics', False):
            self._convert_ndarray_to_tensor(checkpoint['model'])
            # heuristically align checkpoint keys with the model's state dict
            model_state_dict = self.model.state_dict()
            align_and_update_state_dicts(model_state_dict, checkpoint['model'], c2_conversion=(checkpoint.get('__author__', None) == 'Caffe2'))
            checkpoint['model'] = model_state_dict
        super()._load_model(checkpoint)
_args('v', 'v', 'i', 'i', 'i', 'none')  # NOTE(review): looks like the argument list of a stripped @parse_args decorator — confirm upstream
def topk(g, self, k, dim, largest, sorted, out=None):
    """ONNX symbolic for torch.topk; delegates to the shared helper."""
    return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
class UniqueAllResult(NamedTuple):
    """Result bundle of a unique-all operation (array-API `unique_all` style
    — confirm field semantics against that spec)."""
    values: ArrayLike           # the unique values
    indices: ArrayLike          # presumably first-occurrence indices — verify
    inverse_indices: ArrayLike  # presumably maps originals back to `values` — verify
    counts: ArrayLike           # presumably per-value occurrence counts — verify
def code_eval(gold: Tuple[(str, Optional[Dict])], pred: str) -> float:
    """Score a predicted program against gold test metadata.

    gold is (reference_text, test_info); test_info must be present and is
    passed to check_correctness with a 3-second timeout.  Returns 1.0 if the
    prediction passes, else 0.0.
    """
    assert (gold[1] is not None)
    return float(code_metrics_helper.check_correctness(gold[1], pred, 3.0)['passed'])
def context(msg: str) -> Iterator[None]:
    """Prepend indented contextual text to any exception raised in the body.

    NOTE(review): the single bare yield strongly suggests this is meant to be
    wrapped with @contextlib.contextmanager; the decorator appears to have
    been stripped — confirm at the definition site.
    """
    try:
        (yield)
    except Exception as e:
        # indent the context and splice it after the original first argument
        msg = textwrap.indent(msg, ' ')
        msg = (f'''{e.args[0]}
{msg}''' if e.args else msg)
        e.args = ((msg,) + e.args[1:])
        raise
class NERTransformer(BaseTransformer):
    """Lightning module for token classification (NER/POS) fine-tuning."""
    mode = 'token-classification'

    def __init__(self, hparams):
        if (type(hparams) == dict):
            hparams = Namespace(**hparams)
        # resolve the task class (e.g. 'NER') from the local `tasks` module
        module = import_module('tasks')
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # CrossEntropyLoss's ignore_index marks padding/sub-token positions
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if (self.config.model_type != 'distilbert'):
            # only BERT/XLNet consume token_type_ids
            inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet']) else None)
        outputs = self(**inputs)
        loss = outputs[0]
        return {'loss': loss}

    def prepare_data(self):
        """Featurize the train/dev/test splits once and cache them to disk."""
        args = self.hparams
        for mode in ['train', 'dev', 'test']:
            cached_features_file = self._feature_file(mode)
            if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
                logger.info('Loading features from cached file %s', cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                # XLNet uses CLS at the end, segment id 2 for CLS, and left padding
                features = self.token_classification_task.convert_examples_to_features(examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool((self.config.model_type in ['xlnet'])), cls_token=self.tokenizer.cls_token, cls_token_segment_id=(2 if (self.config.model_type in ['xlnet']) else 0), sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool((self.config.model_type in ['xlnet'])), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool=False) -> DataLoader:
        """Build a DataLoader over the cached features for `mode`.

        NOTE(review): the `shuffle` argument is accepted but never forwarded
        to DataLoader — confirm whether training shuffling happens elsewhere.
        """
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if (features[0].token_type_ids is not None):
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            # HACK: placeholder segment ids for models without token types
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)

    def validation_step(self, batch, batch_nb):
        """Forward pass returning loss plus raw predictions/targets as numpy."""
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if (self.config.model_type != 'distilbert'):
            inputs['token_type_ids'] = (batch[2] if (self.config.model_type in ['bert', 'xlnet']) else None)
        outputs = self(**inputs)
        (tmp_eval_loss, logits) = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into mean loss + sequence-labeling metrics."""
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # keep only positions with a real (non-ignored) label
                if (out_label_ids[(i, j)] != self.pad_token_label_id):
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(out_label_list, preds_list), 'precision': precision_score(out_label_list, preds_list), 'recall': recall_score(out_label_list, preds_list), 'f1': f1_score(out_label_list, preds_list)}
        ret = dict(results.items())
        ret['log'] = results
        return (ret, preds_list, out_label_list)

    def validation_epoch_end(self, outputs):
        (ret, preds, targets) = self._eval_end(outputs)
        logs = ret['log']
        return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}

    def test_epoch_end(self, outputs):
        (ret, predictions, targets) = self._eval_end(outputs)
        logs = ret['log']
        return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}

    def add_model_specific_args(parser, root_dir):
        # NOTE(review): no `self`/`cls` parameter — presumably a
        # @staticmethod whose decorator was lost; confirm call sites.
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument('--task_type', default='NER', type=str, help='Task type to fine tune in training (e.g. NER, POS, etc)')
        parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--labels', default='', type=str, help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.')
        parser.add_argument('--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none')
        parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
        return parser
def compute_chunk_sizes(M, N, target_chunk_size):
    """Choose row/column chunk sizes for an M x N float64 array so that one
    chunk's size (8 bytes/element, reported in MB) falls below
    target_chunk_size.  Greedily splits the longer chunk axis.
    Returns (row_chunk, col_chunk).
    """
    n_row_chunks = 1
    n_col_chunks = 1

    def _current():
        # chunk extents and size in MB for the current split counts
        rows = int(np.ceil(M / n_row_chunks))
        cols = int(np.ceil(N / n_col_chunks))
        return rows, cols, rows * cols * 8 * 1e-06

    row_chunk, col_chunk, chunk_mb = _current()
    while chunk_mb >= target_chunk_size:
        if row_chunk > col_chunk:
            n_row_chunks += 1
        else:
            n_col_chunks += 1
        row_chunk, col_chunk, chunk_mb = _current()
    return (row_chunk, col_chunk)
def agent(config: Config):
    """Build an Agent wired with mocked memory/command registry for tests.

    NOTE(review): shape suggests a pytest fixture whose @pytest.fixture
    decorator was stripped — confirm upstream.
    """
    ai_name = 'Test AI'
    memory = MagicMock()
    next_action_count = 0
    command_registry = MagicMock()
    ai_config = AIConfig(ai_name=ai_name)
    system_prompt = 'System prompt'
    triggering_prompt = 'Triggering prompt'
    workspace_directory = 'workspace_directory'
    agent = Agent(ai_name=ai_name, memory=memory, next_action_count=next_action_count, command_registry=command_registry, ai_config=ai_config, config=config, system_prompt=system_prompt, triggering_prompt=triggering_prompt, workspace_directory=workspace_directory)
    return agent
def reorder_index(batch_indices, world_size):
    """Interleave a flat batch of indices across ranks.

    The batch is viewed as `world_size` contiguous per-rank slices of equal
    length; the result takes element 0 of each slice, then element 1 of each
    slice, and so on.
    """
    per_rank = len(batch_indices) // world_size
    return [batch_indices[offset + rank * per_rank]
            for offset in range(per_rank)
            for rank in range(world_size)]
class BytePairEncoding(Vocabulary):
    """Vocabulary that segments sentences with a standard BPE codes file."""

    def __init__(self, vocab_file, bpe_file, seq_postfix=None, **kwargs):
        super(BytePairEncoding, self).__init__(vocab_file=vocab_file, seq_postfix=seq_postfix, **kwargs)
        # deferred import keeps the dependency optional at module load time
        from returnn.util.bpe import StandardBytePairEncoder
        self.bpe = StandardBytePairEncoder(bpe_codes_file=bpe_file, labels=self._labels)

    def get_seq(self, sentence):
        """Return label indices for `sentence` plus the configured postfix."""
        segments = self.bpe.segment_sentence(sentence)
        seq = self.get_seq_indices(segments)
        return (seq + self.seq_postfix)
class MD_G_multi(nn.Module):
    """Multi-domain image generator.

    Decodes a content feature map conditioned on a latent code z and a
    domain code c: (z, c) pass through an MLP whose output is split into
    four chunks injected into four Mis-INS residual blocks, followed by two
    upsampling transposed-conv stages and a tanh output head.
    """

    def __init__(self, output_dim, c_dim=3, nz=8):
        super(MD_G_multi, self).__init__()
        self.nz = nz
        ini_tch = 256
        tch_add = ini_tch
        tch = ini_tch
        self.tch_add = tch_add  # per-block conditioning width
        self.dec1 = MisINSResBlock(tch, tch_add)
        self.dec2 = MisINSResBlock(tch, tch_add)
        self.dec3 = MisINSResBlock(tch, tch_add)
        self.dec4 = MisINSResBlock(tch, tch_add)
        # upsampling head: two stride-2 transposed convs, then 1x1 + tanh
        dec5 = []
        dec5 += [ReLUINSConvTranspose2d(tch, (tch // 2), kernel_size=3, stride=2, padding=1, output_padding=1)]
        tch = (tch // 2)
        dec5 += [ReLUINSConvTranspose2d(tch, (tch // 2), kernel_size=3, stride=2, padding=1, output_padding=1)]
        tch = (tch // 2)
        dec5 += [nn.ConvTranspose2d(tch, output_dim, kernel_size=1, stride=1, padding=0)]
        dec5 += [nn.Tanh()]
        self.decA5 = nn.Sequential(*dec5)
        # maps (z, c) to the 4 conditioning chunks (tch_add each)
        self.mlp = nn.Sequential(nn.Linear((nz + c_dim), 256), nn.ReLU(inplace=True), nn.Linear(256, 256), nn.ReLU(inplace=True), nn.Linear(256, (tch_add * 4)))
        return

    def forward(self, x, z, c):
        """Decode content x conditioned on latent z and domain code c."""
        z_c = torch.cat([c, z], 1)
        z_c = self.mlp(z_c)
        (z1, z2, z3, z4) = torch.split(z_c, self.tch_add, dim=1)
        (z1, z2, z3, z4) = (z1.contiguous(), z2.contiguous(), z3.contiguous(), z4.contiguous())
        out1 = self.dec1(x, z1)
        out2 = self.dec2(out1, z2)
        out3 = self.dec3(out2, z3)
        out4 = self.dec4(out3, z4)
        out = self.decA5(out4)
        return out
class FunctionalBatchNorm(common.BaseSubstitution):
    """Graph substitution folding F.batch_norm calls into nn.BatchNorm2d nodes."""

    def __init__(self):
        bn_node = NodeOperationMatcher(F.batch_norm)
        super().__init__(matcher_instance=bn_node)

    def get_attributes_from_inputs(self, graph: Graph, node: BaseNode) -> dict:
        """Extract (mean, var, gamma, beta) from the node's 4 constant inputs;
        returns {} (with a warning) for any other input arity."""
        input_nodes = graph.get_prev_nodes(node, sink_index_sorted=True)
        if (len(input_nodes) == 5):
            # input 0 is the activation; 1..4 hold the BN statistics/params
            return {MOVING_MEAN: list(input_nodes[1].weights.values())[0], MOVING_VARIANCE: list(input_nodes[2].weights.values())[0], GAMMA: list(input_nodes[3].weights.values())[0], BETA: list(input_nodes[4].weights.values())[0]}
        else:
            Logger.warning(f'functional batch_norm is only folded in the 5 inputs case (input, mean, var, gamma, beta),got {len(input_nodes)}')
            return {}

    def substitute(self, graph: Graph, node: BaseNode) -> Graph:
        """Replace the functional node with a BatchNorm2d node and detach the
        now-absorbed constant inputs.  Only 4-D (NCHW) inputs are folded."""
        if (len(node.input_shape[0]) != 4):
            return graph
        out_channels = node.output_shape[0][1]
        bn_node_weights = self.get_attributes_from_inputs(graph, node)
        if (not bn_node_weights):
            return graph
        new_batchnorm2d = BaseNode(name=(node.name + '_into_BatchNorm2d'), framework_attr={NUM_FEATURES: out_channels, EPSILON: EPSILON_VAL, MOMENTUM: MOMENTUM_VAL}, input_shape=node.output_shape, output_shape=node.output_shape, weights=bn_node_weights, layer_class=nn.BatchNorm2d)
        num_nodes_before_substitution = len(graph.nodes)
        num_edges_before_substitution = len(graph.edges)
        # remove the constant parameter nodes absorbed into the new BN node
        batch_norm_consts = graph.get_prev_nodes(node)[1:]
        for const in batch_norm_consts:
            graph.remove_edge(const, node)
            graph.remove_node(const)
        graph.replace_node(node, new_batchnorm2d)
        # sanity: exactly the const nodes/edges should have disappeared
        assert ((num_nodes_before_substitution - len(graph.nodes)) == len(batch_norm_consts))
        assert ((num_edges_before_substitution - len(graph.edges)) == len(batch_norm_consts))
        return graph
class Agent(object):
    """Base class for agents acting in an MDP; subclasses override
    action()/actions()."""

    def __init__(self):
        self.reset()

    def action(self, state):
        # NOTE(review): returns (rather than raises) NotImplementedError —
        # kept as-is; subclasses are expected to override.
        return NotImplementedError()

    def actions(self, states, agent_indices):
        return NotImplementedError()

    def a_probs_from_action(action):
        # NOTE(review): no `self` parameter — presumably a @staticmethod
        # whose decorator was lost; confirm before calling on instances.
        # One-hot action distribution for a concrete action.
        action_idx = Action.ACTION_TO_INDEX[action]
        return np.eye(Action.NUM_ACTIONS)[action_idx]

    def check_action_probs(action_probs, tolerance=0.0001):
        # NOTE(review): also looks like a stripped @staticmethod.
        probs_sum = sum(action_probs)
        assert math.isclose(probs_sum, 1.0, rel_tol=tolerance), 'Action probabilities {} should sum up to approximately 1 but sum up to {}'.format(list(action_probs), probs_sum)

    def set_agent_index(self, agent_index):
        self.agent_index = agent_index

    def set_mdp(self, mdp):
        self.mdp = mdp

    def reset(self):
        # clear per-episode state
        self.agent_index = None
        self.mdp = None
def test_nd_array_initialize():
    """A freshly created NdArray of a given shape reads back as float32 zeros."""
    shape = [2, 3, 4]
    a = nn.NdArray(shape)
    ref_a = np.zeros(shape, dtype=np.float32)
    assert (a.data == ref_a).all()
def get_batch(iterable, n=1):
    """Yield successive slices of at most `n` items from a sized sequence.

    The final slice may be shorter when len(iterable) is not a multiple of n.
    """
    total = len(iterable)
    for start in range(0, total, n):
        stop = min(start + n, total)
        yield iterable[start:stop]
def parse():
    """Parse command-line arguments.

    Requires --save-as FOLDER_NAME; returns the argparse Namespace
    (available as args.save_as).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--save-as', metavar='FOLDER_NAME', required=True)
    return arg_parser.parse_args()
class TreeWalker(base.TreeWalker):
    """html5lib tree walker over a Genshi markup event stream."""

    def __iter__(self):
        # Keep one event of lookahead so tokens() can inspect the next event
        # (needed to recognize an immediately-closed void element).
        previous = None
        for event in self.tree:
            if (previous is not None):
                for token in self.tokens(previous, event):
                    (yield token)
            previous = event
        if (previous is not None):
            for token in self.tokens(previous, None):
                (yield token)

    def tokens(self, event, next):
        """Translate one Genshi event into html5lib walker tokens."""
        (kind, data, _) = event
        if (kind == START):
            (tag, attribs) = data
            name = tag.localname
            namespace = tag.namespace
            # normalize attributes to the {(ns, localname): value} form
            converted_attribs = {}
            for (k, v) in attribs:
                if isinstance(k, QName):
                    converted_attribs[(k.namespace, k.localname)] = v
                else:
                    converted_attribs[(None, k)] = v
            if ((namespace == namespaces['html']) and (name in voidElements)):
                # void elements emit emptyTag; the flag says whether the
                # matching END event is missing (i.e. no explicit close)
                for token in self.emptyTag(namespace, name, converted_attribs, ((not next) or (next[0] != END) or (next[1] != tag))):
                    (yield token)
            else:
                (yield self.startTag(namespace, name, converted_attribs))
        elif (kind == END):
            name = data.localname
            namespace = data.namespace
            # END of a void element was already covered by emptyTag above
            if ((namespace != namespaces['html']) or (name not in voidElements)):
                (yield self.endTag(namespace, name))
        elif (kind == COMMENT):
            (yield self.comment(data))
        elif (kind == TEXT):
            for token in self.text(data):
                (yield token)
        elif (kind == DOCTYPE):
            (yield self.doctype(*data))
        elif (kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI)):
            # structural/namespace events carry no walker tokens
            pass
        else:
            (yield self.unknown(kind))