code
stringlengths
101
5.91M
def start_processes(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'):
    """Spawn ``nprocs`` worker processes that each run ``fn`` via ``_wrap``.

    Each worker gets its own SimpleQueue for error reporting.  Returns the
    ProcessContext immediately when ``join`` is False; otherwise blocks until
    every worker has exited.
    """
    _python_version_check()
    ctx = multiprocessing.get_context(start_method)
    queues = []
    workers = []
    for rank in range(nprocs):
        err_q = ctx.SimpleQueue()
        worker = ctx.Process(target=_wrap, args=(fn, rank, args, err_q), daemon=daemon)
        worker.start()
        queues.append(err_q)
        workers.append(worker)
    process_context = ProcessContext(workers, queues)
    if not join:
        return process_context
    # join() returns True once all processes have finished; loop until then.
    while not process_context.join():
        pass
def lookup_function(val):
    """Return the registered gdb pretty-printer result for *val*, or None.

    Strips references and typedefs from the value's type, then matches the
    type tag against each regex key in ``pretty_printers_dict``.
    """
    # Renamed local from `type` (shadowed the builtin) and replaced
    # `typename == None` with the idiomatic `is None`; behavior unchanged.
    val_type = val.type
    if val_type.code == gdb.TYPE_CODE_REF:
        # Dereference references so the printer sees the underlying type.
        val_type = val_type.target()
    val_type = val_type.unqualified().strip_typedefs()
    typename = val_type.tag
    if typename is None:
        return None
    for pattern, printer in pretty_printers_dict.items():
        if pattern.search(typename):
            return printer(val)
    return None
def _get_builtin_metadata(dataset_name): return _get_flickr30k_metadata([]) raise KeyError('No built-in metadata for dataset {}'.format(dataset_name))
def test_pair_confusion_matrix():
    """Compare pair_confusion_matrix against a brute-force pair count."""
    n = 10
    N = n ** 2
    # Two labelings of N points whose cluster boundaries are shifted by one.
    clustering1 = np.hstack([[i + 1] * n for i in range(n)])
    clustering2 = np.hstack([[i + 1] * (n + 1) for i in range(n)])[:N]
    # Count ordered pairs (i, j), i != j, bucketed by same-cluster membership
    # in each labeling.
    expected = np.zeros(shape=(2, 2), dtype=np.int64)
    for i in range(len(clustering1)):
        for j in range(len(clustering2)):
            if i == j:
                continue
            same_1 = int(clustering1[i] == clustering1[j])
            same_2 = int(clustering2[i] == clustering2[j])
            expected[same_1, same_2] += 1
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)
def _impl(array, highlevel, behavior, attrs):
    """Unzip the fields of an awkward layout into a tuple of arrays.

    Raises ValueError when the layout contains a union whose members do not
    all share the same field set (such a layout cannot be unzipped).
    """
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=True, primitive_policy='error')
    fields = ak.operations.fields(layout)

    def check_for_union(layout, **kwargs):
        # Returning the layout stops recursion into records; unions are
        # validated field-by-field before descending further.
        if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
            return layout
        elif layout.is_union:
            for content in layout.contents:
                if (set(ak.operations.fields(content)) != set(fields)):
                    raise ValueError('union of different sets of fields, cannot ak.unzip')

    ak._do.recursively_apply(layout, check_for_union, return_array=False)
    if (len(fields) == 0):
        # No named fields: wrap the whole layout as a one-element tuple.
        return (ctx.wrap(layout, highlevel=highlevel, allow_other=True),)
    else:
        # One wrapped array per field, in field order.
        return tuple((ctx.wrap(layout[n], highlevel=highlevel, allow_other=True) for n in fields))
class SubGoalAttachment():
    """Maintains navigation sub-goals along a SUMO route for a vehicle.

    ``reset`` builds waypoints at edge start/end positions; ``step`` consumes
    leading waypoints the vehicle has driven within consume distance of.
    """

    def __init__(self, config, vehicle, street_map):
        self.config = config
        self.vehicle = vehicle
        self.street_map = street_map
        self._nav_config = self.config.navigation
        self._sub_goals = None  # populated by reset()

    def reset(self):
        """Rebuild the sub-goal list from the scenario route."""
        self._sub_goals = []
        for i, edgeID in enumerate(self.street_map.scenario.route):
            edge = self.street_map.scenario.sumo_net.getEdge(edgeID)
            start_pos = sumolib.geomhelper.positionAtShapeOffset(edge.getShape(), 0.0)
            end_pos = sumolib.geomhelper.positionAtShapeOffset(edge.getShape(), edge.getLength())
            # Skip edge-start waypoints for the first edge, and entirely when
            # configured to place subgoals only after edges.
            if i > 0 and not self.config.simulation.subgoals_only_after:
                self._sub_goals.append(SimpleWaypoint(np.array(start_pos)))
            self._sub_goals.append(SimpleWaypoint(np.array(end_pos)))
        if self._nav_config.only_end_goal:
            # BUGFIX: keep a one-element *list*. Indexing with [-1] returned a
            # bare SimpleWaypoint, which breaks len()/enumerate() in step().
            self._sub_goals = self._sub_goals[-1:]

    def step(self):
        """Drop each leading sub-goal within consume distance of the vehicle."""
        veh_state = self.vehicle.state
        if len(self._sub_goals) == 0:
            return
        remove_idx = -1
        for i, wp in enumerate(self._sub_goals):
            # Stop at the first waypoint still out of reach (2-D distance).
            if np.linalg.norm(veh_state.location[:2] - wp.location) > self._nav_config.sub_goal_consume_dist:
                break
            else:
                remove_idx += 1
        self._sub_goals = self._sub_goals[remove_idx + 1:]

    def sub_goals(self):
        return self._sub_goals
def find_comparable_simulations(sxs_id, catalog, catalog_resolutions):
    """Find the multi-resolution simulation most similar to *sxs_id*.

    Similarity is the summed absolute difference in mass ratio and the two
    dimensionless spin magnitudes.  Candidates sharing the same SpEC revision
    and initial-data type are preferred when any exist.  Returns the h5
    filename of the best candidate at its highest resolution.
    """
    entry = catalog[sxs_id]
    mass_ratio = entry['initial_mass1'] / entry['initial_mass2']
    spin1_mag = np.linalg.norm(entry['initial_dimensionless_spin1'])
    spin2_mag = np.linalg.norm(entry['initial_dimensionless_spin2'])

    # Only simulations with more than one resolution qualify.
    candidates = [k for k in catalog_resolutions if len(catalog_resolutions[k]) > 1]

    # Narrow to same SpEC revision + initial-data type when possible.
    matching = [k for k in candidates
                if catalog[k]['spec_revisions'] == entry['spec_revisions']
                and catalog[k]['initial_data_type'] == entry['initial_data_type']]
    if len(matching) > 0:
        candidates = matching

    key_best = candidates[0]
    best_diff = np.inf
    for key in candidates:
        cand = catalog[key]
        cand_ratio = cand['initial_mass1'] / cand['initial_mass2']
        cand_spin1 = np.linalg.norm(cand['initial_dimensionless_spin1'])
        cand_spin2 = np.linalg.norm(cand['initial_dimensionless_spin2'])
        diff = (np.abs(mass_ratio - cand_ratio)
                + np.abs(spin1_mag - cand_spin1)
                + np.abs(spin2_mag - cand_spin2))
        if diff < best_diff:
            best_diff = diff
            key_best = key

    resolution_best = np.max(catalog_resolutions[key_best])
    return key_best.replace(':', '_') + '_Res' + str(resolution_best) + '.h5'
class Evaluator(object):
    """Evaluates a model over dataset splits and logs TF1 scalar summaries.

    Holds placeholder-driven summary ops for the train/dev/test splits
    (built once in build_summary) and a FileWriter under cfg.summary_dir.
    """

    def __init__(self, model):
        self.model = model
        self.global_step = model.global_step
        self.build_summary()
        self.writer = tf.summary.FileWriter(cfg.summary_dir)

    def get_evaluation(self, sess, dataset_obj, global_step=None):
        """Run the model over every batch of *dataset_obj* and return
        (mean loss, mean accuracy, sentence-level mean accuracy).

        When *global_step* is given, also emits the summaries matching the
        dataset's data_type ('train' / 'dev' / anything else -> test).
        """
        _logger.add()
        _logger.add(('getting evaluation result for %s' % dataset_obj.data_type))
        (logits_list, loss_list, accu_list) = ([], [], [])
        is_sent_list = []
        for (sample_batch, _, _, _) in dataset_obj.generate_batch_sample_iter():
            # NOTE(review): feed mode is hard-coded to 'dev' even for the
            # train split — presumably to disable dropout; confirm.
            feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
            (logits, loss, accu) = sess.run([self.model.logits, self.model.loss, self.model.accuracy], feed_dict)
            logits_list.append(np.argmax(logits, (-1)))
            loss_list.append(loss)
            accu_list.append(accu)
            is_sent_list += [sample['is_sent'] for sample in sample_batch]
        logits_array = np.concatenate(logits_list, 0)
        loss_value = np.mean(loss_list)
        accu_array = np.concatenate(accu_list, 0)
        accu_value = np.mean(accu_array)
        # Sentence-level accuracy: average only over samples flagged is_sent.
        sent_accu_list = []
        for (idx, is_sent) in enumerate(is_sent_list):
            if is_sent:
                sent_accu_list.append(accu_array[idx])
        sent_accu_value = np.mean(sent_accu_list)
        if (global_step is not None):
            if (dataset_obj.data_type == 'train'):
                summary_feed_dict = {self.train_loss: loss_value, self.train_accuracy: accu_value, self.train_sent_accuracy: sent_accu_value}
                summary = sess.run(self.train_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
            elif (dataset_obj.data_type == 'dev'):
                summary_feed_dict = {self.dev_loss: loss_value, self.dev_accuracy: accu_value, self.dev_sent_accuracy: sent_accu_value}
                summary = sess.run(self.dev_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
            else:
                summary_feed_dict = {self.test_loss: loss_value, self.test_accuracy: accu_value, self.test_sent_accuracy: sent_accu_value}
                summary = sess.run(self.test_summaries, summary_feed_dict)
                self.writer.add_summary(summary, global_step)
        return (loss_value, accu_value, sent_accu_value)

    def get_evaluation_file_output(self, sess, dataset_obj, global_step, deleted_step):
        """Evaluate like get_evaluation, then write per-sample analysis files
        under cfg.answer_dir/gs_<global_step>, removing the directory of
        *deleted_step* first (if given).
        """
        _logger.add()
        _logger.add(('get evaluation file output for %s' % dataset_obj.data_type))
        if (deleted_step is not None):
            # Remove the stale analysis directory of a previous step.
            delete_name = ('gs_%d' % deleted_step)
            delete_path = os.path.join(cfg.answer_dir, delete_name)
            if os.path.exists(delete_path):
                shutil.rmtree(delete_path)
        _logger.add()
        _logger.add(('getting evaluation result for %s' % dataset_obj.data_type))
        (logits_list, loss_list, accu_list) = ([], [], [])
        is_sent_list = []
        for (sample_batch, _, _, _) in dataset_obj.generate_batch_sample_iter():
            feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
            (logits, loss, accu) = sess.run([self.model.logits, self.model.loss, self.model.accuracy], feed_dict)
            logits_list.append(np.argmax(logits, (-1)))
            loss_list.append(loss)
            accu_list.append(accu)
            is_sent_list += [sample['is_sent'] for sample in sample_batch]
        logits_array = np.concatenate(logits_list, 0)
        loss_value = np.mean(loss_list)
        accu_array = np.concatenate(accu_list, 0)
        accu_value = np.mean(accu_array)
        sent_accu_list = []
        for (idx, is_sent) in enumerate(is_sent_list):
            if is_sent:
                sent_accu_list.append(accu_array[idx])
        sent_accu_value = np.mean(sent_accu_list)
        # NOTE(review): ('gs_%s' % global_step) is always truthy, so the
        # `or 'test'` fallback is dead code — confirm intent.
        analysis_save_dir = cfg.mkdir(cfg.answer_dir, (('gs_%s' % global_step) or 'test'))
        OutputAnalysis.do_analysis(dataset_obj, logits_array, accu_array, analysis_save_dir, cfg.fine_grained)

    def build_summary(self):
        """Create loss/accuracy/sentence-accuracy placeholders and merged
        summary ops for each of the train/dev/test splits."""
        with tf.name_scope('train_summaries'):
            self.train_loss = tf.placeholder(tf.float32, [], 'train_loss')
            self.train_accuracy = tf.placeholder(tf.float32, [], 'train_accuracy')
            self.train_sent_accuracy = tf.placeholder(tf.float32, [], 'train_sent_accuracy')
            tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_loss', self.train_loss))
            tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_accuracy', self.train_accuracy))
            tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_sent_accuracy', self.train_sent_accuracy))
            self.train_summaries = tf.summary.merge_all('train_summaries_collection')
        with tf.name_scope('dev_summaries'):
            self.dev_loss = tf.placeholder(tf.float32, [], 'dev_loss')
            self.dev_accuracy = tf.placeholder(tf.float32, [], 'dev_accuracy')
            self.dev_sent_accuracy = tf.placeholder(tf.float32, [], 'dev_sent_accuracy')
            tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_loss', self.dev_loss))
            tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_accuracy', self.dev_accuracy))
            tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_sent_accuracy', self.dev_sent_accuracy))
            self.dev_summaries = tf.summary.merge_all('dev_summaries_collection')
        with tf.name_scope('test_summaries'):
            self.test_loss = tf.placeholder(tf.float32, [], 'test_loss')
            self.test_accuracy = tf.placeholder(tf.float32, [], 'test_accuracy')
            self.test_sent_accuracy = tf.placeholder(tf.float32, [], 'test_sent_accuracy')
            tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_loss', self.test_loss))
            tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_accuracy', self.test_accuracy))
            tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_sent_accuracy', self.test_sent_accuracy))
            self.test_summaries = tf.summary.merge_all('test_summaries_collection')
def decode_param_command(args, **kwargs):
    """Dump every loaded parameter tensor as a text file under args.outdir.

    Parameter names are URL-quoted (keeping '/' and ' ') and then '/' is
    mapped to '~' to produce flat file names.  Always returns True.
    """
    os.makedirs(args.outdir, exist_ok=True)
    logger.log(99, 'Loading parameters...')
    load_parameters(args.param)
    for key, variable in get_parameters(grad_only=False).items():
        logger.log(99, key)
        fname = urllib.parse.quote(key, safe='/ ').replace('/', '~') + '.txt'
        file_path = os.path.join(args.outdir, fname)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        save_param_in_txt(variable.d, file_path)
    logger.log(99, 'Decode Parameter Completed.')
    return True
class ArgumentBase():
    """Adapts GEMM operands (A, B, C, D) from multiple frontends — numpy,
    torch, cupy, or raw CUDA device pointers — into device pointers
    (ptr_A..ptr_D) usable by the kernel launcher.

    For the numpy path, host data is copied into device buffers and D is
    copied back to the host in sync().
    """

    def __init__(self, A: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]', B: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]', C: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]', D: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]', **kwargs) -> None:
        # Optional 'bias' flag; defaults to False.
        if ('bias' in kwargs.keys()):
            self.bias = kwargs['bias']
        else:
            self.bias = False
        if isinstance(A, np.ndarray):
            # numpy frontend: upload A/B/C, allocate D (True = output buffer),
            # and remember the host D array so sync() can copy results back.
            self.host_D = D
            self.buffer_A = NumpyFrontend.argument(A, False)
            self.buffer_B = NumpyFrontend.argument(B, False)
            self.buffer_C = NumpyFrontend.argument(C, False)
            self.buffer_D = NumpyFrontend.argument(D, True)
            self.ptr_A = self.buffer_A.ptr
            self.ptr_B = self.buffer_B.ptr
            self.ptr_C = self.buffer_C.ptr
            self.ptr_D = self.buffer_D.ptr
            self.tensor_c_numel = C.size
        elif (torch_available and isinstance(A, torch.Tensor)):
            # torch tensors are already on device; just extract pointers.
            self.ptr_A = TorchFrontend.argument(A)
            self.ptr_B = TorchFrontend.argument(B)
            self.ptr_C = TorchFrontend.argument(C)
            self.ptr_D = TorchFrontend.argument(D)
            self.tensor_c_numel = C.numel()
        elif isinstance(A, cuda.CUdeviceptr):
            # Raw device pointers: used as-is (no element count available).
            self.ptr_A = A
            self.ptr_B = B
            self.ptr_C = C
            self.ptr_D = D
        elif (cupy_available and isinstance(A, cp.ndarray)):
            self.ptr_A = CupyFrontend.argument(A)
            self.ptr_B = CupyFrontend.argument(B)
            self.ptr_C = CupyFrontend.argument(C)
            self.ptr_D = CupyFrontend.argument(D)
            self.tensor_c_numel = C.size
        else:
            raise TypeError('Unsupported Frontend. Only support numpy and torch')

    def sync(self, stream_sync=True):
        """Synchronize the device and, for the numpy frontend, copy the
        output buffer D back into the host array.

        Raises:
            RuntimeError: on any CUDA error from sync or the D2H copy.
        """
        if stream_sync:
            (err,) = cudart.cudaDeviceSynchronize()
            if (err != cuda.CUresult.CUDA_SUCCESS):
                raise RuntimeError(('CUDA Error %s' % str(err)))
        # host_D only exists for the numpy path (set in __init__).
        if hasattr(self, 'host_D'):
            (err,) = cuda.cuMemcpyDtoH(self.host_D, self.ptr_D, (self.host_D.size * self.host_D.itemsize))
            if (err != cuda.CUresult.CUDA_SUCCESS):
                raise RuntimeError(('CUDA Error %s' % str(err)))
# NOTE(review): the '@pytest.mark' prefix of these decorators appears to have
# been lost during extraction — presumably both lines were
# @pytest.mark.parametrize(...); confirm against the original file.
.parametrize('batch_size', [1, 2, 5, 100])
.parametrize('mask_distance,expected', [(1, ((- 2.), (- 2.))), (2, ((- 0.), (- 0.))), (5, ((- 0.), (- 0.)))])
def test_likelihood_batch_handles_batch_sizes(msa_sampler, msa_batch_example, batch_size, mask_distance, expected):
    """Masked log-likelihood results must not depend on batch_size, and each
    sequence's summed score must equal the mean of its per-position scores."""
    result = list(msa_sampler.log_likelihood_batch(msa_batch_example, target_index=4, with_masking=True, mask_distance=mask_distance, batch_size=batch_size))
    # Sequence-level scores match the parametrized expectations.
    assert (result[0][0] == pytest.approx(expected[0]))
    # Per-position scores are consistent with the sequence-level score.
    assert (mean(result[0][1]) == pytest.approx(result[0][0]))
    assert (result[1][0] == pytest.approx(expected[1]))
    assert (mean(result[1][1]) == pytest.approx(result[1][0]))
def _make_leducHoldem_dwg(dwg, state: LeducHoldemState, config):
    """Render a Leduc Hold'em state as an svgwrite group.

    Draws the two players' private cards with chip counts (bottom), a
    horizontal divider, and the public card (top), whose outline color
    indicates whose round it is.
    """
    GRID_SIZE = config['GRID_SIZE']
    BOARD_SIZE = config['BOARD_WIDTH']
    color_set = config['COLOR_SET']
    # Background covering the whole board.
    dwg.add(dwg.rect((0, 0), ((BOARD_SIZE * GRID_SIZE), (BOARD_SIZE * GRID_SIZE)), fill=color_set.background_color))
    board_g = dwg.g()
    # Player 0: card box, card rank, and chip count (left side).
    board_g.add(dwg.rect((0, (4 * GRID_SIZE)), ((2 * GRID_SIZE), (3 * GRID_SIZE)), fill=color_set.background_color, stroke=color_set.grid_color, stroke_width='2px', rx='5px', ry='5px'))
    board_g.add(dwg.text(text=CARD[state._cards[0]], insert=(GRID_SIZE, (5 * GRID_SIZE)), fill=color_set.text_color, font_size='40px', font_family='Courier'))
    board_g.add(dwg.text(text=f'chip +{state._chips[0]}', insert=(0, (7.6 * GRID_SIZE)), fill=color_set.text_color, font_size='18px', font_family='Courier'))
    # Player 1: card box, card rank, and right-aligned chip count.
    board_g.add(dwg.rect(((6 * GRID_SIZE), (4 * GRID_SIZE)), ((2 * GRID_SIZE), (3 * GRID_SIZE)), fill=color_set.background_color, stroke=color_set.grid_color, stroke_width='2px', rx='5px', ry='5px'))
    board_g.add(dwg.text(text=CARD[state._cards[1]], insert=((7 * GRID_SIZE), (5 * GRID_SIZE)), fill=color_set.text_color, font_size='40px', font_family='Courier'))
    chip = f'chip +{state._chips[1]}'
    # 10px-per-character offset approximates right alignment for Courier.
    board_g.add(dwg.text(text=chip, insert=(((8 * GRID_SIZE) - (10 * len(chip))), (7.6 * GRID_SIZE)), fill=color_set.text_color, font_size='18px', font_family='Courier'))
    # Divider between the public area (top) and the players' area (bottom).
    board_g.add(dwg.line(start=(0, (3.5 * GRID_SIZE)), end=((8 * GRID_SIZE), (3.5 * GRID_SIZE)), stroke=color_set.grid_color, stroke_width='2px'))
    # Public card: stroke/text color reflects the current round's player color.
    board_g.add(dwg.rect(((3 * GRID_SIZE), 0), ((2 * GRID_SIZE), (3 * GRID_SIZE)), fill=color_set.background_color, stroke=(color_set.p1_color if (state._round == 0) else color_set.p2_color), stroke_width='2px', rx='5px', ry='5px'))
    board_g.add(dwg.text(text=CARD[state._cards[2]], insert=((4 * GRID_SIZE), GRID_SIZE), fill=(color_set.p1_color if (state._round == 0) else color_set.p2_color), font_size='40px', font_family='Courier'))
    return board_g
class LinearReLU(nnqat.Linear):
    r"""Quantization-aware-training LinearReLU: a Linear layer with
    fake-quantized weights whose output is passed through ReLU.

    Converted from / to ``torch.nn.intrinsic.LinearReLU`` via ``from_float``.
    """
    _FLOAT_MODULE = torch.nn.intrinsic.LinearReLU

    def __init__(self, in_features, out_features, bias=True, qconfig=None):
        super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)

    def forward(self, input):
        # Fake-quantize the weight, then compute linear + fused ReLU.
        return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))

    # BUGFIX: from_float takes `cls` and is called on the class; it must be a
    # classmethod (as in the upstream torch implementation) or the call
    # LinearReLU.from_float(mod) would pass `mod` as `cls`.
    @classmethod
    def from_float(cls, mod):
        return super(LinearReLU, cls).from_float(mod)
class TestNetwork(unittest.TestCase):
    """Unit tests for the Network DAG container: input layer handling, layer
    addition/validation, external layers, and graph queries
    (prevs/nexts/firsts/lasts) plus container protocol behavior."""

    def setUp(self):
        # Baseline net: input(3,224) -> c1 conv -> p1 pool -> f1 fc.
        self.network = Network('test_net')
        self.network.set_input_layer(InputLayer(3, 224))
        self.network.add('c1', ConvLayer(3, 64, 224, 3))
        self.network.add('p1', PoolingLayer(64, 7, 32))
        self.network.add('f1', FCLayer(64, 1000, 7))

    def test_set_input_layer(self):
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 24))
        self.assertIsInstance(network.input_layer(), InputLayer)
        self.assertEqual(network.input_layer().nofm, 3)
        self.assertEqual(network.input_layer().hofm, 24)
        self.assertEqual(network.input_layer().wofm, 24)
        # The input layer does not count toward len().
        self.assertEqual(len(network), 0)

    def test_set_input_layer_type(self):
        # Only InputLayer instances are accepted as input layers.
        network = Network('test_net')
        with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
            network.set_input_layer(Layer(3, 24))
        with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
            network.set_input_layer(ConvLayer(3, 8, 24, 3))

    def test_set_input_layer_duplicate(self):
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 24))
        with self.assertRaisesRegex(KeyError, 'Network: .*input.*'):
            network.set_input_layer(InputLayer(3, 24))

    def test_add(self):
        self.assertEqual(len(self.network), 3)
        # Fan-out from p1, then merge/eltwise joins downstream.
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
        self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
        self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
        self.assertEqual(len(self.network), 7)

    def test_add_same_key(self):
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 224))
        network.add('c1', ConvLayer(3, 64, 224, 3))
        with self.assertRaisesRegex(KeyError, 'Network: .*c1.*'):
            network.add('c1', ConvLayer(64, 128, 224, 3))

    def test_add_no_input(self):
        # Layers cannot be added before the input layer is set.
        network = Network('test_net')
        with self.assertRaisesRegex(RuntimeError, 'Network: .*input.*'):
            network.add('c1', ConvLayer(3, 64, 224, 3))

    def test_add_no_prev(self):
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 224))
        network.add('c1', ConvLayer(3, 64, 224, 3))
        # A layer cannot name a not-yet-added layer (itself) as its prev.
        with self.assertRaisesRegex(KeyError, 'Network: .*prev.*p1.*'):
            network.add('p1', PoolingLayer(64, 7, 32), prevs='p1')

    def test_add_invalid_type(self):
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 224))
        with self.assertRaisesRegex(TypeError, 'Network: .*Layer.*'):
            network.add('c1', (3, 64, 224, 3))

    def test_add_unmatch_prev(self):
        # Each failed add must leave the network unchanged.
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 224))
        network.add('c1', ConvLayer(3, 64, 224, 3))
        with self.assertRaisesRegex(ValueError, 'Network: .*c1.*p1.*mismatch fmap.*'):
            network.add('p1', PoolingLayer(64, 7, 2))
        self.assertEqual(len(network), 1)
        with self.assertRaisesRegex(ValueError, 'Network: .*c1.*c2.*mismatch fmap.*'):
            network.add('c2', ConvLayer(64, 128, 220, 3))
        self.assertEqual(len(network), 1)
        with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*p1.*'):
            network.add('p1', PoolingLayer(32, 7, 32))
        self.assertEqual(len(network), 1)
        with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*c2.*'):
            network.add('c2', ConvLayer(32, 128, 224, 3))
        self.assertEqual(len(network), 1)
        network.add('c2', ConvLayer(64, 128, 224, 3))
        with self.assertRaisesRegex(ValueError, 'Network: .*c1 | c2.*prev.*p1.*'):
            network.add('p1', PoolingLayer(128, 7, 32), prevs=('c1', 'c2'))
        self.assertEqual(len(network), 2)

    def test_add_ext(self):
        # External layers are registered but excluded from len().
        self.assertEqual(len(self.network), 3)
        self.network.add_ext('e0', InputLayer(3, 24))
        self.assertIsInstance(self.network['e0'], InputLayer)
        self.assertEqual(self.network['e0'].nofm, 3)
        self.assertEqual(self.network['e0'].hofm, 24)
        self.assertEqual(self.network['e0'].wofm, 24)
        self.network.add_ext('e1', InputLayer(5, (16, 20)))
        self.assertIsInstance(self.network['e1'], InputLayer)
        self.assertEqual(self.network['e1'].nofm, 5)
        self.assertEqual(self.network['e1'].hofm, 16)
        self.assertEqual(self.network['e1'].wofm, 20)
        self.assertEqual(len(self.network), 3)

    def test_add_ext_same_key(self):
        network = Network('test_net')
        network.add_ext('e0', InputLayer(3, 24))
        with self.assertRaisesRegex(KeyError, 'Network: .*ext.*'):
            network.add_ext('e0', InputLayer(3, 24))

    def test_add_ext_invalid_type(self):
        network = Network('test_net')
        with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
            network.add_ext('e0', Layer(3, 24))
        with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
            network.add_ext('e0', ConvLayer(3, 8, 24, 3))

    def test_prevs(self):
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
        prevs = self.network.prevs('f1')
        self.assertTupleEqual(prevs, ('p1',))
        prevs = self.network.prevs('f2')
        self.assertTupleEqual(prevs, ('p1',))
        prevs = self.network.prevs('f3')
        self.assertTupleEqual(prevs, ('f1', 'f2'))

    def test_prevs_first(self):
        # Layers fed by the input layer report None as their prev.
        self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=self.network.INPUT_LAYER_KEY)
        prevs = self.network.prevs('c1')
        self.assertTupleEqual(prevs, (None,))
        prevs = self.network.prevs('c2')
        self.assertTupleEqual(prevs, (None,))

    def test_prevs_input(self):
        with self.assertRaisesRegex(ValueError, 'Network: .*input.*'):
            _ = self.network.prevs(self.network.INPUT_LAYER_KEY)

    def test_prevs_ext_next(self):
        self.network.add_ext('e0', InputLayer(3, 224))
        self.network.add('n', ConvLayer(6, 3, 224, 1), prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
        prevs = self.network.prevs('n')
        self.assertTupleEqual(prevs, (None, 'e0'))

    def test_prevs_ext(self):
        self.network.add_ext('e0', InputLayer(3, 3))
        with self.assertRaisesRegex(ValueError, 'Network: .*ext.*'):
            _ = self.network.prevs('e0')

    def test_nexts(self):
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
        self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
        self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
        nexts = self.network.nexts('p1')
        self.assertTupleEqual(nexts, ('f1', 'f2'))
        nexts = self.network.nexts('f1')
        self.assertTupleEqual(nexts, ('f3', 'e4'))
        nexts = self.network.nexts('f2')
        self.assertTupleEqual(nexts, ('f3',))
        nexts = self.network.nexts('f3')
        self.assertTupleEqual(nexts, ('e4',))

    def test_nexts_last(self):
        # Terminal layers report None as their next.
        nexts = self.network.nexts('f1')
        self.assertTupleEqual(nexts, (None,))
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        nexts = self.network.nexts('f1')
        self.assertTupleEqual(nexts, (None,))
        nexts = self.network.nexts('f2')
        self.assertTupleEqual(nexts, (None,))

    def test_nexts_input(self):
        nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
        self.assertTupleEqual(nexts, ('c1',))
        self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=self.network.INPUT_LAYER_KEY)
        self.network.add('c3', ConvLayer(6, 4, 224, 1), prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
        nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
        self.assertTupleEqual(nexts, ('c1', 'c2', 'c3'))

    def test_firsts(self):
        firsts = self.network.firsts()
        self.assertTupleEqual(firsts, ('c1',))
        self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=self.network.INPUT_LAYER_KEY)
        self.network.add('c3', ConvLayer(6, 4, 224, 1), prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
        firsts = self.network.firsts()
        self.assertTupleEqual(firsts, ('c1', 'c2'))
        self.assertIn('c1', firsts)
        self.assertNotIn('c3', firsts)

    def test_firsts_ext(self):
        # Layers fed only by external/input layers count as firsts.
        self.network.add_ext('e0', InputLayer(3, 224))
        self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=('e0',))
        self.network.add('c3', ConvLayer(67, 3, 224, 1), prevs=('e0', 'c1'))
        self.network.add('c4', ConvLayer(6, 3, 224, 1), prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
        firsts = self.network.firsts()
        self.assertIn('c2', firsts)
        self.assertNotIn('c3', firsts)
        self.assertIn('c4', firsts)

    def test_lasts(self):
        lasts = self.network.lasts()
        self.assertTupleEqual(lasts, ('f1',))
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        lasts = self.network.lasts()
        self.assertTupleEqual(lasts, ('f1', 'f2'))

    def test_ext_layers(self):
        self.assertTupleEqual(self.network.ext_layers(), tuple())
        self.network.add_ext('e0', InputLayer(3, 224))
        self.assertTupleEqual(self.network.ext_layers(), ('e0',))
        self.network.add_ext('e1', InputLayer(3, 224))
        self.assertTupleEqual(self.network.ext_layers(), ('e0', 'e1'))

    def test_contains(self):
        self.assertIn('c1', self.network)
        self.assertIn('p1', self.network)
        self.assertIn('f1', self.network)
        self.assertNotIn('f2', self.network)
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        self.assertIn('f2', self.network)

    def test_len(self):
        self.assertEqual(len(self.network), 3)
        network = Network('test_net')
        self.assertEqual(len(network), 0)
        network.set_input_layer(InputLayer(3, 224))
        self.assertEqual(len(network), 0)
        network.add('c1', ConvLayer(3, 4, 224, 1))
        self.assertEqual(len(network), 1)
        self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
        self.assertEqual(len(self.network), 4)
        self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
        self.assertEqual(len(self.network), 5)
        self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
        self.assertEqual(len(self.network), 6)
        self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
        self.assertEqual(len(self.network), 7)

    def test_iter(self):
        num = 0
        for layer in self.network:
            self.assertIn(layer, self.network)
            self.assertIsInstance(self.network[layer], Layer)
            num += 1
        self.assertEqual(len(self.network), num)
        # Iterating a net with only an input layer yields nothing.
        network = Network('test_net')
        network.set_input_layer(InputLayer(3, 224))
        with self.assertRaises(StopIteration):
            _ = next(iter(network))

    def test_contains_ext(self):
        self.assertNotIn('e0', self.network)
        self.network.add_ext('e0', InputLayer(3, 224))
        self.assertIn('e0', self.network)

    def test_len_ext(self):
        # External layers do not count toward len().
        self.assertEqual(len(self.network), 3)
        self.network.add_ext('e0', InputLayer(3, 224))
        self.assertEqual(len(self.network), 3)

    def test_iter_ext(self):
        # External layers are excluded from iteration.
        self.network.add_ext('e0', InputLayer(3, 224))
        for layer in self.network:
            self.assertNotEqual(layer, 'e0')

    def test_getitem(self):
        self.assertIsInstance(self.network['c1'], ConvLayer)
        self.assertIsInstance(self.network['p1'], PoolingLayer)
        self.assertIsInstance(self.network['f1'], FCLayer)

    def test_getitem_error(self):
        with self.assertRaisesRegex(KeyError, 'Network: .*c2.*'):
            _ = self.network['c2']

    def test_str(self):
        string = str(self.network)
        for layer in self.network:
            self.assertIn(layer, string)
def main():
    """CLI entry point: train one (or several) attention models on a dataset.

    Builds the output directory name from dataset/model/tag, selects the
    matching config file, and dispatches to fresh training, recovery from an
    interrupted run, or continued training of an existing model.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model', type=str, required=True, help='The type of model to train', choices=['hanrnn', 'hanconv', 'flanrnn', 'flanconv', 'han_encless', 'flan_encless'])
    parser.add_argument('--dataset-name', type=str, required=True, help='Which dataset to train the model on', choices=['amazon', 'yahoo10cat', 'yelp', 'imdb', 'whateverDatasetYouHaveInMind'])
    parser.add_argument('--gpu', type=int, required=True, help='GPU to use (can supply -1 if not using GPU)')
    parser.add_argument('--optional-model-tag', type=str, required=False, default='', help='Optional tag to append to end of model serialization dir')
    # NOTE(review): the three flags below use type=bool, so any non-empty
    # string (including "False") parses as True — confirm intended usage.
    parser.add_argument('--train-multiple-models', type=str, required=False, default='False', help='Whether to train multiple models on the same data, reusing the loaded iterator')
    parser.add_argument('--crashed-last-time', type=bool, required=False, default=False, help="Whether we're resuming from a crashed run")
    parser.add_argument('--train-existing-model', type=bool, required=False, default=False, help="Whether we're resuming from a preexisting model")
    parser.add_argument('--continue-on-same-config-file', type=bool, required=False, default=False, help="Whether we're resuming from an interrupted run")
    parser.add_argument('--output-dir-base', required=False, default=base_serialized_models_dir, help="Which directory each individual model's serialization directory sits in")
    parser.add_argument('--dir-with-config-files', required=False, type=str, default=directory_with_config_files, help='Base directory for all config files')
    args = parser.parse_args()
    # Normalize directory arguments to end with '/' and ensure they exist.
    if (not args.output_dir_base.endswith('/')):
        args.output_dir_base += '/'
    if (not os.path.isdir(args.output_dir_base)):
        os.makedirs(args.output_dir_base)
    if (not args.dir_with_config_files.endswith('/')):
        args.dir_with_config_files += '/'
    # Serialization dir: <base>/<dataset>-<model>[-<tag>].
    output_dir = (((args.output_dir_base + args.dataset_name) + '-') + args.model)
    if (args.optional_model_tag != ''):
        output_dir += ('-' + args.optional_model_tag)
    if ((not args.continue_on_same_config_file) and (not args.train_existing_model)):
        assert (not os.path.isdir(output_dir)), (('Output dir ' + str(output_dir)) + ' must not already exist.')
    import_submodules('attn_tests_lib')
    import_submodules('textcat')
    config_file = ((args.dir_with_config_files + args.dataset_name) + corresponding_config_files[args.model])
    edit_config_file_to_have_gpu(config_file, args.gpu)
    if args.train_multiple_models.lower().startswith('t'):
        train_seq_models_reuse_iterator(config_file, output_dir, args.gpu)
    elif (args.continue_on_same_config_file or (not args.train_existing_model)):
        # Fresh run, or recovery of an interrupted run in the same dir.
        print(('Starting to train model from ' + config_file))
        train_model_from_file(config_file, output_dir, recover=args.continue_on_same_config_file)
    else:
        # Continue training an existing model: pick the next free numbered
        # dir (<dir>-2, <dir>-3, ...) and seed it with the previous best
        # model's weights.
        print(('Starting to train model from ' + config_file))
        assert os.path.isdir(output_dir)
        if output_dir.endswith('/'):
            output_dir = output_dir[:(-1)]
        original_output_dir = output_dir
        next_available_ind = 2
        while os.path.isdir(((original_output_dir + '-') + str(next_available_ind))):
            next_available_ind += 1
        output_dir = (((original_output_dir + '-') + str(next_available_ind)) + '/')
        output_dir_to_load_prev_best_model_from = original_output_dir
        if (next_available_ind > 2):
            output_dir_to_load_prev_best_model_from += (('-' + str((next_available_ind - 1))) + '/')
        new_params = Params.from_file(config_file, '')
        prev_best_model = load_prev_best_model(output_dir_to_load_prev_best_model_from)
        return train_model_but_load_prev_model_weights(new_params, output_dir, prev_best_model, False, False, False)
class PreHook(abc.ABC):
    """Hook invoked before a traced function is executed.

    Implementations receive the graph node, the callable about to run, and
    its arguments, and may return replacement ``(args, kwargs)`` for the call.
    """

    def __call__(self, node: Node, function: Callable, args: tuple, kwargs: dict) -> Tuple[(Optional[Tuple], Optional[Dict])]:
        # Intentionally a no-op; concrete hooks override this.
        pass
def phn2txt(phn, phoneme_map):
    """Convert a phoneme sequence to text using *phoneme_map*."""
    # NOTE(review): MULTI_SPACE matches are replaced with the EMPTY string,
    # deleting runs of whitespace entirely rather than collapsing them to a
    # single space — confirm this is the intended behavior.
    joined = ''.join(phoneme_map[p] for p in phn).strip()
    return MULTI_SPACE.sub('', joined)
def get_device_option(device):
    """Translate a DeviceType-based descriptor into a caffe2 DeviceOption."""
    proto_device_type = {
        DeviceType.CPU: caffe2_pb2.CPU,
        DeviceType.CUDA: workspace.GpuDeviceType,
    }[device.type]
    return core.DeviceOption(proto_device_type, device.device_id)
def get_left_tokens(c, window=3, attrib='words', n_max=1, case_sensitive=False):
    """Return up to *window* tokens to the left of the earlier span in
    candidate *c*, lowercased unless *case_sensitive*."""
    # Pick whichever span starts first in the text.
    if c[0].char_start < c[1].char_start:
        left = c[0]
    else:
        left = c[1]
    tokens = get_left_span(left, window=window).get_attrib_tokens(attrib)
    if case_sensitive:
        return tokens
    return [t.lower() for t in tokens]
def remove_spectral_norm(module, name='weight'):
    """Remove the spectral-norm reparameterization *name* from *module*.

    Restores the plain parameter and unregisters the forward pre-hook.

    Raises:
        ValueError: if no spectral-norm hook named *name* is registered.
    """
    for hook_id, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[hook_id]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
def module_build_impl(a):
    """Build a Taichi AOT module from a Python script.

    Runs the script (which registers kernels into ``_aot_kernels``), collects
    declared capability requirements, instantiates template kernels with
    placeholder arguments, and archives/saves the module to ``a.output``
    (default: ``<source stem>.tcm``).
    """
    source_path = a.SOURCE
    module_path = a.output
    source_path = Path(source_path)
    assert source_path.name.endswith('.py'), 'Source must be a Python script.'
    if (module_path is None):
        # Default output: same stem as the source, .tcm extension.
        module_path = f'{source_path.name[:(-3)]}.tcm'
    module_path = Path(module_path)
    print(f'Building Taichi module: {source_path}')
    print()
    # Execute the user script; its globals carry optional REQUIRED_CAPS.
    d = runpy.run_path(str(source_path), run_name='__main__')
    print()
    required_caps = (d['REQUIRED_CAPS'] if ('REQUIRED_CAPS' in d) else [])
    assert isinstance(required_caps, list), 'REQUIRED_CAPS must be a list.'
    if required_caps:
        print('Module requires the following capabilities:')
        for cap in required_caps:
            print(f'  - {cap}')
        print()
    m = Module(caps=required_caps)
    for record in _aot_kernels:
        print('Added kernel:', record.name)
        template_args = None
        if record.template_types:
            # Template kernels need concrete placeholder values per type:
            # scalars get 0 / 0.0, ndarrays/textures get minimal dummies.
            print('  Template types:')
            template_args = {}
            for (k, v) in record.template_types.items():
                print(f'    - {k}: {v}')
                if (isinstance(v, int) or (id(v) in integer_type_ids)):
                    value = 0
                elif (isinstance(v, float) or (id(v) in real_type_ids)):
                    value = 0.0
                elif isinstance(v, NdarrayType):
                    if ((v.ndim is None) or (v.ndim <= 0)):
                        raise ValueError('Ndarray template type must specify a non-zero dimension.')
                    value = taichi.ndarray(v.dtype, ((1,) * v.ndim))
                elif isinstance(v, TextureType):
                    value = taichi.Texture(taichi.Format.rgba8, ((4,) * v.num_dimensions))
                elif isinstance(v, RWTextureType):
                    value = taichi.Texture(v.fmt, ((4,) * v.num_dimensions))
                else:
                    raise ValueError(f'Unsupported template type: {type(v)}')
                template_args[k] = value
        m.add_kernel(record.kernel, template_args)
    print()
    # .tcm outputs are archived (zipped); anything else is saved as a dir.
    if module_path.name.endswith('.tcm'):
        m.archive(str(module_path))
    else:
        m.save(str(module_path))
    print(f'Module is archive to: {module_path}')
    print()
class Network(object):
    """Train/eval harness wrapping a ResNet1D on synthetic generated data.

    Builds train and test DataLoaders from ``read_data_generated`` and exposes
    train/test/weight-IO methods (an interface that looks like a Ray worker —
    TODO confirm against the caller).
    """

    def __init__(self, n_length, base_filters, kernel_size, n_block, n_channel):
        use_cuda = torch.cuda.is_available()
        n_samples = 1000
        # NOTE(review): self-assignment is a no-op — presumably this was meant
        # to store the parameter (e.g. `self.n_length = n_length`); confirm.
        n_length = n_length
        n_classes = 2
        batch_size = 64
        # Synthetic training split.
        (data, label) = read_data_generated(n_samples=n_samples, n_length=n_length, n_channel=n_channel, n_classes=n_classes)
        print(data.shape, Counter(label))
        dataset = MyDataset(data, label)
        dataloader = DataLoader(dataset, batch_size=batch_size)
        # Independent synthetic test split; labels kept for evaluation.
        (data_test, label_test) = read_data_generated(n_samples=n_samples, n_length=n_length, n_channel=n_channel, n_classes=n_classes)
        self.label_test = label_test
        print(data_test.shape, Counter(label_test))
        dataset_test = MyDataset(data_test, label_test)
        dataloader_test = DataLoader(dataset_test, batch_size=batch_size, drop_last=False)
        self.device = device = torch.device(('cuda' if use_cuda else 'cpu'))
        (self.train_loader, self.test_loader) = (dataloader, dataloader_test)
        # downsample/increase-filter gaps scale with depth but never hit zero.
        self.model = ResNet1D(in_channels=n_channel, base_filters=base_filters, kernel_size=kernel_size, stride=2, n_block=n_block, groups=base_filters, n_classes=n_classes, downsample_gap=max((n_block // 8), 1), increasefilter_gap=max((n_block // 4), 1), verbose=False).to(device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)

    def train(self):
        """Run one training pass, then return the test-set evaluation."""
        train(self.model, self.device, self.train_loader, self.optimizer)
        return test(self.model, self.device, self.test_loader, self.label_test)

    def test(self):
        """Evaluate the current model on the held-out test loader."""
        return test(self.model, self.device, self.test_loader, self.label_test)

    def get_weights(self):
        """Return the model's state dict."""
        return self.model.state_dict()

    def set_weights(self, weights):
        """Load a state dict into the model."""
        self.model.load_state_dict(weights)

    def save(self):
        # Checkpoint path is hard-coded.
        torch.save(self.model.state_dict(), 'synthetic_ray.pt')

    def load(self):
        self.model.load_state_dict(torch.load('synthetic_ray.pt'))
def get_data_correlated(with_input_blocks, corr_coef=0.6):
    """Simulate train/val/test splits with two correlated input features.

    The RNG seed, simulator configuration and the order of the sampling calls
    are identical to the original implementation, so the returned data match
    exactly.
    """
    np.random.seed(111)
    num_samples, num_features = 5000, 4
    # Additive effects are all zero; interaction effects are sparse.
    additive_effects = np.zeros(4, dtype='float32')
    interaction_effects = np.array([0, 3, 0, 0, 0, 0, 0, 0, -2, 0], dtype='float32')
    # Identity covariance with a single off-diagonal correlation (features 0 and 2).
    covariance = np.eye(4)
    covariance[0, 2] = covariance[2, 0] = corr_coef
    simulator = CorrelatedDataSimulator(n=num_samples, p=num_features, noise_var=0.1, data_cov_matrix=covariance, degree=2, discretize_beta=True, with_input_blocks=with_input_blocks)
    simulator.set_effect(additive_effects, interaction_effects)
    (X_train, y_train) = simulator.sample_data()
    (X_val, y_val) = simulator.sample_data(N=500)
    (X_test, y_test) = simulator.sample_data(N=500)
    return ((X_train, y_train), (X_val, y_val), (X_test, y_test))
# NOTE(review): the bare expression below looks like a mangled decorator
# (likely `@pytest_utils.in_tempdir`, with the leading `@pytest` lost when the
# file was flattened) — confirm against the original source.
_utils.in_tempdir
def test_dory_search_nomatch(location):
    # End-to-end check: indexing the dory cDBG by k-mer and querying with a
    # random sequence that matches nothing must still exit cleanly (code 0).
    copy_dory_catlas()
    testdata = relative_file('data/random-query-nomatch.fa')
    shutil.copyfile(testdata, 'random-query.fa')
    args = '-k 21 dory_k21 --contigs-db dory_k21/bcalm.unitigs.db'.split()
    print('** running index_cdbg_by_kmer')
    assert (index_cdbg_by_kmer.main(args) == 0)
    args = 'dory_k21 dory_k21_r1 dory_k21_r1_search_oh0 --query random-query.fa -k 21 --contigs-db dory_k21/bcalm.unitigs.db'.split()
    try:
        assert (query_by_sequence.main(args) == 0)
    except SystemExit as e:
        # The tool may exit via sys.exit; only a zero exit code is acceptable.
        assert (e.code == 0), str(e)
class TransHeadNet(nn.Module):
    """Translation head: a stack of conv+norm+ReLU layers followed by an MLP
    that regresses an ``output_dim``-dimensional vector (3 by default).

    :param in_channels: channels of the input feature map
    :param num_layers: number of conv blocks
    :param num_filters: channels of every conv block
    :param kernel_size: conv kernel size; only 3 ("same" padding 1) and
        2 (padding 0) are supported
    :param output_dim: size of the regressed vector
    :param freeze: if True, forward runs under ``torch.no_grad`` and the
        output is detached
    :param norm: normalization type passed to ``get_norm``
    :param num_gn_groups: group count when ``norm`` selects GroupNorm
    :raises ValueError: for unsupported kernel sizes (previously ``padding``
        was silently left undefined and raised ``NameError`` later)
    """

    def __init__(self, in_channels, num_layers=3, num_filters=256, kernel_size=3, output_dim=3, freeze=False, norm='BN', num_gn_groups=32):
        super().__init__()
        self.freeze = freeze
        if (kernel_size == 3):
            padding = 1
        elif (kernel_size == 2):
            padding = 0
        else:
            # Fail fast instead of leaving `padding` unbound (NameError below).
            raise ValueError('Unsupported kernel_size: {} (expected 2 or 3)'.format(kernel_size))
        self.features = nn.ModuleList()
        for i in range(num_layers):
            _in_channels = (in_channels if (i == 0) else num_filters)
            self.features.append(nn.Conv2d(_in_channels, num_filters, kernel_size=kernel_size, stride=1, padding=padding, bias=False))
            self.features.append(get_norm(norm, num_filters, num_gn_groups=num_gn_groups))
            self.features.append(nn.ReLU(inplace=True))
        # MLP head; assumes an 8x8 spatial map with 256 channels after the
        # conv stack — TODO confirm against the backbone's output shape.
        self.linears = nn.ModuleList()
        self.linears.append(nn.Linear(((256 * 8) * 8), 4096))
        self.linears.append(nn.ReLU(inplace=True))
        self.linears.append(nn.Linear(4096, 4096))
        self.linears.append(nn.ReLU(inplace=True))
        self.linears.append(nn.Linear(4096, output_dim))
        # Weight init: small-std normal for convs/linears, constant for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.001)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)
            elif isinstance(m, nn.ConvTranspose2d):
                normal_init(m, std=0.001)
            elif isinstance(m, nn.Linear):
                normal_init(m, std=0.001)

    def _forward_impl(self, x):
        """Shared forward pass: conv stack -> flatten -> MLP."""
        for layer in self.features:
            x = layer(x)
        x = x.view((- 1), ((256 * 8) * 8))
        for layer in self.linears:
            x = layer(x)
        return x

    def forward(self, x):
        if self.freeze:
            # Frozen head: no gradients flow and the result is detached.
            with torch.no_grad():
                return self._forward_impl(x).detach()
        return self._forward_impl(x)
def get_split_list(data_list):
    """Split every item into its even- and odd-indexed elements.

    Returns a list containing the even-index slices of all items followed by
    the odd-index slices of all items.
    """
    even_parts = [item[0::2] for item in data_list]
    odd_parts = [item[1::2] for item in data_list]
    return even_parts + odd_parts
class Window():
    """Fixed-capacity sliding window over a stream of numbers."""

    def __init__(self, window_size):
        # Maximum number of retained values.
        self.window_size = window_size
        self.window = []

    def update(self, num):
        """Append ``num``, evicting the oldest value once capacity is exceeded."""
        self.window.append(num)
        while len(self.window) > self.window_size:
            self.window.pop(0)

    def get(self):
        """Return the current contents (the live internal list, not a copy)."""
        return self.window
class TestDetector():
    """Unit tests for Detector: release selection, md5, interface and jar URL."""

    def setup(self):
        # Fresh detector id and an isolated temp dir per test (nose-style setup).
        self.detector_id = '-detector-'
        self.temp_dir = mkdtemp(prefix='mubench-detector_')

    def teardown(self):
        remove_tree(self.temp_dir)

    def test_raises_on_missing_file(self):
        # No releases index written at all -> constructor must fail.
        assert_raises(ValueError, Detector, self.temp_dir, '-detector-', [], Detector.DEFAULT_RELEASE)

    def test_raises_value_error_on_no_release(self):
        self.setup_releases([])
        assert_raises(ValueError, Detector, self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)

    def test_md5(self):
        self.setup_releases([{'md5': '-md5-', 'cli_version': RunnerInterfaceTestImpl.TEST_VERSION}])
        detector = Detector(self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)
        assert_equals('-md5-', detector.md5)

    def test_md5_defaults_to_none(self):
        self.setup_releases([{'cli_version': RunnerInterfaceTestImpl.TEST_VERSION}])
        detector = Detector(self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)
        assert_equals(detector.md5, Detector.NO_MD5)

    def test_interface(self):
        # The cli_version in the release selects the runner interface class.
        self.setup_releases([{'cli_version': RunnerInterfaceTestImpl.TEST_VERSION, 'md5': '-md5-'}])
        detector = Detector(self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)
        assert_is_instance(detector.runner_interface, RunnerInterfaceTestImpl)

    def test_raises_on_missing_cli_version(self):
        self.setup_releases([{'md5': '-md5-'}])
        assert_raises(ValueError, Detector, self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)

    def test_download_url(self):
        self.setup_releases([{'cli_version': RunnerInterfaceTestImpl.TEST_VERSION, 'md5': '-md5-'}])
        detector = Detector(self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)
        # URL layout: <base>/<release>/<cli_version>/<detector_id>.jar
        expected_url = '{}/{}/{}/{}.jar'.format(Detector.BASE_URL, Detector.DEFAULT_RELEASE, RunnerInterfaceTestImpl.TEST_VERSION, self.detector_id)
        assert_equals(expected_url, detector.jar_url)

    def test_gets_requested_release(self):
        self.setup_releases([{'md5': '-md5_1-', 'tag': '-release_1-', 'cli_version': '0.0.0'}, {'md5': '-md5_requested-', 'tag': '-release_requested-', 'cli_version': RunnerInterfaceTestImpl.TEST_VERSION}, {'md5': '-md5_3-', 'tag': '-release_3-', 'cli_version': '0.0.2'}])
        detector = Detector(self.temp_dir, self.detector_id, [], '-release_requested-')
        expected_url = '{}/-release_requested-/{}/{}.jar'.format(Detector.BASE_URL, RunnerInterfaceTestImpl.TEST_VERSION, self.detector_id)
        assert_equals(expected_url, detector.jar_url)
        assert_equals('-md5_requested-', detector.md5)

    def test_raises_on_no_matching_release(self):
        self.setup_releases([{'md5': '-md5-', 'tag': '-release-', 'cli_version': '0.0.1'}])
        assert_raises(ValueError, Detector, self.temp_dir, self.detector_id, [], '-unavailable_release-')

    def test_release_is_case_insensitive(self):
        # Requested tag in lowercase must match the uppercase release tag.
        self.setup_releases([{'md5': '-md5_1-', 'tag': '-release_1-', 'cli_version': '-version-'}, {'md5': '-md5_requested-', 'tag': 'RELEASE_REQUESTED', 'cli_version': RunnerInterfaceTestImpl.TEST_VERSION}, {'md5': '-md5_3-', 'tag': '-release_3-', 'cli_version': '-version-'}])
        detector = Detector(self.temp_dir, self.detector_id, [], 'release_requested')
        expected_url = '{}/release_requested/{}/{}.jar'.format(Detector.BASE_URL, RunnerInterfaceTestImpl.TEST_VERSION, self.detector_id)
        assert_equals(expected_url, detector.jar_url)
        assert_equals('-md5_requested-', detector.md5)

    def test_uses_first_release_by_default(self):
        self.setup_releases([{'md5': '-md5_requested-', 'tag': 'RELEASE_REQUESTED', 'cli_version': RunnerInterfaceTestImpl.TEST_VERSION}, {'md5': '-md5_1-', 'tag': '-release_1-', 'cli_version': '-version-'}, {'md5': '-md5_3-', 'tag': '-release_3-', 'cli_version': '-version-'}])
        detector = Detector(self.temp_dir, self.detector_id, [], Detector.DEFAULT_RELEASE)
        expected_url = '{}/release_requested/{}/{}.jar'.format(Detector.BASE_URL, RunnerInterfaceTestImpl.TEST_VERSION, self.detector_id)
        assert_equals(expected_url, detector.jar_url)
        assert_equals('-md5_requested-', detector.md5)

    def test_default_tag_is_not_empty(self):
        assert Detector.DEFAULT_RELEASE

    def setup_releases(self, releases):
        # Write the given release list as the detector's releases index file.
        releases_index = join(self.temp_dir, self.detector_id, Detector.RELEASES_FILE)
        write_yaml(releases, releases_index)
def get_random_data(num_samps=1000):
    """Generate uniform-random inputs of shape (num_samps, 10, 4) and targets
    of shape (num_samps,), all drawn from [0, 1)."""
    total_values = 10 * 4 * num_samps
    x = np.random.sample(total_values).reshape((num_samps, 10, 4))
    y = np.random.sample(num_samps)
    return (x, y)
class SubprocessTimeoutTest(unittest.TestCase):
    """Tests for sub.run's timeout behavior under a shell."""

    def setUp(self):
        # Run every command from this test file's directory.
        self._path = os.path.dirname(os.path.realpath(__file__))

    def _run(self, cmdline, timeout):
        """Invoke sub.run with the shared stdout/stderr/shell settings."""
        return sub.run(cmdline, {}, cwd=self._path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, timeout=timeout)

    def test_normal_exec_no_timeout(self):
        # Finishes well within the timeout: normal exit with captured output.
        (return_code, output, err) = self._run('sleep 1; echo Done', 10)
        self.assertEqual(0, return_code)
        self.assertEqual('Done\n', output)
        self.assertEqual(None, err)

    def test_exec_with_timeout(self):
        # Sleeps past the timeout: expect the timeout code and no output.
        (return_code, output, err) = self._run('sleep 100; echo Done', 1)
        self.assertEqual(sub.E_TIMEOUT, return_code)
        self.assertEqual('', output)
        self.assertEqual(None, err)

    def test_exec_with_timeout_python_interpreter(self):
        # A busy-looping child must also be killed on timeout.
        (return_code, output, err) = self._run('python3 -c "while True: pass"', 5)
        self.assertEqual(sub.E_TIMEOUT, return_code)
        self.assertEqual('', output)
        self.assertEqual(None, err)
def c2du(u):
    """Discretize a continuous control ``u`` into a bin index.

    The value is clipped just inside [-UMAX, UMAX] so the resulting index
    stays within the valid discrete range.
    """
    clipped = np.clip(u, -UMAX + 0.001, UMAX - 0.001)
    return int(np.floor((clipped + UMAX) / DU))
def main(config, args):
    """Build an agent from ``config``, train it epoch by epoch, and save
    checkpoints when performance milestones (or new maxima) are reached.

    Bug fix: the per-epoch log line previously called ``len()`` on the scalar
    ``np.mean(rewards)``, raising TypeError on the first epoch; the rewards
    list is now kept so its length can be reported.
    """
    # Swap in the old-style replay buffer before constructing the agent.
    del config.module_replay
    config.module_replay = OldReplayBuffer()
    torch.set_num_threads(min(4, args.num_envs))
    torch.set_num_interop_threads(min(4, args.num_envs))
    agent = mrl.config_to_agent(config)
    num_eps = max((args.num_eval_envs * 3), 10)
    res = np.mean(agent.eval(num_episodes=num_eps).rewards)
    agent.logger.log_color(f'Initial test reward ({num_eps} eps):', f'{res:.2f}')
    max_perf = 0
    for epoch in range(int((args.max_steps // args.epoch_len))):
        t = time.time()
        agent.train(num_steps=args.epoch_len)
        # Keep the per-episode rewards list: its length is logged below.
        rewards = agent.eval(num_episodes=num_eps).rewards
        agent.logger.log_color('Test reward ({} eps): {:.2f}'.format(len(rewards), np.mean(rewards)))
        res = int(np.mean(rewards))
        agent.logger.log_color('Epoch time:', '{:.2f}'.format((time.time() - t)), color='yellow')
        if (CHECKPOINT_DICT[args.env] and (res > CHECKPOINT_DICT[args.env][0])):
            # Milestone reached: consume it and checkpoint under the score.
            CHECKPOINT_DICT[args.env] = CHECKPOINT_DICT[args.env][1:]
            print('')
            print('Saving agent at epoch {}'.format(epoch))
            agent.save(f'performance_{res}')
            print('Reloading agent to confirm it works...')
            agent.load(f'performance_{res}')
            print('')
            max_perf = max(max_perf, res)
        elif (res > max_perf):
            print('')
            print('NEW MAX PERFORMANCE!')
            print('Saving agent at epoch {}'.format(epoch))
            agent.save('performance_MAX')
            print('Reloading agent to confirm it works...')
            agent.load('performance_MAX')
            print('')
            max_perf = max(max_perf, res)
class Llama2LoraKbit(CausalLoraKbitModel):
    """Causal LM wrapper — presumably LLaMA-2 with LoRA adapters under k-bit
    quantization (inferred from the names; confirm against the engine)."""

    # Registry key identifying this model configuration.
    config_name: str = 'llama2_lora_kbit'

    def __init__(self, weights_path: Optional[str]=None):
        """Initialize with the k-bit LoRA engine config.

        :param weights_path: optional path to pretrained weights.
        """
        super().__init__(LLama2LoraKbitEngine.config_name, weights_path)
@pytest.mark.mpi
def test_eq_commworld_1():
    """A compiled dace program comparing a duplicated communicator with
    MPI.COMM_WORLD must agree with the Python-level comparison.

    Fix: restores the decorators stripped by the source mangling — the bare
    `.mpi` fragment was `@pytest.mark.mpi`, and the inner function (annotated
    with dace types and called on a numpy array) needs `@dace.program`.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    comm2 = comm.Dup()

    @dace.program
    def eq_commworld_1(out: dace.bool[1]):
        out[0] = (comm2 == MPI.COMM_WORLD)

    res = np.zeros((1,), dtype=np.bool_)
    eq_commworld_1(res)
    assert (res[0] == (comm2 == MPI.COMM_WORLD))
@pytest.mark.parametrize('ratio, user_answer, item_answer, split_by_fraqtions', [(0.5, [[1, 1, 2, 2, 3, 3], [1, 1, 1, 2, 2, 2, 3, 3, 3]], [[1, 2, 1, 2, 1, 5], [3, 4, 5, 3, 9, 10, 3, 1, 2]], True), (0.1, [[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], [1, 2, 3]], [[1, 2, 3, 4, 1, 2, 3, 9, 1, 5, 3, 1], [5, 10, 2]], True), (0.5, [[1, 1, 1, 2, 2, 2, 3, 3, 3], [1, 1, 2, 2, 3, 3]], [[1, 2, 3, 1, 2, 3, 1, 5, 3], [4, 5, 9, 10, 1, 2]], False), (0.1, [[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], [1, 2, 3]], [[1, 2, 3, 4, 1, 2, 3, 9, 1, 5, 3, 1], [5, 10, 2]], False)])
@pytest.mark.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_ratio_splitter_without_drops(ratio, user_answer, item_answer, split_by_fraqtions, request, dataset_type):
    """RatioSplitter with cold users/items kept must produce the expected
    user/item id partition for both pandas and spark dataframes.

    Fix: restores the `@pytest.mark` prefix on both `.parametrize` decorator
    fragments that was stripped by the source mangling (without it the file
    is a syntax error).
    """
    dataframe = request.getfixturevalue(dataset_type)
    filtered_dataframe = RatioSplitter(test_size=ratio, divide_column='user_id', query_column='user_id', drop_cold_users=False, drop_cold_items=False, split_by_fraqtions=split_by_fraqtions).split(dataframe)
    # Column extraction differs between the pandas and spark fixtures.
    if (dataset_type == 'pandas_dataframe_test'):
        item_ids = _get_column_list_pandas(filtered_dataframe, 'item_id')
        user_ids = _get_column_list_pandas(filtered_dataframe, 'user_id')
    else:
        item_ids = _get_column_list(filtered_dataframe, 'item_id')
        user_ids = _get_column_list(filtered_dataframe, 'user_id')
    _check_assert(user_ids, item_ids, user_answer, item_answer)
def test_dot_matrix_vector_product():
    """rf.matmul contracting the shared dim acts as a matrix-vector product."""
    mat_raw = torch.tensor([[1.0, 2.0, 3.0], [(- 1.0), (- 2.0), (- 3.0)]])
    vec_raw = torch.tensor([4.0, 5.0])
    feature_dim = Dim(dimension=3)
    shared_dim = Dim(dimension=2)
    mat = Tensor(name='a', dims=[shared_dim, feature_dim], dtype='float32', raw_tensor=mat_raw)
    vec = Tensor(name='b', dims=[shared_dim], dtype='float32', raw_tensor=vec_raw)
    out = rf.matmul(mat, vec, reduce=[shared_dim])
    # Only the feature dim survives the contraction.
    assert (out.dims == (feature_dim,))
    # Column-wise: 1*4 + (-1)*5 = -1, 2*4 + (-2)*5 = -2, 3*4 + (-3)*5 = -3.
    assert (out.raw_tensor.tolist() == pytest.approx([(- 1.0), (- 2.0), (- 3.0)]))
class PreTrainedTokenizer(object):
    """Base class for tokenizers: manages special tokens, user-added tokens
    and loading/saving of pretrained vocabularies.

    NOTE(review): the mangled source had all decorator lines stripped —
    bare ``_token.setter`` expressions (NameError at class creation), getter
    methods used without calls (``self.vocab_size``, ``self.all_special_tokens``,
    proving they were ``@property``), missing ``@classmethod``/``@staticmethod``
    — and the no-limit ``max_len`` sentinel corrupted to ``int(.0)``. All are
    restored here (sentinel as ``int(1e12)``); logic is otherwise unchanged.
    """

    # Subclasses override these with their vocabulary-file metadata.
    vocab_files_names = {}
    pretrained_vocab_files_map = {}
    pretrained_init_configuration = {}
    max_model_input_sizes = {}
    SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', 'additional_special_tokens']

    @property
    def bos_token(self):
        """Beginning-of-sequence token (logs an error if unset)."""
        if (self._bos_token is None):
            logger.error('Using bos_token, but it is not set yet.')
        return self._bos_token

    @property
    def eos_token(self):
        """End-of-sequence token (logs an error if unset)."""
        if (self._eos_token is None):
            logger.error('Using eos_token, but it is not set yet.')
        return self._eos_token

    @property
    def unk_token(self):
        """Unknown token (logs an error if unset)."""
        if (self._unk_token is None):
            logger.error('Using unk_token, but it is not set yet.')
        return self._unk_token

    @property
    def sep_token(self):
        """Separator token (logs an error if unset)."""
        if (self._sep_token is None):
            logger.error('Using sep_token, but it is not set yet.')
        return self._sep_token

    @property
    def pad_token(self):
        """Padding token (logs an error if unset)."""
        if (self._pad_token is None):
            logger.error('Using pad_token, but it is not set yet.')
        return self._pad_token

    @property
    def cls_token(self):
        """Classification token (logs an error if unset)."""
        if (self._cls_token is None):
            logger.error('Using cls_token, but it is not set yet.')
        return self._cls_token

    @property
    def mask_token(self):
        """Mask token (logs an error if unset)."""
        if (self._mask_token is None):
            logger.error('Using mask_token, but it is not set yet.')
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """Extra special tokens list (logs an error if unset)."""
        if (self._additional_special_tokens is None):
            logger.error('Using additional_special_tokens, but it is not set yet.')
        return self._additional_special_tokens

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    @property
    def bos_token_id(self):
        """Vocabulary id of the beginning-of-sequence token."""
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self):
        """Vocabulary id of the end-of-sequence token."""
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self):
        """Vocabulary id of the unknown token."""
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self):
        """Vocabulary id of the separator token."""
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self):
        """Vocabulary id of the padding token."""
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def cls_token_id(self):
        """Vocabulary id of the classification token."""
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self):
        """Vocabulary id of the mask token."""
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self):
        """Vocabulary ids of the extra special tokens."""
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    def __init__(self, max_len=None, **kwargs):
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._additional_special_tokens = []
        # Effectively "no limit"; restored from the corrupted `int(.0)`.
        self.max_len = (max_len if (max_len is not None) else int(1e12))
        self.added_tokens_encoder = {}
        self.added_tokens_decoder = {}
        self.init_inputs = ()
        self.init_kwargs = {}
        # Assign any special tokens passed as keyword arguments.
        for (key, value) in kwargs.items():
            if (key in self.SPECIAL_TOKENS_ATTRIBUTES):
                if (key == 'additional_special_tokens'):
                    assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                else:
                    assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                setattr(self, key, value)

    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
        """Instantiate a tokenizer from a shortcut name or local path/url."""
        return cls._from_pretrained(*inputs, **kwargs)

    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        proxies = kwargs.pop('proxies', None)
        s3_models = list(cls.max_model_input_sizes.keys())
        vocab_files = {}
        init_configuration = {}
        if (pretrained_model_name_or_path in s3_models):
            # Shortcut name: resolve every vocab file from the hosted map.
            for (file_id, map_list) in cls.pretrained_vocab_files_map.items():
                vocab_files[file_id] = map_list[pretrained_model_name_or_path]
            if (cls.pretrained_init_configuration and (pretrained_model_name_or_path in cls.pretrained_init_configuration)):
                init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path]
        else:
            logger.info("Model name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
            # Local path/url: look for the subclass's vocab files on disk.
            for (file_id, file_name) in cls.vocab_files_names.items():
                if os.path.isdir(pretrained_model_name_or_path):
                    full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
                else:
                    full_file_name = pretrained_model_name_or_path
                if (not os.path.exists(full_file_name)):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE}
            saved_directory = pretrained_model_name_or_path
            if (os.path.exists(saved_directory) and (not os.path.isdir(saved_directory))):
                saved_directory = os.path.dirname(saved_directory)
            for (file_id, file_name) in additional_files_names.items():
                full_file_name = os.path.join(saved_directory, file_name)
                if (not os.path.exists(full_file_name)):
                    logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
                    full_file_name = None
                vocab_files[file_id] = full_file_name
            if all(((full_file_name is None) for full_file_name in vocab_files.values())):
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find tokenizer files at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
                return None
        try:
            # Download/cache each resolved file.
            resolved_vocab_files = {}
            for (file_id, file_path) in vocab_files.items():
                if (file_path is None):
                    resolved_vocab_files[file_id] = None
                else:
                    resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
        except EnvironmentError as e:
            if (pretrained_model_name_or_path in s3_models):
                logger.error("Couldn't reach server to download vocabulary.")
            else:
                logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find files {} at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, str(vocab_files.keys())))
            raise e
        for (file_id, file_path) in vocab_files.items():
            if (file_path == resolved_vocab_files[file_id]):
                logger.info('loading file {}'.format(file_path))
            else:
                logger.info('loading file {} from cache at {}'.format(file_path, resolved_vocab_files[file_id]))
        # Saved tokenizer config takes precedence over the shortcut defaults,
        # and explicit kwargs override both.
        tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
        if (tokenizer_config_file is not None):
            init_kwargs = json.load(open(tokenizer_config_file, encoding='utf-8'))
            saved_init_inputs = init_kwargs.pop('init_inputs', ())
            if (not init_inputs):
                init_inputs = saved_init_inputs
        else:
            init_kwargs = init_configuration
        init_kwargs.update(kwargs)
        if (pretrained_model_name_or_path in cls.max_model_input_sizes):
            # Clip max_len to the model's hard input-size limit.
            max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
            if ((max_len is not None) and isinstance(max_len, (int, float))):
                init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len)
        added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
        special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
        for (args_name, file_path) in resolved_vocab_files.items():
            if (args_name not in init_kwargs):
                init_kwargs[args_name] = file_path
        if (special_tokens_map_file is not None):
            special_tokens_map = json.load(open(special_tokens_map_file, encoding='utf-8'))
            for (key, value) in special_tokens_map.items():
                if (key not in init_kwargs):
                    init_kwargs[key] = value
        tokenizer = cls(*init_inputs, **init_kwargs)
        # Remember the construction arguments so save_pretrained can persist them.
        tokenizer.init_inputs = init_inputs
        tokenizer.init_kwargs = init_kwargs
        if (added_tokens_file is not None):
            added_tok_encoder = json.load(open(added_tokens_file, encoding='utf-8'))
            added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
            tokenizer.added_tokens_encoder.update(added_tok_encoder)
            tokenizer.added_tokens_decoder.update(added_tok_decoder)
        return tokenizer

    def save_pretrained(self, save_directory):
        """Save config, special-tokens map, added tokens and vocabulary files."""
        if (not os.path.isdir(save_directory)):
            logger.error('Saving directory ({}) should be a directory'.format(save_directory))
            return
        special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
        added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
        tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
        tokenizer_config = copy.deepcopy(self.init_kwargs)
        tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
        # Vocab file paths are saved separately by save_vocabulary.
        for file_id in self.vocab_files_names.keys():
            tokenizer_config.pop(file_id, None)
        with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(tokenizer_config, ensure_ascii=False))
        with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
        with open(added_tokens_file, 'w', encoding='utf-8') as f:
            if self.added_tokens_encoder:
                out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
            else:
                out_str = u'{}'
            f.write(out_str)
        vocab_files = self.save_vocabulary(save_directory)
        return (vocab_files + (special_tokens_map_file, added_tokens_file))

    def save_vocabulary(self, save_directory):
        """Subclasses write their vocabulary files and return their paths."""
        raise NotImplementedError

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens); subclass hook."""
        raise NotImplementedError

    def __len__(self):
        return (self.vocab_size + len(self.added_tokens_encoder))

    def add_tokens(self, new_tokens):
        """Add tokens not already in the vocabulary; returns how many were added."""
        if (not new_tokens):
            return 0
        to_add_tokens = []
        for token in new_tokens:
            assert (isinstance(token, str) or (six.PY2 and isinstance(token, unicode)))
            # A token is new if it maps to the unk id (and is not unk itself).
            if ((token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token))):
                to_add_tokens.append(token)
                logger.info('Adding %s to the vocabulary', token)
        added_tok_encoder = dict(((tok, (len(self) + i)) for (i, tok) in enumerate(to_add_tokens)))
        added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)
        return len(to_add_tokens)

    def add_special_tokens(self, special_tokens_dict):
        """Register special tokens (also adding them to the vocabulary);
        returns the number of tokens added."""
        if (not special_tokens_dict):
            return 0
        added_tokens = 0
        for (key, value) in special_tokens_dict.items():
            assert (key in self.SPECIAL_TOKENS_ATTRIBUTES)
            if (key == 'additional_special_tokens'):
                assert (isinstance(value, (list, tuple)) and all(((isinstance(t, str) or (six.PY2 and isinstance(t, unicode))) for t in value)))
                added_tokens += self.add_tokens(value)
            else:
                assert (isinstance(value, str) or (six.PY2 and isinstance(value, unicode)))
                added_tokens += self.add_tokens([value])
            logger.info('Assigning %s to the %s key of the tokenizer', value, key)
            setattr(self, key, value)
        return added_tokens

    def tokenize(self, text, **kwargs):
        """Split ``text`` into tokens, keeping added/special tokens atomic."""

        def split_on_token(tok, text):
            # Split `text` on `tok`, re-inserting the token between the parts.
            result = []
            split_text = text.split(tok)
            for (i, sub_text) in enumerate(split_text):
                sub_text = sub_text.strip()
                if ((i == 0) and (not sub_text)):
                    result += [tok]
                elif (i == (len(split_text) - 1)):
                    if sub_text:
                        result += [sub_text]
                    else:
                        pass
                else:
                    if sub_text:
                        result += [sub_text]
                    result += [tok]
            return result

        def split_on_tokens(tok_list, text):
            if (not text):
                return []
            if (not tok_list):
                return self._tokenize(text, **kwargs)
            # Iteratively split on each special/added token, then run the
            # subclass tokenizer on the remaining plain-text pieces.
            tokenized_text = []
            text_list = [text]
            for tok in tok_list:
                tokenized_text = []
                for sub_text in text_list:
                    if ((sub_text not in self.added_tokens_encoder) and (sub_text not in self.all_special_tokens)):
                        tokenized_text += split_on_token(tok, sub_text)
                    else:
                        tokenized_text += [sub_text]
                text_list = tokenized_text
            return sum(((self._tokenize(token, **kwargs) if ((token not in self.added_tokens_encoder) and (token not in self.all_special_tokens)) else [token]) for token in tokenized_text), [])

        added_tokens = (list(self.added_tokens_encoder.keys()) + self.all_special_tokens)
        tokenized_text = split_on_tokens(added_tokens, text)
        return tokenized_text

    def _tokenize(self, text, **kwargs):
        """Subclass hook: tokenize plain text (no added/special tokens)."""
        raise NotImplementedError

    def convert_tokens_to_ids(self, tokens):
        """Convert a token or list of tokens to id(s), checking added tokens first."""
        if (tokens is None):
            return None
        if (isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode))):
            return self._convert_token_to_id_with_added_voc(tokens)
        ids = []
        for token in tokens:
            ids.append(self._convert_token_to_id_with_added_voc(token))
        if (len(ids) > self.max_len):
            logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this model ({} > {}). Running this sequence through the model will result in indexing errors'.format(len(ids), self.max_len))
        return ids

    def _convert_token_to_id_with_added_voc(self, token):
        if (token is None):
            return None
        if (token in self.added_tokens_encoder):
            return self.added_tokens_encoder[token]
        return self._convert_token_to_id(token)

    def _convert_token_to_id(self, token):
        """Subclass hook: base-vocabulary token -> id."""
        raise NotImplementedError

    def encode(self, text, text_pair=None, add_special_tokens=False, **kwargs):
        """Tokenize and convert to ids; optionally add model special tokens.

        For a sentence pair, returns either the combined special-token
        sequence or a tuple of the two id lists.
        """
        if (text_pair is None):
            if add_special_tokens:
                return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text, **kwargs)))
            else:
                return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
        first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text, **kwargs)]
        second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair, **kwargs)]
        if add_special_tokens:
            return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
        else:
            return (first_sentence_tokens, second_sentence_tokens)

    def add_special_tokens_single_sentence(self, token_ids):
        logger.warning('This tokenizer does not make use of special tokens. The sequence has been returned with no modification.')
        return token_ids

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        logger.warning('This tokenizer does not make use of special tokens. The two sequences have been concatenated.')
        return (token_ids_0 + token_ids_1)

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Convert an id or list of ids back to token(s)."""
        if isinstance(ids, int):
            if (ids in self.added_tokens_decoder):
                return self.added_tokens_decoder[ids]
            else:
                return self._convert_id_to_token(ids)
        tokens = []
        for index in ids:
            if (skip_special_tokens and (index in self.all_special_ids)):
                continue
            if (index in self.added_tokens_decoder):
                tokens.append(self.added_tokens_decoder[index])
            else:
                tokens.append(self._convert_id_to_token(index))
        return tokens

    def _convert_id_to_token(self, index):
        """Subclass hook: base-vocabulary id -> token."""
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens):
        return ' '.join(self.convert_ids_to_tokens(tokens))

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
        """Convert ids to a string, handling added tokens separately so the
        subclass detokenizer only sees base-vocabulary tokens."""
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if (skip_special_tokens and (token in self.all_special_ids)):
                continue
            if (token in self.added_tokens_encoder):
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append((' ' + token))
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        text = ''.join(sub_texts)
        if ((self._sep_token is not None) and (self._sep_token in text)):
            # Split the decoded text into sentences at separator tokens.
            text = text.replace(self._cls_token, self._sep_token)
            split_text = list(filter((lambda sentence: (len(sentence) > 0)), text.split(self._sep_token)))
            if clean_up_tokenization_spaces:
                clean_text = [self.clean_up_tokenization(text) for text in split_text]
                return clean_text
            else:
                return split_text
        elif clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    @property
    def special_tokens_map(self):
        """Mapping of special-token attribute names to their set values."""
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, ('_' + attr))
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """Flat, de-duplicated list of all set special tokens."""
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = (all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]))
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """Vocabulary ids of all set special tokens."""
        all_toks = self.all_special_tokens
        all_ids = list((self._convert_token_to_id(t) for t in all_toks))
        return all_ids

    @staticmethod
    def clean_up_tokenization(out_string):
        """Undo common detokenization artifacts (spaces before punctuation,
        split contractions)."""
        out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string
def factor_product(*args):
    """Multiply any number of factors of the same concrete type.

    Raises TypeError when any argument is not a BaseFactor, and
    NotImplementedError when the factors are of mixed concrete classes.
    """
    for phi in args:
        if not isinstance(phi, BaseFactor):
            raise TypeError('Arguments must be factors')
    # All factors must share one concrete class for * to be well-defined.
    if (len(set(map(type, args))) != 1):
        raise NotImplementedError('All the args are expected to be instances of the same factor class.')
    return reduce((lambda left, right: (left * right)), args)
def torch_cat(tensors, dim=None, axis=None, *, out=None):
    """Shape-only stand-in for torch.cat: return an empty meta tensor whose
    shape is the concatenation of ``tensors`` along ``dim``.

    ``axis`` is an alias for ``dim`` (``dim`` wins if both are given); the
    default concatenation dimension is 0. ``out`` is accepted for signature
    compatibility and ignored.
    """
    if dim is None:
        dim = 0 if axis is None else axis
    # Normalize negative dims against the first tensor's rank.
    if dim < 0:
        dim += tensors[0].dim()
    reference = list(tensors[0].shape)
    total = sum(t.shape[dim] for t in tensors)
    result_shape = reference[:dim] + [total] + reference[dim + 1:]
    return torch.empty(result_shape, device='meta')
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): routes attribute access on the
    # deprecated public `scipy.integrate.lsoda` namespace through the central
    # deprecation shim, which warns and forwards to the private `_lsoda` module.
    return _sub_module_deprecation(sub_package='integrate', module='lsoda', private_modules=['_lsoda'], all=__all__, attribute=name)
def is_crnn_config(filename):
    """Heuristically check whether ``filename`` is a loadable CRNN config.

    Gzipped files are rejected outright; otherwise we simply try to parse
    the file with ``Config`` and report whether that succeeded.

    :param filename: path to the candidate config file
    :return: True if the file parses as a config, else False
    """
    if filename.endswith('.gz'):
        return False
    try:
        config = Config()
        config.load_file(filename)
    except Exception:
        # Parse failure means "not a config". Previously this path fell off
        # the end and returned None; return an explicit bool for a
        # consistent return type.
        return False
    return True
def convert_cache_to_csv(dataset_cache, output_dir):
    """Export the train/valid/test DataFrames stored in a torch cache to CSV.

    Args:
        dataset_cache: path to a torch-saved dict with 'train', 'valid' and
            'test' DataFrames.
        output_dir: directory that receives one CSV per split, named
            '<cache basename><split>.csv'.

    BUGFIX: the original passed the full cache path to ``join`` -- for an
    absolute ``dataset_cache``, ``join(output_dir, ...)`` discards
    ``output_dir`` entirely -- and silently dropped the loaded valid/test
    splits. All three splits are now written inside ``output_dir``.
    """
    from os.path import basename
    columns = ['Dialogue_ID', 'Utterance_ID', 'Speaker', 'Sentiment', 'Emotion', 'DA', 'Utterance', 'Gender', 'Age', 'Neuroticism', 'Extraversion', 'Openness', 'Agreeableness', 'Conscientiousness']
    # weights_only=False: the cache stores pandas objects, not bare tensors
    # (required explicitly since torch 2.6 changed the default).
    data = torch.load(dataset_cache, weights_only=False)
    cache_name = basename(dataset_cache)
    for split in ('train', 'valid', 'test'):
        data[split].to_csv(join(output_dir, cache_name + split + '.csv'), columns=columns)
class SummaryWriter(object):
    """Buffering wrapper around tensorboard's SummaryWriter.

    Scalars are accumulated per key and only flushed (reduced, mean by
    default) when :meth:`write` is called.
    """

    def __init__(self, path: str, reduce_func=None):
        # Default reduction: arithmetic mean; None when nothing was logged.
        if reduce_func is None:
            def reduce_func(values):
                return sum(values) / len(values) if values else None
        self.tb_writer = _SummaryWriter(path)
        self.reduce_func = reduce_func
        self.clear()

    def __del__(self):
        self.tb_writer.close()

    def clear(self):
        """Drop every buffered value."""
        self.state = defaultdict(list)

    def add_scalar(self, key, value, num: int = 1):
        """Buffer *value* under *key*, repeated *num* times (sample weighting)."""
        self.state[key] += [value] * num

    def write(self, global_step):
        """Flush reduced values to tensorboard, then reset the buffer."""
        for key, buffered in self.state.items():
            reduced = self.reduce_func(buffered)
            if reduced is not None:
                self.tb_writer.add_scalar(key, reduced, global_step)
        self.clear()
def svg_dendrogram(dendrogram: np.ndarray, names: Optional[np.ndarray]=None, rotate: bool=False, width: float=400, height: float=300, margin: float=10, margin_text: float=5, scale: float=1, line_width: float=2, n_clusters: int=2, color: str='black', colors: Optional[Iterable]=None, font_size: int=12, reorder: bool=False, rotate_names: bool=True, filename: Optional[str]=None):
    """Render a dendrogram as SVG markup.

    ``colors`` may be None (fall back to STANDARD_COLORS), a dict (its values
    are used in order), a list, or any array-like. With ``rotate`` the tree is
    drawn leaves-left instead of leaves-top (``rotate_names`` only applies to
    the top layout). When ``filename`` is given the markup is also written to
    ``<filename>.svg``. Returns the SVG string.
    """
    # Normalize the palette to an array-like of color values.
    if colors is None:
        palette = STANDARD_COLORS
    elif isinstance(colors, dict):
        palette = np.array(list(colors.values()))
    elif isinstance(colors, list):
        palette = np.array(colors)
    else:
        palette = colors
    if rotate:
        svg = svg_dendrogram_left(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters, color, palette, font_size, reorder)
    else:
        svg = svg_dendrogram_top(dendrogram, names, width, height, margin, margin_text, scale, line_width, n_clusters, color, palette, font_size, reorder, rotate_names)
    if filename is not None:
        with open(filename + '.svg', 'w') as svg_file:
            svg_file.write(svg)
    return svg
_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
    """Dispatcher for ``np.packbits``: yields the array-like arguments that
    participate in ``__array_function__`` protocol dispatch."""
    # NOTE(review): in NumPy's source this factory call is applied as a
    # decorator (`@array_function_from_c_func_and_dispatcher(...)`); the '@'
    # appears to have been lost in extraction -- confirm against
    # numpy/core/multiarray.py.
    return (a,)
class NoiseScheduleVP():
    """Forward-SDE noise schedule for VP-type diffusion models (JAX port of
    DPM-Solver's wrapper).

    schedule:
        'discrete' -- built from trained ``betas`` or ``alphas_cumprod``;
        'linear'   -- continuous-time linear beta(t) in [beta_0, beta_1];
        'cosine'   -- continuous-time cosine schedule (Improved DDPM).
    """

    def __init__(self, schedule='discrete', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20.0):
        if schedule not in ['discrete', 'linear', 'cosine']:
            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
        self.schedule = schedule
        if schedule == 'discrete':
            if betas is not None:
                # log(alpha_bar_n) = 0.5 * sum_{i<=n} log(1 - beta_i).
                # BUGFIX: jnp arrays take `axis=`, not torch's `dim=`; the
                # original `.cumsum(dim=0)` raised a TypeError on this path.
                log_alphas = 0.5 * jnp.log(1 - betas).cumsum(axis=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * jnp.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.0
            # Discrete steps map onto t in (0, 1]; arrays kept 2-D (1, N) for
            # the piecewise-linear interpolation below.
            self.t_array = jnp.linspace(0.0, 1.0, self.total_N + 1)[1:].reshape((1, -1))
            self.log_alpha_array = log_alphas.reshape((1, -1))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.0
            # Largest t at which the cosine schedule's beta stays below
            # cosine_beta_max.
            self.cosine_t_max = (math.atan(self.cosine_beta_max * (1.0 + self.cosine_s) / math.pi) * 2.0 * (1.0 + self.cosine_s) / math.pi) - self.cosine_s
            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1.0 + self.cosine_s) * math.pi / 2.0))
            # The cosine schedule is only numerically valid up to t ~ 0.9946.
            self.T = 0.9946 if schedule == 'cosine' else 1.0

    def marginal_log_mean_coeff(self, t):
        """Return log(alpha_t), the log marginal mean coefficient at time t."""
        if self.schedule == 'discrete':
            t_shape = t.shape
            return interpolate_fn(t.reshape((-1, 1)), self.t_array, self.log_alpha_array).reshape(t_shape)
        elif self.schedule == 'linear':
            return -0.25 * (t ** 2) * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
        elif self.schedule == 'cosine':
            log_alpha_fn = lambda s: jnp.log(jnp.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0))
            return log_alpha_fn(t) - self.cosine_log_alpha_0

    def marginal_alpha(self, t):
        """alpha_t = exp(log alpha_t)."""
        return jnp.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """sigma_t = sqrt(1 - alpha_t^2)."""
        return jnp.sqrt(1.0 - jnp.exp(2.0 * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        """Half-log-SNR: lambda_t = log(alpha_t) - log(sigma_t)."""
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = 0.5 * jnp.log(1.0 - jnp.exp(2.0 * log_mean_coeff))
        return log_mean_coeff - log_std

    def inverse_lambda(self, lamb):
        """Invert marginal_lambda: return t such that lambda_t == lamb."""
        if self.schedule == 'linear':
            tmp = 2.0 * (self.beta_1 - self.beta_0) * jnp.logaddexp(-2.0 * lamb, jnp.zeros((1,)))
            Delta = self.beta_0 ** 2 + tmp
            return tmp / (jnp.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
        elif self.schedule == 'discrete':
            log_alpha = -0.5 * jnp.logaddexp(jnp.zeros((1,)), -2.0 * lamb)
            shape = log_alpha.shape
            # Interpolation tables must be increasing in the lookup key, hence
            # the flips.
            t = interpolate_fn(log_alpha.reshape((-1, 1)), jnp.flip(self.log_alpha_array, [1]), jnp.flip(self.t_array, [1]))
            return t.reshape(shape)
        else:
            log_alpha = -0.5 * jnp.logaddexp(-2.0 * lamb, jnp.zeros((1,)))
            t_fn = lambda log_alpha_t: (jnp.arccos(jnp.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2.0 * (1.0 + self.cosine_s) / math.pi) - self.cosine_s
            return t_fn(log_alpha)
class RandomIdentitySampler(Sampler):
    """Sampler for person re-identification training.

    Each yielded batch contains ``batch_size // num_instances`` identities
    with exactly ``num_instances`` samples per identity. Identities with too
    few samples are oversampled with replacement.
    """

    def __init__(self, data_source, batch_size, num_instances):
        if batch_size < num_instances:
            raise ValueError('batch_size={} must be no less than num_instances={}'.format(batch_size, num_instances))
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = batch_size // num_instances
        # Map each person id (items[1]) to the dataset indices of its samples.
        self.index_dic = defaultdict(list)
        for sample_idx, items in enumerate(data_source):
            self.index_dic[items[1]].append(sample_idx)
        self.pids = list(self.index_dic.keys())
        assert len(self.pids) >= self.num_pids_per_batch
        # Epoch length estimate: each pid contributes a multiple of
        # num_instances (at least one full group after oversampling).
        self.length = 0
        for pid in self.pids:
            count = max(len(self.index_dic[pid]), self.num_instances)
            self.length += count - count % self.num_instances

    def __iter__(self):
        # Pre-split each pid's shuffled indices into groups of num_instances.
        groups_per_pid = defaultdict(list)
        for pid in self.pids:
            candidates = copy.deepcopy(self.index_dic[pid])
            if len(candidates) < self.num_instances:
                # Oversample with replacement so one full group exists.
                candidates = np.random.choice(candidates, size=self.num_instances, replace=True)
            random.shuffle(candidates)
            usable = len(candidates) - len(candidates) % self.num_instances
            for start in range(0, usable, self.num_instances):
                groups_per_pid[pid].append(list(candidates[start:start + self.num_instances]))
        remaining = copy.deepcopy(self.pids)
        epoch_indices = []
        # Keep drawing identity subsets until too few identities remain.
        while len(remaining) >= self.num_pids_per_batch:
            for pid in random.sample(remaining, self.num_pids_per_batch):
                epoch_indices.extend(groups_per_pid[pid].pop(0))
                if not groups_per_pid[pid]:
                    remaining.remove(pid)
        return iter(epoch_indices)

    def __len__(self):
        return self.length
class PytorchLUTFakeQuant(torch.nn.Module):
    """Fake-quantization module emulating LUT (k-means) activation
    quantization: activations are scaled into the integer grid, snapped to
    the nearest LUT centroid, and rescaled back by the threshold.
    """

    def __init__(self, quantization_params: Dict[(str, np.ndarray)]):
        super(PytorchLUTFakeQuant, self).__init__()
        self.quantization_params = quantization_params
        self.activation_is_signed = self.quantization_params.get(SIGNED)
        self.lut_values = to_torch_tensor(self.quantization_params.get(LUT_VALUES))
        self.threshold = self.quantization_params.get(THRESHOLD)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Quantize *x*; returns None when any quantization parameter is missing."""
        required = (self.activation_is_signed, self.lut_values, self.threshold)
        if any(p is None for p in required):
            return None
        return self.lut_kmeans_quantizer(x)

    def lut_kmeans_quantizer(self, tensor_data: torch.Tensor) -> torch.Tensor:
        """Snap each (integer-scaled) activation to its nearest LUT centroid
        and map the result back to the float domain."""
        quantized = self.int_quantization_with_threshold(tensor_data, LUT_VALUES_BITWIDTH).unsqueeze(-1)
        # Broadcast the LUT along a trailing axis to compute all distances.
        lut = self.lut_values.reshape((1,) * (quantized.dim() - 1) + (-1,))
        nearest = torch.argmin(torch.abs(quantized - lut), dim=-1)
        centers = self.lut_values.flatten()[nearest]
        scale = 2 ** (LUT_VALUES_BITWIDTH - int(self.activation_is_signed))
        return centers / scale * self.threshold

    def int_quantization_with_threshold(self, data: torch.Tensor, n_bits: int, eps: float = EPS) -> torch.Tensor:
        """Scale *data* by the threshold into the n_bits integer range and clip.

        Signed ranges are symmetric around zero; unsigned ranges start at 0.
        """
        if self.activation_is_signed:
            upper, lower = 2 ** (n_bits - 1) - 1, -(2 ** (n_bits - 1))
        else:
            upper, lower = 2 ** n_bits - 1, 0
        scaled = data / (self.threshold + eps) * (2 ** (n_bits - int(self.activation_is_signed)))
        return torch.clip(scaled, min=lower, max=upper)
class SemiemoClassificationHead(nn.Module):
    """Classification head fusing text/audio representations before a linear
    projection.

    When ``Final_rep`` is truthy in :meth:`forward`, ``features`` is already
    the fused representation; otherwise it is a dict of modality features
    combined according to the ``args`` flags (t_only / a_only / all_in /
    stack_up).
    """

    def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout, args):
        super().__init__()
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)
        self.args = args

    def forward(self, features, Final_rep, **kwargs):
        """Return (logits, fused_representation)."""
        if Final_rep:
            # Caller already provides the fused representation.
            fused = features
        else:
            # NOTE: `&` (bitwise) is kept as in the original; it behaves like
            # `and` only because the flags are booleans.
            if self.args.t_only & (not self.args.stack_up):
                fused = features['j_text']
            if self.args.a_only & (not self.args.stack_up):
                fused = features['j_aud']
            if self.args.all_in or (self.args.a_only and self.args.t_only):
                if self.args.stack_up:
                    fused = torch.cat((features['t2a_r'], features['a2t_r']), dim=1)
                else:
                    fused = torch.cat((features['j_text'], features['j_aud']), dim=1)
        logits = self.out_proj(self.dropout(fused))
        return (logits, fused)
def _load_sut(tracer: ExecutionTracer) -> bool:
    """Import the subject-under-test module; return True on success.

    The current thread id is registered on the tracer first so that the
    instrumentation triggered during import is attributed correctly. An
    ImportError is logged and reported as failure instead of propagating.
    """
    loaded = True
    try:
        tracer.current_thread_identifier = threading.current_thread().ident
        importlib.import_module(config.configuration.module_name)
    except ImportError as ex:
        # A broken SUT must not crash the whole run; report and bail out.
        _LOGGER.exception('Failed to load SUT: %s', ex)
        loaded = False
    return loaded
def register_Ns3LteRrcSapAsConfig_methods(root_module, cls):
    """Register constructors and instance attributes for the pybindgen
    binding of ``ns3::LteRrcSap::AsConfig``."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::AsConfig const &', 'arg0')])
    # (attribute name, C++ type) pairs, all exposed as mutable attributes.
    attributes = [
        ('sourceDlCarrierFreq', 'uint16_t'),
        ('sourceMasterInformationBlock', 'ns3::LteRrcSap::MasterInformationBlock'),
        ('sourceMeasConfig', 'ns3::LteRrcSap::MeasConfig'),
        ('sourceRadioResourceConfig', 'ns3::LteRrcSap::RadioResourceConfigDedicated'),
        ('sourceSystemInformationBlockType1', 'ns3::LteRrcSap::SystemInformationBlockType1'),
        ('sourceSystemInformationBlockType2', 'ns3::LteRrcSap::SystemInformationBlockType2'),
        ('sourceUeIdentity', 'uint16_t'),
    ]
    for attr_name, cpp_type in attributes:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
class RNNDecoderBase(DecoderBase):
    """Base class for RNN decoders with optional attention (OpenNMT-py style).

    Subclasses provide ``_build_rnn``, ``_run_forward_pass`` and the
    ``_input_size`` property; this base wires up attention, copy attention,
    the context gate, and decoder state management.
    """

    def __init__(self, rnn_type, bidirectional_encoder, num_layers, hidden_size, attn_type='general', attn_func='softmax', coverage_attn=False, context_gate=None, copy_attn=False, dropout=0.0, embeddings=None, reuse_copy_attn=False, copy_attn_type='general'):
        super(RNNDecoderBase, self).__init__(attentional=((attn_type != 'none') and (attn_type is not None)))
        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embeddings = embeddings
        self.dropout = nn.Dropout(dropout)
        # Mutable decoder state ('hidden', 'input_feed', 'coverage');
        # populated by init_state.
        self.state = {}
        # The concrete RNN cell is supplied by the subclass.
        self.rnn = self._build_rnn(rnn_type, input_size=self._input_size, hidden_size=hidden_size, num_layers=num_layers, dropout=dropout)
        self.context_gate = None
        if (context_gate is not None):
            self.context_gate = context_gate_factory(context_gate, self._input_size, hidden_size, hidden_size, hidden_size)
        self._coverage = coverage_attn
        if (not self.attentional):
            if self._coverage:
                raise ValueError('Cannot use coverage term with no attention.')
            self.attn = None
        else:
            self.attn = GlobalAttention(hidden_size, coverage=coverage_attn, attn_type=attn_type, attn_func=attn_func)
        # A separate copy-attention layer is only built when it is not reused
        # from the main attention.
        if (copy_attn and (not reuse_copy_attn)):
            if ((copy_attn_type == 'none') or (copy_attn_type is None)):
                raise ValueError('Cannot use copy_attn with copy_attn_type none')
            self.copy_attn = GlobalAttention(hidden_size, attn_type=copy_attn_type, attn_func=attn_func)
        else:
            self.copy_attn = None
        self._reuse_copy_attn = (reuse_copy_attn and copy_attn)
        if (self._reuse_copy_attn and (not self.attentional)):
            raise ValueError('Cannot reuse copy attention with no attention.')

    def from_opt(cls, opt, embeddings):
        """Alternate constructor building the decoder from an options namespace."""
        # NOTE(review): takes ``cls`` -- a @classmethod decorator was
        # presumably lost in extraction; confirm against the original source.
        return cls(opt.rnn_type, opt.brnn, opt.dec_layers, opt.dec_rnn_size, opt.global_attention, opt.global_attention_function, opt.coverage_attn, opt.context_gate, opt.copy_attn, (opt.dropout[0] if (type(opt.dropout) is list) else opt.dropout), embeddings, opt.reuse_copy_attn, opt.copy_attn_type)

    def init_state(self, src, memory_bank, encoder_final):
        """Initialize decoder state from the encoder's final hidden state."""

        def _fix_enc_hidden(hidden):
            # Collapse a bidirectional encoder's direction dimension:
            # (layers*directions, batch, dim) -> (layers, batch, dim*2).
            if self.bidirectional_encoder:
                hidden = torch.cat([hidden[0:hidden.size(0):2], hidden[1:hidden.size(0):2]], 2)
            return hidden
        if isinstance(encoder_final, tuple):
            # LSTM final state is (h, c); GRU/RNN pass a single tensor.
            self.state['hidden'] = tuple((_fix_enc_hidden(enc_hid) for enc_hid in encoder_final))
        else:
            self.state['hidden'] = (_fix_enc_hidden(encoder_final),)
        batch_size = self.state['hidden'][0].size(1)
        h_size = (batch_size, self.hidden_size)
        # Input feed starts as zeros of shape (1, batch, hidden).
        self.state['input_feed'] = self.state['hidden'][0].data.new(*h_size).zero_().unsqueeze(0)
        self.state['coverage'] = None

    def map_state(self, fn):
        """Apply fn (e.g. beam-search tiling) to every state tensor along dim 1."""
        self.state['hidden'] = tuple((fn(h, 1) for h in self.state['hidden']))
        self.state['input_feed'] = fn(self.state['input_feed'], 1)
        if (self._coverage and (self.state['coverage'] is not None)):
            self.state['coverage'] = fn(self.state['coverage'], 1)

    def detach_state(self):
        """Cut backprop through the stored state (truncated BPTT)."""
        self.state['hidden'] = tuple((h.detach() for h in self.state['hidden']))
        self.state['input_feed'] = self.state['input_feed'].detach()

    def forward(self, tgt, memory_bank, memory_lengths=None, step=None, **kwargs):
        """Run the decoder over ``tgt`` and update the internal state.

        Returns (dec_outs, attns): decoder outputs of shape
        (tgt_len, batch, hidden) and a dict of stacked attention tensors.
        """
        (dec_state, dec_outs, attns) = self._run_forward_pass(tgt, memory_bank, memory_lengths=memory_lengths)
        if (not isinstance(dec_state, tuple)):
            # GRU/RNN return a bare tensor; normalize to an LSTM-like tuple.
            dec_state = (dec_state,)
        self.state['hidden'] = dec_state
        # The last output feeds the next decoding step.
        self.state['input_feed'] = dec_outs[(- 1)].unsqueeze(0)
        self.state['coverage'] = None
        if ('coverage' in attns):
            self.state['coverage'] = attns['coverage'][(- 1)].unsqueeze(0)
        # Input-feeding subclasses accumulate python lists; stack into tensors.
        if (type(dec_outs) == list):
            dec_outs = torch.stack(dec_outs)
        for k in attns:
            if (type(attns[k]) == list):
                attns[k] = torch.stack(attns[k])
        return (dec_outs, attns)

    def update_dropout(self, dropout):
        # Propagate a new dropout rate to this module and its embeddings.
        self.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
def get_predicted_instances(scene_graph, feature_name='feature'):
    """Collect per-node features and boolean point masks from a scene graph.

    Each node must carry a ``feature_name`` vector and a ``pt_indices`` list;
    ``scene_graph.graph['n_pts']`` gives the total point count.

    Returns:
        (features, masks): arrays of shape (n_nodes, feat_dim) and
        (n_nodes, n_pts) respectively; masks are boolean.
    """
    total_points = scene_graph.graph['n_pts']
    features, masks = [], []
    for node_id in scene_graph.nodes:
        attrs = scene_graph.nodes[node_id]
        features.append(attrs[feature_name])
        point_mask = np.zeros(total_points, dtype=np.bool_)
        point_mask[attrs['pt_indices']] = True
        masks.append(point_mask)
    return (np.vstack(features), np.stack(masks))
def get_adjacency_matrix(edges, num_nodes):
    """Build a dense ``num_nodes x num_nodes`` float32 adjacency matrix.

    Each edge is used directly as a numpy index -- for the usual (i, j)
    tuples this sets ``A[i, j] = 1`` for every listed edge (directed; no
    symmetrization).
    """
    adjacency = np.zeros((num_nodes, num_nodes), dtype=np.float32)
    for pair in edges:
        adjacency[pair] = 1.0
    return adjacency
class SWALR(_LRScheduler):
    """Anneal each param group's learning rate to a fixed SWA value.

    Mirrors ``torch.optim.swa_utils.SWALR``: over ``anneal_epochs`` steps the
    lr is interpolated from its current value to ``swa_lr`` with a cosine or
    linear ramp, then held at ``swa_lr``.
    """

    def __init__(self, optimizer, swa_lr, anneal_epochs=10, anneal_strategy='cos', last_epoch=(- 1)):
        swa_lrs = self._format_param(optimizer, swa_lr)
        for (swa_lr, group) in zip(swa_lrs, optimizer.param_groups):
            group['swa_lr'] = swa_lr
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy))
        elif (anneal_strategy == 'cos'):
            self.anneal_func = self._cosine_anneal
        elif (anneal_strategy == 'linear'):
            self.anneal_func = self._linear_anneal
        if ((not isinstance(anneal_epochs, int)) or (anneal_epochs < 1)):
            raise ValueError('anneal_epochs must be a positive integer, got {}'.format(anneal_epochs))
        self.anneal_epochs = anneal_epochs
        super(SWALR, self).__init__(optimizer, last_epoch)

    # BUGFIX: the four helpers below take no `self`; without @staticmethod
    # (as in torch.optim.swa_utils.SWALR) they would receive the instance as
    # their first positional argument and crash at runtime.
    @staticmethod
    def _format_param(optimizer, swa_lrs):
        """Broadcast a scalar swa_lr to every param group; validate list lengths."""
        if isinstance(swa_lrs, (list, tuple)):
            if (len(swa_lrs) != len(optimizer.param_groups)):
                raise ValueError('swa_lr must have the same length as optimizer.param_groups: swa_lr has {}, optimizer.param_groups has {}'.format(len(swa_lrs), len(optimizer.param_groups)))
            return swa_lrs
        else:
            return ([swa_lrs] * len(optimizer.param_groups))

    @staticmethod
    def _linear_anneal(t):
        # Linear ramp: mixing factor grows proportionally with t in [0, 1].
        return t

    @staticmethod
    def _cosine_anneal(t):
        # Smooth cosine ramp from 0 (t=0) to 1 (t=1).
        return ((1 - math.cos((math.pi * t))) / 2)

    @staticmethod
    def _get_initial_lr(lr, swa_lr, alpha):
        """Recover the pre-anneal lr from the current lr and mixing factor alpha."""
        if (alpha == 1):
            return swa_lr
        return ((lr - (alpha * swa_lr)) / (1 - alpha))

    def get_lr(self):
        """Interpolate each group between its starting lr and its swa_lr."""
        if (not self._get_lr_called_within_step):
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        step = (self._step_count - 1)
        prev_t = max(0, min(1, ((step - 1) / self.anneal_epochs)))
        prev_alpha = self.anneal_func(prev_t)
        # Undo the previous step's mixing to recover each group's base lr.
        prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha) for group in self.optimizer.param_groups]
        t = max(0, min(1, (step / self.anneal_epochs)))
        alpha = self.anneal_func(t)
        return [((group['swa_lr'] * alpha) + (lr * (1 - alpha))) for (group, lr) in zip(self.optimizer.param_groups, prev_lrs)]
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model', n=5):
    """Build a ResNeXt image classifier with tflearn.

    Args:
        width, height: spatial input size (input is RGB: [None, w, h, 3]).
        frame_count: accepted for interface parity with sibling builders;
            unused here (the network is single-frame).
        lr: accepted but unused -- the Momentum optimizer below hard-codes
            its base rate to 0.1; NOTE(review): confirm whether `lr` was
            meant to feed tflearn.Momentum.
        output: number of softmax classes.
        model_name: unused here; kept for interface parity.
        n: residual blocks per stage (default 5, as in the standard tflearn
            ResNeXt CIFAR example).

    Returns:
        A compiled tflearn.DNN model.

    BUGFIX: the original body referenced an undefined global ``n`` and
    crashed with NameError; it is now an explicit, defaulted parameter.
    """
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    # Three stages of aggregated residual blocks with downsampling in between.
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, (n - 1), 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, (n - 1), 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy')
    model = tflearn.DNN(net, max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
    return model
def test_same_input_shapes():
    """Broadcasting 1-3 identical shapes must yield that same shape."""
    shapes = [(), (1,), (3,), (0, 1), (0, 3), (1, 0), (3, 0), (1, 3), (3, 1), (3, 3)]
    for shape in shapes:
        for n_operands in (1, 2, 3):
            assert_shapes_correct([shape] * n_operands, shape)
def check_vocab(vocab_file, out_dir, check_special_token=True, sos=None, eos=None, unk=None):
    """Load a vocabulary file and optionally guarantee special-token headers.

    When ``check_special_token`` is set and the first three entries are not
    (unk, sos, eos), those tokens are prepended and the corrected vocabulary
    is written to ``out_dir`` under the same basename.

    Returns:
        (vocab_size, vocab_file) -- vocab_file may point at the rewritten copy.

    Raises:
        ValueError: if ``vocab_file`` does not exist.
    """
    if not tf.gfile.Exists(vocab_file):
        raise ValueError("vocab_file '%s' does not exist." % vocab_file)
    utils.print_out('# Vocab file %s exists' % vocab_file)
    vocab = []
    with codecs.getreader('utf-8')(tf.gfile.GFile(vocab_file, 'rb')) as f:
        for line in f:
            vocab.append(line.strip())
    if check_special_token:
        # Fall back to the module-level default special tokens.
        unk = unk or UNK
        sos = sos or SOS
        eos = eos or EOS
        assert len(vocab) >= 3
        if vocab[:3] != [unk, sos, eos]:
            utils.print_out('The first 3 vocab words [%s, %s, %s] are not [%s, %s, %s]' % (vocab[0], vocab[1], vocab[2], unk, sos, eos))
            vocab = [unk, sos, eos] + vocab
            # Persist the corrected vocabulary next to the other outputs.
            new_vocab_file = os.path.join(out_dir, os.path.basename(vocab_file))
            with codecs.getwriter('utf-8')(tf.gfile.GFile(new_vocab_file, 'wb')) as f:
                for word in vocab:
                    f.write('%s\n' % word)
            vocab_file = new_vocab_file
    return (len(vocab), vocab_file)
def copy_attrs(orig, dest, names, only_if_set=False):
    """Copy the listed attributes from *orig* to *dest* (waf-style utility).

    ``names`` may be a space-separated string or a list (normalized via
    ``Utils.to_list``); attributes missing on *orig* default to ``()``. With
    ``only_if_set`` True, falsy/missing values are not copied at all.
    """
    for attr in Utils.to_list(names):
        value = getattr(orig, attr, ())
        if value or not only_if_set:
            setattr(dest, attr, value)
class NanoDetPlusAuxHead(nn.Module):
    """Auxiliary detection head for NanoDet-Plus (GFL-style).

    For every feature level it predicts class scores plus discretized box-side
    distributions (4 * (reg_max + 1) channels), flattens them spatially and
    concatenates across levels.
    """

    def __init__(self, num_classes, input_channel, feat_channels=256, stacked_convs=4, strides=[8, 16, 32], conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), activation='LeakyReLU', reg_max=16, **kwargs):
        # NOTE(review): the mutable defaults (strides list, norm_cfg dict)
        # are shared across instances -- safe only while never mutated.
        super(NanoDetPlusAuxHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = input_channel
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.reg_max = reg_max
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.cls_out_channels = num_classes
        self._init_layers()
        self.init_weights()

    def _init_layers(self):
        """Build parallel classification/regression conv towers and the two heads."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            # First conv adapts the input channel count; the rest keep width.
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            self.cls_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, activation=self.activation))
            self.reg_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, activation=self.activation))
        self.gfl_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1)
        # 4 box sides, each predicted as a (reg_max + 1)-bin distribution (GFL).
        self.gfl_reg = nn.Conv2d(self.feat_channels, (4 * (self.reg_max + 1)), 3, padding=1)
        # One learnable scalar scale per stride level.
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def init_weights(self):
        """Normal(0, 0.01) init; classifier bias set for a low foreground prior."""
        # NOTE(review): the elements of cls_convs/reg_convs are ConvModule
        # wrappers, so the isinstance(nn.Conv2d) guard below skips them --
        # confirm whether iterating m.modules() was intended.
        for m in self.cls_convs:
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.01)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias, 0)
        for m in self.reg_convs:
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.01)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias, 0)
        # sigmoid(-4.595) ~= 0.01: start with ~1% foreground probability
        # (focal-loss style prior).
        bias_cls = (- 4.595)
        m = self.gfl_cls
        if isinstance(m, nn.Conv2d):
            nn.init.normal_(m.weight, 0, 0.01)
            if (hasattr(m, 'bias') and (m.bias is not None)):
                nn.init.constant_(m.bias, bias_cls)
        m = self.gfl_reg
        if isinstance(m, nn.Conv2d):
            nn.init.normal_(m.weight, 0, 0.01)
            if (hasattr(m, 'bias') and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)

    def forward(self, feats):
        """Run the head on multi-level features.

        Returns:
            Tensor of shape (batch, total_locations, cls + 4*(reg_max+1)),
            locations concatenated over levels.
        """
        outputs = []
        for (x, scale) in zip(feats, self.scales):
            cls_feat = x
            reg_feat = x
            for cls_conv in self.cls_convs:
                cls_feat = cls_conv(cls_feat)
            for reg_conv in self.reg_convs:
                reg_feat = reg_conv(reg_feat)
            cls_score = self.gfl_cls(cls_feat)
            # Per-level learnable scaling of the regression logits.
            bbox_pred = scale(self.gfl_reg(reg_feat)).float()
            output = torch.cat([cls_score, bbox_pred], dim=1)
            outputs.append(output.flatten(start_dim=2))
        outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
        return outputs
class HomeWorkAttack(Attack):
    """Re-identification attack where the adversary knows an individual's two
    most frequent locations (typically home and work) and matches them
    against every candidate's top-2 locations.
    """

    def __init__(self, knowledge_length=1):
        super(HomeWorkAttack, self).__init__(knowledge_length)

    def _generate_instances(self, single_traj):
        # Background knowledge = the two most frequent locations (top-2 rows
        # of the frequency vector).
        return [single_traj[:2].values]

    def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):
        """Compute re-identification risk over the frequency vectors of *traj*."""
        return self._all_risks(frequency_vector(traj), targets, force_instances, show_progress)

    def _match(self, single_traj, instance):
        """Return 1 when every known location appears in the candidate's
        top-2 locations, otherwise 0."""
        known = pd.DataFrame(data=instance, columns=single_traj.columns)
        overlap = pd.merge(single_traj[:2], known, left_on=[constants.LATITUDE, constants.LONGITUDE], right_on=[constants.LATITUDE, constants.LONGITUDE])
        return 1 if len(overlap.index) == len(known.index) else 0
def filter_items(items, filter_expr, order_by, is_desc, max_size, columns=None):
    """Filter, sort, project and truncate a list of dict-like records.

    Args:
        items: dicts, or namedtuple-like objects exposing ``_asdict()``.
        filter_expr: Python expression evaluated against each record's
            fields. SECURITY: this is ``eval()`` on an arbitrary string --
            never pass untrusted input here.
        order_by: list of field names to sort by, or None to keep input order
            (fields that are None/absent on a record are skipped in its key).
        is_desc: sort descending when True.
        max_size: keep at most this many rows when > 0.
        columns: field names to project, in order; when empty/None every
            field value is kept.

    Returns:
        List of rows (lists of values).

    Fixes vs. the original: ``columns`` no longer uses a shared mutable
    default, and the filter is evaluated against a copy of each record --
    ``eval()`` injects ``__builtins__`` into its globals dict, which
    previously polluted the caller's records (and leaked into the output of
    the no-columns branch).
    """
    if columns is None:
        columns = []
    selected = []
    for item in items:
        if not isinstance(item, dict):
            item = item._asdict()
        # Evaluate against a copy so eval() cannot mutate the record.
        if eval(filter_expr, dict(item)):
            selected.append(item)
    if order_by is not None:
        def sort_key(record):
            return [record[field] for field in order_by if record.get(field) is not None]
        selected.sort(key=sort_key, reverse=is_desc)
    if len(columns) > 0:
        rows = [[(rec[c] if rec.get(c) is not None else '') for c in columns] for rec in selected]
    else:
        rows = [list(rec.values()) for rec in selected]
    if 0 < max_size < len(rows):
        rows = rows[:max_size]
    return rows
class ClevrDataLoader():
    """Batched loader pairing CLEVR questions with pre-parsed scene graphs.

    Required kwargs: ``question_pt``, ``scene_pt``, ``vocab_json``,
    ``batch_size``, ``shuffle``. Optional: ``annotation_json`` (raw CLEVR
    scene annotations) and ``ratio`` (fraction of questions kept, for
    low-data experiments).
    """

    def __init__(self, **kwargs):
        if ('question_pt' not in kwargs):
            raise ValueError('Must give question_pt')
        if ('scene_pt' not in kwargs):
            raise ValueError('Must give scene_pt')
        if ('vocab_json' not in kwargs):
            raise ValueError('Must give vocab_json')
        scene_pt_path = str(kwargs.pop('scene_pt'))
        print(('loading scenes from %s' % scene_pt_path))
        # The scene file stores four pickled objects back to back, in exactly
        # this order.
        with open(scene_pt_path, 'rb') as f:
            conn_matrixes = pickle.load(f)
            edge_matrixes = pickle.load(f)
            vertex_vectors = pickle.load(f)
            scene_descs = pickle.load(f)
        vocab_json_path = str(kwargs.pop('vocab_json'))
        print(('loading vocab from %s' % vocab_json_path))
        vocab = load_vocab(vocab_json_path)
        question_pt_path = str(kwargs.pop('question_pt'))
        print(('loading questions from %s' % question_pt_path))
        with open(question_pt_path, 'rb') as f:
            obj = pickle.load(f)
            questions = obj['questions']
            image_indices = obj['image_idxs']
            programs = obj['programs']
            program_inputs = obj['program_inputs']
            answers = obj['answers']
        if ('annotation_json' in kwargs):
            # Keep the raw CLEVR scene annotations, keyed by image index.
            annotations = json.load(open(kwargs.pop('annotation_json')))['scenes']
            self.orig_annotations = {int(s['image_index']): s for s in annotations}
        self.ratio = None
        if ('ratio' in kwargs):
            # Truncate all parallel question arrays to the first
            # ratio-fraction of the data.
            self.ratio = kwargs.pop('ratio')
            total = int((len(questions) * self.ratio))
            print(('training ratio = %.3f, containing %d questions' % (self.ratio, total)))
            questions = questions[:total]
            image_indices = image_indices[:total]
            programs = programs[:total]
            program_inputs = program_inputs[:total]
            answers = answers[:total]
        self.dataset = ClevrDataset(questions, image_indices, programs, program_inputs, answers, conn_matrixes, edge_matrixes, vertex_vectors)
        self.scene_descs = scene_descs
        self.vocab = vocab
        self.batch_size = kwargs.pop('batch_size')
        self.shuffle = kwargs.pop('shuffle')

    def generator(self):
        """Yield collated batches; also caches the image indices and scene
        descriptions of the batch currently being yielded (side channels)."""
        random_idxs = np.arange(len(self.dataset))
        if self.shuffle:
            np.random.shuffle(random_idxs)
        for batch_iter in range(0, len(self.dataset), self.batch_size):
            data = []
            # Reset the per-batch side channels.
            self.idx_cache = []
            self.desc_cache = []
            for i in range(batch_iter, min((batch_iter + self.batch_size), len(self.dataset))):
                data.append(self.dataset[random_idxs[i]])
                image_idx = self.dataset.all_image_idxs[random_idxs[i]].item()
                self.idx_cache.append(image_idx)
                self.desc_cache.append(self.scene_descs[image_idx])
            data = collate(data)
            (yield data)

    def __len__(self):
        # Number of batches per epoch, counting the final partial batch.
        return math.ceil((len(self.dataset) / self.batch_size))
def _limit_signed_rational(val, max_val, min_val): frac = Fraction(val) n_d = (frac.numerator, frac.denominator) if (min(n_d) < min_val): n_d = _limit_rational(val, abs(min_val)) if (max(n_d) > max_val): val = Fraction(*n_d) n_d = _limit_rational(val, max_val) return n_d
class SubwordField(Field):
    """Field whose tokens are further split into subword pieces (chars/BPE).

    ``fix_len`` caps the number of pieces kept per token; when <= 0 it is
    inferred (and stored) from the first batch passed to :meth:`transform`.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before delegating to Field.
        self.fix_len = (kwargs.pop('fix_len') if ('fix_len' in kwargs) else 0)
        super().__init__(*args, **kwargs)

    def build(self, dataset, min_freq=1, embed=None):
        """Build the piece-level vocab from *dataset* (no-op if already built),
        optionally extending it with pretrained embedding tokens/vectors."""
        if hasattr(self, 'vocab'):
            return
        sequences = getattr(dataset, self.name)
        counter = Counter((piece for seq in sequences for token in seq for piece in self.preprocess(token)))
        self.vocab = Vocab(counter, min_freq, self.specials, self.unk_index)
        if (not embed):
            self.embed = None
        else:
            tokens = self.preprocess(embed.tokens)
            if embed.unk:
                # Map the embedding's own UNK symbol onto ours.
                tokens[embed.unk_index] = self.unk
            self.vocab.extend(tokens)
            # Rows for vocab entries without a pretrained vector stay zero.
            self.embed = torch.zeros(len(self.vocab), embed.dim)
            self.embed[self.vocab[tokens]] = embed.vectors

    def transform(self, sequences):
        """Numericalize token sequences into padded piece-id tensors.

        NOTE(review): when fix_len <= 0, this method sets ``self.fix_len``
        from the current batch as a side effect -- later batches reuse that
        value.
        """
        sequences = [[self.preprocess(token) for token in seq] for seq in sequences]
        if (self.fix_len <= 0):
            self.fix_len = max((len(token) for seq in sequences for token in seq))
        if self.use_vocab:
            # Unknown pieces map to unk; empty tokens become a lone unk.
            sequences = [[([(self.vocab[i] if (i in self.vocab) else self.unk_index) for i in token] if token else [self.unk_index]) for token in seq] for seq in sequences]
        if self.bos:
            sequences = [([[self.bos_index]] + seq) for seq in sequences]
        if self.eos:
            sequences = [(seq + [[self.eos_index]]) for seq in sequences]
        # Per-sequence piece length, capped by fix_len; pieces beyond the cap
        # are truncated before padding.
        lens = [min(self.fix_len, max((len(ids) for ids in seq))) for seq in sequences]
        sequences = [pad([torch.tensor(ids[:i]) for ids in seq], self.pad_index, i) for (i, seq) in zip(lens, sequences)]
        return sequences
def test_logvar_same():
    """forward()'s log-variance output (index 2) must equal encode()'s (index 1)."""
    forward_logvar = model.forward(x1.float())[2]
    encode_logvar = model.encode(x1.float())[1]
    assert torch.all(forward_logvar.eq(encode_logvar))
def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
    """Create the noisy instruction text for memory-challenge file *index*.

    Intermediate files instruct the agent to read the next file; the final
    file (index == level) -- or every non-final file when level == 1 -- tells
    it to write all task_ids to output.txt and finish.
    """
    # NOTE(review): the triple-quoted f-strings below are runtime output and
    # probably contained line breaks that were lost when the file was
    # flattened; their content is left exactly as found.
    task_id = task_ids[(index - 1)]
    noise = generate_noise(NOISE)
    if (index != level):
        if (level == 1):
            return f'''{noise} The current task_id is {task_id}. {noise} Write all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.'''
        return f'''{noise} The current task_id is {task_id}. {noise} Read the file {base_filename}{(index + 1)}.txt using the read_file command.'''
    return f'''{noise} This task_id is {task_id} {noise} Write all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command. '''
_numpy_output(check_dtype=True)
def test_ufunc_signbit_u(A: dace.uint32[10]):
    """DaCe ufunc test: np.signbit over an unsigned array (always False)."""
    # NOTE(review): `_numpy_output(...)` is presumably a decorator whose '@'
    # was lost in extraction; as written it is a bare call whose result is
    # discarded -- confirm against the original test module.
    return np.signbit(A)
def get_parameters():
    """Assemble the experiment configuration namespace.

    Resolution order: deprecated presets > JSON config file (explicit CLI
    flags win over file values) > plain CLI arguments. Also stamps the run
    timestamp, derives and creates the directory layout, and dumps the final
    parameters to disk.

    Returns:
        The fully populated args namespace.
    """
    parser = get_parser()
    base_args = parser.parse_args()
    if (base_args.options_type != 'generic'):
        raise NotImplementedError
    if (base_args.deprecated is not None):
        # Legacy hard-coded parameter sets kept for reproducibility.
        if (base_args.deprecated == 'vgg_cifar'):
            args = get_deprecated_params_vgg_cifar()
            args.deprecated = base_args.deprecated
        elif (base_args.deprecated == 'mnist_act'):
            args = get_deprecated_params_mnist_act()
            args.deprecated = base_args.deprecated
        else:
            raise NotImplementedError
    elif base_args.config_file:
        args = copy.deepcopy(base_args)
        print('Reading parameters from {}, but CLI arguments'.format(args.config_file))
        with open(os.path.join(base_args.config_dir, base_args.config_file), 'r') as f:
            file_params = dotdict(json.load(f))
        # File values fill in anything the CLI left at its parser default.
        for (param, value) in file_params.items():
            if (not hasattr(args, param)):
                setattr(args, param, value)
            elif (getattr(args, param) == parser.get_default(param)):
                setattr(args, param, value)
        args.deprecated = None
    else:
        args = base_args
    if hasattr(args, 'timestamp'):
        # Keep a pointer to the run this configuration was loaded from.
        args.previous_timestamp = args.timestamp
    args.timestamp = get_timestamp_other()
    args.rootdir = os.getcwd()
    if (args.sweep_name is not None):
        # Sweep runs nest under a shared subdirectory.
        args.baseroot = args.rootdir
        args.rootdir = os.path.join(args.rootdir, args.sweep_name)
    else:
        args.baseroot = args.rootdir
    args.config_dir = os.path.join(args.rootdir, 'configurations')
    args.result_dir = os.path.join(args.rootdir, 'results')
    args.exp_name = ('exp_' + args.timestamp)
    args.csv_dir = os.path.join(args.rootdir, 'csv')
    utils.mkdir(args.config_dir)
    utils.mkdir(args.result_dir)
    utils.mkdir(args.csv_dir)
    if ((not hasattr(args, 'save_result_file')) or (args.save_result_file is None)):
        args.save_result_file = 'default.csv'
    # Persist the resolved configuration alongside the results.
    dump_parameters(args)
    return args
def structure_loss(pred, mask):
    """Weighted BCE + weighted IoU loss (as used in F3Net/PraNet saliency work).

    Pixels near mask boundaries -- where a 31x31 local average of the mask
    differs from the mask itself -- receive up to 5x extra weight in both
    terms.

    Args:
        pred: raw logits, shape (B, 1, H, W).
        mask: binary ground truth of the same shape.
    Returns:
        Scalar loss averaged over the batch.
    """
    # Boundary-sensitive weights: 1 + 5 * |local_mean(mask) - mask|.
    weight = 1 + 5 * torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
    # Weighted binary cross-entropy, normalized per image.
    bce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
    weighted_bce = (weight * bce).sum(dim=(2, 3)) / weight.sum(dim=(2, 3))
    # Weighted IoU on probabilities (+1 smoothing in both numerator and
    # denominator).
    prob = torch.sigmoid(pred)
    inter = (prob * mask * weight).sum(dim=(2, 3))
    union = ((prob + mask) * weight).sum(dim=(2, 3))
    weighted_iou = 1 - (inter + 1) / (union - inter + 1)
    return (weighted_bce + weighted_iou).mean()
_if_pypy
def test_hashingvectorizer_nan_in_docs():
    """HashingVectorizer must reject np.nan documents with a ValueError."""
    # NOTE(review): `_if_pypy` is presumably a skip-marker decorator whose
    # '@' was lost in extraction; as written it is a bare name expression.
    message = 'np.nan is an invalid document, expected byte or unicode string.'
    exception = ValueError

    def func():
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])
    with pytest.raises(exception, match=message):
        func()
def repeat_expr(params: {}):
    """Continual class-incremental ImageNet training loop.

    Reads hyper-parameters from `params` (with defaults filled in below),
    builds a ConvNet (or linear) learner trained with Backprop or
    Continual Backprop (CBP), then iterates over `num_tasks` tasks of
    `num_classes` ImageNet classes each, recording per-epoch train/test
    accuracies and periodically saving them to `params['data_file']`.
    """
    agent_type = params['agent']
    num_tasks = params['num_tasks']
    num_showings = params['num_showings']
    step_size = params['step_size']
    # Defaults; each may be overridden through `params` below.
    replacement_rate = 0.0001
    decay_rate = 0.99
    maturity_threshold = 100
    util_type = 'contribution'
    opt = params['opt']
    weight_decay = 0
    use_gpu = 0
    dev = 'cpu'
    num_classes = 10
    total_classes = 1000
    new_heads = True
    mini_batch_size = 100
    perturb_scale = 0
    momentum = 0
    if ('replacement_rate' in params.keys()):
        replacement_rate = params['replacement_rate']
    if ('decay_rate' in params.keys()):
        decay_rate = params['decay_rate']
    if ('util_type' in params.keys()):
        util_type = params['util_type']
    if ('maturity_threshold' in params.keys()):
        maturity_threshold = params['maturity_threshold']
    if ('weight_decay' in params.keys()):
        weight_decay = params['weight_decay']
    if ('use_gpu' in params.keys()):
        if (params['use_gpu'] == 1):
            use_gpu = 1
            dev = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
            if (dev == torch.device('cuda')):
                torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if ('num_classes' in params.keys()):
        num_classes = params['num_classes']
    if ('new_heads' in params.keys()):
        new_heads = params['new_heads']
    if ('mini_batch_size' in params.keys()):
        mini_batch_size = params['mini_batch_size']
    if ('perturb_scale' in params.keys()):
        perturb_scale = params['perturb_scale']
    if ('momentum' in params.keys()):
        momentum = params['momentum']
    num_epochs = num_showings
    classes_per_task = num_classes
    net = ConvNet()
    if (agent_type == 'linear'):
        # 3072 = 3 * 32 * 32 flattened input for the linear baseline.
        net = MyLinear(input_size=3072, num_outputs=classes_per_task)
    if (agent_type in ['bp', 'linear']):
        learner = Backprop(net=net, step_size=step_size, opt=opt, loss='nll', weight_decay=weight_decay, to_perturb=(perturb_scale != 0), perturb_scale=perturb_scale, device=dev, momentum=momentum)
    elif (agent_type == 'cbp'):
        learner = ConvCBP(net=net, step_size=step_size, momentum=momentum, loss='nll', weight_decay=weight_decay, opt=opt, init='default', replacement_rate=replacement_rate, decay_rate=decay_rate, util_type=util_type, device=dev, maturity_threshold=maturity_threshold)
    # Pick this run's class ordering from the shared pickled list.
    with open('class_order', 'rb+') as f:
        class_order = pickle.load(f)
        class_order = class_order[int([params['run_idx']][0])]
    # Repeat the ordering enough times to cover num_tasks * num_classes.
    num_class_repetitions_required = (int(((num_classes * num_tasks) / total_classes)) + 1)
    class_order = np.concatenate(([class_order] * num_class_repetitions_required))
    save_after_every_n_tasks = 1
    if (num_tasks >= 10):
        save_after_every_n_tasks = int((num_tasks / 10))
    # NOTE(review): `train_images_per_class` is a module-level name not
    # visible here — confirm it is defined where this function lives.
    examples_per_epoch = (train_images_per_class * classes_per_task)
    train_accuracies = torch.zeros((num_tasks, num_epochs), dtype=torch.float)
    test_accuracies = torch.zeros((num_tasks, num_epochs), dtype=torch.float)
    (x_train, x_test, y_train, y_test) = (None, None, None, None)
    for task_idx in range(num_tasks):
        # Free the previous task's tensors before loading the next task.
        del x_train, x_test, y_train, y_test
        (x_train, y_train, x_test, y_test) = load_imagenet(class_order[(task_idx * classes_per_task):((task_idx + 1) * classes_per_task)])
        (x_train, x_test) = (x_train.type(torch.FloatTensor), x_test.type(torch.FloatTensor))
        if (agent_type == 'linear'):
            (x_train, x_test) = (x_train.flatten(1), x_test.flatten(1))
        if (use_gpu == 1):
            (x_train, x_test, y_train, y_test) = (x_train.to(dev), x_test.to(dev), y_train.to(dev), y_test.to(dev))
        if new_heads:
            # Reset the output head's weights/bias for the new task.
            net.layers[(- 1)].weight.data *= 0
            net.layers[(- 1)].bias.data *= 0
        for epoch_idx in tqdm(range(num_epochs)):
            # Shuffle the task's training examples each epoch.
            example_order = np.random.permutation((train_images_per_class * classes_per_task))
            x_train = x_train[example_order]
            y_train = y_train[example_order]
            new_train_accuracies = torch.zeros((int((examples_per_epoch / mini_batch_size)),), dtype=torch.float)
            epoch_iter = 0
            for start_idx in range(0, examples_per_epoch, mini_batch_size):
                batch_x = x_train[start_idx:(start_idx + mini_batch_size)]
                batch_y = y_train[start_idx:(start_idx + mini_batch_size)]
                (loss, network_output) = learner.learn(x=batch_x, target=batch_y)
                with torch.no_grad():
                    new_train_accuracies[epoch_iter] = accuracy(softmax(network_output, dim=1), batch_y).cpu()
                epoch_iter += 1
            # Evaluate on the task's test split without gradients.
            with torch.no_grad():
                train_accuracies[task_idx][epoch_idx] = new_train_accuracies.mean()
                new_test_accuracies = torch.zeros((int((x_test.shape[0] / mini_batch_size)),), dtype=torch.float)
                test_epoch_iter = 0
                for start_idx in range(0, x_test.shape[0], mini_batch_size):
                    test_batch_x = x_test[start_idx:(start_idx + mini_batch_size)]
                    test_batch_y = y_test[start_idx:(start_idx + mini_batch_size)]
                    (network_output, _) = net.predict(x=test_batch_x)
                    new_test_accuracies[test_epoch_iter] = accuracy(softmax(network_output, dim=1), test_batch_y)
                    test_epoch_iter += 1
                test_accuracies[task_idx][epoch_idx] = new_test_accuracies.mean()
                print('accuracy for task', task_idx, 'in epoch', epoch_idx, ': train, ', train_accuracies[task_idx][epoch_idx], ', test,', test_accuracies[task_idx][epoch_idx])
        # Periodic checkpoint of the accumulated accuracies.
        if ((task_idx % save_after_every_n_tasks) == 0):
            save_data(data={'train_accuracies': train_accuracies.cpu(), 'test_accuracies': test_accuracies.cpu()}, data_file=params['data_file'])
    # Final save after the last task.
    save_data(data={'train_accuracies': train_accuracies.cpu(), 'test_accuracies': test_accuracies.cpu()}, data_file=params['data_file'])
def save_video(label, step, tensor, fps=15, n_cols=None):
    """Convert a video tensor into a `wandb.Video`.

    Args:
        label: unused here; kept for interface compatibility with callers.
        step: unused here; kept for interface compatibility with callers.
        tensor: frames array; when dtype is `object`, treated as a sequence
            of clips each run through `prepare_video` separately.
        fps: frames per second for the encoded video.
        n_cols: forwarded to `prepare_video` for grid layout.

    Returns:
        A `wandb.Video` built from uint8 frames in (T, C, H, W) order.

    Bug fix: `fps` is now forwarded to `wandb.Video`; previously it was
    hard-coded to 15, silently ignoring the caller's argument.
    """
    def _to_uint8(t):
        # wandb expects uint8 frames; scale non-uint8 (assumed [0, 1]) up.
        if (t.dtype != np.uint8):
            t = (t * 255.0).astype(np.uint8)
        return t
    if (tensor.dtype in [object]):
        # Ragged batch of clips: prepare each clip independently.
        # NOTE(review): this branch produces a list, so the transpose
        # below would fail on it — preserved from the original; confirm
        # whether callers ever hit this path.
        tensor = [_to_uint8(prepare_video(t, n_cols)) for t in tensor]
    else:
        tensor = prepare_video(tensor, n_cols)
        tensor = _to_uint8(tensor)
    # (T, H, W, C) -> (T, C, H, W) as wandb.Video expects.
    tensor = tensor.transpose(0, 3, 1, 2)
    return wandb.Video(tensor, fps=fps, format='mp4')
# NOTE(review): the parenthesized `params=[...]` head below looks like a
# truncated fixture decorator — presumably `@pytest.fixture(params=...)`;
# confirm against the original test module.
(params=[{'application/json': {'schema': {'type': 'object', 'properties': {'foo': {'type': 'string'}}}}}, {'application/json': {'schema': {'type': 'integer'}}}, {'multipart/form-data': {'schema': {'type': 'object', 'additionalProperties': False, 'properties': {'data': {'type': 'string', 'format': 'binary'}}, 'required': ['data']}}}, None])
def raw_schema(request, empty_open_api_3_schema):
    """Parametrized Open API 3 schema fixture.

    Builds a single GET operation with one required parameter in each
    location (path, query, header, cookie); when the fixture param is not
    None it is attached as the operation's required requestBody content.
    """
    empty_open_api_3_schema['paths'] = {'/data/{path_param}/': {'get': {'parameters': [{'name': f'{location}_param', 'in': location, 'required': True, 'schema': {'type': 'string'}, **kwargs} for (location, kwargs) in (('path', {}), ('query', {'style': 'simple', 'explode': True}), ('header', {}), ('cookie', {}))], 'responses': {'200': {'description': 'OK'}}}}}
    if (request.param is not None):
        empty_open_api_3_schema['paths']['/data/{path_param}/']['get'].update({'requestBody': {'content': request.param, 'required': True}})
    return empty_open_api_3_schema
class _CustomClusterer(BaseEstimator):
    """Minimal clusterer stub for tests.

    Assigns every sample to cluster 0; optionally exposes a random
    `cluster_centers_` attribute after `fit`.
    """

    def __init__(self, n_clusters=1, expose_cluster_centers=True):
        self.n_clusters = n_clusters
        self.expose_cluster_centers = expose_cluster_centers

    def fit(self, X, y=None):
        """Optionally fabricate random cluster centers; return self."""
        if not self.expose_cluster_centers:
            return self
        self.cluster_centers_ = np.random.randn(self.n_clusters, X.shape[1])
        return self

    def predict(self, X):
        """Trivially place every sample in cluster 0."""
        return np.zeros(len(X), dtype=int)
class SentenceTransformersEncoder():
    """Thin wrapper around a SentenceTransformer model for embeddings."""

    def __init__(self, model_name='shibing624/text2vec-base-chinese'):
        # The underlying transformer is loaded once at construction.
        self.model = SentenceTransformer(model_name)

    def encode(self, sentences, convert_to_numpy=True):
        """Embed `sentences`; returns numpy arrays when requested."""
        return self.model.encode(sentences, convert_to_numpy=convert_to_numpy)
class MetadataCatalog():
    """Global registry mapping a dataset name to its Metadata singleton.

    NOTE(review): `get` takes no `self`/`cls` — a `@staticmethod`
    decorator was presumably stripped in extraction; confirm against the
    original file.
    """
    # Process-wide map: dataset name -> Metadata instance.
    _NAME_TO_META = {}

    def get(name):
        """Return (creating on first access) the Metadata for `name`."""
        assert len(name)
        if (name in MetadataCatalog._NAME_TO_META):
            ret = MetadataCatalog._NAME_TO_META[name]
            if hasattr(ret, 'dataset_name'):
                # Deprecated sharing mechanism: copy the parent dataset's
                # metadata into this split once, then return it.
                logger = logging.getLogger()
                logger.warning("\nThe 'dataset_name' key in metadata is no longer used for\nsharing metadata among splits after D! Add\nmetadata to each split (now called dataset) separately!\n ")
                parent_meta = MetadataCatalog.get(ret.dataset_name).as_dict()
                ret.set(**parent_meta)
            return ret
        else:
            # First access: register an empty Metadata under this name.
            m = MetadataCatalog._NAME_TO_META[name] = Metadata(name=name)
            return m
class DDPG(RLAlgorithm):
    """Deep Deterministic Policy Gradient.

    Off-policy actor-critic: trains a deterministic policy and a
    Q-function from a replay pool, with soft-updated target copies of
    both networks.
    """

    def __init__(self, env, policy, qf, es, batch_size=32, n_epochs=200, epoch_length=1000, min_pool_size=10000, replay_pool_size=1000000, discount=0.99, max_path_length=250, qf_weight_decay=0.0, qf_update_method='adam', qf_learning_rate=0.001, policy_weight_decay=0, policy_update_method='adam', policy_learning_rate=0.0001, eval_samples=10000, soft_target=True, soft_target_tau=0.001, n_updates_per_sample=1, scale_reward=1.0, include_horizon_terminal_transitions=False, plot=False, pause_for_plot=False):
        """Store hyper-parameters and initialise bookkeeping containers.

        env / policy / qf / es: environment, deterministic policy,
        Q-function, and exploration strategy.
        NOTE(review): `soft_target` is accepted but never stored or read
        below — soft target updates always happen; confirm intent.
        """
        self.env = env
        self.policy = policy
        self.qf = qf
        self.es = es
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.epoch_length = epoch_length
        self.min_pool_size = min_pool_size
        self.replay_pool_size = replay_pool_size
        self.discount = discount
        self.max_path_length = max_path_length
        self.qf_weight_decay = qf_weight_decay
        self.qf_update_method = parse_update_method(qf_update_method, learning_rate=qf_learning_rate)
        self.qf_learning_rate = qf_learning_rate
        self.policy_weight_decay = policy_weight_decay
        self.policy_update_method = parse_update_method(policy_update_method, learning_rate=policy_learning_rate)
        self.policy_learning_rate = policy_learning_rate
        self.eval_samples = eval_samples
        self.soft_target_tau = soft_target_tau
        self.n_updates_per_sample = n_updates_per_sample
        self.include_horizon_terminal_transitions = include_horizon_terminal_transitions
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        # Rolling diagnostics, flushed by evaluate() each epoch.
        self.qf_loss_averages = []
        self.policy_surr_averages = []
        self.q_averages = []
        self.y_averages = []
        self.paths = []
        self.es_path_returns = []
        self.paths_samples_cnt = 0
        self.scale_reward = scale_reward
        # Populated by init_opt() with compiled train functions + targets.
        self.opt_info = None

    def start_worker(self):
        """Set up the parallel sampler (and plotting, if enabled)."""
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)

    def train(self):
        """Main training loop: collect transitions with the exploration
        policy, train from replay batches once the pool is warm, and
        evaluate/snapshot at the end of each epoch."""
        pool = SimpleReplayPool(max_pool_size=self.replay_pool_size, observation_dim=self.env.observation_space.flat_dim, action_dim=self.env.action_space.flat_dim)
        self.start_worker()
        self.init_opt()
        itr = 0
        path_length = 0
        path_return = 0
        terminal = False
        observation = self.env.reset()
        # A frozen copy of the policy used for sampling this epoch.
        sample_policy = pickle.loads(pickle.dumps(self.policy))
        for epoch in range(self.n_epochs):
            logger.push_prefix(('epoch #%d | ' % epoch))
            logger.log('Training started')
            for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                if terminal:
                    # Start a new trajectory; record the finished return.
                    observation = self.env.reset()
                    self.es.reset()
                    sample_policy.reset()
                    self.es_path_returns.append(path_return)
                    path_length = 0
                    path_return = 0
                action = self.es.get_action(itr, observation, policy=sample_policy)
                (next_observation, reward, terminal, _) = self.env.step(action)
                path_length += 1
                path_return += reward
                if ((not terminal) and (path_length >= self.max_path_length)):
                    # Horizon reached: optionally keep the artificial
                    # terminal transition in the pool.
                    terminal = True
                    if self.include_horizon_terminal_transitions:
                        pool.add_sample(observation, action, (reward * self.scale_reward), terminal)
                else:
                    pool.add_sample(observation, action, (reward * self.scale_reward), terminal)
                observation = next_observation
                if (pool.size >= self.min_pool_size):
                    # Warm pool: do gradient updates, then refresh the
                    # sampling policy from the trained one.
                    for update_itr in range(self.n_updates_per_sample):
                        batch = pool.random_batch(self.batch_size)
                        self.do_training(itr, batch)
                    sample_policy.set_param_values(self.policy.get_param_values())
                itr += 1
            logger.log('Training finished')
            if (pool.size >= self.min_pool_size):
                self.evaluate(epoch, pool)
                params = self.get_epoch_snapshot(epoch)
                logger.save_itr_params(epoch, params)
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                self.update_plot()
                if self.pause_for_plot:
                    input('Plotting evaluation run: Press Enter to continue...')
        self.env.terminate()
        self.policy.terminate()

    def init_opt(self):
        """Build symbolic losses and compile the Q-function / policy
        training functions; create target network copies."""
        target_policy = pickle.loads(pickle.dumps(self.policy))
        target_qf = pickle.loads(pickle.dumps(self.qf))
        obs = self.env.observation_space.new_tensor_variable('obs', extra_dims=1)
        action = self.env.action_space.new_tensor_variable('action', extra_dims=1)
        yvar = TT.vector('ys')
        # L2 weight decay on regularizable Q-function parameters.
        qf_weight_decay_term = ((0.5 * self.qf_weight_decay) * sum([TT.sum(TT.square(param)) for param in self.qf.get_params(regularizable=True)]))
        qval = self.qf.get_qval_sym(obs, action)
        qf_loss = TT.mean(TT.square((yvar - qval)))
        qf_reg_loss = (qf_loss + qf_weight_decay_term)
        policy_weight_decay_term = ((0.5 * self.policy_weight_decay) * sum([TT.sum(TT.square(param)) for param in self.policy.get_params(regularizable=True)]))
        # Policy objective: maximise Q(s, pi(s)) -> minimise its negation.
        policy_qval = self.qf.get_qval_sym(obs, self.policy.get_action_sym(obs), deterministic=True)
        policy_surr = (- TT.mean(policy_qval))
        policy_reg_surr = (policy_surr + policy_weight_decay_term)
        qf_updates = self.qf_update_method(qf_reg_loss, self.qf.get_params(trainable=True))
        policy_updates = self.policy_update_method(policy_reg_surr, self.policy.get_params(trainable=True))
        f_train_qf = ext.compile_function(inputs=[yvar, obs, action], outputs=[qf_loss, qval], updates=qf_updates)
        f_train_policy = ext.compile_function(inputs=[obs], outputs=policy_surr, updates=policy_updates)
        self.opt_info = dict(f_train_qf=f_train_qf, f_train_policy=f_train_policy, target_qf=target_qf, target_policy=target_policy)

    def do_training(self, itr, batch):
        """One gradient step for critic and actor on `batch`, followed by
        soft (Polyak) updates of both target networks."""
        (obs, actions, rewards, next_obs, terminals) = ext.extract(batch, 'observations', 'actions', 'rewards', 'next_observations', 'terminals')
        target_qf = self.opt_info['target_qf']
        target_policy = self.opt_info['target_policy']
        # Bootstrapped TD targets from the target networks.
        (next_actions, _) = target_policy.get_actions(next_obs)
        next_qvals = target_qf.get_qval(next_obs, next_actions)
        ys = (rewards + (((1.0 - terminals) * self.discount) * next_qvals))
        f_train_qf = self.opt_info['f_train_qf']
        f_train_policy = self.opt_info['f_train_policy']
        (qf_loss, qval) = f_train_qf(ys, obs, actions)
        policy_surr = f_train_policy(obs)
        # Soft target updates with rate soft_target_tau.
        target_policy.set_param_values(((target_policy.get_param_values() * (1.0 - self.soft_target_tau)) + (self.policy.get_param_values() * self.soft_target_tau)))
        target_qf.set_param_values(((target_qf.get_param_values() * (1.0 - self.soft_target_tau)) + (self.qf.get_param_values() * self.soft_target_tau)))
        self.qf_loss_averages.append(qf_loss)
        self.policy_surr_averages.append(policy_surr)
        self.q_averages.append(qval)
        self.y_averages.append(ys)

    def evaluate(self, epoch, pool):
        """Roll out evaluation paths with the current policy and log
        return / loss / parameter-norm diagnostics; reset accumulators."""
        logger.log('Collecting samples for evaluation')
        paths = parallel_sampler.sample_paths(policy_params=self.policy.get_param_values(), max_samples=self.eval_samples, max_path_length=self.max_path_length)
        average_discounted_return = np.mean([special.discount_return(path['rewards'], self.discount) for path in paths])
        returns = [sum(path['rewards']) for path in paths]
        all_qs = np.concatenate(self.q_averages)
        all_ys = np.concatenate(self.y_averages)
        average_q_loss = np.mean(self.qf_loss_averages)
        average_policy_surr = np.mean(self.policy_surr_averages)
        average_action = np.mean(np.square(np.concatenate([path['actions'] for path in paths])))
        policy_reg_param_norm = np.linalg.norm(self.policy.get_param_values(regularizable=True))
        qfun_reg_param_norm = np.linalg.norm(self.qf.get_param_values(regularizable=True))
        logger.record_tabular('Epoch', epoch)
        logger.record_tabular('AverageReturn', np.mean(returns))
        logger.record_tabular('StdReturn', np.std(returns))
        logger.record_tabular('MaxReturn', np.max(returns))
        logger.record_tabular('MinReturn', np.min(returns))
        if (len(self.es_path_returns) > 0):
            logger.record_tabular('AverageEsReturn', np.mean(self.es_path_returns))
            logger.record_tabular('StdEsReturn', np.std(self.es_path_returns))
            logger.record_tabular('MaxEsReturn', np.max(self.es_path_returns))
            logger.record_tabular('MinEsReturn', np.min(self.es_path_returns))
        logger.record_tabular('AverageDiscountedReturn', average_discounted_return)
        logger.record_tabular('AverageQLoss', average_q_loss)
        logger.record_tabular('AveragePolicySurr', average_policy_surr)
        logger.record_tabular('AverageQ', np.mean(all_qs))
        logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
        logger.record_tabular('AverageY', np.mean(all_ys))
        logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
        logger.record_tabular('AverageAbsQYDiff', np.mean(np.abs((all_qs - all_ys))))
        logger.record_tabular('AverageAction', average_action)
        logger.record_tabular('PolicyRegParamNorm', policy_reg_param_norm)
        logger.record_tabular('QFunRegParamNorm', qfun_reg_param_norm)
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        # Reset per-epoch accumulators.
        self.qf_loss_averages = []
        self.policy_surr_averages = []
        self.q_averages = []
        self.y_averages = []
        self.es_path_returns = []

    def update_plot(self):
        """Refresh the rollout visualisation, if plotting is enabled."""
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)

    def get_epoch_snapshot(self, epoch):
        """Everything needed to resume/inspect training at this epoch."""
        return dict(env=self.env, epoch=epoch, qf=self.qf, policy=self.policy, target_qf=self.opt_info['target_qf'], target_policy=self.opt_info['target_policy'], es=self.es)
class ImageNetValidation(ImageNetBase):
    """ILSVRC2012 validation split.

    Downloads the tarball via academictorrents, extracts it, reorganizes
    the flat image files into per-synset folders using the
    validation_synset mapping, and writes a relative file list.

    NOTE(review): the `URL`, `VS_URL` and `SIZES` constants below are
    visibly truncated/garbled by extraction (unterminated URL strings and
    a missing first SIZES entry) — restore them from the original source
    before use.
    """
    NAME = 'ILSVRC2012_validation'
    URL = ' AT_HASH = '5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5'
    VS_URL = ' FILES = ['ILSVRC2012_img_val.tar', 'validation_synset.txt']
    SIZES = [, 1950000]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        # data_root overrides the XDG cache default chosen in _prepare().
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)

    def _prepare(self):
        """Download/extract/reorganize once; a marker file makes repeated
        calls a no-op."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # Default cache location per the XDG base-dir convention.
            cachedir = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
            self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME)
        self.datadir = os.path.join(self.root, 'data')
        self.txt_filelist = os.path.join(self.root, 'filelist.txt')
        # The ILSVRC2012 validation set has 50000 images.
        self.expected_length = 50000
        self.random_crop = retrieve(self.config, 'ImageNetValidation/random_crop', default=False)
        if (not tdu.is_prepared(self.root)):
            print('Preparing dataset {} in {}'.format(self.NAME, self.root))
            datadir = self.datadir
            if (not os.path.exists(datadir)):
                path = os.path.join(self.root, self.FILES[0])
                # (Re-)download when the tarball is missing or truncated.
                if ((not os.path.exists(path)) or (not (os.path.getsize(path) == self.SIZES[0]))):
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert (atpath == path)
                print('Extracting {} to {}'.format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                with tarfile.open(path, 'r:') as tar:
                    tar.extractall(path=datadir)
                vspath = os.path.join(self.root, self.FILES[1])
                if ((not os.path.exists(vspath)) or (not (os.path.getsize(vspath) == self.SIZES[1]))):
                    download(self.VS_URL, vspath)
                # Mapping file: "<filename> <synset>" per line.
                with open(vspath, 'r') as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict((line.split() for line in synset_dict))
                print('Reorganizing into synset folders')
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for (k, v) in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)
            # Build the sorted relative file list used by the loader.
            filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG'))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = ('\n'.join(filelist) + '\n')
            with open(self.txt_filelist, 'w') as f:
                f.write(filelist)
            tdu.mark_prepared(self.root)
def _get_do_arguments(do_op): assert (do_op.type == 'Do'), 'Expected Do op' args = {} for arg in do_op.arg: if (not arg.name): continue if (arg.name == 'net'): assert arg.n, 'Expected non empty net argument' args['net'] = arg.n elif (arg.name == 'reuse_workspace'): assert arg.i, 'Expected non empty reuse_workspace argument' args['reuse_workspace'] = bool(arg.i) elif (arg.name == 'inner_blobs'): assert arg.strings, 'Expected non empty inner_blobs argument' args['inner_blobs'] = arg.strings elif (arg.name == 'outer_blobs_idx'): assert arg.ints, 'Expected non empty outer_blobs_idx argument' args['outer_blobs_idx'] = arg.ints return args
class CyclicCodePolynomialEncoder(Encoder):
    """Encoder for cyclic codes that encodes a message polynomial ``p``
    as the codeword associated with ``p * g``, where ``g`` is the code's
    generator polynomial."""

    def __init__(self, code):
        """Wrap `code`; raises ValueError unless it is a CyclicCode."""
        if not isinstance(code, CyclicCode):
            raise ValueError('code has to be a CyclicCode')
        self._polynomial_ring = code._polynomial_ring
        super().__init__(code)

    def __eq__(self, other):
        return (isinstance(other, CyclicCodePolynomialEncoder) and (self.code() == other.code()))

    def _repr_(self):
        return ('Polynomial-style encoder for %s' % self.code())

    def _latex_(self):
        return ('\\textnormal{Polynomial-style encoder for }%s' % self.code()._latex_())

    def encode(self, p):
        """Encode message polynomial `p` (degree < dimension) to a codeword
        vector over the code's base field.

        Raises:
            ValueError: if deg(p) >= k, i.e. the message is too long.
        """
        C = self.code()
        k = C.dimension()
        n = C.length()
        if p.degree() >= k:
            # Bug fix: the original computed ('... %s' % k) - 1, i.e. tried
            # to subtract 1 from a *string*. The bound belongs inside the
            # format argument: the maximum allowed degree is k - 1.
            raise ValueError('Degree of the message must be at most %s' % (k - 1))
        res = _to_complete_list(p * C.generator_polynomial(), n)
        return vector(C.base_field(), res)

    def unencode_nocheck(self, c):
        """Invert encode() without membership checks: divide by g."""
        R = self.message_space()
        g = self.code().generator_polynomial()
        p = R(c.list())
        return (p // g)

    def message_space(self):
        """The polynomial ring messages live in."""
        return self._polynomial_ring
# NOTE(review): the `ti.template()` annotations indicate a Taichi function;
# a `@ti.func` (or `@ti.kernel`) decorator was presumably stripped — confirm.
def backtrace(vf: ti.template(), p, dt_: ti.template()):
    """Third-order Runge-Kutta backtrace of position `p` through velocity
    field `vf` over time step `dt_` (semi-Lagrangian advection step)."""
    v1 = bilerp(vf, p)
    p1 = (p - ((0.5 * dt_) * v1))
    v2 = bilerp(vf, p1)
    p2 = (p - ((0.75 * dt_) * v2))
    v3 = bilerp(vf, p2)
    # Combine the three samples with RK3 weights 2/9, 1/3, 4/9.
    p -= (dt_ * ((((2 / 9) * v1) + ((1 / 3) * v2)) + ((4 / 9) * v3)))
    return p
def test_missing_content_type_header(case, response_factory):
    """A response lacking a Content-Type header must fail conformance."""
    expected_message = 'Missing Content-Type header'
    response = response_factory.requests(content_type=None)
    with pytest.raises(CheckFailed, match=expected_message):
        content_type_conformance(response, case)
def get_models(args, BERT_PT_PATH, trained=False, path_model_bert=None, path_model=None):
    """Build the BERT encoder and the seq-to-SQL decoder model, optionally
    restoring previously trained weights.

    Returns:
        (model, model_bert, tokenizer, bert_config).
    """
    # WikiSQL-style vocabularies for aggregation and condition operators.
    agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
    cond_ops = ['=', '>', '<', 'OP']
    print(f'Batch_size = {(args.bS * args.accumulate_gradients)}')
    print(f'BERT parameters:')
    print(f'learning rate: {args.lr_bert}')
    print(f'Fine-tune BERT: {args.fine_tune}')
    (model_bert, tokenizer, bert_config) = get_bert(BERT_PT_PATH, args.bert_type, args.do_lower_case, args.no_pretraining)
    # Decoder input size = BERT hidden size x number of final layers used.
    args.iS = (bert_config.hidden_size * args.num_target_layers)
    n_cond_ops = len(cond_ops)
    n_agg_ops = len(agg_ops)
    print(f'Seq-to-SQL: the number of final BERT layers to be used: {args.num_target_layers}')
    print(f'Seq-to-SQL: the size of hidden dimension = {args.hS}')
    print(f'Seq-to-SQL: LSTM encoding layer size = {args.lS}')
    print(f'Seq-to-SQL: dropout rate = {args.dr}')
    print(f'Seq-to-SQL: learning rate = {args.lr}')
    model = FT_s2s_1(args.iS, args.hS, args.lS, args.dr, args.max_seq_length, n_cond_ops, n_agg_ops)
    model = model.to(device)
    if trained:
        # Both checkpoint paths are required when restoring.
        assert (path_model_bert != None)
        assert (path_model != None)
        # On NSML, load on the default device; otherwise map to CPU.
        if nsml.IS_ON_NSML:
            res = torch.load(path_model_bert)
        else:
            res = torch.load(path_model_bert, map_location='cpu')
        model_bert.load_state_dict(res['model_bert'])
        model_bert.to(device)
        if nsml.IS_ON_NSML:
            res = torch.load(path_model)
        else:
            res = torch.load(path_model, map_location='cpu')
        model.load_state_dict(res['model'])
    return (model, model_bert, tokenizer, bert_config)
def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10)):
    """Render a segmentation `result` over `img` with matplotlib.

    Args:
        model: segmentor (possibly wrapped in DataParallel).
        img: input image (BGR, as produced by the model pipeline).
        result: segmentation map to overlay.
        palette: optional class-colour palette forwarded to show_result.
        fig_size: matplotlib figure size in inches.
    """
    # Unwrap (Distributed)DataParallel containers.
    if hasattr(model, 'module'):
        model = model.module
    rendered = model.show_result(img, result, palette=palette, show=False)
    plt.figure(figsize=fig_size)
    # Model output is BGR; matplotlib expects RGB.
    plt.imshow(mmcv.bgr2rgb(rendered))
    plt.show()
class EqualNumPyDataSplitter(NumPyDataSplitter):
    """Split data indices into near-equal shards, one per collaborator."""

    def __init__(self, shuffle=True, seed=0):
        self.shuffle = shuffle  # permute indices before splitting?
        self.seed = seed        # RNG seed for reproducible permutations

    def split(self, data, num_collaborators):
        """Return a list of index arrays, one per collaborator."""
        np.random.seed(self.seed)
        indices = range(len(data))
        if self.shuffle:
            indices = np.random.permutation(indices)
        return np.array_split(indices, num_collaborators)
def test_cross_nn_distances():
    """compute_cross_nn_distances finds the expected neighbours of X_new
    among the points of X (1-D euclidean case)."""
    maxk = 3
    X = np.array([0, 0.1, 0.3, 0.55]).reshape((- 1), 1)
    X_new = np.array([0.1, 0.9]).reshape((- 1), 1)
    # Hand-computed nearest neighbours and distances for each query point.
    expected_indices = [[1, 0, 2], [3, 2, 1]]
    expected_distances = [[0, 0.1, 0.2], [0.35, 0.6, 0.8]]
    (distances, indices) = utils.compute_cross_nn_distances(X_new, X, maxk, metric='euclidean')
    assert (pytest.approx(indices) == expected_indices)
    assert (pytest.approx(distances) == expected_distances)
def test_utils_public_api():
    """Pin the public surface of pyhf.utils so accidental additions or
    removals are caught."""
    expected_api = ['EqDelimStringParamType', 'citation', 'digest', 'options_from_eqdelimstring']
    assert dir(pyhf.utils) == expected_api
def token_char_tokenize(text):
    """Tokenize `text` into characters, collapsing each digit-run into a
    single DIGIT_WORD placeholder token."""
    # Pad special characters with spaces so split() separates them.
    spaced = char_regex.sub(' \\g<0> ', text)
    chars = []
    for token in num_regex.sub(DIGIT_WORD, spaced).split():
        if token == DIGIT_WORD:
            # Keep number placeholders whole.
            chars.append(token)
        else:
            # Everything else is emitted character by character.
            chars.extend(token)
    return chars
def _get_torchscript_builtins():
    """Collect documentation entries for every non-math TorchScript
    builtin function.

    Returns:
        ('TorchScript Builtin Functions', entries) where each entry comes
        from `_emit_schema(module_name, fn_name, schema)`.
    """
    functions = []
    # Math builtins are documented separately; keep only the rest.
    builtins = filter((lambda fn: (not _is_math_fn(fn[0]))), _get_builtins_helper())
    builtins_list = list(builtins)
    for (fn, _builtin_name) in builtins_list:
        mod = inspect.getmodule(fn)
        if (not mod):
            raise RuntimeError(f'Module for {fn} not found')
        builtin = _find_builtin(fn)
        if (builtin is not None):
            # One builtin may have several overload schemas; emit them all.
            schemas = torch._C._jit_get_schemas_for_operator(builtin)
            for schema in schemas:
                functions.append(_emit_schema(mod.__name__, fn.__name__, schema))
            pass  # NOTE(review): leftover no-op; safe to remove
    return ('TorchScript Builtin Functions', functions)
class PixDADiscriminator(nn.Module):
    """Pixel-level domain-adaptation discriminator.

    Maps a (N, num_classes, H, W) score map to a (N, 1, H, W) logit map
    via two 3x3 convolutions (LeakyReLU activations) and a 1x1 classifier;
    spatial resolution is preserved throughout.
    """

    def __init__(self, num_classes):
        super(PixDADiscriminator, self).__init__()
        self.n_classes = num_classes
        self.ndf = 64  # base channel width
        self.conv1 = nn.Conv2d(self.n_classes, self.ndf, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(self.ndf, (self.ndf * 2), kernel_size=3, stride=1, padding=1)
        self.classifier = nn.Conv2d((self.ndf * 2), 1, kernel_size=1, stride=1, padding=0)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        """Return per-pixel real/fake logits for input score map `x`."""
        out = self.leaky_relu(self.conv1(x))
        out = self.leaky_relu(self.conv2(out))
        return self.classifier(out)
class TestMaskedTensor(TestCase):
    """Graph-mode (non-eager) behaviour of MaskedTensor."""

    def test_float_graph_execution_fails(self):
        """float() on a symbolic (graph-mode) MaskedTensor must raise
        TypeError — there is no concrete value to convert."""
        with tf.Graph().as_default():
            # Entering a Graph context disables eager execution.
            assert (tf.executing_eagerly() is False)
            input_shape = (1,)
            (tensor, mask) = create_random_numpy_tensor_and_mask(shape=input_shape, probability_for_masked=0.1)
            masked_tf = MaskedTensor(tensor=tf.constant(tensor), mask=tf.constant(mask))
            with self.assertRaises(TypeError):
                float(masked_tf)

    def test_eq_graph_execution(self):
        """Scalar equality must build without error in graph mode (result
        is a symbolic op, not evaluated here)."""
        with tf.Graph().as_default():
            assert (tf.executing_eagerly() is False)
            input_shape = (7, 6)
            (tensor, mask) = create_random_numpy_tensor_and_mask(shape=input_shape, probability_for_masked=0.1)
            masked_tf = MaskedTensor(tensor=tf.constant(tensor), mask=tf.constant(mask))
            _ = (masked_tf == 6.0)
class ModelCheckpointMine(pl.callbacks.model_checkpoint.ModelCheckpoint):
    """ModelCheckpoint that can additionally write a fault-tolerance
    auto-save when training terminates with an exception."""

    def __init__(self, *args, fault_tolerant=False, **kwargs):
        super().__init__(*args, **kwargs)
        # Opt-in: save a crash checkpoint from on_exception().
        self.fault_tolerant = fault_tolerant

    def on_exception(self, trainer: 'pl.Trainer', *_: Any, **__: Any) -> None:
        """On any exception, save '.pl_auto_save.ckpt' in dirpath (matches
        Lightning's fault-tolerant naming) when enabled."""
        if not self.fault_tolerant:
            return
        trainer.save_checkpoint(str(Path(self.dirpath) / '.pl_auto_save.ckpt'))
class NestedDataClassProperty(Property):
    """A Property whose value is a dace `Data` instance.

    NOTE(review): several methods below lack `self`/`cls` parameters —
    `dtype`, `from_string`, `to_string` and `from_json` were presumably
    decorated (`@property` / `@staticmethod`) in the original file and the
    decorators were stripped by extraction; confirm.
    """

    def __get__(self, obj, objtype=None) -> 'Data':
        return super().__get__(obj, objtype)

    def dtype(self):
        # Imported lazily to avoid a circular module dependency.
        from dace import data as dt
        return dt.Data

    def from_string(s):
        """Resolve a data-type name to the corresponding dace data object."""
        from dace import data as dt
        dtype = getattr(dt, s, None)
        if ((dtype is None) or (not isinstance(dtype, dt.Data))):
            raise ValueError('Not a valid data type: {}'.format(s))
        return dtype

    def to_string(obj):
        return obj.to_string()

    def to_json(self, obj):
        # None serializes to JSON null.
        if (obj is None):
            return None
        return obj.to_json()

    def from_json(obj, context=None):
        """Accept None, a type-name string, or a serialized dict."""
        if (obj is None):
            return None
        elif isinstance(obj, str):
            return NestedDataClassProperty.from_string(obj)
        elif isinstance(obj, dict):
            return dace.serialize.from_json(obj)
        else:
            raise TypeError('Cannot parse type from: {}'.format(obj))
class SplitDataset(data.Dataset):
    """Read-only view over the subset of `ds` selected by `split_inds`.

    NOTE(review): `X` and `Y` take only `self` and read like attributes —
    `@property` decorators were presumably stripped by extraction; confirm.
    """

    def __init__(self, ds, split_inds, **kwargs):
        self.split_inds = list(split_inds)
        self.wrapped_data = ds
        # Lazy datasets expose per-item lengths that must be re-sliced
        # to match this split.
        self.is_lazy = (isinstance(ds, lazy_array_loader) or (hasattr(ds, 'is_lazy') and ds.is_lazy))
        if self.is_lazy:
            self.lens = itemgetter(*self.split_inds)(list(self.wrapped_data.lens))
        # Caches for the selected X / Y views, filled on first access.
        self._X = None
        self._Y = None

    def __len__(self):
        return len(self.split_inds)

    def __getitem__(self, index):
        # Translate the split-local index to the wrapped dataset's index.
        return self.wrapped_data[self.split_inds[index]]

    def SetTokenizer(self, tokenizer):
        self.wrapped_data.SetTokenizer(tokenizer)

    def GetTokenizer(self):
        return self.wrapped_data.GetTokenizer()

    def X(self):
        if (self._X is None):
            self._X = itemgetter(*self.split_inds)(self.wrapped_data.X)
        return self._X

    def Y(self):
        if (self._Y is None):
            self._Y = np.array(itemgetter(*self.split_inds)(self.wrapped_data.Y))
        return self._Y

    def __iter__(self):
        for idx in self.split_inds:
            (yield self.wrapped_data[idx])
def CalculateTotalPCharge(mol):
    """Sum of the positive Gasteiger partial charges over all atoms of
    `mol` (explicit hydrogens are added first).

    Returns 0 when the molecule has no atoms.
    """
    mol_with_h = Chem.AddHs(mol)
    # `iter_step` is the module-level Gasteiger iteration count.
    GMCharge.ComputeGasteigerCharges(mol_with_h, iter_step)
    charges = [float(atom.GetProp('_GasteigerCharge')) for atom in mol_with_h.GetAtoms()]
    if not charges:
        return 0
    charge_array = numpy.array(charges, 'd')
    return sum(charge_array[(charge_array > 0)])
# NOTE(review): the parenthesized (Output, Input) pair below looks like a
# truncated Dash callback decorator — presumably `@app.callback(...)` or
# `@callback(...)`; confirm against the original app module.
(Output('data-download', 'options'), Input('data-download-parent', 'n_clicks'))
def select_download_parent(n_clicks):
    """Populate the download dropdown with the available model names when
    the parent element triggered this callback."""
    options = []
    ctx = dash.callback_context
    prop_id = ctx.triggered_id
    # Only react when our own parent element was the trigger.
    if (prop_id == 'data-download-parent'):
        models = file_manager.get_model_list()
        options += [{'label': s, 'value': s} for s in models]
    return options
def compact(x):
    """Drop None values from a dict (by value) or list; any other input
    is returned unchanged."""
    if isinstance(x, dict):
        return {key: value for (key, value) in x.items() if value is not None}
    if isinstance(x, list):
        return [item for item in x if item is not None]
    return x
# NOTE(review): `(frozen=True)` below looks like a truncated decorator head —
# presumably `@dataclass(frozen=True)`; confirm against the original file.
(frozen=True)
class LanguageLogicalStatement():
    """Base for natural-language logical statements about a subject."""
    # The noun the statement is about.
    subject: str
    # Category of the subject; persons never receive a specifier.
    subject_category: str
    # Default article used when none is passed explicitly.
    specifier_type: Literal[('a', 'the')]

    def generate_specified_subject(self, upper=False, specifier_type=None) -> str:
        """Return the subject preceded by an article ('a'/'an'/'the'),
        optionally capitalised; person subjects are returned bare."""
        specifier_type = (self.specifier_type if (specifier_type is None) else specifier_type)
        # Double negation == (subject_category == 'person'); persons
        # (by category or literal subject) take no specifier.
        if ((not (self.subject_category != 'person')) or (self.subject == 'person')):
            return self.subject
        base_char = (specifier_type[0].upper() if upper else specifier_type[0].lower())
        if (specifier_type == 'a'):
            # Use 'an' before vowel-initial subjects.
            if (self.subject[0].lower() in ['a', 'e', 'i', 'o', 'u']):
                return f'{base_char}n {self.subject}'
            else:
                return f'{base_char}{specifier_type[1:]} {self.subject}'
        return f'{base_char} {self.subject}'
def stringify(val):
    """Return a debug string representation of `val`.

    The original body contained an `if True:` debug leftover that made the
    `pprint.pformat` branch unreachable; the dead branch is removed and the
    effective behavior — plain ``repr`` — is kept.
    """
    return repr(val)