code
stringlengths
101
5.91M
def get_schema(schema_name: str = 'simple_swagger.yaml', **kwargs: Any) -> BaseSchema:
    """Build a raw schema dict for *schema_name* and wrap it as a schemathesis schema."""
    raw_schema = make_schema(schema_name, **kwargs)
    return schemathesis.from_dict(raw_schema)
class MAMLLearner(BaseLearner):
    """MAML-style learner: clones its wrapped module and adapts it by manual SGD."""

    def __init__(self, module: Layer, learning_rate: float, approximate: bool = True) -> None:
        super().__init__(module)
        self.learning_rate = learning_rate
        self.approximate = approximate

    def clone(self) -> Layer:
        """Return a new learner wrapping an independent copy of the module."""
        return MAMLLearner(
            module=clone_model(self.module),
            learning_rate=self.learning_rate,
            approximate=self.approximate,
        )

    def adapt(self, loss: Tensor) -> None:
        """Take one in-place gradient-descent step on the module for *loss*."""
        manual_gradient_descent(
            self.module,
            lr=self.learning_rate,
            loss=loss,
            approximate=self.approximate,
        )
class OxfordIIITPet(VisionDataset):
    """Oxford-IIIT Pet dataset: images with category labels and/or trimap segmentations."""

    # NOTE(review): each entry should presumably be a (url, md5) pair — the URLs
    # appear to have been stripped from this copy (each entry is a single md5
    # string), yet _download() unpacks (url, md5). Verify against upstream.
    _RESOURCES = ((' ' '5c4f3ee8e5d25df40f4fd59a7f44e54c'), (' ' '95a8c909bbe2e81eed6a22bccdf3f68f'))
    _VALID_TARGET_TYPES = ('category', 'segmentation')

    def __init__(self, args, split: str='train', target_types: Union[(Sequence[str], str)]='category', transforms: Optional[Callable]=None, target_transform: Optional[Callable]=None, download: bool=False):
        # 'train' maps onto the official 'trainval' split; everything else to 'test'.
        split = ('trainval' if (split == 'train') else 'test')
        self._split = verify_str_arg(split, 'split', ('trainval', 'test'))
        if isinstance(target_types, str):
            target_types = [target_types]
        self._target_types = [verify_str_arg(target_type, 'target_types', self._VALID_TARGET_TYPES) for target_type in target_types]
        super().__init__(args.data_dir, transforms=transforms, transform=get_transforms(split, args.crop_size, args.pretrained_model), target_transform=target_transform)
        self._base_folder = (pathlib.Path(self.root) / 'oxford-iiit-pet')
        self._images_folder = (self._base_folder / 'images')
        self._anns_folder = (self._base_folder / 'annotations')
        self._segs_folder = (self._anns_folder / 'trimaps')
        if download:
            self._download()
        if (not self._check_exists()):
            raise RuntimeError('Dataset not found. You can use download=True to download it')
        image_ids = []
        self._labels = []
        # Split files contain '<image_id> <label> ...' lines; labels are 1-based on disk.
        with open((self._anns_folder / f'{self._split}.txt')) as file:
            for line in file:
                (image_id, label, *_) = line.strip().split()
                image_ids.append(image_id)
                self._labels.append((int(label) - 1))
        # Class names are recovered from image-id prefixes ('american_bulldog_12' ->
        # 'American Bulldog'), ordered by their numeric label.
        self.classes = [' '.join((part.title() for part in raw_cls.split('_'))) for (raw_cls, _) in sorted({(image_id.rsplit('_', 1)[0], label) for (image_id, label) in zip(image_ids, self._labels)}, key=(lambda image_id_and_label: image_id_and_label[1]))]
        self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
        self._images = [(self._images_folder / f'{image_id}.jpg') for image_id in image_ids]
        self._segs = [(self._segs_folder / f'{image_id}.png') for image_id in image_ids]

    def __len__(self) -> int:
        return len(self._images)

    def __getitem__(self, idx: int) -> Tuple[(Any, Any)]:
        """Return {'image': ..., 'label': ...}; label is None / scalar / tuple
        depending on how many target types were requested."""
        image = Image.open(self._images[idx]).convert('RGB')
        target: Any = []
        for target_type in self._target_types:
            if (target_type == 'category'):
                target.append(self._labels[idx])
            else:
                target.append(Image.open(self._segs[idx]))
        # Collapse the list of targets to the conventional shape.
        if (not target):
            target = None
        elif (len(target) == 1):
            target = target[0]
        else:
            target = tuple(target)
        if self.transforms:
            (image, target) = self.transforms(image, target)
        sample = {'image': image, 'label': target}
        return sample

    def _check_exists(self) -> bool:
        # True only when both the images and annotations directories are present.
        for folder in (self._images_folder, self._anns_folder):
            if (not (os.path.exists(folder) and os.path.isdir(folder))):
                return False
        else:
            return True

    def _download(self) -> None:
        if self._check_exists():
            return
        for (url, md5) in self._RESOURCES:
            download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
def get_sort_order(dist, clust_order, cluster_threshold, feature_order):
    """Greedily reorder features so that mutually close features become adjacent.

    Args:
        dist: square matrix of pairwise feature distances (indexable as dist[i, j]).
        clust_order: per-feature cluster ordering; its argsort gives each
            feature's rank, used to choose among equally eligible candidates.
        cluster_threshold: maximum distance at which two features count as
            belonging to the same cluster.
        feature_order: initial ordering of feature indices; a reordered copy is
            returned and the input is left untouched.

    Returns:
        The reordered copy of ``feature_order``.
    """
    clust_inds = np.argsort(clust_order)
    feature_order = feature_order.copy()
    for i in range((len(feature_order) - 1)):
        ind1 = feature_order[i]
        # Candidate to place immediately after position i; start with the
        # current neighbor.
        next_ind = feature_order[(i + 1)]
        next_ind_pos = (i + 1)
        for j in range((i + 1), len(feature_order)):
            ind2 = feature_order[j]
            if (dist[(ind1, ind2)] <= cluster_threshold):
                # Prefer ind2 when the current candidate lies outside ind1's
                # cluster, or when ind2 ranks earlier in the cluster ordering.
                if ((dist[(ind1, next_ind)] > cluster_threshold) or (clust_inds[ind2] < clust_inds[next_ind])):
                    next_ind = ind2
                    next_ind_pos = j
        # Rotate the chosen feature into position i+1, shifting the
        # intervening entries one slot to the right.
        for j in range(next_ind_pos, (i + 1), (- 1)):
            feature_order[j] = feature_order[(j - 1)]
        feature_order[(i + 1)] = next_ind
    return feature_order
def test_load_bert_weights(bert_config):
    """LukeModel.load_bert_weights must copy every BERT tensor verbatim."""
    pretrained = AutoModel.from_pretrained(BERT_MODEL_NAME)
    source_state = pretrained.state_dict()
    luke = LukeModel(_create_luke_config(bert_config, 5, bert_config.hidden_size))
    luke.load_bert_weights(source_state)
    loaded_state = luke.state_dict()
    for key, tensor in source_state.items():
        assert torch.equal(loaded_state[key], tensor)
class NonDataDescriptorMixin(DataDocumenterMixinBase):
    """Mixin that detects non-data descriptors and suppresses their value/doc output."""

    def import_object(self, raiseerror: bool = False) -> bool:
        ret = super().import_object(raiseerror)
        # A successfully imported object that is *not* an attribute descriptor
        # is treated as a non-data descriptor.
        self.non_data_descriptor = bool(ret and not inspect.isattributedescriptor(self.object))
        return ret

    def should_suppress_value_header(self) -> bool:
        if not getattr(self, 'non_data_descriptor', False):
            return True
        return super().should_suppress_directive_header()

    def get_doc(self) -> Optional[List[List[str]]]:
        if getattr(self, 'non_data_descriptor', False):
            # The docstring belongs to the descriptor object, not the attribute.
            return None
        return super().get_doc()
def pad(x, max_len: int, padding_value: int = 0):
    """Pad or truncate a 1-D array to exactly ``max_len`` elements.

    Args:
        x: 1-D array-like to pad or truncate.
        max_len: target length of the returned vector.  (Fixed annotation:
            this is an int, not ``np.array``.)
        padding_value: value used to fill the tail when ``x`` is short.

    Returns:
        ``x`` extended with ``padding_value`` up to ``max_len``, or the first
        ``max_len`` elements of ``x`` when it is already long enough.
    """
    fill_size = max_len - len(x)
    if fill_size > 0:
        filler = np.full(fill_size, fill_value=padding_value)
        return np.concatenate((x, filler), axis=0)
    # Already long enough: truncate.
    return x[:max_len]
def data_generator4(train_data, test_data, train_labels, test_labels, configs, training_mode):
    """Build train/val/test DataLoaders of sliding windows for anomaly detection.

    Normalizes both splits with statistics fitted on train+test combined,
    windows the series with ``configs.window_size``/``configs.time_step``,
    derives a binary per-window label, and returns
    (train_loader, val_loader, test_loader, test_anomaly_window_num).
    """
    train_time_series_ts = train_data
    test_time_series_ts = test_data
    # Fit mean/variance normalization on the concatenated series.
    mvn = MeanVarNormalize()
    mvn.train((train_time_series_ts + test_time_series_ts))
    (bias, scale) = (mvn.bias, mvn.scale)
    train_time_series = train_time_series_ts.to_pd().to_numpy()
    train_time_series = ((train_time_series - bias) / scale)
    test_time_series = test_time_series_ts.to_pd().to_numpy()
    test_time_series = ((test_time_series - bias) / scale)
    train_labels = train_labels.to_pd().to_numpy()
    test_labels = test_labels.to_pd().to_numpy()
    # Each pair of label transitions delimits one anomaly segment, hence /2.
    test_anomaly_window_num = int((len(np.where((test_labels[1:] != test_labels[:(- 1)]))[0]) / 2))
    train_x = subsequences(train_time_series, configs.window_size, configs.time_step)
    test_x = subsequences(test_time_series, configs.window_size, configs.time_step)
    train_y = subsequences(train_labels, configs.window_size, configs.time_step)
    test_y = subsequences(test_labels, configs.window_size, configs.time_step)
    train_y_window = np.zeros(train_x.shape[0])
    test_y_window = np.zeros(test_x.shape[0])
    # A window counts as anomalous when any of its first time_step labels is set.
    train_anomaly_window_num = 0  # NOTE: computed but not returned
    for (i, item) in enumerate(train_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            train_anomaly_window_num += 1
            train_y_window[i] = 1
        else:
            train_y_window[i] = 0
    for (i, item) in enumerate(test_y[:]):
        if (sum(item[:configs.time_step]) >= 1):
            test_y_window[i] = 1
        else:
            test_y_window[i] = 0
    # Chronological (unshuffled) 80/20 train/validation split.
    (train_x, val_x, train_y, val_y) = train_test_split(train_x, train_y_window, test_size=0.2, shuffle=False)
    # (N, T, C) -> (N, C, T) for conv-style models.
    train_x = train_x.transpose((0, 2, 1))
    val_x = val_x.transpose((0, 2, 1))
    test_x = test_x.transpose((0, 2, 1))
    train_dat_dict = dict()
    train_dat_dict['samples'] = train_x
    train_dat_dict['labels'] = train_y
    val_dat_dict = dict()
    val_dat_dict['samples'] = val_x
    val_dat_dict['labels'] = val_y
    test_dat_dict = dict()
    test_dat_dict['samples'] = test_x
    test_dat_dict['labels'] = test_y_window
    train_dataset = Load_Dataset(train_dat_dict, configs, training_mode)
    val_dataset = Load_Dataset(val_dat_dict, configs, training_mode)
    test_dataset = Load_Dataset(test_dat_dict, configs, training_mode)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=configs.batch_size, shuffle=True, drop_last=configs.drop_last, num_workers=0)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=configs.batch_size, shuffle=False, drop_last=False, num_workers=0)
    return (train_loader, val_loader, test_loader, test_anomaly_window_num)
def GenerateSM80_TensorOp_1688(manifest, cuda_version):
    """Register SM80 TF32 TensorOp (16x8x8 MMA) GEMM and Conv2d kernels.

    No-op unless the CUDA toolkit is at least 11.0.  Emits both
    f32-accumulator-output and mixed (tf32-output) variants for every
    layout/tile combination.
    """
    if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
        return
    # All combinations of column/row-major A and B, with column-major C.
    layouts = [
        (LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
        (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
        (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
        (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)
    ]
    math_instructions = [MathInstruction([16, 8, 8], DataType.tf32, DataType.tf32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add)]
    min_cc = 80
    max_cc = 1024
    # Tiles whose shared-memory footprint restricts them to SM80 exactly.
    max_cc_smem_limited = 80
    alignment_constraints = [4, 2, 1]
    for math_inst in math_instructions:
        # TileDescription(threadblock_shape, stages, warp_count, ...)
        tile_descriptions = [
            TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
            TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
            TileDescription([64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc_smem_limited),
            TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc_smem_limited),
            TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc_smem_limited),
            TileDescription([64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc_smem_limited),
            TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc_smem_limited),
            TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
            TileDescription([64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc)
        ]
        # [element_a, element_b, element_c, element_accumulator/epilogue]
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        # Mixed variant writes C in the input (tf32) type.
        data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
        conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
def test_count_min():
    """ak.min over a ragged ListOffsetArray, on every axis, for floats and bools."""
    offsets = ak.index.Index64(np.array([0, 3, 6, 10], dtype=np.int64))

    floats = ak.contents.ListOffsetArray(
        offsets,
        ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4])),
    )
    assert to_list(floats) == [[1.1, 2.2, 3.3], [0.0, 2.2, 0.0], [0.0, 2.2, 0.0, 4.4]]
    float_cases = [
        (-1, [1.1, 0.0, 0.0]),
        (1, [1.1, 0.0, 0.0]),
        (-2, [0.0, 2.2, 0.0, 4.4]),
        (0, [0.0, 2.2, 0.0, 4.4]),
    ]
    for axis, expected in float_cases:
        reduced = ak.min(floats, axis, highlevel=False)
        assert to_list(reduced) == expected
        # typetracer must predict the same form as the eager computation
        assert ak.min(floats.to_typetracer(), axis, highlevel=False).form == reduced.form

    bools = ak.contents.ListOffsetArray(
        offsets,
        ak.contents.NumpyArray(
            np.array([True, True, True, False, True, False, False, True, False, True])
        ),
    )
    assert to_list(bools) == [[True, True, True], [False, True, False], [False, True, False, True]]
    bool_cases = [
        (-1, [True, False, False]),
        (1, [True, False, False]),
        (-2, [False, True, False, True]),
        (0, [False, True, False, True]),
    ]
    for axis, expected in bool_cases:
        reduced = ak.min(bools, axis, highlevel=False)
        assert to_list(reduced) == expected
        assert ak.min(bools.to_typetracer(), axis, highlevel=False).form == reduced.form
class XLMRobertaXLForMultipleChoice(metaclass=DummyObject):
    # Auto-generated dummy placeholder: instantiation delegates to
    # requires_backends, which raises a helpful error unless torch is installed.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test():
    """ak.singletons must yield the same form for eager, backend and typetracer layouts."""
    array = ak.Array(
        ak.contents.ByteMaskedArray(
            mask=ak.index.Index8([1, 1, 0, 0]),
            content=ak.contents.NumpyArray([3, 4, 2, 5]),
            valid_when=True,
        )
    )
    reference_form = ak.singletons(array, highlevel=False).form
    assert reference_form == ak.singletons(
        ak.to_backend(array, 'typetracer'), highlevel=False
    ).form
    assert reference_form == ak.singletons(
        array.layout.to_typetracer(forget_length=True), highlevel=False
    ).form
def main(version=DEFAULT_VERSION):
    """Download the setuptools archive and install it; returns the install result."""
    options = _parse_args()
    archive = download_setuptools(download_base=options.download_base)
    return _install(archive, _build_install_args(options))
def get_pssm_for_file(filename):
    """Parse a whitespace-token PSSM file into (scop_id, positions).

    The file is read as whitespace-separated ``key:score`` tokens; the first
    two tokens are treated as a header and skipped.  Every 20 consecutive
    scores form one sequence position.

    Args:
        filename: path to the PSSM file; the SCOP id is the final path
            component up to its first dot.

    Returns:
        Tuple (scop_id, list of positions, each a list of 20 score strings).
    """
    scop_id = filename.split('/')[-1].split('.')[0]
    pssm_for_scop_id = []
    with open(filename, 'r') as f:
        tokens = f.read().split()
    position_mutations = []
    for i, token in enumerate(tokens[2:]):
        # Flush the previous group of 20 scores at each group boundary.
        if (i % 20) == 0 and i != 0:
            pssm_for_scop_id.append(position_mutations)
            position_mutations = []
        position_mutations.append(token.split(':')[1])
    # BUG FIX: the final group was previously dropped because groups were only
    # flushed at the *start* of the next group.
    if position_mutations:
        pssm_for_scop_id.append(position_mutations)
    return (scop_id, pssm_for_scop_id)
class OrtCUDAProviderOptions(ctypes.Structure):
    """ctypes mirror of ONNX Runtime's OrtCUDAProviderOptions C struct.

    Field names, order and types must match the C ABI exactly — do not
    reorder or retype these entries.
    """

    _fields_ = [
        ('device_id', ctypes.c_int),
        ('cudnn_conv_algo_search', ctypes.c_int),
        ('cuda_mem_limit', ctypes.c_size_t),
        ('do_copy_in_default_stream', ctypes.c_int),
        ('has_user_compute_stream', ctypes.c_int),
        ('user_compute_stream', ctypes.c_void_p),
    ]
def load_and_show(ds, shot):
    """Visualize one random few-shot sample: support images, query and target.

    Assumes ds[i] is a sequence laid out as (query, *support, target, extra)
    and that ds exposes a color ``palette`` — TODO confirm against the
    dataset class.
    """
    plt.rcParams.update({'font.size': 16})
    in_ = ds[np.random.choice(range(len(ds)))]
    (qry, supp, tgt, _) = (in_[0], in_[1:(- 2)], in_[(- 2)], in_[(- 1)])
    (fig, axes) = plt.subplots(1, (shot + 1), figsize=(30, 10))
    for (i, s) in enumerate(supp):
        if isinstance(s, tuple):
            (im, anno) = (s[0], s[1])
        else:
            anno = s
            im = qry
        # NOTE(review): this unconditionally replaces `im` with a copy of the
        # query image, so the tuple branch's support image above is never
        # actually drawn — verify whether that is intended.
        im = np.copy(qry)
        im = Image.fromarray(im.astype(np.uint8))
        d = ImageDraw.Draw(im)
        # One circle per non-zero annotation location, colored by class index.
        for loc in zip(*np.where((anno != 0))):
            draw_circle(d, 10, loc[1:], color=ds.palette[loc[0]])
        axes[i].imshow(im)
        axes[i].set_title('Support')
    for (_, ax) in np.ndenumerate(axes):
        ax.set_axis_off()
    (fig, axes) = plt.subplots(1, 2, figsize=(30, 20))
    axes[0].imshow(qry)
    axes[0].set_title('Query')
    tgt = Image.fromarray(tgt.astype(np.uint8))
    tgt.putpalette(ds.palette)
    axes[1].imshow(tgt)
    axes[1].set_title('Target')
    for (_, ax) in np.ndenumerate(axes):
        ax.set_axis_off()
def parse_all_ranks(folder_path, with_rank0=True):
    """Element-wise maximum of per-rank duration arrays found in *folder_path*.

    Files whose name contains 'rank' are parsed; rank 0 files can be
    excluded.  Returns None when any matched file is unreadable/empty or
    when no file matched at all.
    """
    per_rank = []
    for name in os.listdir(folder_path):
        if 'rank' not in name:
            continue
        if not with_rank0 and 'rank_0' in name:
            continue
        try:
            with open(os.path.join(folder_path, name)) as handle:
                durations = get_durations(handle.readlines())
            if not durations:
                raise ValueError('Bad file')
            per_rank.append(durations)
        except Exception:
            print('Bad file', folder_path, name)
            return None
    try:
        return np.max(per_rank, axis=0)
    except Exception as err:
        print('Error: empty directory', folder_path, err)
        return None
def traceest(A, m3, seed=None):
    """Hutch++ stochastic trace estimator for a square linear operator.

    Args:
        A: linear-operator-like object exposing ``.shape`` and ``.matmat``.
        m3: number of random probe vectors per phase.
        seed: optional seed for the random generator (reproducibility).

    Returns:
        An estimate of ``trace(A)``.

    Raises:
        ValueError: if ``A.shape`` is not square 2-D.
    """
    rng = np.random.default_rng(seed)
    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
        raise ValueError('Expected A to be like a square matrix.')
    n = A.shape[-1]
    # Phase 1: sketch the dominant range of A with random sign probes.
    S = rng.choice([-1.0, 1.0], [n, m3])
    (Q, _) = qr(A.matmat(S), overwrite_a=True, mode='economic')
    # BUG FIX: the matrix products below were missing their '@' operators
    # (stripped in this copy), which made the function a syntax error.
    trQAQ = np.trace(Q.conj().T @ A.matmat(Q))
    # Phase 2: Hutchinson estimate on the range-deflated remainder.
    G = rng.choice([-1, 1], [n, m3])
    right = G - Q @ (Q.conj().T @ G)
    trGAG = np.trace(right.conj().T @ A.matmat(right))
    return trQAQ + trGAG / m3
def test_SDM():
    """Smoke test: build, compile and sanity-check the SDM retrieval model."""
    model_name = 'SDM'
    inputs, targets, user_feature_columns, item_feature_columns, history_feature_list = get_xy_fd_sdm(False)
    # TF2 requires graph mode for this model; TF1 needs the learning phase set.
    if tf.__version__ >= '2.0.0':
        tf.compat.v1.disable_eager_execution()
    else:
        K.set_learning_phase(True)
    sampler_config = NegativeSampler(sampler='uniform', num_sampled=2, item_name='item')
    model = SDM(
        user_feature_columns,
        item_feature_columns,
        history_feature_list,
        units=8,
        sampler_config=sampler_config,
    )
    model.compile('adam', sampledsoftmaxloss)
    check_model(model, model_name, inputs, targets)
# NOTE(review): the decorators on this test were corrupted in this copy (the
# leading '@pytest.mark' text was lost, leaving bare '.experimental' /
# '.parametrize'); restored as standard pytest marks — confirm 'experimental'
# is a registered custom mark in this project.
@pytest.mark.experimental
@pytest.mark.parametrize('pad_columns, padding_value, array_size',
                         [(['item_id', 'timestamp'], [0, (- 1)], 2)])
@pytest.mark.parametrize('dataset, result',
                         [pytest.param('dataframe', 'dataframe_two_columns_len_two'),
                          pytest.param('dataframe_pandas', 'dataframe_two_columns_len_two_pandas')])
def test_padder_two_columns_len_two(pad_columns, padding_value, array_size, dataset, result, request):
    """Padder should pad both columns to length 2 and keep all id columns."""
    dataframe = request.getfixturevalue(dataset)
    expected = request.getfixturevalue(result)
    is_spark = isinstance(dataframe, SparkDataFrame)
    padder = Padder(pad_columns=pad_columns, padding_value=padding_value, array_size=array_size)
    padded = padder.transform(dataframe)
    # Spark rows need collecting before their columns can be inspected.
    columns = padded.collect()[0].asDict().keys() if is_spark else padded.columns
    assert 'user_id' in columns
    assert 'item_id' in columns
    assert 'timestamp' in columns
    if is_spark is True:
        assert padded.toPandas().equals(expected.toPandas())
    else:
        assert padded.equals(expected)
class Classifer(object):
    """TF1 genre classifier used to evaluate symbolic-music style transfer.

    Trains a discriminator-style CNN to tell domain A from domain B, then
    scores original / transferred / cycle-reconstructed phrases.
    (Class name spelling kept for compatibility with existing callers.)
    """

    def __init__(self, sess, args):
        self.sess = sess
        self.dataset_dir = args.dataset_dir
        self.dataset_A_dir = args.dataset_A_dir
        self.dataset_B_dir = args.dataset_B_dir
        self.sample_dir = args.sample_dir
        self.batch_size = args.batch_size
        self.image_size = args.fine_size
        self.time_step = args.time_step
        self.pitch_range = args.pitch_range
        self.input_c_dim = args.input_nc
        self.sigma_c = args.sigma_c  # noise stddev used for classifier test inputs
        self.sigma_d = args.sigma_d
        self.model = args.model
        self.generator = generator_resnet
        self.discriminator = discriminator_classifier
        self.criterionGAN = softmax_criterion
        OPTIONS = namedtuple('OPTIONS', 'batch_size image_size gf_dim df_dim output_c_dim is_training')
        self.options = OPTIONS._make((args.batch_size, args.fine_size, args.ngf, args.ndf, args.output_nc, (args.phase == 'train')))
        self._build_model()
        self.now_datetime = get_now_datetime()
        self.saver = tf.train.Saver()

    def _build_model(self):
        """Create placeholders, the classifier network, loss and accuracy ops."""
        self.origin_train = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range, self.input_c_dim])
        self.label_train = tf.placeholder(tf.float32, [self.batch_size, 2])
        self.origin_test = tf.placeholder(tf.float32, [None, self.time_step, self.pitch_range, self.input_c_dim])
        self.label_test = tf.placeholder(tf.float32, [None, 2])
        # The test/eval branches reuse the same 'classifier' variable scope.
        self.D_origin = self.discriminator(self.origin_train, self.options, False, name='classifier')
        self.D_test = self.discriminator(self.origin_test, self.options, True, name='classifier')
        self.d_loss = self.criterionGAN(self.D_origin, self.label_train)
        self.d_loss_sum = tf.summary.scalar('d_loss', self.d_loss)
        self.D_test_softmax = tf.nn.softmax(self.D_test)
        self.correct_prediction_test = tf.equal(tf.argmax(self.D_test_softmax, 1), tf.argmax(self.label_test, 1))
        self.accuracy_test = tf.reduce_mean(tf.cast(self.correct_prediction_test, tf.float32))
        # Standalone scoring branch for arbitrary batches of phrases.
        self.test_midi = tf.placeholder(tf.float32, [None, self.time_step, self.pitch_range, self.input_c_dim])
        self.test_result = self.discriminator(self.test_midi, self.options, True, name='classifier')
        self.test_result_softmax = tf.nn.softmax(self.test_result)
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if ('classifier' in var.name)]
        for var in t_vars:
            print(var.name)

    def train(self, args):
        """Train on domain A (one-hot (1,0)) vs domain B ((0,1)) phrase files."""
        self.lr = tf.placeholder(tf.float32, None, name='learning_rate')
        self.d_optim = tf.train.AdamOptimizer(self.lr, beta1=args.beta1).minimize(self.d_loss, var_list=self.d_vars)
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        log_dir = './logs/classifier_{}2{}_{}_{}'.format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime, str(self.sigma_c))
        self.writer = tf.summary.FileWriter(log_dir, self.sess.graph)
        counter = 1
        # Build the (file path, one-hot label) training list.
        dataA = glob('./datasets/{}/train/*.*'.format(self.dataset_A_dir))
        dataB = glob('./datasets/{}/train/*.*'.format(self.dataset_B_dir))
        labelA = [(1.0, 0.0) for _ in range(len(dataA))]
        labelB = [(0.0, 1.0) for _ in range(len(dataB))]
        data_origin = (dataA + dataB)
        label_origin = (labelA + labelB)
        training_list = []
        for pair in zip(data_origin, label_origin):
            training_list.append(pair)
        print('Successfully create training list!')
        # Same construction for the held-out test set.
        dataA = glob('./datasets/{}/test/*.*'.format(self.dataset_A_dir))
        dataB = glob('./datasets/{}/test/*.*'.format(self.dataset_B_dir))
        labelA = [(1.0, 0.0) for _ in range(len(dataA))]
        labelB = [(0.0, 1.0) for _ in range(len(dataB))]
        data_origin = (dataA + dataB)
        label_origin = (labelA + labelB)
        testing_list = []
        for pair in zip(data_origin, label_origin):
            testing_list.append(pair)
        print('Successfully create testing list!')
        # Rescale phrases from [0, 1] to [-1, 1] and perturb with Gaussian noise.
        data_test = [((np.load(pair[0]) * 2.0) - 1.0) for pair in testing_list]
        data_test = np.array(data_test).astype(np.float32)
        gaussian_noise = np.random.normal(0, self.sigma_c, [data_test.shape[0], data_test.shape[1], data_test.shape[2], data_test.shape[3]])
        data_test += gaussian_noise
        label_test = [pair[1] for pair in testing_list]
        label_test = np.array(label_test).astype(np.float32).reshape(len(label_test), 2)
        for epoch in range(args.epoch):
            shuffle(training_list)
            batch_idx = (len(training_list) // self.batch_size)
            # Constant LR for epoch_step epochs, then linear decay to zero.
            lr = (args.lr if (epoch < args.epoch_step) else ((args.lr * (args.epoch - epoch)) / (args.epoch - args.epoch_step)))
            for idx in range(batch_idx):
                batch = training_list[(idx * self.batch_size):((idx + 1) * self.batch_size)]
                batch_data = [((np.load(pair[0]) * 2.0) - 1.0) for pair in batch]
                batch_data = np.array(batch_data).astype(np.float32)
                batch_label = [pair[1] for pair in batch]
                batch_label = np.array(batch_label).astype(np.float32).reshape(len(batch_label), 2)
                (_, summary_str, d_loss) = self.sess.run([self.d_optim, self.d_loss_sum, self.d_loss], feed_dict={self.origin_train: batch_data, self.label_train: batch_label, self.lr: lr})
                self.writer.add_summary(summary_str, counter)
                counter += 1
            self.save(args.checkpoint_dir, epoch)
            accuracy_test = self.sess.run(self.accuracy_test, feed_dict={self.origin_test: data_test, self.label_test: label_test})
            # d_loss here is from the last batch of the epoch.
            print('epoch:', epoch, 'testing accuracy:', accuracy_test, 'loss:', d_loss)

    def save(self, checkpoint_dir, step):
        """Write a checkpoint under a run-specific subdirectory."""
        model_name = 'classifier.model'
        model_dir = 'classifier_{}2{}_{}_{}'.format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime, str(self.sigma_c))
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        if (not os.path.exists(checkpoint_dir)):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)

    def load(self, checkpoint_dir):
        """Restore the latest checkpoint for this run; return True on success."""
        print(' [*] Reading checkpoint...')
        model_dir = 'classifier_{}2{}_{}_{}'.format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime, str(self.sigma_c))
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if (ckpt and ckpt.model_checkpoint_path):
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False

    def test(self, args):
        """Score origin/transfer/cycle phrases, save tagged MIDIs and a ranking file."""
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        # Collect matching origin/transfer/cycle .npy files, ordered by index prefix.
        sample_files_origin = glob('./test/{}2{}_{}_{}_{}/{}/npy/origin/*.*'.format(self.dataset_A_dir, self.dataset_B_dir, self.model, self.sigma_d, self.now_datetime, args.which_direction))
        sample_files_origin.sort(key=(lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0])))
        sample_files_transfer = glob('./test/{}2{}_{}_{}_{}/{}/npy/transfer/*.*'.format(self.dataset_A_dir, self.dataset_B_dir, self.model, self.sigma_d, self.now_datetime, args.which_direction))
        sample_files_transfer.sort(key=(lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0])))
        sample_files_cycle = glob('./test/{}2{}_{}_{}_{}/{}/npy/cycle/*.*'.format(self.dataset_A_dir, self.dataset_B_dir, self.model, self.sigma_d, self.now_datetime, args.which_direction))
        sample_files_cycle.sort(key=(lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0])))
        sample_files = list(zip(sample_files_origin, sample_files_transfer, sample_files_cycle))
        if self.load(args.checkpoint_dir):
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed...')
        test_dir_mid = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/mid_attach_prob'.format(self.dataset_A_dir, self.dataset_B_dir, self.model, self.sigma_d, self.now_datetime, args.which_direction))
        if (not os.path.exists(test_dir_mid)):
            os.makedirs(test_dir_mid)
        count_origin = 0
        count_transfer = 0
        count_cycle = 0
        line_list = []
        for idx in range(len(sample_files)):
            print('Classifying midi: ', sample_files[idx])
            sample_origin = np.load(sample_files[idx][0])
            sample_transfer = np.load(sample_files[idx][1])
            sample_cycle = np.load(sample_files[idx][2])
            test_result_origin = self.sess.run(self.test_result_softmax, feed_dict={self.test_midi: ((sample_origin * 2.0) - 1.0)})
            test_result_transfer = self.sess.run(self.test_result_softmax, feed_dict={self.test_midi: ((sample_transfer * 2.0) - 1.0)})
            test_result_cycle = self.sess.run(self.test_result_softmax, feed_dict={self.test_midi: ((sample_cycle * 2.0) - 1.0)})
            origin_transfer_diff = np.abs((test_result_origin - test_result_transfer))
            content_diff = np.mean((((sample_origin * 1.0) - (sample_transfer * 1.0)) ** 2))
            # Softmax column 0 is the domain-A probability, column 1 domain-B.
            if (args.which_direction == 'AtoB'):
                line_list.append(((idx + 1), content_diff, origin_transfer_diff[0][0], test_result_origin[0][0], test_result_transfer[0][0], test_result_cycle[0][0]))
                count_origin += (1 if (np.argmax(test_result_origin[0]) == 0) else 0)
                count_transfer += (1 if (np.argmax(test_result_transfer[0]) == 0) else 0)
                count_cycle += (1 if (np.argmax(test_result_cycle[0]) == 0) else 0)
                path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format((idx + 1), test_result_origin[0][0]))
                path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format((idx + 1), test_result_transfer[0][0]))
                path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format((idx + 1), test_result_cycle[0][0]))
            else:
                line_list.append(((idx + 1), content_diff, origin_transfer_diff[0][1], test_result_origin[0][1], test_result_transfer[0][1], test_result_cycle[0][1]))
                count_origin += (1 if (np.argmax(test_result_origin[0]) == 1) else 0)
                count_transfer += (1 if (np.argmax(test_result_transfer[0]) == 1) else 0)
                count_cycle += (1 if (np.argmax(test_result_cycle[0]) == 1) else 0)
                path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format((idx + 1), test_result_origin[0][1]))
                path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format((idx + 1), test_result_transfer[0][1]))
                path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format((idx + 1), test_result_cycle[0][1]))
            save_midis(sample_origin, path_origin)
            save_midis(sample_transfer, path_transfer)
            save_midis(sample_cycle, path_cycle)
        # Rank samples by |P(origin) - P(transfer)|, largest style change first.
        line_list.sort(key=(lambda x: x[2]), reverse=True)
        with open(os.path.join(test_dir_mid, 'Rankings_{}.txt'.format(args.which_direction)), 'w') as f:
            f.write('Id Content_diff P_O - P_T Prob_Origin Prob_Transfer Prob_Cycle')
            for i in range(len(line_list)):
                f.writelines(('\n%5d %5f %5f %5f %5f %5f' % (line_list[i][0], line_list[i][1], line_list[i][2], line_list[i][3], line_list[i][4], line_list[i][5])))
            f.close()
        accuracy_origin = ((count_origin * 1.0) / len(sample_files))
        accuracy_transfer = ((count_transfer * 1.0) / len(sample_files))
        accuracy_cycle = ((count_cycle * 1.0) / len(sample_files))
        print('Accuracy of this classifier on test datasets is :', accuracy_origin, accuracy_transfer, accuracy_cycle)

    def test_famous(self, args):
        """Score one hand-picked famous song (origin vs transfer) and print averages."""
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        song_o = np.load('./datasets/famous_songs/C2J/merged_npy/Scenes from Childhood (Schumann).npy')
        song_t = np.load('./datasets/famous_songs/C2J/transfer/Scenes from Childhood (Schumann).npy')
        print(song_o.shape, song_t.shape)
        if self.load(args.checkpoint_dir):
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed...')
        sum_o_A = 0
        sum_o_B = 0
        sum_t_A = 0
        sum_t_B = 0
        # Accumulate per-phrase softmax probabilities for origin and transfer.
        for idx in range(song_t.shape[0]):
            phrase_o = song_o[idx]
            phrase_o = phrase_o.reshape(1, phrase_o.shape[0], phrase_o.shape[1], 1)
            origin = self.sess.run(self.test_result_softmax, feed_dict={self.test_midi: ((phrase_o * 2.0) - 1.0)})
            phrase_t = song_t[idx]
            phrase_t = phrase_t.reshape(1, phrase_t.shape[0], phrase_t.shape[1], 1)
            transfer = self.sess.run(self.test_result_softmax, feed_dict={self.test_midi: ((phrase_t * 2.0) - 1.0)})
            sum_o_A += origin[0][0]
            sum_o_B += origin[0][1]
            sum_t_A += transfer[0][0]
            sum_t_B += transfer[0][1]
        print('origin, source:', (sum_o_A / song_t.shape[0]), 'target:', (sum_o_B / song_t.shape[0]))
        print('transfer, source:', (sum_t_A / song_t.shape[0]), 'target:', (sum_t_B / song_t.shape[0]))
def draw_box_with_tag(im, bbox, cls, score, class_list):
    """Draw a red detection box plus a filled blue tag with class name and score."""
    label = class_list[int(cls)]
    tag_height = 12
    # Width scales with the label so the text fits inside the tag.
    tag_width = len(label) * 8 + 30
    box = bbox.astype(np.int32)
    # Detection rectangle.
    cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), (0, 0, 255, 255), 2)
    # Filled tag background above the box, clamped at the image top.
    cv2.rectangle(im, (box[0], max(0, box[1] - tag_height)), (box[0] + tag_width, box[1]),
                  (255, 0, 0, 80), -1)
    cv2.putText(im, '{} {:.2f}'.format(label, score.item()),
                (box[0], max(0, box[1] - 2)),
                cv2.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255, 80))
    return im
def _p_value(s, object_pairs_hook):
    """Parse one TOML value at the current parser position.

    Returns a ('kind', raw, parsed_value, pos) tuple, or fails the parser
    state when nothing matches.
    """
    pos = s.pos()
    # Booleans.
    if s.consume('true'):
        return ('bool', s.last(), True, pos)
    if s.consume('false'):
        return ('bool', s.last(), False, pos)
    # Basic (double-quoted) strings, multi-line or single-line.
    if s.consume('"'):
        if s.consume('""'):
            r = _p_basicstr_content(s, _basicstr_ml_re)
            s.expect('"""')
        else:
            r = _p_basicstr_content(s, _basicstr_re)
            s.expect('"')
        return ('str', r, r, pos)
    # Literal (single-quoted) strings, multi-line or single-line.
    if s.consume("'"):
        if s.consume("''"):
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect("'''")
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect("'")
        return ('str', r, r, pos)
    # Datetimes with optional fractional seconds (group 7) and TZ offset (8/9).
    if s.consume_re(_datetime_re):
        m = s.last()
        s0 = m.group(0)
        r = map(int, m.groups()[:6])
        if m.group(7):
            micro = float(m.group(7))
        else:
            micro = 0
        if m.group(8):
            g = ((int(m.group(8), 10) * 60) + int(m.group(9), 10))
            tz = _TimeZone(datetime.timedelta(0, (g * 60)))
        else:
            tz = _TimeZone(datetime.timedelta(0, 0))
        (y, m, d, H, M, S) = r  # NOTE: deliberately rebinds `m` (the match object)
        dt = datetime.datetime(y, m, d, H, M, S, int((micro * 1000000)), tz)
        return ('datetime', s0, dt, pos)
    # Numbers: a '.', 'e' or 'E' marks a float, otherwise an integer;
    # underscores are allowed as digit separators.
    if s.consume_re(_float_re):
        m = s.last().group(0)
        r = m.replace('_', '')
        if (('.' in m) or ('e' in m) or ('E' in m)):
            return ('float', m, float(r), pos)
        else:
            return ('int', m, int(r, 10), pos)
    # Arrays: the `with s` transaction backtracks once no further ',' follows.
    if s.consume('['):
        items = []
        with s:
            while True:
                _p_ews(s)
                items.append(_p_value(s, object_pairs_hook=object_pairs_hook))
                s.commit()
                _p_ews(s)
                s.expect(',')
                s.commit()
        _p_ews(s)
        s.expect(']')
        return ('array', None, items, pos)
    # Inline tables: '{ key = value, ... }'.
    if s.consume('{'):
        _p_ws(s)
        items = object_pairs_hook()
        if (not s.consume('}')):
            k = _p_key(s)
            _p_ws(s)
            s.expect('=')
            _p_ws(s)
            items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
            _p_ws(s)
            while s.consume(','):
                _p_ws(s)
                k = _p_key(s)
                _p_ws(s)
                s.expect('=')
                _p_ws(s)
                items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
                _p_ws(s)
            s.expect('}')
        return ('table', None, items, pos)
    # Nothing matched: signal failure to the caller via the parser state.
    s.fail()
class BookParser():
    """Extract (title, author, ncx href) from an EPUB OPF document using
    expat SAX-style callbacks.

    Fix: ``self.buffer`` is now initialized in ``__init__`` so character or
    end-element events arriving before a dc:title/dc:creator start tag can
    no longer raise AttributeError.
    """

    def __init__(self, xmlcontent=None):
        self.xml = xmlcontent
        self.title = ''
        self.author = ''
        self.inTitle = 0
        self.inAuthor = 0
        self.ncx = ''
        # Accumulates character data for the element currently being read.
        self.buffer = ''

    def startElement(self, name, attributes):
        """Expat start-element handler."""
        if name == 'dc:title':
            self.buffer = ''
            self.inTitle = 1
        elif name == 'dc:creator':
            self.buffer = ''
            self.inAuthor = 1
        elif name == 'item':
            # The NCX (table of contents) manifest item goes by several ids.
            if attributes['id'] in ('ncx', 'toc', 'ncxtoc'):
                self.ncx = attributes['href']

    def characters(self, data):
        """Expat character-data handler; expat may deliver text in chunks."""
        if self.inTitle or self.inAuthor:
            self.buffer += data

    def endElement(self, name):
        """Expat end-element handler: commit the accumulated buffer."""
        if name == 'dc:title':
            self.inTitle = 0
            self.title = self.buffer
            self.buffer = ''
        elif name == 'dc:creator':
            self.inAuthor = 0
            self.author = self.buffer
            self.buffer = ''

    def parseBook(self):
        """Run expat over the stored XML; return (title, author, ncx)."""
        parser = xml.parsers.expat.ParserCreate()
        parser.StartElementHandler = self.startElement
        parser.EndElementHandler = self.endElement
        parser.CharacterDataHandler = self.characters
        parser.Parse(self.xml, 1)
        return (self.title, self.author, self.ncx)
class ShearY(DauphinTransform):
    """Shear the image along the y-axis by a level-derived magnitude with a
    random sign."""

    value_range = (0.0, 0.3)

    def __init__(self, name=None, prob=1.0, level=0):
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        # Map the discrete level into the continuous shear range, then flip
        # the direction half of the time.
        shear = categorize_value(self.level, self.value_range, 'float')
        if random.random() > 0.5:
            shear = -shear
        coeffs = (1, 0, 0, shear, 1, 0)
        warped = pil_img.transform(pil_img.size, Image.AFFINE, coeffs)
        return (warped, label)
def eval_complexity(sessions, complexity_dict):
    """Collect the total (attribute + option) complexity for each session.

    Sessions whose instruction key is missing from ``complexity_dict`` are
    reported and skipped.

    Fix: the missing-key branch previously fell through and appended the
    previous session's values (or raised NameError on the first session);
    it now skips the session with ``continue``.
    """
    complexity = []
    marker = ', and price lower'
    for sess in sessions:
        instruction = get_sess_ins(sess)
        # Strip the trailing price constraint so the key matches the dict.
        if instruction.find(marker) > 0:
            ins_key = instruction[:instruction.find(marker)]
        else:
            ins_key = instruction
        if ins_key in complexity_dict:
            (attr_complexity, option_complexity) = complexity_dict[ins_key]
        else:
            print('no complexity for ', ins_key)
            continue
        complexity.append(attr_complexity + option_complexity)
    return complexity
class EfficientPyrPool(nn.Module):
    """Efficient pyramid pooling: project channels down, process the feature
    map at several scales with one depthwise 3x3 conv per scale, then merge
    and project to the output width.

    Bug fix: the original declared ``scales`` with a mutable list default and
    called ``scales.sort(reverse=True)`` on it, mutating both the shared
    default object and any caller-supplied list.  A sorted private copy is
    now used instead (default switched to an equivalent immutable tuple).
    """

    def __init__(self, in_planes, proj_planes, out_planes, scales=(2.0, 1.5, 1.0, 0.5, 0.1), last_layer_br=True):
        super(EfficientPyrPool, self).__init__()
        self.stages = nn.ModuleList()
        # Descending order, on a private copy — no caller-visible mutation.
        scales = sorted(scales, reverse=True)
        self.projection_layer = CBR(in_planes, proj_planes, 1, 1)
        # One depthwise conv per pyramid scale.
        for _ in scales:
            self.stages.append(nn.Conv2d(proj_planes, proj_planes, kernel_size=3, stride=1, padding=1, bias=False, groups=proj_planes))
        self.merge_layer = nn.Sequential(
            BR(proj_planes * len(scales)),
            Shuffle(groups=len(scales)),
            CBR(proj_planes * len(scales), proj_planes, 3, 1, groups=proj_planes),
            nn.Conv2d(proj_planes, out_planes, kernel_size=1, stride=1, bias=(not last_layer_br)))
        if last_layer_br:
            self.br = BR(out_planes)
        self.last_layer_br = last_layer_br
        self.scales = scales

    def forward(self, x):
        hs = []
        x = self.projection_layer(x)
        (height, width) = x.size()[2:]
        for (i, stage) in enumerate(self.stages):
            # Target size for this scale, clamped to a minimum of 5 px.
            h_s = int(math.ceil(height * self.scales[i]))
            w_s = int(math.ceil(width * self.scales[i]))
            h_s = h_s if h_s > 5 else 5
            w_s = w_s if w_s > 5 else 5
            if self.scales[i] < 1.0:
                # Downsample -> depthwise conv -> upsample back.
                h = F.adaptive_avg_pool2d(x, output_size=(h_s, w_s))
                h = stage(h)
                h = F.interpolate(h, (height, width), mode='bilinear', align_corners=True)
            elif self.scales[i] > 1.0:
                # Upsample -> depthwise conv -> pool back down.
                h = F.interpolate(x, (h_s, w_s), mode='bilinear', align_corners=True)
                h = stage(h)
                h = F.adaptive_avg_pool2d(h, output_size=(height, width))
            else:
                h = stage(x)
            hs.append(h)
        out = torch.cat(hs, dim=1)
        out = self.merge_layer(out)
        if self.last_layer_br:
            return self.br(out)
        return out
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register operators, constructors and methods for ns3::int64x64_t.

    Behaviorally identical to the expanded pybindgen registration code: the
    same registration calls are issued in exactly the same order, here
    driven by data tables instead of repeated literal calls.
    """
    i64 = root_module['ns3::int64x64_t']
    # Plain numeric operand types, in the order the binding generator emits
    # them (widest unsigned first, then signed, then double).
    numeric_operands = (
        'long long unsigned int const',
        'long unsigned int const',
        'unsigned int const',
        'short unsigned int const',
        'unsigned char const',
        'long long int const',
        'long int const',
        'int const',
        'short int const',
        'signed char const',
        'double const',
    )
    for op in ('*', '+', '-', '/'):
        for operand in numeric_operands:
            cls.add_binary_numeric_operator(op, i64, i64, param(operand, 'right'))
        if op == '-':
            # Unary negation is registered between the plain numeric '-'
            # overloads and the int64x64_t '-' overload.
            cls.add_unary_numeric_operator('-')
        cls.add_binary_numeric_operator(op, i64, i64, param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    for op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(op, param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    for op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(op)
    # Constructors mirror the C++ overload set.
    cls.add_constructor([])
    for ctor_type in ('double', 'int', 'long int', 'long long int',
                      'unsigned int', 'long unsigned int',
                      'long long unsigned int'):
        cls.add_constructor([param(ctor_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors and arithmetic helpers.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return
def get_similarity(feature, feats, metric='cosine'):
    """Return similarity scores between one feature vector and each row of
    ``feats``, computed as the negated pairwise distance under ``metric``."""
    query = np.expand_dims(feature, axis=0)
    distances = cdist(query, feats, metric)[0]
    return -distances
def replicate(network, devices, detach=False):
    """Replicate ``network`` onto each device in ``devices``.

    Broadcasts all parameters and buffers once, then builds shallow module
    copies whose ``_parameters``/``_buffers``/``_modules`` dicts are rewired
    to the per-device copies.  Returns one root replica module per device.
    """
    from ._functions import Broadcast
    devices = tuple(devices)
    num_replicas = len(devices)
    # Broadcast every parameter in one flat autograd-aware call, then slice
    # the flat result back into one list per replica.
    params = list(network.parameters())
    param_indices = {param: idx for (idx, param) in enumerate(params)}
    param_copies = Broadcast.apply(devices, *params)
    if (len(params) > 0):
        param_copies = [param_copies[i:(i + len(params))] for i in range(0, len(param_copies), len(params))]
    # Buffers are broadcast coalesced (no autograd involvement).
    buffers = list(network._all_buffers())
    buffer_indices = {buf: idx for (idx, buf) in enumerate(buffers)}
    buffer_copies = comm.broadcast_coalesced(buffers, devices)
    modules = list(network.modules())
    module_copies = [[] for device in devices]
    module_indices = {}
    # First pass: one shallow copy of every module per replica; the copies
    # get their own (still-shared-content) attribute dicts.
    for (i, module) in enumerate(modules):
        module_indices[module] = i
        for j in range(num_replicas):
            replica = module.__new__(type(module))
            replica.__dict__ = module.__dict__.copy()
            replica._parameters = replica._parameters.copy()
            replica._buffers = replica._buffers.copy()
            replica._modules = replica._modules.copy()
            module_copies[j].append(replica)
    # Second pass: rewire child-module, parameter and buffer references to
    # point at this replica's copies instead of the originals.
    for (i, module) in enumerate(modules):
        for (key, child) in module._modules.items():
            if (child is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._modules[key] = None
            else:
                module_idx = module_indices[child]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._modules[key] = module_copies[j][module_idx]
        for (key, param) in module._parameters.items():
            if (param is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._parameters[key] = None
            else:
                param_idx = param_indices[param]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    # detach=True severs the autograd link to the source
                    # parameters.
                    replica._parameters[key] = (param_copies[j][param_idx].detach() if detach else param_copies[j][param_idx])
        for (key, buf) in module._buffers.items():
            if (buf is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._buffers[key] = None
            else:
                buffer_idx = buffer_indices[buf]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._buffers[key] = buffer_copies[j][buffer_idx]
    # Index 0 of each replica list is the root module of that replica.
    return [module_copies[j][0] for j in range(num_replicas)]
class SimpleQFunction(QFunction):
    """A minimal Q-function built on SimpleMLPModel (TF1 graph mode).

    NOTE(review): the action dimension is taken from the *observation*
    space's flat_dim — presumably intentional for this simple test helper;
    confirm against callers.
    """

    def __init__(self, env_spec, name='SimpleQFunction'):
        super().__init__(name)
        self.obs_dim = (env_spec.observation_space.flat_dim,)
        action_dim = env_spec.observation_space.flat_dim
        self.model = SimpleMLPModel(output_dim=action_dim)
        self._q_val = None
        self._initialize()

    def _initialize(self):
        # Build the model graph under this Q-function's variable scope and
        # remember the scope so variables can be retrieved later.
        obs_ph = tf.compat.v1.placeholder(tf.float32, ((None,) + self.obs_dim), name='obs')
        with tf.compat.v1.variable_scope(self.name, reuse=False) as vs:
            self._variable_scope = vs
            self._q_val = self.model.build(obs_ph).outputs

    def q_vals(self):
        # The Q-value output tensor of the built graph.
        return self._q_val

    def __setstate__(self, state):
        # Rebuild the graph pieces that were dropped during pickling.
        self.__dict__.update(state)
        self._initialize()

    def __getstate__(self):
        # Graph tensors cannot be pickled; drop them from the state.
        new_dict = self.__dict__.copy()
        del new_dict['_q_val']
        return new_dict
class BaseDataset(Dataset):
    """Base image dataset: resolves the task sequence, optionally remaps
    labels between task sets, and builds the torchvision transform
    pipeline from ``transform_args``.

    Fix: ``_set_transforms`` now raises ValueError for an unknown
    ``normalization`` value instead of failing later with NameError on the
    unbound ``normalize`` variable.
    """

    def __init__(self, data_dir, transform_args, split, is_training, dataset_name, tasks_to, dataset_task_sequence=None):
        assert isinstance(data_dir, str)
        assert isinstance(split, str)
        assert isinstance(dataset_name, str)
        assert isinstance(tasks_to, dict)
        self.dataset_name = dataset_name
        self.data_dir = Path(data_dir)
        self.split = split
        self.is_training = is_training
        # The task sequence can be overridden independently of the dataset.
        if dataset_task_sequence is not None:
            self.original_tasks = TASK_SEQUENCES[dataset_task_sequence]
        else:
            self.original_tasks = TASK_SEQUENCES[dataset_name]
        self.target_tasks = tasks_to
        # Only remap labels when source and target task sets differ.
        self.label_mapper = None
        if self.original_tasks != self.target_tasks:
            self.label_mapper = LabelMapper(self.original_tasks, self.target_tasks)
        self._set_transforms(transform_args)

    def _set_class_weights(self, labels):
        """Compute per-class positive/negative frequencies for loss weighting."""
        self.p_count = (labels == 1).sum(axis=0)
        self.n_count = (labels == 0).sum(axis=0)
        if self.label_mapper is not None:
            self.p_count = self.label_mapper.map(self.p_count)
            self.n_count = self.label_mapper.map(self.n_count)
        self.total = self.p_count + self.n_count
        self.class_weights = [(self.n_count / self.total), (self.p_count / self.total)]

    def _set_transforms(self, t_args):
        """Assemble the image transform pipeline (resize, augment, normalize)."""
        if t_args.maintain_ratio:
            transforms_list = [transforms.Resize(t_args.scale)]
        else:
            transforms_list = [transforms.Resize((t_args.scale, t_args.scale))]
        if self.is_training:
            # Augmentations only during training; disabled entries become
            # None and are filtered out below.
            transforms_list += [
                (transforms.RandomHorizontalFlip() if t_args.horizontal_flip else None),
                (transforms.RandomRotation(t_args.rotate) if t_args.rotate else None),
                (transforms.RandomCrop((t_args.crop, t_args.crop)) if (t_args.crop != 0) else None)]
        else:
            transforms_list += [(transforms.CenterCrop((t_args.crop, t_args.crop)) if t_args.crop else None)]
        if t_args.clahe:
            transforms_list += [CLAHE(clip_limit=2.0, tile_grid_size=(8, 8))]
        if t_args.normalization == 'imagenet':
            normalize = transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
        elif t_args.normalization == 'cxr_norm':
            normalize = transforms.Normalize(mean=CXR_MEAN, std=CXR_STD)
        else:
            # Previously an unknown value crashed below with NameError.
            raise ValueError(f'Unsupported normalization: {t_args.normalization!r}')
        transforms_list += [transforms.ToTensor(), normalize]
        self.transform = transforms.Compose([t for t in transforms_list if t])

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        raise NotImplementedError
def test__setup_logging_double_verbose_without_log_file():
    """-vv without a log file: expect a single handler at DEBUG level."""
    # logging keeps global state; shut down and reload the module so
    # handlers configured by earlier tests do not leak into this one.
    logging.shutdown()
    importlib.reload(logging)
    _setup_logging(2, False)
    logger = logging.getLogger('')
    assert (len(logger.handlers) == 1)
    assert (logger.level == logging.DEBUG)
    # Restore a pristine logging module for subsequent tests.
    logging.shutdown()
    importlib.reload(logging)
def create_model(args, device):
    """Build the model pair: the backbone encoder and its projection head,
    both placed on ``device``."""
    backbone = create_backbone(args, device)
    head = create_projection_head(args, device)
    return [backbone, head]
def test_same_top_different_top_concept():
    """Module-level AMRs ``a`` and ``b`` (same top node, different top
    concept) should match 3 triples out of 4 on each side."""
    assert (get_amr_match(a, b) == (3, 4, 4))
    # smatch caches triples in a module-level dict; clear it so later tests
    # start from a clean state.
    smatch.match_triple_dict.clear()
def get_external_blob_names(net, lexical_scope):
    """Return the blob names of ``net`` that cross its boundary with the
    enclosing ``lexical_scope``.

    Inputs are blobs the net reads but never defines (found via SSA
    analysis); each must already exist in ``lexical_scope``.  Outputs are
    op outputs whose names also appear in ``lexical_scope``.

    Fix: the assertion message now stringifies ``input_name`` explicitly —
    non-str blob references previously raised TypeError while building the
    message instead of the intended AssertionError.
    """
    net_proto = net.Proto()
    (net_ssa, _) = core.get_ssa(net_proto)
    input_names = core.get_undefined_blobs(net_ssa)
    for input_name in input_names:
        assert str(input_name) in lexical_scope, (
            'Input blob ' + str(input_name) + ' is undefined')
    output_names = set()
    for op in net_proto.op:
        for output in op.output:
            if output in lexical_scope:
                output_names.add(output)
    return (input_names, output_names)
def dummy_lower(context, builder, sig, args):
    """Numba lowering hook: compile ``convert`` into the current function.

    ``convert`` sums the ``x`` and ``y`` fields of the record argument.
    """
    def convert(rec):
        # rec is expected to expose numeric x and y attributes/fields.
        return (rec.x + rec.y)
    return context.compile_internal(builder, convert, sig, args)
def test_KLD():
    """KL divergence of a multinomial sample against its own source
    distribution, pinned against a recorded reference value."""
    (ALPHA, N, P) = (1.0, 100, 20)
    random.seed(SEED)
    # qk: a random P-dimensional distribution; pk: N multinomial draws.
    qk = random.dirichlet(([ALPHA] * P))
    pk = random.multinomial(N, qk)
    estimator = ndd.kullback_leibler_divergence
    # NOTE(review): -0. looks like a seeded regression pin rather than a
    # mathematical expectation — confirm against the recorded estimate.
    ref_result = (- 0.)
    assert (estimator(pk, qk) == approx(ref_result))
class Mockingjay(SslProblem):
    """Mockingjay SSL pre-training problem (masked feature reconstruction).

    NOTE(review): the bare ``_parent_cfg(...)`` expressions preceding each
    method, and the ``cls``-first method signatures without decorators, look
    like configuration/classmethod decorators whose ``@`` was stripped
    during extraction — confirm against the upstream source.
    """

    # Default problem configuration: LibriSpeech corpus, timestamp-capped
    # training batches, Transformer upstream + Mockingjay predictor, and an
    # L1 feature-reconstruction task.
    _parent_cfg(corpus=dict(_cls=librispeech_for_pretrain, dataset_root='???'), train_datapipe=pretrain_task_pipe_config, train_sampler=dict(_cls=MaxTimestampBatchSampler, max_timestamp=(16000 * 20), shuffle=True), valid_datapipe=pretrain_task_pipe_config, valid_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), test_datapipe=pretrain_task_pipe_config, test_sampler=dict(_cls=FixedBatchSizeBatchSampler, batch_size=2), upstream=dict(_cls=TransformerMockingjay, config=_transformer_config, input_dim=_input_size, output_attentions=False, keep_multihead_output=False, with_input_module=True), predictor=dict(_cls=PredictorMockingjay, config=_transformer_config, output_dim=_input_size, input_dim=None), task=dict(_cls=FeatReconstructionTask, loss=L1Loss))

    def setup_problem(cls, **cfg):
        super().setup_problem(**cfg)

    # Optimizer and trainer defaults for the training stage.
    _parent_cfg(optimizer=dict(_cls='torch.optim.AdamW', lr=0.0002), trainer=dict(total_steps=1000000, eval_step=50000, save_step=50000, gradient_clipping=5.0, gradient_accumulate_steps=4, valid_metric='loss', valid_higher_better=False))

    def train(cls, **cfg):
        super().train(**cfg)

    _parent_cfg()

    def inference(cls, **cfg):
        super().inference(**cfg)

    def save_additional(cls, additional_dir: Workspace, workspace: Workspace, task: Task):
        # Export weights in the legacy s3prl "all_states" checkpoint layout.
        all_states = dict(Config={}, SpecHead=task.predictor.state_dict(), Transformer=task.upstream.state_dict(), Upstream_Config=dict(transformer=_transformer_config, audio=_audio_config, task=dict(sequence_length=0)))
        torch.save(all_states, (str(additional_dir.parent.resolve()) + '/all_states.ckpt'))

    # Stage pipeline: setup -> train -> inference.
    _parent_cfg(start_stage=0, final_stage=2, stage_0=dict(_method='setup_problem'), stage_1=dict(_method='train'), stage_2=dict(_method='inference'))

    def run_stages(cls, **cfg):
        super().run_stages(**cfg)
class CerebrasEngine(CausalEngine):
    """Causal LM engine wrapping the Cerebras-GPT-1.3B checkpoint."""

    config_name: str = 'cerebras_engine'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        super().__init__(model_name='cerebras/Cerebras-GPT-1.3B', weights_path=weights_path)
        # This tokenizer ships without a pad token; reuse EOS for padding.
        tokenizer = self.tokenizer
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id
# NOTE(review): the decorator head is truncated in this chunk — the leading
# '@pytest.mark' (or similar) before '.parametrize' is missing; restore it
# from the full file before running.
.parametrize('module_creator', [ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]), ModuleCreator(ResUnit(16), [(4, 3, 32, 32)]), ModuleCreator(NestedTestNet(), [(4, 3, 32, 32), (4, 3, 32, 32)])])
def test_simple_create_graph_def(module_creator):
    """A graph traced from ProtoVariables must reproduce the module's
    outputs when executed with real variable inputs."""
    module = module_creator.module
    proto_variable_inputs = [nn.ProtoVariable(shape) for shape in module_creator.input_shape]
    # Tracing call: records the module into the default graph definition.
    outputs = module(*proto_variable_inputs)
    g = nn.graph_def.get_default_graph()
    variable_inputs = module_creator.get_variable_inputs()
    outputs = g(*variable_inputs)
    ref_outputs = module(*variable_inputs)
    forward_variable_and_check_equal(outputs, ref_outputs)
class RegexReplace(PP_Module):
    """Preprocessing module that applies a regex search/replace to an
    orthography string."""

    def __init__(self, search, replace):
        super().__init__()
        # Stored as given; re.sub compiles (and caches) the pattern lazily.
        self.regex_search = search
        self.regex_replace = replace

    def process(self, orth: str):
        """Return ``orth`` with every occurrence of the pattern substituted."""
        pattern, repl = self.regex_search, self.regex_replace
        return re.sub(pattern, repl, orth)
def make_tfrecord_loaders(args):
    """Build (train, valid, test) TFRecord data loaders plus the tokenizer.

    Valid/test loaders are None when their data paths are unset; evaluation
    loaders may use a different sequence length and prediction count.
    """
    import data_utils.tf_dl
    loader_kwargs = {
        'batch_size': args.batch_size,
        'max_seq_len': args.seq_length,
        'max_preds_per_seq': args.max_preds_per_seq,
        'train': True,
        'num_workers': max(args.num_workers, 1),
        'seed': args.seed + args.rank + 1,
        'threaded_dl': args.num_workers > 0,
    }
    train = data_utils.tf_dl.TFRecordDataLoader(args.train_data, **loader_kwargs)
    # Switch the shared kwargs into evaluation mode.
    loader_kwargs['train'] = False
    if args.eval_seq_length is not None:
        loader_kwargs['max_seq_len'] = args.eval_seq_length
    if args.eval_max_preds_per_seq is not None:
        loader_kwargs['max_preds_per_seq'] = args.eval_max_preds_per_seq
    valid = (data_utils.tf_dl.TFRecordDataLoader(args.valid_data, **loader_kwargs)
             if args.valid_data is not None else None)
    test = (data_utils.tf_dl.TFRecordDataLoader(args.test_data, **loader_kwargs)
            if args.test_data is not None else None)
    tokenizer = data_utils.make_tokenizer(args.tokenizer_type, train, args.tokenizer_path, args.vocab_size, args.tokenizer_model_type, cache_dir=args.cache_dir)
    return ((train, valid, test), tokenizer)
def register_Ns3EpcX2PdcpUser_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EpcX2PdcpUser: default and copy
    constructors plus the pure-virtual ReceiveMcPdcpPdu callback."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcX2PdcpUser const &', 'arg0')])
    # ReceiveMcPdcpPdu(EpcX2Sap::UeDataParams): pure virtual receive hook.
    cls.add_method('ReceiveMcPdcpPdu', 'void', [param('ns3::EpcX2Sap::UeDataParams', 'params')], is_pure_virtual=True, is_virtual=True)
    return
class Vocab():
    """Bidirectional token<->index mapping with an index offset and an
    ``unknown`` fallback value.

    Fixes: the constructor no longer crashes when ``tokens`` is None (its
    documented default previously raised TypeError), and
    ``reverse_map_sequence`` gained a default for ``unknown`` so it matches
    the other mapping methods (backward compatible for existing callers).
    """

    def __init__(self, tokens=None, offset=0, unknown=None):
        self.mapping = {}
        self.reverse_mapping = {}
        self.offset = offset
        self.unknown = unknown
        # Bug fix: iterating the None default raised TypeError.
        if tokens is not None:
            for token in tokens:
                self.add_token(token)

    def __len__(self):
        # Size includes the reserved offset indices.
        return len(self.mapping) + self.offset

    def __call__(self, doc):
        return self.map_sequence(doc)

    def add_token(self, token):
        """Assign the next free index to ``token`` (no-op if already known)."""
        if token not in self.mapping:
            index = len(self)
            self.mapping[token] = index
            self.reverse_mapping[index] = token

    def __repr__(self):
        fmt_str = self.__class__.__name__
        fmt_str += '(vocab={0}, offset={1}, unknown={2})'.format(self.__len__(), self.offset, self.unknown)
        return fmt_str

    def map(self, token, unknown=None):
        """Return the index of ``token``, falling back to ``unknown`` or the
        instance-level unknown value."""
        if token in self.mapping:
            return self.mapping[token]
        return unknown if unknown is not None else self.unknown

    def map_sequence(self, tokens, unknown=None):
        """Map an iterable of tokens to a numpy array of indices."""
        return np.array([self.map(token, unknown=unknown) for token in tokens])

    def reverse_map(self, index, unknown=None):
        """Return the token stored at ``index`` or the unknown fallback."""
        if index in self.reverse_mapping:
            return self.reverse_mapping[index]
        return unknown if unknown is not None else self.unknown

    def reverse_map_sequence(self, indices, unknown=None):
        """Map indices back to tokens (``unknown`` now defaults to None)."""
        return [self.reverse_map(index, unknown=unknown) for index in indices]
def GAS_main(adj_list: list, r_support: list, features: tf.Tensor, r_feature: tf.SparseTensor, label: tf.Tensor, masks: list, args: argparse.Namespace) -> None:
    """Train the GAS model for ``args.epochs`` full-batch steps, then
    evaluate on the test mask.

    Fix: the ``args`` annotation was ``argparse.ArgumentParser().parse_args()``,
    which *executed* command-line parsing when the function was defined; it
    is now the plain ``argparse.Namespace`` type.
    """
    model = GAS(args)
    optimizer = optimizers.Adam(lr=args.lr)
    for _ in tqdm(range(args.epochs)):
        # One full-batch gradient step per epoch (masks[0] = train mask).
        with tf.GradientTape() as tape:
            (train_loss, train_acc) = model([adj_list, r_support, features, r_feature, label, masks[0]])
            print(f'train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f}')
        grads = tape.gradient(train_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # Final evaluation (masks[1] = test mask).
    (test_loss, test_acc) = model([adj_list, r_support, features, r_feature, label, masks[1]])
    print(f'test_loss: {test_loss:.4f}, test_acc: {test_acc:.4f}')
class SetWithGet(set):
    """A set supporting random-member access via get_any() or indexing."""

    def get_any(self):
        """Return an arbitrary (uniformly random) member.

        Fix: ``random.sample`` on a set raises TypeError since Python 3.11
        (deprecated in 3.9); pick from a materialized tuple instead.
        """
        return random.choice(tuple(self))

    def __getitem__(self, item):
        # The subscript is ignored on purpose: any index yields a random member.
        return self.get_any()
def _generate_state(base_seed, worker_id): INIT_A = MULT_A = INIT_B = MULT_B = MIX_MULT_L = MIX_MULT_R = XSHIFT = ((4 * 8) // 2) MASK32 = entropy = [worker_id, (base_seed & MASK32), (base_seed >> 32), 0] pool = ([0] * 4) hash_const_A = INIT_A def hash(value): nonlocal hash_const_A value = ((value ^ hash_const_A) & MASK32) hash_const_A = ((hash_const_A * MULT_A) & MASK32) value = ((value * hash_const_A) & MASK32) value = ((value ^ (value >> XSHIFT)) & MASK32) return value def mix(x, y): result_x = ((MIX_MULT_L * x) & MASK32) result_y = ((MIX_MULT_R * y) & MASK32) result = ((result_x - result_y) & MASK32) result = ((result ^ (result >> XSHIFT)) & MASK32) return result for i in range(len(pool)): pool[i] = hash(entropy[i]) for i_src in range(len(pool)): for i_dst in range(len(pool)): if (i_src != i_dst): pool[i_dst] = mix(pool[i_dst], hash(pool[i_src])) hash_const_B = INIT_B state = [] for i_dst in range(4): data_val = pool[i_dst] data_val = ((data_val ^ hash_const_B) & MASK32) hash_const_B = ((hash_const_B * MULT_B) & MASK32) data_val = ((data_val * hash_const_B) & MASK32) data_val = ((data_val ^ (data_val >> XSHIFT)) & MASK32) state.append(data_val) return state
def dict_to_safe_json(d, sort=False):
    """Recursively convert ``d`` so every leaf is JSON-serializable,
    stringifying any value that ``safe_json`` rejects.

    Preserves OrderedDict-ness of the input; with ``sort=True`` the result
    is an OrderedDict sorted by key.
    """
    safe = collections.OrderedDict() if isinstance(d, collections.OrderedDict) else {}
    for key, value in d.items():
        if safe_json(value):
            safe[key] = value
        elif isinstance(value, (dict, collections.OrderedDict)):
            safe[key] = dict_to_safe_json(value, sort=sort)
        else:
            safe[key] = str(value)
    if sort:
        return collections.OrderedDict(sorted(safe.items()))
    return safe
def run(i, datasets):
    """One evaluation run over the per-category graph datasets: 20 graphs
    per category for training, the rest for testing; prints and returns
    per-category accuracies (percent).

    Relies on module-level names: args, model, state_dict, train, test,
    WILLOW, PairDataset, DataLoader.
    """
    datasets = [dataset.shuffle() for dataset in datasets]
    # First 20 graphs of each category train; the remainder tests.
    train_datasets = [dataset[:20] for dataset in datasets]
    test_datasets = [dataset[20:] for dataset in datasets]
    train_datasets = [PairDataset(train_dataset, train_dataset, sample=False) for train_dataset in train_datasets]
    train_dataset = torch.utils.data.ConcatDataset(train_datasets)
    train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, follow_batch=['x_s', 'x_t'])
    # Reset the shared model to its initial weights before each run.
    model.load_state_dict(state_dict)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    for epoch in range(1, (1 + args.epochs)):
        train(train_loader, optimizer)
    accs = [(100 * test(test_dataset)) for test_dataset in test_datasets]
    print(f'Run {i:02d}:')
    print(' '.join([category.ljust(13) for category in WILLOW.categories]))
    print(' '.join([f'{acc:.2f}'.ljust(13) for acc in accs]))
    return accs
def convertToDense(listOfLists, length):
    """Expand lists of sparse (index, value) pairs into dense rows.

    Each output row has ``length`` entries, zero everywhere except at the
    given indices.
    """
    dense_rows = []
    for sparse_row in listOfLists:
        row = [0] * length
        for index, value in sparse_row:
            row[index] = value
        dense_rows.append(row)
    return dense_rows
class RNN(tf.keras.Model):
    """Unrolled RNN over a GraphCell whose extra recurrent states follow a
    per-timestep dependency table rather than plain sequential order.

    NOTE(review): this copy was recovered from a collapsed source; the
    nesting of the post-loop code (everything from ``last_output = ...``
    down) was reconstructed inside the ``if self.unroll:`` branch — it uses
    names defined only there (``mask_list``), and no non-unrolled branch
    exists.  Confirm against the original file.
    """

    def __init__(self, units, input_dim, edge_types, shared_emb, recurrent_size=4, return_sequences=False, return_state=False, go_backwards=False, unroll=True, time_major=False, zero_output_for_mask=True, **kwargs):
        super(RNN, self).__init__(**kwargs)
        self.units = units
        self.input_dim = input_dim
        self.edge_types = edge_types
        self.edge_embeddings = shared_emb
        self.recurrent_size = recurrent_size
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.unroll = unroll
        self.time_major = time_major
        self.zero_output_for_mask = zero_output_for_mask
        self.supports_masking = True
        # All cell weights use uniform init in [-1/sqrt(units), 1/sqrt(units)].
        self.cell = GraphCell(units, input_dim, edge_types, shared_emb, recurrent_size, kernel_initializer=tf.initializers.RandomUniform((- (1 / np.sqrt(units))), (1 / np.sqrt(units))), recurrent_initializer=tf.initializers.RandomUniform((- (1 / np.sqrt(units))), (1 / np.sqrt(units))), bias_initializer=tf.initializers.RandomUniform((- (1 / np.sqrt(units))), (1 / np.sqrt(units))))

    def call(self, inputs, input_lengths, dependencies, edge_types, mask=None, cell_mask=None, initial_states=None, training=True):
        timesteps = (inputs.shape[0] if self.time_major else inputs.shape[1])
        if (self.unroll and (timesteps is None)):
            raise ValueError('Cannot unroll a RNN if the time dimension is undefined.')

        def step(inputs, states, edge_types, cell_mask, training):
            # One cell application; normalize states to a list.
            (output, new_states) = self.cell(inputs, states, edge_types, cell_mask, training)
            if (not nest.is_sequence(new_states)):
                new_states = [new_states]
            return (output, new_states)

        def swap_batch_timestep(input_t):
            # Transpose axes 0 and 1 (batch <-> time).
            axes = list(range(len(input_t.shape)))
            (axes[0], axes[1]) = (1, 0)
            return array_ops.transpose(input_t, axes)

        # Internally the loop runs time-major; convert on the way in.
        if (not self.time_major):
            inputs = nest.map_structure(swap_batch_timestep, inputs)
            dependencies = swap_batch_timestep(dependencies)
            cell_mask = swap_batch_timestep(cell_mask)
            edge_types = swap_batch_timestep(edge_types)
        flatted_inputs = nest.flatten(inputs)
        time_steps = flatted_inputs[0].shape[0]
        batch = flatted_inputs[0].shape[1]
        for input_ in flatted_inputs:
            input_.shape.with_rank_at_least(3)
        if (mask is not None):
            if (mask.dtype != dtypes_module.bool):
                mask = math_ops.cast(mask, dtypes_module.bool)
            if (len(mask.shape) == 2):
                mask = array_ops.expand_dims(mask, axis=(- 1))
            if (not self.time_major):
                mask = swap_batch_timestep(mask)
        if (cell_mask is not None):
            if (cell_mask.dtype != dtypes_module.bool):
                cell_mask = math_ops.cast(cell_mask, dtypes_module.bool)

        def _expand_mask(mask_t, input_t, fixed_dim=1):
            # Broadcast-tile a mask up to the rank/shape of input_t.
            assert (not nest.is_sequence(mask_t))
            assert (not nest.is_sequence(input_t))
            rank_diff = (len(input_t.shape) - len(mask_t.shape))
            for _ in range(rank_diff):
                mask_t = array_ops.expand_dims(mask_t, (- 1))
            multiples = (([1] * fixed_dim) + input_t.shape.as_list()[fixed_dim:])
            return array_ops.tile(mask_t, multiples)

        if self.unroll:
            if (not time_steps):
                raise ValueError('Unrolling requires a fixed number of timesteps.')
            states = tuple(initial_states)
            successive_states = []
            successive_outputs = []

            def _process_single_input_t(input_t):
                # Unstack along time; reverse for backward runs.
                input_t = array_ops.unstack(input_t)
                if self.go_backwards:
                    input_t.reverse()
                return input_t

            if nest.is_sequence(inputs):
                processed_input = nest.map_structure(_process_single_input_t, inputs)
            else:
                processed_input = (_process_single_input_t(inputs),)

            def _get_input_tensor(time):
                inp = [t_[time] for t_ in processed_input]
                return nest.pack_sequence_as(inputs, inp)

            if (mask is not None):
                mask_list = array_ops.unstack(mask)
                if self.go_backwards:
                    mask = tf.reverse(mask, [0])
                    mask_list.reverse()
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                # Dependencies for the *next* step's extra recurrent slots.
                if (i < (time_steps - 1)):
                    dep_t = dependencies[(i + 1)]
                edge_types_t = edge_types[i]
                cell_mask_t = cell_mask[i]
                (output, new_states) = step(inp, tuple(states), edge_types_t, cell_mask_t, training)
                # Masked positions carry the previous output/state forward.
                tiled_mask_t = _expand_mask(mask_t, output)
                if (not successive_outputs):
                    pre_output = array_ops.zeros_like(output)
                else:
                    pre_output = successive_outputs[(- 1)]
                output = array_ops.where(tiled_mask_t, output, pre_output)
                if (not successive_states):
                    pre_states = array_ops.zeros_like(new_states)
                else:
                    pre_states = successive_states[(- 1)]
                return_states = []
                for (state, new_state) in zip(pre_states, new_states):
                    tiled_mask_t = _expand_mask(mask_t, new_state)
                    return_states.append(array_ops.where(tiled_mask_t, new_state, state))
                successive_outputs.append(output)
                successive_states.append(return_states)
                if (i < (time_steps - 1)):
                    # Assemble next-step states: slot 0 is sequential; the
                    # remaining recurrent_size-1 slots gather earlier states
                    # by index from the dependency table ('$' = zero state).
                    states = []
                    states.append(return_states[0])
                    for k in range((self.recurrent_size - 1)):
                        stack_t = []
                        for t in range(batch):
                            dep = dep_t[(t, k)]
                            if (dep.numpy().decode() == '$'):
                                dep_state = array_ops.zeros([self.units])
                            elif self.go_backwards:
                                # Backward runs offset indices by padding.
                                dep_state = successive_states[(int(dep.numpy().decode()) + (timesteps - input_lengths[t]))][0][t]
                            else:
                                dep_state = successive_states[int(dep.numpy().decode())][0][t]
                            stack_t.append(dep_state)
                        stack_t = tf.stack(stack_t, axis=0)
                        states.append(stack_t)
            last_output = successive_outputs[(- 1)]
            new_states = successive_states[(- 1)]
            outputs = array_ops.stack(successive_outputs)
            if self.zero_output_for_mask:
                # Zero out padded positions instead of carrying values.
                last_output = array_ops.where(_expand_mask(mask_list[(- 1)], last_output), last_output, array_ops.zeros_like(last_output))
                outputs = array_ops.where(_expand_mask(mask, outputs, fixed_dim=2), outputs, array_ops.zeros_like(outputs))

            def set_shape(output_):
                # Restore static time/batch dims lost by stacking.
                if isinstance(output_, ops.Tensor):
                    shape = output_.shape.as_list()
                    shape[0] = time_steps
                    shape[1] = batch
                    output_.set_shape(shape)
                return output_

            outputs = nest.map_structure(set_shape, outputs)
            if (not self.time_major):
                outputs = nest.map_structure(swap_batch_timestep, outputs)
            if self.return_sequences:
                output = outputs
            else:
                output = last_output
            if self.return_state:
                if (not isinstance(new_states, (list, tuple))):
                    states = [new_states]
                else:
                    states = list(new_states)
                return (generic_utils.to_list(output) + states)
            else:
                return output
class MalformedSpeciesError(MalformedError):
    """Raised when a species string cannot be parsed into element + ion stage."""

    def __init__(self, malformed_element_symbol):
        # Keep the offending input around so callers can inspect/report it.
        self.malformed_element_symbol = malformed_element_symbol

    def __str__(self):
        return f'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") - supplied {self.malformed_element_symbol}'
class BankManagerSearchTransactions(VirtualFunctionTool):
    """Virtual tool spec: search a bank account's transactions.

    Purely declarative -- the name/summary/parameters/returns/exceptions
    class attributes drive the virtual-tool framework; there is no
    execution logic in this class itself.
    """

    name = 'BankManagerSearchTransactions'
    summary = 'Search transactions by keyword or amount range. If certain arguments are not provided, the corresponding filters are not applied.'
    # Only account_number is required; keyword / min_amount / max_amount are
    # optional filters that are skipped when absent.
    parameters: List[ArgParameter] = [{'name': 'account_number', 'type': 'string', 'description': "The account number in the format 'XXX-XXXX-XXXX'.", 'required': True}, {'name': 'keyword', 'type': 'string', 'description': 'The keyword to search for in the transaction description.', 'required': False}, {'name': 'min_amount', 'type': 'number', 'description': 'The minimum transaction amount to search for.', 'required': False}, {'name': 'max_amount', 'type': 'number', 'description': 'The maximum transaction amount to search for.', 'required': False}]
    returns: List[ArgReturn] = [{'name': 'transactions', 'type': 'array', 'description': 'A list of dictionaries containing the date, description, amount, and balance of each matching transaction.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'account_number' parameter is not in the correct format, or the 'min_amount' and 'max_amount' parameters are not valid."}]
def check_test_type(test_type: str, target: str) -> None:
    """Validate that ``test_type`` is one of the supported test types.

    Args:
        test_type: The test type string to validate (C++ or Python).
        target: The buck target the type came from, used in the error message.

    Raises:
        ValueError: If ``test_type`` is not a recognized type.  (Previously a
            bare ``Exception`` was raised with two separate message arguments,
            which printed as a tuple; ValueError is a subclass of Exception,
            so existing handlers still catch it.)
    """
    if test_type in (TestType.CPP.value, TestType.PY.value):
        return
    raise ValueError(
        f"Can't parse test type: {test_type}. "
        f"Please check the type of buck target: {target}"
    )
def T2starGeneralizedQuadrangleGraph(q, dual=False, hyperoval=None, field=None, check_hyperoval=True):
    """Return the collinearity graph of the generalized quadrangle T2*(O).

    Works in PG(3, q) with ``q`` a power of 2.  ``Pi`` is the hyperplane of
    points with first coordinate 0; ``O`` is a hyperoval (a set of q + 2
    points of ``Pi`` meeting every line of ``Pi`` in 0 or 2 points).  The
    graph is built from the lines of PG(3, q) that meet ``O`` in exactly one
    point, with the hyperoval point removed from each.

    INPUT:

    - ``q`` -- a power of 2 (otherwise ``ValueError``)
    - ``dual`` -- if True, return the intersection graph of those truncated
      lines directly, a GQ(q+1, q-1); otherwise the intersection graph of the
      dual incidence structure, a GQ(q-1, q+1)
    - ``hyperoval`` -- an explicit hyperoval to use instead of the default
      (a conic plus its nucleus in coordinates 1..3)
    - ``field`` -- a finite field of order ``q`` (built from ``q`` if None)
    - ``check_hyperoval`` -- verify the supplied/default hyperoval
    """
    from sage.combinat.designs.incidence_structures import IncidenceStructure
    from sage.combinat.designs.block_design import ProjectiveGeometryDesign as PG
    (p, k) = is_prime_power(q, get_data=True)
    if ((not k) or (p != 2)):
        raise ValueError('q must be a power of 2')
    if (field is None):
        F = FiniteField(q, 'a')
    else:
        F = field
    # Lines (1-dimensional subspaces) of PG(3, q), with point coordinates.
    Theta = PG(3, 1, F, point_coordinates=1)
    # The hyperplane x0 = 0.
    Pi = set((x for x in Theta.ground_set() if (x[0] == F.zero())))
    if (hyperoval is None):
        # Default hyperoval: the conic x1 + x2*x3 = 0 plus its nucleus (1,0,0)
        # (in the coordinates of Pi).
        HO = set((x for x in Pi if (((x[1] + (x[2] * x[3])) == 0) or ((x[1] == 1) and (x[2] == x[3] == 0)))))
    else:
        for v in hyperoval:
            v.set_immutable()
        HO = set(hyperoval)
    if check_hyperoval:
        # A hyperoval has q + 2 points and meets every line of Pi in 0 or 2.
        if (len(HO) != (q + 2)):
            raise RuntimeError('incorrect hyperoval size')
        for L in Theta.blocks():
            if set(L).issubset(Pi):
                if (not (len(HO.intersection(L)) in [0, 2])):
                    raise RuntimeError('incorrect hyperoval')
    # Lines meeting the hyperoval in exactly one point, hyperoval point removed.
    L = [[y for y in z if (y not in HO)] for z in [x for x in Theta.blocks() if (len(HO.intersection(x)) == 1)]]
    if dual:
        G = IncidenceStructure(L).intersection_graph()
        G.name(((('T2*(O,' + str(q)) + ')*; GQ') + str(((q + 1), (q - 1)))))
    else:
        G = IncidenceStructure(L).dual().intersection_graph()
        G.name(((('T2*(O,' + str(q)) + '); GQ') + str(((q - 1), (q + 1)))))
    return G
_utils.test()
# NOTE(review): the line above looks like the tail of a truncated
# ``@test_utils.test()`` decorator -- confirm against the original file.
def test_dictcomp_fail():
    """Dict comprehensions with multiple ``if`` clauses are expected to raise
    TaichiCompilationError, both for a key that would exist (m=3) and one
    that would not (m=2)."""
    def foo(n: ti.template(), m: ti.template()) -> ti.i32:
        # Two chained filter clauses -- the unsupported construct under test.
        a = {i: (i * i) for i in range(n) if (i % 3) if (i % 2)}
        return a[m]
    with pytest.raises(ti.TaichiCompilationError):
        foo(5, 2)
    with pytest.raises(ti.TaichiCompilationError):
        foo(5, 3)
def get_info_about_data_len_distribution(filepaths):
    """Read tab-separated metadata files and collect length statistics.

    Each file's first line is a header containing (among others) the fields
    ``num_sentences`` and ``max_num_tokens_in_sentence``; every following
    non-blank line is one record.  Returns the list of ``num_sentences``
    values across all files (the max-token values are gathered but only the
    sentence counts are returned).
    """

    def _column_of(header, field_name):
        # Index of ``field_name`` among the tab-separated header fields;
        # the field must be followed by a tab or the trailing newline.
        remainder = header
        column = 0
        while not (remainder.startswith(field_name + '\t')
                   or remainder.startswith(field_name + '\n')):
            remainder = remainder[remainder.index('\t') + 1:]
            column += 1
        return column

    pairs = []
    for filepath in filepaths:
        with open(filepath, 'r') as f:
            for line_number, line in enumerate(f):
                if line_number == 0:
                    # Header: locate both columns once per file.
                    sents_col = _column_of(line, 'num_sentences')
                    tokens_col = _column_of(line, 'max_num_tokens_in_sentence')
                    continue
                if line.strip() == '':
                    continue
                pairs.append((int(get_nth_field_in_line(line, sents_col)),
                              int(get_nth_field_in_line(line, tokens_col))))
    return [num_sents for (num_sents, _) in pairs]
class BatchNormBenchmark(op_bench.TorchBenchmarkBase):
    """Micro-benchmark for ``F.batch_norm`` over a random (M, N, K) input."""

    def init(self, M, N, K, device):
        # The input takes part in autograd only when auto_set() enables it.
        self.input_one = torch.rand(
            M, N, K, device=device, requires_grad=self.auto_set()
        )
        # Per-channel (size-N) running statistics and affine parameters.
        self.mean = torch.rand(N, device=device)
        self.var = torch.rand(N, device=device)
        self.weight = torch.rand(N, device=device)
        self.bias = torch.rand(N, device=device)
        self.set_module_name('batchnorm')

    def forward(self):
        return F.batch_norm(
            self.input_one, self.mean, self.var, self.weight, self.bias
        )
('(float32[:], float32[:], float32[:])', device=True, inline=True)
# NOTE(review): the line above appears to be the tail of a truncated
# ``@cuda.jit(...)`` device-function decorator -- confirm against the original.
def trangle_area(a, b, c):
    """Signed area of the triangle (a, b, c): half the 2-D cross product of
    (a - c) and (b - c).  Positive for counter-clockwise vertex order.
    (The misspelled name 'trangle_area' is kept -- it is the public interface.)
    """
    return ((((a[0] - c[0]) * (b[1] - c[1])) - ((a[1] - c[1]) * (b[0] - c[0]))) / 2.0)
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that prefetches requested items into a single
    contiguous in-memory buffer.

    ``prefetch(indices)`` must be called before ``__getitem__`` for any index:
    item lookup goes through ``self.cache_index`` and will KeyError otherwise.
    """

    def __init__(self, path, fix_lua_indexing=False):
        # Third positional arg presumably tells the parent to keep the data
        # file handle open for later reads -- TODO confirm against IndexedDataset.
        super().__init__(path, fix_lua_indexing, True)
        self.cache = None
        # Maps dataset index -> offset (in elements) of that item in self.cache.
        self.cache_index = {}

    def supports_prefetch(self):
        return True

    def prefetch(self, indices):
        """Load all ``indices`` into one flat buffer. NOTE: sorts ``indices``
        in place, so the caller's list is mutated."""
        if all(((i in self.cache_index) for i in indices)):
            # Everything already cached -- nothing to do.
            return
        indices.sort()
        # Total element count across all requested items.
        total_size = 0
        for i in indices:
            total_size += (self.data_offsets[(i + 1)] - self.data_offsets[i])
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        # Previous cache contents are discarded wholesale.
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = (self.data_offsets[(i + 1)] - self.data_offsets[i])
            # Read directly into the cache slice (offsets are in elements,
            # the file seek is in bytes).
            a = self.cache[ptx:(ptx + size)]
            self.data_file.seek((self.data_offsets[i] * self.element_size))
            self.data_file.readinto(a)
            ptx += size

    def __getitem__(self, i):
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # Copy out of the flat cache; requires a prior prefetch of index i.
        ptx = self.cache_index[i]
        np.copyto(a, self.cache[ptx:(ptx + a.size)])
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            # Convert Lua's 1-based indices to 0-based.
            item -= 1
        return item
def parse_poolingfunction(poolingfunction_name='mean', **kwargs):
    """Factory for temporal pooling layers (pooling over dim 1).

    Args:
        poolingfunction_name: One of 'mean', 'max', 'linear', 'soft', 'auto',
            'attention'.
        **kwargs: 'auto' requires ``outputdim``; 'attention' requires
            ``inputdim`` and ``outputdim``.

    Returns:
        The corresponding pooling module.

    Raises:
        ValueError: For an unknown name.  (Previously the function silently
            returned ``None``, which only failed much later at call time.)
        KeyError: If a required kwarg for 'auto'/'attention' is missing.
    """
    if poolingfunction_name == 'mean':
        return MeanPool(pooldim=1)
    if poolingfunction_name == 'max':
        return MaxPool(pooldim=1)
    if poolingfunction_name == 'linear':
        return LinearSoftPool(pooldim=1)
    if poolingfunction_name == 'soft':
        return SoftPool(pooldim=1)
    if poolingfunction_name == 'auto':
        return AutoPool(outputdim=kwargs['outputdim'])
    if poolingfunction_name == 'attention':
        return AttentionPool(inputdim=kwargs['inputdim'],
                             outputdim=kwargs['outputdim'])
    raise ValueError(f'Unknown pooling function: {poolingfunction_name}')
class SimpleShardSource(ShardedDataset[List[int]]):
    """Toy shard source: shard ``shard_i`` yields rows ``[i*10 + r] * 10``
    for r in [row, 10)."""

    def __init__(self, num_shards: int = 4):
        self._num_shards = num_shards

    def shard_names(self) -> Sequence[str]:
        # One synthetic name per shard: shard_0 .. shard_{n-1}.
        return [f'shard_{index}' for index in range(self._num_shards)]

    def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[List[int]]:
        # The numeric shard id is encoded after the underscore in the name.
        shard_id = int(shard_name.split('_')[1])
        for offset in range(row, 10):
            yield [shard_id * 10 + offset] * 10
class BouncingBallExample(nn.Module):
    """Differentiable bouncing-ball simulation built on torchdiffeq.

    State is (position, velocity, log_radius); bounces are detected with
    ``odeint_event`` (position crossing the ball radius) and handled by an
    instantaneous velocity reversal with absorption.  All physical quantities
    are nn.Parameters so they can be fit by gradient descent.
    """

    def __init__(self, radius=0.2, gravity=9.8, adjoint=False):
        super().__init__()
        self.gravity = nn.Parameter(torch.as_tensor([gravity]))
        # Radius is stored in log-space to keep it positive under optimization.
        self.log_radius = nn.Parameter(torch.log(torch.as_tensor([radius])))
        self.t0 = nn.Parameter(torch.tensor([0.0]))
        self.init_pos = nn.Parameter(torch.tensor([10.0]))
        self.init_vel = nn.Parameter(torch.tensor([0.0]))
        # Fraction of velocity lost at each bounce.
        self.absorption = nn.Parameter(torch.tensor([0.2]))
        self.odeint = (odeint_adjoint if adjoint else odeint)

    def forward(self, t, state):
        """ODE right-hand side: free fall under constant gravity."""
        (pos, vel, log_radius) = state
        dpos = vel
        dvel = (- self.gravity)
        # The radius does not evolve between events.
        return (dpos, dvel, torch.zeros_like(log_radius))

    def event_fn(self, t, state):
        """Event function: zero when the ball's center is one radius above ground."""
        (pos, _, log_radius) = state
        return (pos - torch.exp(log_radius))

    def get_initial_state(self):
        state = (self.init_pos, self.init_vel, self.log_radius)
        return (self.t0, state)

    def state_update(self, state):
        """Instantaneous bounce: reverse the velocity with absorption.

        Position is nudged up by 1e-07 so the event function does not
        immediately re-trigger at the contact point.
        """
        (pos, vel, log_radius) = state
        pos = (pos + 1e-07)
        vel = ((- vel) * (1 - self.absorption))
        return (pos, vel, log_radius)

    def get_collision_times(self, nbounces=1):
        """Integrate event-to-event and return the times of the first
        ``nbounces`` ground contacts."""
        event_times = []
        (t0, state) = self.get_initial_state()
        for i in range(nbounces):
            (event_t, solution) = odeint_event(self, state, t0, event_fn=self.event_fn, reverse_time=False, atol=1e-08, rtol=1e-08, odeint_interface=self.odeint)
            event_times.append(event_t)
            # Restart from the post-bounce state at the event time.
            state = self.state_update(tuple((s[(- 1)] for s in solution)))
            t0 = event_t
        return event_times

    def simulate(self, nbounces=1):
        """Simulate ``nbounces`` bounces on a dense time grid.

        Returns (times, positions, velocities, event_times), where the dense
        grid uses roughly 50 samples per unit time between events.
        """
        event_times = self.get_collision_times(nbounces)
        (t0, state) = self.get_initial_state()
        trajectory = [state[0][None]]
        velocity = [state[1][None]]
        times = [t0.reshape((- 1))]
        for event_t in event_times:
            # Interior grid points between t0 and the event (endpoints added
            # explicitly below so they are exact).
            tt = torch.linspace(float(t0), float(event_t), int(((float(event_t) - float(t0)) * 50)))[1:(- 1)]
            tt = torch.cat([t0.reshape((- 1)), tt, event_t.reshape((- 1))])
            solution = odeint(self, state, tt, atol=1e-08, rtol=1e-08)
            # Drop the first sample of each segment: it duplicates the
            # previous segment's endpoint.
            trajectory.append(solution[0][1:])
            velocity.append(solution[1][1:])
            times.append(tt[1:])
            state = self.state_update(tuple((s[(- 1)] for s in solution)))
            t0 = event_t
        return (torch.cat(times), torch.cat(trajectory, dim=0).reshape((- 1)), torch.cat(velocity, dim=0).reshape((- 1)), event_times)
class Cabasc(nn.Module):
    """Content Attention Based Aspect-based Sentiment Classification model.

    ``_type`` selects the memory weighting scheme: 'c' uses position-based
    weights (``locationed_memory``); 'cabasc' uses the GRU-based left/right
    context attention (``context_attention``).
    """

    def __init__(self, embedding_matrix, opt, _type='c'):
        super(Cabasc, self).__init__()
        self.opt = opt
        self.type = _type
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
        # Scores [memory_chunk; v_a; v_s] (3 * embed_dim) down to one logit.
        self.linear1 = nn.Linear((3 * opt.embed_dim), opt.embed_dim)
        self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
        self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
        self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
        # Left/right context GRUs for the 'cabasc' attention variant.
        self.rnn_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, rnn_type='GRU')
        self.rnn_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, rnn_type='GRU')
        self.mlp_l = nn.Linear(opt.hidden_dim, 1)
        self.mlp_r = nn.Linear(opt.hidden_dim, 1)

    def context_attention(self, x_l, x_r, memory, memory_len, aspect_len):
        """Reweight ``memory`` in place using per-token attention computed by
        left- and right-context GRUs (attention values lie in [0.5, 1.5];
        aspect tokens get the average of the two sides)."""
        (left_len, right_len) = (torch.sum((x_l != 0), dim=(- 1)), torch.sum((x_r != 0), dim=(- 1)))
        (x_l, x_r) = (self.embed(x_l), self.embed(x_r))
        (context_l, (_, _)) = self.rnn_l(x_l, left_len)
        (context_r, (_, _)) = self.rnn_r(x_r, right_len)
        # sigmoid + 0.5 keeps each weight in (0.5, 1.5).
        attn_l = (torch.sigmoid(self.mlp_l(context_l)) + 0.5)
        attn_r = (torch.sigmoid(self.mlp_r(context_r)) + 0.5)
        for i in range(memory.size(0)):
            # The aspect occupies [aspect_start, aspect_end) within the left span.
            aspect_start = (left_len[i] - aspect_len[i]).item()
            aspect_end = left_len[i]
            for idx in range(memory_len[i]):
                if (idx < aspect_start):
                    memory[i][idx] *= attn_l[i][idx]
                elif (idx < aspect_end):
                    memory[i][idx] *= ((attn_l[i][idx] + attn_r[i][(idx - aspect_start)]) / 2)
                else:
                    memory[i][idx] *= attn_r[i][(idx - aspect_start)]
        return memory

    def locationed_memory(self, memory, memory_len):
        """Linearly down-weight tokens by their position: weight 1 at the
        start of the sentence, approaching 0 at the end."""
        for i in range(memory.size(0)):
            for idx in range(memory_len[i]):
                memory[i][idx] *= (1 - (float(idx) / int(memory_len[i])))
        return memory

    def forward(self, inputs):
        # inputs: raw text ids, aspect ids, left context ids, right context ids.
        (text_raw_indices, aspect_indices, x_l, x_r) = (inputs[0], inputs[1], inputs[2], inputs[3])
        memory_len = torch.sum((text_raw_indices != 0), dim=(- 1))
        aspect_len = torch.sum((aspect_indices != 0), dim=(- 1))
        # v_a: mean-pooled aspect embedding, shape (batch, 1, embed_dim).
        nonzeros_aspect = aspect_len.float()
        aspect = self.embed(aspect_indices)
        aspect = torch.sum(aspect, dim=1)
        v_a = torch.div(aspect, nonzeros_aspect.unsqueeze(1)).unsqueeze(1)
        # Sentence memory, truncated to the longest real length in the batch.
        memory = self.embed(text_raw_indices)
        memory = self.squeeze_embedding(memory, memory_len)
        # v_s: mean-pooled sentence embedding (recomputed after weighting below).
        nonzeros_memory = memory_len.float()
        v_s = torch.sum(memory, dim=1)
        v_s = torch.div(v_s, nonzeros_memory.unsqueeze(1)).unsqueeze(1)
        if (self.type == 'c'):
            memory = self.locationed_memory(memory, memory_len)
        elif (self.type == 'cabasc'):
            memory = self.context_attention(x_l, x_r, memory, memory_len, aspect_len)
            # Recompute the sentence vector from the reweighted memory.
            v_s = torch.sum(memory, dim=1)
            v_s = torch.div(v_s, nonzeros_memory.unsqueeze(1))
            v_s = v_s.unsqueeze(dim=1)
        '\n        # no multi-hop, but may be better. \n        # however, here is something totally different from what paper depits\n        for _ in range(self.opt.hops):  \n            #x = self.x_linear(x)\n            v_ts, _ = self.attention(memory, v_a)\n        '
        # Score each memory position against [token; aspect; sentence].
        memory_chunks = memory.chunk(memory.size(1), dim=1)
        c = []
        for memory_chunk in memory_chunks:
            c_i = self.linear1(torch.cat([memory_chunk, v_a, v_s], dim=1).view(memory_chunk.size(0), (- 1)))
            c_i = self.linear2(torch.tanh(c_i))
            c.append(c_i)
        alpha = F.softmax(torch.cat(c, dim=1), dim=1)
        # Attention-weighted sum of the memory.
        v_ts = torch.matmul(memory.transpose(1, 2), alpha.unsqueeze((- 1))).transpose(1, 2)
        # Residual connection with the sentence vector, then classify.
        v_ns = (v_ts + v_s)
        v_ns = v_ns.view(v_ns.size(0), (- 1))
        v_ms = torch.tanh(self.mlp(v_ns))
        out = self.dense(v_ms)
        return out
def get_option_setter(dataset_name):
    """Return the ``modify_commandline_options`` hook of the dataset class
    registered under ``dataset_name``."""
    return find_dataset_using_name(dataset_name).modify_commandline_options
class MHAtt(nn.Module):
    """Multi-head scaled dot-product attention (MCAN-style).

    ``__C`` supplies HIDDEN_SIZE, MULTI_HEAD and DROPOUT_R.  ``forward``
    projects v/k/q, splits them into heads, attends, merges the heads back
    and applies the output projection.
    """

    def __init__(self, __C):
        super(MHAtt, self).__init__()
        self.__C = __C
        # Separate projections for values, keys and queries + output merge.
        self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
        self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
        self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
        self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
        self.dropout = nn.Dropout(__C.DROPOUT_R)

    def forward(self, v, k, q, mask):
        n_batches = q.size(0)
        # Reshape to (batch, heads, seq, head_dim).
        v = self.linear_v(v).view(n_batches, -1, self.__C.MULTI_HEAD,
                                  int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)).transpose(1, 2)
        k = self.linear_k(k).view(n_batches, -1, self.__C.MULTI_HEAD,
                                  int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)).transpose(1, 2)
        q = self.linear_q(q).view(n_batches, -1, self.__C.MULTI_HEAD,
                                  int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)).transpose(1, 2)
        atted = self.att(v, k, q, mask)
        # Merge heads back to (batch, seq, hidden).
        atted = atted.transpose(1, 2).contiguous().view(n_batches, -1, self.__C.HIDDEN_SIZE)
        atted = self.linear_merge(atted)
        return atted

    def att(self, value, key, query, mask):
        """Scaled dot-product attention; ``mask`` marks positions to exclude."""
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # BUG FIX: this used masked_fill(mask, -0.0), which leaves masked
            # positions with score 0 and therefore nonzero softmax weight.
            # A large negative fill drives their attention weight to ~0.
            scores = scores.masked_fill(mask, -1e9)
        att_map = F.softmax(scores, dim=-1)
        att_map = self.dropout(att_map)
        return torch.matmul(att_map, value)
def generate_resnext101(num_classes, frame_size, num_frames, cuda):
    """Build a ResNeXt-101 video model; move it to the GPU when ``cuda`` is set."""
    net = resnext.resnet101(
        num_classes=num_classes,
        shortcut_type='B',
        cardinality=8,
        frame_size=frame_size,
        frame_duration=num_frames,
        last_fc=None,
    )
    return net.cuda() if cuda else net
def master_params_to_state_dict(model, param_groups_and_shapes, master_params, use_fp16):
    """Copy optimizer master parameters back into a model state dict.

    With ``use_fp16`` each element of ``master_params`` is one flat fp32
    tensor per param group, which is unflattened back into per-parameter
    tensors; otherwise ``master_params`` simply follows the order of
    ``model.named_parameters()``.  Returns the updated state dict.
    """
    if use_fp16:
        state_dict = model.state_dict()
        for (master_param, (param_group, _)) in zip(master_params, param_groups_and_shapes):
            # Split the flat master tensor into tensors matching the group's
            # (name, shape) entries, then overwrite the state dict values.
            for ((name, _), unflat_master_param) in zip(param_group, unflatten_master_params(param_group, master_param.view((- 1)))):
                assert (name in state_dict)
                state_dict[name] = unflat_master_param
    else:
        state_dict = model.state_dict()
        # Without fp16 the master params line up 1:1 with named_parameters().
        for (i, (name, _value)) in enumerate(model.named_parameters()):
            assert (name in state_dict)
            state_dict[name] = master_params[i]
    return state_dict
_function_dispatch(_ravel_dispatcher)
# NOTE(review): the line above appears to be the tail of a truncated
# ``@array_function_dispatch(_ravel_dispatcher)`` decorator.
def ravel(a, order='C'):
    """Return a contiguous flattened 1-D array (see numpy.ravel).

    ``order`` selects the read order ('C', 'F', 'A', or 'K').
    """
    if isinstance(a, np.matrix):
        # np.matrix.ravel would stay 2-D; go through asarray to get a true 1-D result.
        return asarray(a).ravel(order=order)
    else:
        # Subclasses other than matrix are preserved.
        return asanyarray(a).ravel(order=order)
def linearizeFeatures(features, h, w, inDim, projDim=None, outDim=None, loc=None, pooling=None):
    """Flatten a (batch, h, w, inDim) feature map into (batch, dim) vectors.

    Optional steps, in order: location augmentation (``loc``), a projection
    to ``projDim`` with ReLU, spatial max-pooling by ``pooling`` (defaults to
    config.imageLinPool), flattening, and a final projection to ``outDim``.

    Returns:
        (features, dim): the flattened tensor and its final dimension.
    """
    if pooling is None:
        pooling = config.imageLinPool
    if loc is not None:
        # Location augmentation keeps the channel dimension at inDim.
        features = addLocation(features, inDim, lDim=inDim, outDim=inDim,
                               locType=loc['locType'], mod=loc['mod'])
    # BUG FIX: ``dim`` was read before assignment -- a NameError when
    # projDim is None, and an undefined value inside the projection branch.
    # Track the current channel dimension explicitly from inDim onward.
    dim = inDim
    if projDim is not None:
        features = linear(features, dim, projDim)
        features = relu(features)
        dim = projDim
    if pooling > 1:
        poolingDims = [1, pooling, pooling, 1]
        features = tf.nn.max_pool(features, ksize=poolingDims,
                                  strides=poolingDims, padding='SAME')
        # NOTE(review): true division makes h, w floats in Python 3; with
        # 'SAME' padding the pooled size is ceil(h / pooling) -- confirm the
        # callers always pass dimensions divisible by ``pooling``.
        h /= pooling
        w /= pooling
    dim = (h * w) * dim
    features = tf.reshape(features, (-1, dim))
    if outDim is not None:
        features = linear(features, dim, outDim)
        dim = outDim
    return (features, dim)
def changeGWContagion_LISTCOMP(alpha, G, A, i):
    """Change statistic for geometrically-weighted contagion when node ``i``
    is activated: its own term plus the marginal change for each already
    active neighbour ``j`` (whose active-neighbour count rises by one)."""

    def _active_degree(node):
        # Number of neighbours of ``node`` currently in state 1.
        return sum(1 for u in G.neighbourIterator(node) if A[u] == 1)

    delta = math.exp(-alpha * _active_degree(i))
    for j in G.neighbourIterator(i):
        if A[j] == 1:
            djplus = _active_degree(j)
            delta += math.exp(-alpha * (djplus + 1)) - math.exp(-alpha * djplus)
    return delta
class TIntStrH(object):
    """SWIG-generated Python proxy for Snap's hash table with TInt keys and
    TStr values (presumably THash<TInt, TStr> -- confirm against snap.i).

    Every method is a thin delegate to the corresponding ``_snap`` C wrapper;
    no logic lives on the Python side.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    HashPrimes = _snap.TIntStrH_HashPrimes

    def __init__(self, *args):
        _snap.TIntStrH_swiginit(self, _snap.new_TIntStrH(*args))

    # --- (de)serialization ---
    def Load(self, SIn): return _snap.TIntStrH_Load(self, SIn)
    def Save(self, SOut): return _snap.TIntStrH_Save(self, SOut)

    # --- comparison / lookup operators ---
    def __eq__(self, Hash): return _snap.TIntStrH___eq__(self, Hash)
    def __lt__(self, Hash): return _snap.TIntStrH___lt__(self, Hash)
    def __call__(self, Key): return _snap.TIntStrH___call__(self, Key)

    def GetMemUsed(self): return _snap.TIntStrH_GetMemUsed(self)

    # --- iterators ---
    def BegI(self): return _snap.TIntStrH_BegI(self)
    def EndI(self): return _snap.TIntStrH_EndI(self)
    def GetI(self, Key): return _snap.TIntStrH_GetI(self, Key)

    # --- capacity / bookkeeping ---
    def Gen(self, ExpectVals): return _snap.TIntStrH_Gen(self, ExpectVals)
    def Clr(self, DoDel=True, NoDelLim=(- 1), ResetDat=True): return _snap.TIntStrH_Clr(self, DoDel, NoDelLim, ResetDat)
    def Empty(self): return _snap.TIntStrH_Empty(self)
    def Len(self): return _snap.TIntStrH_Len(self)
    def GetPorts(self): return _snap.TIntStrH_GetPorts(self)
    def IsAutoSize(self): return _snap.TIntStrH_IsAutoSize(self)
    def GetMxKeyIds(self): return _snap.TIntStrH_GetMxKeyIds(self)
    def GetReservedKeyIds(self): return _snap.TIntStrH_GetReservedKeyIds(self)
    def IsKeyIdEqKeyN(self): return _snap.TIntStrH_IsKeyIdEqKeyN(self)

    # --- insertion / deletion ---
    def AddKey(self, Key): return _snap.TIntStrH_AddKey(self, Key)
    def AddDat(self, *args): return _snap.TIntStrH_AddDat(self, *args)
    def DelKey(self, Key): return _snap.TIntStrH_DelKey(self, Key)
    def DelIfKey(self, Key): return _snap.TIntStrH_DelIfKey(self, Key)
    def DelKeyId(self, KeyId): return _snap.TIntStrH_DelKeyId(self, KeyId)
    def DelKeyIdV(self, KeyIdV): return _snap.TIntStrH_DelKeyIdV(self, KeyIdV)

    # --- key/value access ---
    def GetKey(self, KeyId): return _snap.TIntStrH_GetKey(self, KeyId)
    def GetKeyId(self, Key): return _snap.TIntStrH_GetKeyId(self, Key)
    def GetRndKeyId(self, *args): return _snap.TIntStrH_GetRndKeyId(self, *args)
    def IsKey(self, *args): return _snap.TIntStrH_IsKey(self, *args)
    def IsKeyId(self, KeyId): return _snap.TIntStrH_IsKeyId(self, KeyId)
    def GetDat(self, *args): return _snap.TIntStrH_GetDat(self, *args)
    def GetDatWithDefault(self, Key, DefaultValue): return _snap.TIntStrH_GetDatWithDefault(self, Key, DefaultValue)
    def GetKeyDat(self, KeyId, Key, Dat): return _snap.TIntStrH_GetKeyDat(self, KeyId, Key, Dat)
    def IsKeyGetDat(self, Key, Dat): return _snap.TIntStrH_IsKeyGetDat(self, Key, Dat)

    # --- key-id traversal (FFirstKeyId/FNextKeyId iteration idiom) ---
    def FFirstKeyId(self): return _snap.TIntStrH_FFirstKeyId(self)
    def FNextKeyId(self, KeyId): return _snap.TIntStrH_FNextKeyId(self, KeyId)

    # --- bulk extraction into vectors ---
    def GetKeyV(self, KeyV): return _snap.TIntStrH_GetKeyV(self, KeyV)
    def GetDatV(self, DatV): return _snap.TIntStrH_GetDatV(self, DatV)
    def GetKeyDatPrV(self, KeyDatPrV): return _snap.TIntStrH_GetKeyDatPrV(self, KeyDatPrV)
    def GetDatKeyPrV(self, DatKeyPrV): return _snap.TIntStrH_GetDatKeyPrV(self, DatKeyPrV)
    def GetKeyDatKdV(self, KeyDatKdV): return _snap.TIntStrH_GetKeyDatKdV(self, KeyDatKdV)
    def GetDatKeyKdV(self, DatKeyKdV): return _snap.TIntStrH_GetDatKeyKdV(self, DatKeyKdV)

    # --- maintenance / ordering ---
    def Swap(self, Hash): return _snap.TIntStrH_Swap(self, Hash)
    def Defrag(self): return _snap.TIntStrH_Defrag(self)
    def Pack(self): return _snap.TIntStrH_Pack(self)
    def Sort(self, CmpKey, Asc): return _snap.TIntStrH_Sort(self, CmpKey, Asc)
    def SortByKey(self, Asc=True): return _snap.TIntStrH_SortByKey(self, Asc)
    def SortByDat(self, Asc=True): return _snap.TIntStrH_SortByDat(self, Asc)
    __swig_destroy__ = _snap.delete_TIntStrH
def test_workspace_observations(workspace_factory):
    """A freshly built workspace must expose non-empty observations."""
    workspace = workspace_factory()
    assert workspace.observations
def imresize(img, size, return_scale=False, interpolation='bilinear', out=None):
    """Resize ``img`` to ``size`` (a (w, h) pair as cv2 expects).

    When ``return_scale`` is set, also return the width and height scale
    factors relative to the original image.
    """
    height, width = img.shape[:2]
    resized = cv2.resize(img, size, dst=out,
                         interpolation=interp_codes[interpolation])
    if not return_scale:
        return resized
    return resized, size[0] / width, size[1] / height
def compute_prd(eval_dist, ref_dist, num_angles=1001, epsilon=1e-10):
    """Compute a precision/recall curve between two discrete distributions.

    For ``num_angles`` slopes tan(angle) with angle in
    [epsilon, pi/2 - epsilon], precision is
    sum_i min(ref_i * slope, eval_i) and recall is precision / slope.
    Both arrays are clipped to [0, 1] before being returned.
    """
    if not (0 < epsilon < 0.1):
        raise ValueError(('epsilon must be in (0, 0.1] but is %s.' % str(epsilon)))
    if not (3 <= num_angles <= 1000000.0):
        raise ValueError(('num_angles must be in [3, 1e6] but is %d.' % num_angles))
    angles = np.linspace(epsilon, (np.pi / 2) - epsilon, num=num_angles)
    slopes = np.tan(angles)
    # Broadcast: rows index angles, columns index histogram bins.
    precision = np.minimum(ref_dist[np.newaxis, :] * slopes[:, np.newaxis],
                           eval_dist[np.newaxis, :]).sum(axis=1)
    recall = precision / slopes
    # Both quantities are probabilities; anything clearly above 1 is a bug.
    if max(np.max(precision), np.max(recall)) > 1.001:
        raise ValueError('Detected value > 1.001, this should not happen.')
    return (np.clip(precision, 0, 1), np.clip(recall, 0, 1))
def seq_hidden_averaging(seq_hidden_input, len_input):
    """Mean-pool a padded sequence of hidden states over the time axis,
    dividing by the true sequence lengths rather than the padded length."""
    # Zero out padded timesteps before summing over time.
    masked = seq_hidden_masking_before_averaging(seq_hidden_input, len_input)
    summed = tf.reduce_sum(masked, axis=1, name='sum_seq_hidden')
    # Clamp lengths to >= 1 so empty sequences do not divide by zero.
    lengths = tf.cast(tf.expand_dims(tf.maximum(len_input, 1), axis=1),
                      dtype=tf.float32, name='seq_len_mat')
    return tf.div(summed, lengths, name='seq_avg_hidden')
.parametrize('given', ('data=st.data()', 'st.data()'))
# NOTE(review): the line above looks like the tail of a truncated
# ``@pytest.mark.parametrize`` decorator.
def test_schema_given(testdir, given):
    """End-to-end check that ``schema.given(...)`` injects a Hypothesis
    DataObject into parametrized schemathesis tests, for both the keyword
    and the positional form, and that both API operations run."""
    testdir.make_test(f'''
from hypothesis.strategies._internal.core import DataObject
lazy_schema = schemathesis.from_pytest_fixture("simple_schema")
OPERATIONS = []
_schema.parametrize()
_schema.given({given})
def test_a(data, case):
    assert isinstance(data, DataObject)
    OPERATIONS.append(f"{{case.method}} {{case.path}}")

def teardown_module(module):
    assert OPERATIONS == ['GET /users', 'POST /users']
''', paths={'/users': {'get': {'responses': {'200': {'description': 'OK'}}}, 'post': {'responses': {'200': {'description': 'OK'}}}}})
    result = testdir.runpytest()
    # One generated test module, all its tests passing.
    result.assert_outcomes(passed=1)
def register_Ns3LteRrcSapNonCriticalExtensionConfiguration_methods(root_module, cls):
    """PyBindGen registration (generated-style binding code) for
    ns3::LteRrcSap::NonCriticalExtensionConfiguration: default and copy
    constructors plus the two SCell add/release list attributes."""
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteRrcSap::NonCriticalExtensionConfiguration const &', 'arg0')])
    cls.add_instance_attribute('sCellToReleaseList', 'std::list< unsigned int >', is_const=False)
    cls.add_instance_attribute('sCellsToAddModList', 'std::list< ns3::LteRrcSap::SCellToAddMod >', is_const=False)
    return
class CheckpointSaver():
    """Checkpoint writer with a (k, m, s) retention strategy.

    Keeps the ``k`` most recent epoch checkpoints, keeps every ``m``-th epoch
    checkpoint forever, and saves every ``s``-th local step within an epoch
    (``s == -1`` disables step checkpoints).  Only rank zero touches the
    filesystem; all other ranks no-op.
    """

    def __init__(self, strategy: Tuple[(int, int, int)], run_dir: str, is_rank_zero: bool=False) -> None:
        ((self.k, self.m, self.s), self.run_dir, self.is_rank_zero) = (strategy, run_dir, is_rank_zero)
        # recents: bounded deque of the newest epoch checkpoints (evicted ones
        # are deleted unless protected); intervals: every m-th epoch, kept
        # forever; step_checkpoints: intra-epoch saves, purged at epoch end.
        (self.recents, self.intervals, self.step_checkpoints) = (FixedDeck(maxlen=self.k), set(), set())
        self.enable_step = (self.s != (- 1))
        self.path = (Path(run_dir) / 'checkpoints')
        if self.is_rank_zero:
            os.makedirs(self.path, exist_ok=True)
            # Re-adopt step checkpoints left over from a resumed run so they
            # are cleaned up at the next epoch boundary.
            self.step_checkpoints.update([c for c in self.path.iterdir() if ('local-epoch=' in str(c))])
            overwatch.info(f'Created CheckpointSaver with `k = {self.k}` -- `m = {self.m}` -- s = {self.s}!')

    def save(self, epoch: int, is_local_step: bool, model: nn.Module, optimizer: Optimizer, duration: int, local_step: Optional[int]=None, train_loss: Optional[float]=None, val_loss: Optional[float]=None) -> None:
        """Persist a step checkpoint (when ``is_local_step``) or an epoch
        checkpoint, enforcing the retention policy described on the class."""
        if (not self.is_rank_zero):
            return
        if (self.enable_step and is_local_step and ((local_step % self.s) == 0)):
            # Intra-epoch save every s-th local step.
            step_checkpoint = (self.path / f'local-epoch={epoch}-step={local_step}-t={duration}.pt')
            torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, step_checkpoint)
            self.step_checkpoints.add(step_checkpoint)
        elif (not is_local_step):
            # End-of-epoch save; losses may be unknown (reported as "inf").
            if ((train_loss is None) and (val_loss is None)):
                checkpoint = (self.path / f'epoch={epoch}-train=inf-val=inf-t={duration}.pt')
            else:
                checkpoint = (self.path / f'epoch={epoch}-train={train_loss:.4f}-val={val_loss:.4f}-t={duration}.pt')
            torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, checkpoint)
            # Every m-th epoch is protected from recency-based eviction.
            if ((epoch % self.m) == 0):
                self.intervals.add(checkpoint)
            # The epoch checkpoint supersedes all step checkpoints.
            while (len(self.step_checkpoints) > 0):
                os.remove(self.step_checkpoints.pop())
            # FixedDeck.append presumably returns the evicted element (if any)
            # once the deque is full -- delete it unless interval-protected.
            to_remove = self.recents.append(checkpoint)
            if ((to_remove is not None) and (to_remove not in self.intervals)):
                os.remove(to_remove)
class Join_ApplyModel_Summarize(Join):
    """Join that fires once every per-leaf 'applymodel' job has finished and
    then enqueues the problem's 'summarize' job with the collected results."""

    def __init__(self, problem):
        Join.__init__(self)
        self.summarylevel_problem = problem
        # Block on the 'applymodel' job of every leaf subproblem.
        for tip in self.summarylevel_problem.iter_leaves():
            self.add_job(tip.jobs['applymodel'])

    def perform(self):
        print(('Process [%s]: Join_ApplyModel_Summarize joining %s' % (os.getpid(), self.summarylevel_problem)), file=sys.stderr)
        # Gather per-leaf results in leaf order and hand them to summarize.
        results = [tip.get_job_result_by_name('applymodel')
                   for tip in self.summarylevel_problem.iter_leaves()]
        self.summarylevel_problem.jobs['summarize'].resultsPerTipSubproblem = results
        JobPool().enqueue_job(self.summarylevel_problem.jobs['summarize'])
class TestIsotropicNormal(unittest.TestCase):
    """Check density.IsotropicNormal against the closed-form (unnormalized)
    log density and its gradient."""

    def setUp(self):
        pass

    def test_log_den(self):
        n = 7
        with util.NumpySeedContext(seed=16):
            for d in [3, 1]:
                variance = 1.1
                mean = np.random.randn(d)
                X = np.random.rand(n, d) + 1
                model = density.IsotropicNormal(mean, variance)
                # -||x - mu||^2 / (2 * sigma^2), up to the normalizer.
                expected = -np.sum((X - mean) ** 2, 1) / (2.0 * variance)
                np.testing.assert_almost_equal(model.log_den(X), expected)

    def test_grad_log(self):
        n = 8
        with util.NumpySeedContext(seed=17):
            for d in [4, 1]:
                variance = 1.2
                mean = np.random.randn(d) + 1
                X = np.random.rand(n, d) - 2
                model = density.IsotropicNormal(mean, variance)
                # Gradient of the log density: -(x - mu) / sigma^2.
                expected = -(X - mean) / variance
                np.testing.assert_almost_equal(model.grad_log(X), expected)

    def tearDown(self):
        pass
def changeo_Ob(attrname, G, A, i):
    """Sum of the binary attribute ``attrname`` over the neighbours of node
    ``i``; neighbours whose attribute is NA contribute 0."""
    values = G.binattr[attrname]
    return sum(values[u] for u in G.neighbourIterator(i) if values[u] != NA_VALUE)
def test_downcast():
    """Animals come back from C++ with their most-derived Python type."""
    zoo = m.create_zoo()
    assert [type(animal) for animal in zoo] == [m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther]
    assert [animal.name for animal in zoo] == ['Fido', 'Ginger', 'Hertzl', 'Tiger', 'Leo']
    # A mutation made on the Python side must be visible through bark().
    zoo[1].sound = 'woooooo'
    dogs, cats = zoo[:3], zoo[3:]
    assert [dog.bark() for dog in dogs] == ['Labrador Fido goes WOOF!', 'Dog Ginger goes woooooo', 'Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles']
    assert [cat.purr() for cat in cats] == ['mrowr', 'mrrrRRRRRR']
    zoo[0].excitement -= 1000
    assert zoo[0].excitement == 14000
class A079922(SloaneSequence):
    """OEIS A079922: solutions to the Dancing School problem with n girls
    and n + 3 boys."""

    def _repr_(self):
        # Human-readable description Sage shows for this sequence.
        return 'Solutions to the Dancing School problem with n girls and n+3 boys'

    def _eval(self, n):
        # Delegate to the shared helper with 3 extra boys (presumably a
        # permanent-based count -- see perm_mh).
        return perm_mh(n, 3)
class KleshchevPartitionCrystal(KleshchevPartition, KleshchevCrystalMixin):
    """Kleshchev partition with crystal operators implemented via
    (co)good cells."""

    def e(self, i):
        """Crystal raising operator: remove the good cell of residue ``i``,
        or return None when there is no such cell."""
        parent = self.parent()
        good = self.good_cells(i)
        if good is None:
            return None
        row = good[0]
        shape = list(self)
        shape[row] -= 1
        return type(self)(parent, shape)

    def f(self, i):
        """Crystal lowering operator: add the cogood cell of residue ``i``,
        or return None when there is no such cell."""
        parent = self.parent()
        cogood = self.cogood_cells(i)
        if cogood is None:
            return None
        row, col = cogood
        shape = list(self)
        # A cogood cell in column 0 starts a brand-new row.
        if col == 0:
            shape.append(1)
        else:
            shape[row] += 1
        return type(self)(parent, shape)
class PixelwiseLinear(Module):
    """Stack of 1x1 convolutions, i.e. per-pixel linear layers.

    Stage ``i`` maps ``fin[i]`` channels to ``fout[i]``; every stage is
    followed by PReLU except the last, which uses ``last_activation`` when
    one is provided.
    """

    def __init__(self, fin: List[int], fout: List[int], last_activation: Module = None) -> None:
        assert (len(fout) == len(fin))
        super().__init__()
        depth = len(fin)
        stages = []
        for index, (cin, cout) in enumerate(zip(fin, fout)):
            # PReLU everywhere except (optionally) after the final stage.
            use_custom = (index == depth - 1) and (last_activation is not None)
            activation = last_activation if use_custom else PReLU()
            stages.append(
                Sequential(Conv2d(cin, cout, kernel_size=1, bias=True), activation)
            )
        self._linears = Sequential(*stages)

    def forward(self, x: Tensor) -> Tensor:
        # Each pixel is processed independently across its channels.
        return self._linears(x)
def resolve_entities(entities):
    """Resolve the 'targets' of every entity, in place.

    All resolutions are computed against the unmodified entity map first and
    only then written back, so iteration order cannot affect the result.
    """
    resolved = {}
    for entity_id, entity in entities.items():
        resolved[entity_id] = resolve_entity(entity, entities)
    for entity_id, targets in resolved.items():
        entities[entity_id]['targets'] = targets
_function(resources=dict(known_spouses=known_spouses), pre=[get_person_text])
# NOTE(review): the line above is likely the tail of a truncated
# ``@labeling_function(...)`` decorator.
def lf_distant_supervision(x, known_spouses):
    """Distant-supervision labeling function: POSITIVE when the candidate's
    person pair appears (in either order) in the known-spouses set,
    otherwise ABSTAIN."""
    (p1, p2) = x.person_names
    if (((p1, p2) in known_spouses) or ((p2, p1) in known_spouses)):
        return POSITIVE
    else:
        return ABSTAIN
def once_differentiable(fn):
    """Decorator for autograd ``backward`` implementations that must not be
    differentiated a second time.

    The wrapped ``fn`` runs under ``torch.no_grad()``.  If grad mode is on
    and any input requires grad, its outputs are routed through a
    DelayedError node so that a second backward pass raises instead of
    silently producing wrong gradients.
    """
    import functools

    # BUG FIX: the source had a bare ``(fn)`` here -- the remnants of a
    # truncated ``@functools.wraps(fn)`` decorator, restored below.
    @functools.wraps(fn)
    def wrapper(ctx, *args):
        with torch.no_grad():
            outputs = fn(ctx, *args)
        if not torch.is_grad_enabled():
            # No autograd tracing at all: return the results untouched.
            return outputs
        requires_grad = any(
            isinstance(arg, torch.Tensor) and arg.requires_grad for arg in args
        )
        if not requires_grad:
            return outputs
        if not isinstance(outputs, tuple):
            outputs = (outputs,)
        # BUG FIX: the message was a bytes literal with garbled text
        # ("markedwith _differentiable"); DelayedError gets a proper str.
        err_fn = _functions.DelayedError(
            'trying to differentiate twice a function that was marked '
            'with @once_differentiable', len(outputs))

        def fake_requires_grad(var):
            # Detached copies that still require grad, so the DelayedError
            # node is attached to the graph in place of a real grad_fn.
            if var is not None:
                var = var.detach()
                var.requires_grad = True
            return var

        return err_fn(*[fake_requires_grad(v) for v in outputs])

    return wrapper
class MobileNetV2(nn.Module):
    """MobileNetV2 classifier with a configurable width multiplier.

    ``filter_size`` is forwarded to every InvertedResidual block (its meaning
    is defined by that block -- TODO confirm).  Layout: stem conv, the
    standard inverted-residual stages, a 1x1 head conv, global average pool,
    and a linear classifier.
    """

    def __init__(self, num_classes=1000, width_mult=1.0, filter_size=1):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # Each row: (expansion t, output channels c, repeats n, first stride s).
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        # Scale channels by the width multiplier; the head never shrinks below 1280.
        input_channel = int((input_channel * width_mult))
        self.last_channel = int((last_channel * max(1.0, width_mult)))
        features = [ConvBNReLU(3, input_channel, stride=2)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = int((c * width_mult))
            for i in range(n):
                # The stage stride applies only to its first block.
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, filter_size=filter_size))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Linear(self.last_channel, num_classes))
        # Standard MobileNet-style weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        # Global average pool over the spatial dimensions.
        x = x.mean([2, 3])
        x = self.classifier(x)
        return x
def save_arguments(args, save_loc, json_file_name='arguments.json'):
    """Dump the argparse namespace ``args`` as JSON under ``save_loc`` and
    log where it went."""
    arg_fname = '{}/{}'.format(save_loc, json_file_name)
    # vars() turns the Namespace into a plain dict for serialization.
    writer = DictWriter(file_name=arg_fname, format='json')
    writer.write(vars(args))
    print_log_message('Arguments are dumped here: {}'.format(arg_fname))
class Inception(nn.Module):
    """GoogLeNet-style inception block with BatchNorm + PReLU.

    Four parallel branches -- 1x1, 1x1->3x3, 1x1->3x3->3x3 (standing in for
    the 5x5 path), and maxpool->1x1 -- concatenated along channels.
    """

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        def conv_unit(cin, cout, kernel, padding=0):
            # Conv -> BatchNorm -> PReLU, the basic unit of every branch.
            return [nn.Conv2d(cin, cout, kernel_size=kernel, padding=padding),
                    nn.BatchNorm2d(cout), nn.PReLU()]

        self.b1 = nn.Sequential(*conv_unit(in_planes, n1x1, 1))
        self.b2 = nn.Sequential(*(conv_unit(in_planes, n3x3red, 1)
                                  + conv_unit(n3x3red, n3x3, 3, padding=1)))
        self.b3 = nn.Sequential(*(conv_unit(in_planes, n5x5red, 1)
                                  + conv_unit(n5x5red, n5x5, 3, padding=1)
                                  + conv_unit(n5x5, n5x5, 3, padding=1)))
        self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1),
                                *conv_unit(in_planes, pool_planes, 1))

    def forward(self, x):
        # Concatenate the branch outputs along the channel dimension.
        return torch.cat([self.b1(x), self.b2(x), self.b3(x), self.b4(x)], 1)
def _get_connect_params(query): params = urlparse.parse_qs(query) if any(((len(v) > 2) for v in params.values())): raise ValueError(('DB URI params list has duplicate keys: ' + query)) return {k: json.loads(v[0]) for (k, v) in params.items()}
class LabeledMatching(Function):
    """Autograd function for OIM-style labeled matching.

    Forward computes dot-product scores between features and a lookup table
    of per-identity prototypes; backward, besides the feature gradient,
    side-effectfully refreshes the table with a momentum (EMA) update.

    NOTE(review): forward/backward are not declared @staticmethod as modern
    torch.autograd.Function expects -- confirm how this class is invoked.
    """

    def forward(ctx, features, pid_labels, lookup_table, momentum=0.5):
        # Stash what backward needs; the table lives on ctx because it is
        # updated in place rather than differentiated.
        ctx.save_for_backward(features, pid_labels)
        ctx.lookup_table = lookup_table
        ctx.momentum = momentum
        # Dot-product scores of each feature against every prototype.
        scores = features.mm(lookup_table.t())
        return scores

    def backward(ctx, grad_output):
        (features, pid_labels) = ctx.saved_tensors
        lookup_table = ctx.lookup_table
        momentum = ctx.momentum
        grad_feats = None
        if ctx.needs_input_grad[0]:
            grad_feats = grad_output.mm(lookup_table)
        # Side effect: EMA-update the prototype of every labeled (pid >= 0)
        # sample; negative labels (unlabeled) are skipped.
        for (indx, label) in enumerate(pid_labels):
            if (label >= 0):
                lookup_table[label] = ((momentum * lookup_table[label]) + ((1 - momentum) * features[indx]))
        # One gradient slot per forward input; only features is differentiable.
        return (grad_feats, None, None, None)
def set_seed(seed):
    """Seed Python, NumPy and (when already imported) TensorFlow / PyTorch RNGs.

    The original line ``seed %= global seed_`` was a syntax error; it is
    reconstructed as reducing the seed modulo 2**32 (``np.random.seed``
    only accepts seeds in ``[0, 2**32)``) and then recording it in the
    module-level ``seed_``.

    Args:
        seed: any integer; reduced modulo 2**32 before use.
    """
    global seed_
    # np.random.seed rejects values outside [0, 2**32).
    seed %= (2 ** 32)
    seed_ = seed
    random.seed(seed)
    np.random.seed(seed)
    # Only touch frameworks the caller has already imported, so this helper
    # never forces a heavyweight import by itself.
    if ('tensorflow' in sys.modules):
        import tensorflow as tf
        tf.compat.v1.set_random_seed(seed)
    if ('torch' in sys.modules):
        warnings.warn('Enabling deterministic mode in PyTorch can have a performance impact when using GPU.')
        import torch
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def test_extract_dbscan(global_dtype):
    """OPTICS with cluster_method='dbscan' recovers four synthetic blobs."""
    rng = np.random.RandomState(0)
    n_points_per_cluster = 20
    # Four well-separated cluster centers; each blob is center + small noise.
    centers = ([(- 5), (- 2)], [4, (- 1)], [1, 2], [(- 2), 3])
    blobs = [center + (0.2 * rng.randn(n_points_per_cluster, 2)) for center in centers]
    X = np.vstack(blobs).astype(global_dtype, copy=False)
    clust = OPTICS(cluster_method='dbscan', eps=0.5).fit(X)
    # Every point should land in one of exactly four clusters (no noise label).
    assert_array_equal(np.sort(np.unique(clust.labels_)), [0, 1, 2, 3])
class Model(nn.Module):
    """Score predictor with a mean branch and a per-judge bias branch.

    The mean branch maps frame features to a scalar segment score (either
    via self-attention pooling or by averaging per-frame linear scores).
    When ``judge_ids`` is given, a judge embedding is added to every frame
    and a second branch produces a judge-biased score on top of the mean.
    """

    def __init__(self, input_dim, clipping=False, attention_pooling=False, num_judges=5000, **kwargs):
        super(Model, self).__init__()
        # NOTE: construction order is kept identical to preserve RNG-driven
        # parameter initialization and state_dict layout.
        self.mean_net_linear = nn.Linear(input_dim, 1)
        self.mean_net_clipping = clipping
        self.mean_net_pooling = (SelfAttentionPooling(input_dim) if attention_pooling else None)
        self.bias_net_linear = nn.Linear(input_dim, 1)
        self.bias_net_pooling = (SelfAttentionPooling(input_dim) if attention_pooling else None)
        self.judge_embbeding = nn.Embedding(num_embeddings=num_judges, embedding_dim=input_dim)

    def forward(self, features, judge_ids=None):
        # --- mean (segment) score ---
        if (self.mean_net_pooling is None):
            # Per-frame scores averaged over time: (B, T, D) -> (B,)
            segment_score = self.mean_net_linear(features).squeeze((- 1)).mean(dim=(- 1))
        else:
            pooled = self.mean_net_pooling(features)
            segment_score = self.mean_net_linear(pooled)
        if self.mean_net_clipping:
            # Squash into the MOS range [1, 5].
            segment_score = ((torch.tanh(segment_score) * 2) + 3)
        if (judge_ids is None):
            return segment_score.squeeze((- 1))
        # --- judge-biased score ---
        n_frames = features.shape[1]
        judge_vecs = self.judge_embbeding(judge_ids)
        # Broadcast the judge embedding across time: (B, D) -> (B, T, D).
        judge_vecs = torch.stack([judge_vecs] * n_frames, dim=1)
        biased_feats = (features + judge_vecs)
        if (self.bias_net_pooling is None):
            bias_score = self.bias_net_linear(biased_feats).squeeze((- 1)).mean(dim=(- 1))
        else:
            bias_score = self.bias_net_linear(self.bias_net_pooling(biased_feats))
        bias_score = (bias_score + segment_score)
        return (segment_score.squeeze((- 1)), bias_score.squeeze((- 1)))
class Visualizer():
    """Render pgx ``State`` objects (single or batched) as SVG drawings.

    Configuration is lazy: ``_set_config_by_state`` inspects the state's
    ``env_id`` and installs board geometry, the per-game drawing function
    and a color palette before anything is drawn.
    """

    def __init__(self, *, color_theme: Optional[ColorTheme]=None, scale: Optional[float]=None) -> None:
        # Fall back to library-wide defaults when not given explicitly.
        color_theme = (color_theme if (color_theme is not None) else global_config.color_theme)
        scale = (scale if (scale is not None) else global_config.scale)
        # Geometry fields start at -1 (unset); filled in by _set_config_by_state.
        self.config = {'GRID_SIZE': (- 1), 'BOARD_WIDTH': (- 1), 'BOARD_HEIGHT': (- 1), 'COLOR_THEME': color_theme, 'COLOR_SET': ColorSet(), 'SCALE': scale}
        # Per-environment drawing callable(dwg, state, config) -> svg group;
        # installed by _set_config_by_state.
        self._make_dwg_group = None
        '\n notebook \n def _repr_html_(self) -> str:\n assert self.state is not None\n return self._to_dwg_from_states(states=self.state).tostring()\n '

    def get_dwg(self, states):
        """Build an ``svgwrite.Drawing`` for one state or a batch of states.

        Batched states are tiled on a near-square WIDTH x HEIGHT grid of
        boards; a single state is drawn alone.
        """
        try:
            # Batched case: batch size from the leading axis of current_player.
            SIZE = len(states.current_player)
            WIDTH = math.ceil(math.sqrt((SIZE - 0.1)))
            if ((SIZE - ((WIDTH - 1) ** 2)) >= WIDTH):
                HEIGHT = WIDTH
            else:
                HEIGHT = (WIDTH - 1)
            if (SIZE == 1):
                states = self._get_nth_state(states, 0)
        except TypeError:
            # len() failed, so this is a single (unbatched) state.
            SIZE = 1
            WIDTH = 1
            HEIGHT = 1
        self._set_config_by_state(states)
        assert (self._make_dwg_group is not None)
        GRID_SIZE = self.config['GRID_SIZE']
        BOARD_WIDTH = self.config['BOARD_WIDTH']
        BOARD_HEIGHT = self.config['BOARD_HEIGHT']
        SCALE = self.config['SCALE']
        # Each board gets (BOARD_* + 1) grid units: the +1 leaves a
        # half-grid margin on each side.
        dwg = svgwrite.Drawing('temp.svg', (((((BOARD_WIDTH + 1) * GRID_SIZE) * WIDTH) * SCALE), ((((BOARD_HEIGHT + 1) * GRID_SIZE) * HEIGHT) * SCALE)))
        group = dwg.g()
        # Background rectangle covering the whole canvas.
        group.add(dwg.rect((0, 0), ((((BOARD_WIDTH + 1) * GRID_SIZE) * WIDTH), (((BOARD_HEIGHT + 1) * GRID_SIZE) * HEIGHT)), fill=self.config['COLOR_SET'].background_color))
        if (SIZE == 1):
            g = self._make_dwg_group(dwg, states, self.config)
            g.translate(((GRID_SIZE * 1) / 2), ((GRID_SIZE * 1) / 2))
            group.add(g)
            group.scale(SCALE)
            dwg.add(group)
            return dwg
        for i in range(SIZE):
            # Row-major placement of the i-th board on the grid.
            x = (i % WIDTH)
            y = (i // WIDTH)
            _state = self._get_nth_state(states, i)
            g = self._make_dwg_group(dwg, _state, self.config)
            g.translate((((GRID_SIZE * 1) / 2) + (((BOARD_WIDTH + 1) * GRID_SIZE) * x)), (((GRID_SIZE * 1) / 2) + (((BOARD_HEIGHT + 1) * GRID_SIZE) * y)))
            group.add(g)
            # Gray frame around each board in the grid.
            group.add(dwg.rect(((((BOARD_WIDTH + 1) * GRID_SIZE) * x), (((BOARD_HEIGHT + 1) * GRID_SIZE) * y)), (((BOARD_WIDTH + 1) * GRID_SIZE), ((BOARD_HEIGHT + 1) * GRID_SIZE)), fill='none', stroke='gray'))
        group.scale(SCALE)
        dwg.add(group)
        return dwg

    def _set_config_by_state(self, _state: State):
        """Install geometry, drawing function and color set for ``_state.env_id``.

        Imports the per-game drawing module lazily so only the environment
        actually rendered pays the import cost.

        NOTE(review): every theme test below has the form
        ``(X is None and X == 'dark') or (X == 'dark')``; the first clause
        can never be true (None != 'dark'), so it reduces to ``X == 'dark'``.
        Possibly a default-to-dark fallback was intended -- confirm before
        changing.
        """
        if (_state.env_id == 'animal_shogi'):
            from pgx._src.dwg.animalshogi import _make_animalshogi_dwg
            self.config['GRID_SIZE'] = 60
            self.config['BOARD_WIDTH'] = 4
            self.config['BOARD_HEIGHT'] = 4
            self._make_dwg_group = _make_animalshogi_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('dimgray', 'black', 'whitesmoke', 'whitesmoke', '#1e1e1e', 'white', '')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'lightgray', 'black', 'black', 'white', 'black', '')
        elif (_state.env_id == 'backgammon'):
            from pgx._src.dwg.backgammon import _make_backgammon_dwg
            self.config['GRID_SIZE'] = 25
            self.config['BOARD_WIDTH'] = 17
            self.config['BOARD_HEIGHT'] = 14
            self._make_dwg_group = _make_backgammon_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'darkgray', 'white', 'white', '#1e1e1e', 'silver', 'dimgray')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'black', 'lightgray', 'white', 'white', 'black', 'gray')
        elif (_state.env_id == 'bridge_bidding'):
            from pgx._src.dwg.bridge_bidding import _make_bridge_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 14
            self.config['BOARD_HEIGHT'] = 10
            self._make_dwg_group = _make_bridge_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('gray', 'black', 'black', 'dimgray', '#1e1e1e', 'gainsboro', 'white')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'black', 'lightgray', 'white', 'white', 'black', 'black')
        elif (_state.env_id == 'chess'):
            from pgx._src.dwg.chess import _make_chess_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 8
            self.config['BOARD_HEIGHT'] = 8
            self._make_dwg_group = _make_chess_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('none', 'none', '#404040', 'gray', '#1e1e1e', 'silver', '')
            else:
                self.config['COLOR_SET'] = ColorSet('none', 'none', 'gray', 'white', 'white', 'black', '')
        elif (_state.env_id == 'gardner_chess'):
            from pgx._src.dwg.gardner_chess import _make_gardner_chess_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 5
            self.config['BOARD_HEIGHT'] = 5
            self._make_dwg_group = _make_gardner_chess_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('none', 'none', '#404040', 'gray', '#1e1e1e', 'silver', '')
            else:
                self.config['COLOR_SET'] = ColorSet('none', 'none', 'gray', 'white', 'white', 'black', '')
        elif (_state.env_id == 'connect_four'):
            from pgx._src.dwg.connect_four import _make_connect_four_dwg
            self.config['GRID_SIZE'] = 35
            self.config['BOARD_WIDTH'] = 7
            self.config['BOARD_HEIGHT'] = 7
            self._make_dwg_group = _make_connect_four_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'darkgray', 'white', 'white', '#1e1e1e', 'silver', 'gray')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', 'gray')
        elif (_state.env_id in ('go_9x9', 'go_19x19')):
            from pgx._src.dwg.go import _make_go_dwg
            self.config['GRID_SIZE'] = 25
            # Board side length is taken from the state itself (9 or 19).
            self.config['BOARD_WIDTH'] = _state._x.size
            self.config['BOARD_HEIGHT'] = _state._x.size
            self._make_dwg_group = _make_go_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'darkgray', 'white', 'white', '#1e1e1e', 'silver', '')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', '')
        elif (_state.env_id == 'hex'):
            import jax.numpy as jnp
            from pgx._src.dwg.hex import _make_hex_dwg, four_dig
            self.config['GRID_SIZE'] = 30
            # _state._size may be batched (indexable) or scalar; try the
            # batched shape first and fall back on IndexError.
            try:
                self.config['BOARD_WIDTH'] = four_dig((_state._size[0] * 1.5))
                self.config['BOARD_HEIGHT'] = four_dig(((_state._size[0] * jnp.sqrt(3)) / 2))
            except IndexError:
                self.config['BOARD_WIDTH'] = four_dig((_state._size * 1.5))
                self.config['BOARD_HEIGHT'] = four_dig(((_state._size * jnp.sqrt(3)) / 2))
            self._make_dwg_group = _make_hex_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('darkgray', 'black', 'white', 'white', '#1e1e1e', 'gray', '#333333')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', 'lightgray')
        elif (_state.env_id == 'kuhn_poker'):
            from pgx._src.dwg.kuhn_poker import _make_kuhnpoker_dwg
            self.config['GRID_SIZE'] = 30
            self.config['BOARD_WIDTH'] = 8
            self.config['BOARD_HEIGHT'] = 8
            self._make_dwg_group = _make_kuhnpoker_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'lightgray', 'white', 'lightgray', '#1e1e1e', 'lightgray', 'lightgray')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', '')
        elif (_state.env_id == 'leduc_holdem'):
            from pgx._src.dwg.leduc_holdem import _make_leducHoldem_dwg
            self.config['GRID_SIZE'] = 30
            self.config['BOARD_WIDTH'] = 8
            self.config['BOARD_HEIGHT'] = 8
            self._make_dwg_group = _make_leducHoldem_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('gray', 'lightgray', '', '', '#1e1e1e', 'lightgray', 'lightgray')
            else:
                self.config['COLOR_SET'] = ColorSet('gray', 'black', '', '', 'white', 'black', '')
        elif (_state.env_id == 'mahjong'):
            from pgx._src.dwg.mahjong import _make_mahjong_dwg
            self.config['GRID_SIZE'] = 10
            self.config['BOARD_WIDTH'] = 70
            self.config['BOARD_HEIGHT'] = 70
            self._make_dwg_group = _make_mahjong_dwg
            # Same palette for both themes here.
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', 'black')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', 'black')
        elif (_state.env_id == 'othello'):
            from pgx._src.dwg.othello import _make_othello_dwg
            self.config['GRID_SIZE'] = 30
            self.config['BOARD_WIDTH'] = 8
            self.config['BOARD_HEIGHT'] = 8
            self._make_dwg_group = _make_othello_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('black', 'darkgray', 'white', 'white', '#1e1e1e', 'silver', '')
            else:
                self.config['COLOR_SET'] = ColorSet('black', 'white', 'black', 'black', 'white', 'black', '')
        elif (_state.env_id == '2048'):
            from pgx._src.dwg.play2048 import _make_2048_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 4
            self.config['BOARD_HEIGHT'] = 4
            self._make_dwg_group = _make_2048_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('lightgray', '', '', '', '#1e1e1e', 'black', 'white')
            else:
                self.config['COLOR_SET'] = ColorSet('black', '#f0f0f0', '', '', 'white', 'black', 'black')
        elif (_state.env_id == 'shogi'):
            from pgx._src.dwg.shogi import _make_shogi_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 10
            self.config['BOARD_HEIGHT'] = 9
            self._make_dwg_group = _make_shogi_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('gray', 'black', 'gray', 'gray', '#1e1e1e', 'gray', '')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'lightgray', 'black', 'black', 'white', 'black', '')
        elif (_state.env_id == 'sparrow_mahjong'):
            from pgx._src.dwg.sparrow_mahjong import _make_sparrowmahjong_dwg
            self.config['GRID_SIZE'] = 50
            self.config['BOARD_WIDTH'] = 15
            self.config['BOARD_HEIGHT'] = 10
            self._make_dwg_group = _make_sparrowmahjong_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('lightgray', 'dimgray', '#404040', 'gray', '#1e1e1e', 'darkgray', 'whitesmoke')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'white', 'gray', 'white', 'white', 'silver', 'black')
        elif (_state.env_id == 'tic_tac_toe'):
            from pgx._src.dwg.tictactoe import _make_tictactoe_dwg
            self.config['GRID_SIZE'] = 60
            self.config['BOARD_WIDTH'] = 3
            self.config['BOARD_HEIGHT'] = 3
            self._make_dwg_group = _make_tictactoe_dwg
            if (((self.config['COLOR_THEME'] is None) and (self.config['COLOR_THEME'] == 'dark')) or (self.config['COLOR_THEME'] == 'dark')):
                self.config['COLOR_SET'] = ColorSet('gray', 'black', 'black', 'dimgray', '#1e1e1e', 'gainsboro')
            else:
                self.config['COLOR_SET'] = ColorSet('white', 'black', 'lightgray', 'white', 'white', 'black')
        else:
            # Unknown environment id: no renderer registered.
            assert False

    def _get_nth_state(self, states: State, i):
        """Slice the i-th state out of a batched State pytree."""
        return jax.tree_util.tree_map((lambda x: x[i]), states)
def train_and_validate(net, criterion, optimizer, scheduler, dataloader, device, epochs, load_model=None):
    """Run the train/val loop, checkpointing whenever val accuracy beats the best so far.

    Args:
        net: model producing per-pixel logits.
        criterion: loss on (logits, masks), e.g. ``BCEWithLogitsLoss``.
        optimizer: optimizer over ``net``'s parameters.
        scheduler: LR scheduler stepped with the latest validation accuracy
            (e.g. ``ReduceLROnPlateau(mode='max')``).
        dataloader: dict with 'train'/'val' iterables of ``{'image', 'mask'}`` batches.
        device: torch device to run on.
        epochs: number of additional epochs to run.
        load_model: optional checkpoint path to resume from.

    Fixes vs. original: the per-batch loss was accumulated twice (once
    unweighted, once weighted by batch size); ``epochs`` was incremented by
    ``start_epoch`` on every loop iteration, inflating the displayed total;
    ``best_loss`` was bound to a timestamp; and ``min`` shadowed the builtin.
    """
    start_epoch = 0
    if load_model:
        print('load model from', load_model)
        checkpoint = torch.load(load_model)
        net.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    history = {'train': {'epoch': [], 'loss': [], 'acc': []},
               'val': {'epoch': [], 'loss': [], 'acc': []}}
    best_acc = 0.98  # checkpoint only once val accuracy exceeds this floor
    start = time.time()
    # Apply the resume offset once, outside the loop.
    end_epoch = epochs + start_epoch
    for epoch in range(start_epoch, end_epoch):
        print(('-' * 30))
        print(f'Epoch {(epoch + 1)}/{end_epoch}')
        since = time.time()
        for phase in ['train', 'val']:
            if (phase == 'train'):
                net.train()
            else:
                print(('-' * 10))
                net.eval()
            running_loss = 0.0
            running_correct = 0
            dataset_size = 0
            for (batch_idx, sample) in enumerate(dataloader[phase]):
                (imgs, true_masks) = (sample['image'], sample['mask'])
                imgs = imgs.to(device=device, dtype=torch.float32)
                true_masks = true_masks.to(device=device, dtype=torch.float32)
                optimizer.zero_grad()
                # Gradients only in the training phase.
                with torch.set_grad_enabled((phase == 'train')):
                    masks_pred = net(imgs)
                    loss = criterion(masks_pred, true_masks)
                    if (phase == 'train'):
                        loss.backward()
                        optimizer.step()
                # statistics -- accumulate the size-weighted loss exactly once
                # (the original also added the unweighted loss, double-counting).
                dataset_size += imgs.size(0)
                running_loss += (loss.item() * imgs.size(0))
                # Binary prediction from logits; per-pixel accuracy averaged
                # per batch, then weighted by batch size.
                pred = (torch.sigmoid(masks_pred) > 0.5)
                running_correct += ((pred == true_masks).float().mean().item() * imgs.size(0))
            epoch_loss = (running_loss / dataset_size)
            epoch_acc = (running_correct / dataset_size)
            print('{} Loss {:.5f}\n{} Acc {:.2f}'.format(phase, epoch_loss, phase, epoch_acc))
            if ((phase == 'val') and (epoch_acc > best_acc)):
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(net.state_dict())
                ckpt_dir = os.path.join(os.getcwd(), 'checkpoint')
                # Original crashed with FileNotFoundError if the dir was missing.
                os.makedirs(ckpt_dir, exist_ok=True)
                torch.save({'epoch': (epoch + 1),
                            'model_state_dict': best_model_wts,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'best_acc': best_acc},
                           os.path.join(ckpt_dir, 'best_checkpoint[epoch_{}].pt'.format((epoch + 1))))
                print('Achived best result! save checkpoint.')
                print('val acc = ', best_acc)
            history[phase]['epoch'].append(epoch)
            history[phase]['loss'].append(epoch_loss)
            history[phase]['acc'].append(epoch_acc)
        # Plateau scheduler keyed on the most recent validation accuracy.
        scheduler.step(history['val']['acc'][(- 1)])
        time_elapsed = (time.time() - since)
        print('One Epoch Complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    time_elapsed = (time.time() - start)
    (minutes, seconds) = divmod(time_elapsed, 60)  # avoid shadowing builtin min
    print('Total Training time {:.0f}min {:.0f}sec'.format(minutes, seconds))