code
stringlengths
101
5.91M
class CIFARWRN(nn.Module):
    """Wide ResNet (WRN) for CIFAR-style inputs.

    Parameters
    ----------
    channels : list of list of int
        Output channels for each unit, grouped per stage.
    init_block_channels : int
        Width of the stem 3x3 convolution.
    in_channels : int
        Number of input image channels.
    in_size : tuple of int
        Expected spatial input size; the fixed 8x8 final average pool
        assumes 32x32 inputs downsampled twice.
    num_classes : int
        Number of classification outputs.
    """

    def __init__(self, channels, init_block_channels, in_channels=3, in_size=(32, 32), num_classes=10):
        super(CIFARWRN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', conv3x3(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            stage = nn.Sequential()
            for (j, out_channels) in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                stride = (2 if ((j == 0) and (i != 0)) else 1)
                stage.add_module('unit{}'.format((j + 1)), PreResUnit(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=False, conv1_stride=False))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        # Pre-activation ResNet needs a final BN+ReLU before pooling.
        self.features.add_module('post_activ', PreResActivation(in_channels=in_channels))
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=8, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for all conv weights; zero biases.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten pooled feature maps into a vector per sample.
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
# NOTE(review): the leading ".dataclass" below looks like a mangled decorator —
# in upstream transformers this output class is decorated with
# "@flax.struct.dataclass". Confirm against the original file before relying
# on this snippet; as written the line is not valid syntax.
.dataclass
class FlaxMultipleChoiceModelOutput(ModelOutput):
    """Output container for Flax multiple-choice models.

    Attributes:
        logits: classification scores, one per choice.
        hidden_states: optional per-layer hidden states.
        attentions: optional per-layer attention weights.
    """
    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
# NOTE(review): "_incremental_state" is a bare-name expression and looks like a
# mangled decorator (possibly "@with_incremental_state") — confirm upstream.
_incremental_state


class FConvDecoder(FairseqDecoder):
    """Convolutional decoder with per-layer encoder attention, optional
    self-attention, and an optional gated fusion path with a frozen
    pretrained decoder (fairseq fconv_self_att style)."""

    def __init__(self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024,
                 convolutions=(((512, 3),) * 8), attention=True, dropout=0.1,
                 selfattention=False, attention_nheads=1, selfattention_nheads=1,
                 project_input=False, gated_attention=False, downsample=False,
                 pretrained=False, trained_decoder=None):
        super().__init__(dictionary)
        self.register_buffer('version', torch.Tensor([2]))
        self.pretrained = pretrained
        self.pretrained_decoder = trained_decoder
        self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
        self.need_attn = True
        in_channels = convolutions[0][0]

        def expand_bool_array(val):
            # Broadcast a single bool to one flag per conv layer.
            if isinstance(val, bool):
                return ([val] * len(convolutions))
            return val
        attention = expand_bool_array(attention)
        selfattention = expand_bool_array(selfattention)
        if ((not isinstance(attention, list)) or (len(attention) != len(convolutions))):
            raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx)
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.selfattention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for (i, (out_channels, kernel_size)) in enumerate(convolutions):
            # Residual projection only when the channel count changes.
            self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
            # 2x channels so the GLU halves them back to out_channels.
            self.convolutions.append(LinearizedConv1d(in_channels, (out_channels * 2), kernel_size, padding=(kernel_size - 1), dropout=dropout))
            self.attention.append((DownsampledMultiHeadAttention(out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False) if attention[i] else None))
            self.attproj.append((Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None))
            self.selfattention.append((SelfAttention(out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample) if selfattention[i] else None))
            in_channels = out_channels
        self.fc2 = Linear(in_channels, out_embed_dim)
        self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
        if self.pretrained:
            # Gated fusion of this decoder's output with a frozen pretrained
            # decoder's fc2 output, captured via a forward hook.
            self.gate1 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.gate2 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.joining = nn.Sequential(Linear((out_embed_dim * 2), (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim))
            self.pretrained_outputs = {}

            def save_output():
                def hook(a, b, output):
                    self.pretrained_outputs['out'] = output
                return hook
            self.pretrained_decoder.fc2.register_forward_hook(save_output())

    def forward(self, prev_output_tokens, encoder_out):
        """Decode one batch; returns (logits-or-features, avg attention scores)."""
        trained_encoder_out = (encoder_out['pretrained'] if self.pretrained else None)
        encoder_out = encoder_out['encoder']['encoder_out']
        (encoder_a, encoder_b) = self._split_encoder_out(encoder_out)
        positions = self.embed_positions(prev_output_tokens)
        x = (self.embed_tokens(prev_output_tokens) + positions)
        x = self.dropout_module(x)
        # Kept in time-major form as the attention query residual.
        target_embedding = x.transpose(0, 1)
        x = self.fc1(x)
        x = x.transpose(0, 1)
        avg_attn_scores = None
        for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
            residual = (x if (proj is None) else proj(x))
            x = self.dropout_module(x)
            x = conv(x)
            x = F.glu(x, dim=2)
            if (attention is not None):
                r = x
                (x, attn_scores) = attention((attproj(x) + target_embedding), encoder_a, encoder_b)
                x = (x + r)
                # Accumulate attention only at inference (for visualization).
                if ((not self.training) and self.need_attn):
                    if (avg_attn_scores is None):
                        avg_attn_scores = attn_scores
                    else:
                        avg_attn_scores.add_(attn_scores)
            if (selfattention is not None):
                x = selfattention(x)
            # Scaled residual connection keeps variance stable across layers.
            x = ((x + residual) * math.sqrt(0.5))
        x = x.transpose(0, 1)
        x = self.fc2(x)
        x = self.dropout_module(x)
        if (not self.pretrained):
            x = self.fc3(x)
        if self.pretrained:
            # Run the frozen decoder so the fc2 hook populates
            # pretrained_outputs['out'], then gate and fuse the two streams.
            (trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
            y = torch.cat([x, self.pretrained_outputs['out']], dim=(- 1))
            gate1 = self.gate1(y)
            gate2 = self.gate2(y)
            gated_x1 = (gate1 * x)
            gated_x2 = (gate2 * self.pretrained_outputs['out'])
            fusion = torch.cat([gated_x1, gated_x2], dim=(- 1))
            fusion = self.joining(fusion)
            fusion_output = self.fc3(fusion)
            return (fusion_output, avg_attn_scores)
        else:
            return (x, avg_attn_scores)

    def max_positions(self):
        """Maximum decodable sequence length (from positional embeddings)."""
        return self.embed_positions.max_positions

    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Generation can skip attention-score accumulation unless requested.
        self.need_attn = need_attn

    def _split_encoder_out(self, encoder_out):
        """Transpose both encoder output tensors to batch-major, contiguous."""
        (encoder_a, encoder_b) = encoder_out
        encoder_a = encoder_a.transpose(0, 1).contiguous()
        encoder_b = encoder_b.transpose(0, 1).contiguous()
        result = (encoder_a, encoder_b)
        return result
def read_from_file(file_name):
    """Read a UTF-8 text file and return its contents.

    Returns '' (and prints a diagnostic) when the file cannot be opened
    or read.
    """
    try:
        # `with` guarantees the handle is closed even if read() raises,
        # unlike the previous manual open()/close() pair.
        with open(file_name, 'r', encoding='utf-8') as file:
            content = file.read()
    except IOError as e:
        content = ''
        print('IO Error!:{}'.format(e))
    return content
def test_get_flat_plane_grid_triangles() -> None:
    """Sanity-check triangle counts from get_flat_plane_grid_triangles."""
    # A 1 m flat grid must tile into exactly 8 triangles.
    assert len(get_flat_plane_grid_triangles(range_m=1)) == 8
    # Sweep a range of sizes and print counts as a manual inspection aid.
    for range_m in range(30):
        triangles = get_flat_plane_grid_triangles(range_m)
        count = len(triangles)
        print(f'{count} at range={range_m}')
class AttentionActor(nn.Module):
    """Actor network: per-agent embeddings are attended across agents, then a
    feed-forward head produces logits for one-hot categorical actions."""

    def __init__(self, in_dim, out_dim, hidden_size, layers, activation=nn.ReLU):
        # NOTE(review): `layers` is accepted but unused — the head is built
        # with a fixed depth of 1. Confirm whether `layers` should be passed
        # through to build_model.
        super().__init__()
        self.feedforward_model = build_model(hidden_size, out_dim, 1, hidden_size, activation)
        self._attention_stack = AttentionEncoder(1, hidden_size, hidden_size)
        self.embed = nn.Linear(in_dim, hidden_size)

    def forward(self, state_features):
        # state_features: (..., n_agents, in_dim); leading dims are the batch.
        n_agents = state_features.shape[(- 2)]
        batch_size = state_features.shape[:(- 2)]
        embeds = F.relu(self.embed(state_features))
        # Flatten leading dims so attention runs over (batch, agents, hidden).
        embeds = embeds.view((- 1), n_agents, embeds.shape[(- 1)])
        # Attend across agents, then restore the original leading dims.
        attn_embeds = F.relu(self._attention_stack(embeds).view(*batch_size, n_agents, embeds.shape[(- 1)]))
        x = self.feedforward_model(attn_embeds)
        action_dist = OneHotCategorical(logits=x)
        action = action_dist.sample()
        # Return the sampled one-hot action alongside the raw logits.
        return (action, x)
def initinterference(profile, test):
    """Fit a linear model of colocation latency degradation from profile data.

    Each profiled workload pair contributes two symmetric training rows
    (A-with-B and B-with-A): features are the L2-cache and DRAM pressure of
    both workloads, the target is the fractional slowdown relative to the
    solo-run latency. Trains on a random 70% subset of the pairs, evaluates
    via testinterference2, and returns the fitted LinearRegression.
    """
    # Removed a dead `if False:` block (ssd-pair filtering experiment) and an
    # unused `ans` accumulator from the original.
    latency = profile['interference']['latency']
    l = len(latency)
    print(l)
    rl = int(0.7 * l)  # size of the random training sample
    # random.sample on a range picks by index, same as on a materialized list.
    rs = random.sample(range(l), rl)
    x = []
    y = []
    for i in rs:
        lat = latency[i]
        m1 = lat[0]
        m2 = lat[1]
        b1 = str(lat[2])
        th1 = str(lat[4])
        b2 = str(lat[3])
        th2 = str(100 - lat[4])
        # Features: own and partner cache/DRAM pressure, in both orderings.
        x.append([profile[m1]['l2cache'][b1][th1], profile[m2]['l2cache'][b2][th2], profile[m1]['dram'][b1][th1], profile[m2]['dram'][b2][th2]])
        x.append([profile[m2]['l2cache'][b2][th2], profile[m1]['l2cache'][b1][th1], profile[m2]['dram'][b2][th2], profile[m1]['dram'][b1][th1]])
        # Targets: relative slowdown of each member of the pair.
        y.append([(lat[5] / profile[m1]['latency'][b1][th1]) - 1])
        y.append([(lat[6] / profile[m2]['latency'][b2][th2]) - 1])
    lrModel = linear_model.LinearRegression()
    lrModel.fit(x, y)
    testinterference2(lrModel, profile, test)
    return lrModel
def test_predict_proba_raises(model, X):
    """predict_proba must reject malformed inputs and out-of-range values.

    Relies on module-level MIN_VALUE / MAX_VALUE bounds when they are set.
    """
    f = getattr(model, 'predict_proba')
    # Wrong nesting depth / wrong rank must raise.
    assert_raises(ValueError, f, [X])
    assert_raises(ValueError, f, X[0])
    assert_raises((ValueError, TypeError, RuntimeError), f, X[0][0])
    # Values just outside the allowed range must raise.
    if (MIN_VALUE is not None):
        assert_raises(ValueError, f, [[[(MIN_VALUE - 0.1) for i in range(model.d)] for j in range(4)]])
    if (MAX_VALUE is not None):
        assert_raises(ValueError, f, [[[(MAX_VALUE + 0.1) for i in range(model.d)] for j in range(4)]])
def pre_process_dataset_composite_in_user_format(user_datasets, label_map, output_shape, train_users, window_size, shift, normalise_dataset=True, verbose=0):
    """Window, label-map, filter, one-hot encode and (optionally) normalise
    each user's dataset.

    Normalisation statistics are computed from train_users only, so held-out
    users are scaled with training-set means/stds. Returns a dict mapping
    user -> (windowed data, one-hot labels).
    """
    if normalise_dataset:
        (means, stds) = get_mean_std_from_user_list_format(user_datasets, train_users)
    user_datasets_windowed = get_windows_dataset_from_user_list_format(user_datasets, window_size=window_size, shift=shift)
    user_datasets_processed = {}
    for (user, user_dataset) in user_datasets_windowed.items():
        (data, labels) = user_dataset
        labels_mapped = apply_label_map(labels, label_map)
        # Drop windows whose label has no mapping in label_map.
        (data_filtered, labels_filtered) = filter_none_label(data, labels_mapped)
        labels_one_hot = tf.keras.utils.to_categorical(labels_filtered, num_classes=output_shape)
        # Spot-check one random row of the one-hot encoding.
        r = np.random.randint(len(labels_filtered))
        assert (labels_one_hot[r].argmax() == labels_filtered[r])
        if normalise_dataset:
            data_filtered = normalise(data_filtered, means, stds)
        user_datasets_processed[user] = (data_filtered, labels_one_hot)
        if (verbose > 0):
            print('Data shape of user', user, ':', data_filtered.shape)
    return user_datasets_processed
class ShapKernel(ExplainerMixin):
    """Wrapper around shap.KernelExplainer for blackbox local explanations.

    Supports binary classification and regression only (multiclass raises).
    """
    available_explanations = ['local']
    explainer_type = 'blackbox'

    def __init__(self, model, data, feature_names=None, feature_types=None, **kwargs):
        from shap import KernelExplainer
        self.model = model
        self.feature_names = feature_names
        self.feature_types = feature_types
        (data, n_samples) = preclean_X(data, feature_names, feature_types)
        (predict_fn, n_classes, _) = determine_classes(model, data, n_samples)
        if (3 <= n_classes):
            raise Exception('multiclass SHAP not supported')
        # Binary models explain the positive-class column (index 1);
        # regression keeps the raw output (-1).
        predict_fn = unify_predict_fn(predict_fn, data, (1 if (n_classes == 2) else (- 1)))
        (data, self.feature_names_in_, self.feature_types_in_) = unify_data(data, n_samples, feature_names, feature_types, False, 0)
        # KernelExplainer expects a contiguous float64 background matrix.
        data = data.astype(np.float64, order='C', copy=False)
        self.shap_ = KernelExplainer(predict_fn, data, **kwargs)

    def explain_local(self, X, y=None, name=None, **kwargs):
        """Produce per-instance SHAP explanations for X."""
        return shap_explain_local(self, X, y, name, False, **kwargs)
def obslogPath(year=None, hemisphere=None):
    """Return the path to the APOGEE observation-log summary CSV.

    When `year` is None it is derived from the _APOGEE_REDUX reduction
    version. Year 7 distinguishes hemispheres; hemisphere=None defaults to
    north. Raises IOError on an unknown reduction version or, for year 7,
    an invalid hemisphere. (Unlisted years fall through and return None,
    matching the original behavior.)
    """
    base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'obslogs')
    if year is None:
        # Map the reduction version to a default survey year.
        if _APOGEE_REDUX == 'v402':
            year = 2
        elif _APOGEE_REDUX in ('v603', 'l30e.2'):
            year = 3
        elif _APOGEE_REDUX == 'l31c.2':
            year = 5
        elif _APOGEE_REDUX == 'l33':
            year = 7
        else:
            raise IOError(('No default year available for APOGEE_REDUX %s, need to set it by hand' % _APOGEE_REDUX))
    if year in (1, 2):
        return os.path.join(base, 'obs-summary-year12.csv')
    elif year == 3:
        return os.path.join(base, 'obs-summary-year123.csv')
    elif year == 5:
        return os.path.join(base, 'obs-summary-year45.csv')
    elif year == 7:
        # `is None` instead of the original `== None` identity-vs-equality slip.
        if hemisphere == 'north' or hemisphere is None:
            return os.path.join(base, 'obs-summary-year6-north.csv')
        elif hemisphere == 'south':
            return os.path.join(base, 'obs-summary-year6-south.csv')
        else:
            raise IOError('Must set hemisphere to north or south for year 6... (hemisphere = None returns north!)')
def adjust_learning_rate(optimizer, epoch):
    """Reset every param group's learning rate to the configured base LR.

    `epoch` is accepted for scheduler-API compatibility but is unused.
    Returns the learning rate that was applied.
    """
    base_lr = cfg.optimizer.lr
    for group in optimizer.param_groups:
        group['lr'] = base_lr
    return base_lr
def get_rir_cifar(num_classes, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build a ResNet-in-ResNet (RiR) model for CIFAR.

    Parameters: num_classes — classifier outputs; model_name — key for the
    pretrained-weights store (required when pretrained=True); pretrained —
    download and load pretrained weights; root — local weight cache dir.
    Returns the constructed CIFARRiR network.
    """
    # Fixed RiR configuration: three stages of widths 48/96/192.
    channels = [[48, 48, 48, 48], [96, 96, 96, 96, 96, 96], [192, 192, 192, 192, 192, 192]]
    init_block_channels = 48
    final_block_channels = 384
    net = CIFARRiR(channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, num_classes=num_classes, **kwargs)
    if pretrained:
        if ((model_name is None) or (not model_name)):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def save_checkpoint(state, filename='checkpoint'):
    """Serialize *state* to '<filename>.pth' via torch.save.

    An earlier variant split 'optimizer_state' into a separate file; that
    path was disabled with an `if (False and ...)` guard and has been
    removed as dead code.
    """
    filename = '{}.pth'.format(filename)
    torch.save(state, filename)
def _features2eigenvalues(features):
    """Eigenvalues of the Gram matrix features @ features^T."""
    gram_matrix = tf.matmul(features, features, transpose_b=True)
    # eigh returns (eigenvalues, eigenvectors); keep only the values.
    eigenvalues = tf.linalg.eigh(gram_matrix)[0]
    return eigenvalues
def get_root_logger(save_dir, log_level=logging.INFO, filename='log.txt'):
    """Configure and return the process-wide root logger.

    Adds a console handler (once) and, when save_dir is given, a file
    handler. Non-master distributed ranks are silenced to ERROR level.
    """
    logger = logging.getLogger()
    if not logger.hasHandlers():
        logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=log_level)
    rank = get_dist_info()[0]
    if rank != 0:
        # Only the master rank logs below ERROR on the console.
        logger.setLevel('ERROR')
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(log_level)
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s: %(message)s'))
        logger.addHandler(file_handler)
        if rank != 0:
            file_handler.setLevel('ERROR')
    return logger
def standard_laurent_ismember_filter(wsys, gpts, dim, points, rcotol=1e-06, evatol=1e-06, memtol=1e-06, verbose=True, tasks=0):
    """Filter *points*, keeping those that are well-conditioned and NOT
    members of the witness set (wsys, gpts) of dimension dim.

    A point whose inverse condition number exceeds rcotol is treated as a
    good isolated solution (hence not a member); otherwise membership is
    tested numerically via standard_laurent_ismember.
    """
    from phcpy.solutions import diagnostics
    result = []
    for point in points:
        # diagnostics() returns (err, rco, res); index 1 is the inverse
        # condition number estimate.
        rco = diagnostics(point)[1]
        if (rco > rcotol):
            (isgood, ismember) = (True, False)
        else:
            tst = standard_laurent_ismember(wsys, gpts, dim, point, evatol, memtol, verbose, tasks)
            (isgood, ismember) = tst
        if (isgood and (not ismember)):
            result.append(point)
    return result
class CosineAnnealingLR(object):
    """Stateful cosine-annealing learning-rate schedule (SGDR-style).

    Tracks the current rate itself rather than wrapping an optimizer;
    call step() once per epoch and apply the returned rate externally.
    """

    def __init__(self, T_max, eta_max=0.01, eta_min=0, last_epoch=(- 1)):
        self.T_max = T_max
        self.eta_max = eta_max
        self.eta_min = eta_min
        self.last_epoch = last_epoch
        self._cur_lr = eta_max

    def step(self):
        """Advance one epoch and return the updated learning rate."""
        self._cur_lr = self._get_lr()
        self.last_epoch += 1
        return self._cur_lr

    def _get_lr(self):
        epoch = self.last_epoch
        if epoch == 0:
            return self.eta_max
        # Restart boundary of the cosine cycle: jump back up by the
        # first half-step of the annealing range.
        if ((epoch - 1) - self.T_max) % (2 * self.T_max) == 0:
            delta = (self.eta_max - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2
            return self._cur_lr + delta
        # Closed-form recurrence between consecutive annealed values.
        ratio = (1 + math.cos(math.pi * epoch / self.T_max)) / (1 + math.cos(math.pi * (epoch - 1) / self.T_max))
        return ratio * (self._cur_lr - self.eta_min) + self.eta_min
class FEVERDocumentDatabase(DocDB):
    """SQLite-backed store of FEVER wiki documents."""

    def __init__(self, path=None):
        super().__init__(path)
        logger.info(f'Use FEVER db: {path}')

    # NOTE(review): this bare call looks like a mangled decorator for the
    # following method (likely "@lru_cache(maxsize=1000)" or similar); as
    # written it is a no-op call to an undefined `_cache` — confirm upstream.
    _cache(maxsize=1000)

    def get_doc_lines(self, doc_id):
        """Return the raw 'lines' column for doc_id, or None if not found."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT lines FROM documents WHERE id = ?', (utils.normalize(doc_id),))
        result = cursor.fetchone()
        cursor.close()
        # fetchone() yields a 1-tuple; unwrap unless the doc is missing.
        return (result if (result is None) else result[0])

    def get_doc_text(self, doc_id):
        """Join sentence texts (tab-field 1 of each stored line) with newlines."""
        lines = self.get_doc_lines(doc_id)
        if (lines is None):
            return None
        lines = lines.split('\n')
        return '\n'.join([line.split('\t')[1] for line in lines if (len(line.split('\t')) > 1)])

    def get_non_empty_doc_ids(self):
        """IDs of documents whose 'lines' column is non-blank."""
        cursor = self.connection.cursor()
        cursor.execute('SELECT id FROM documents WHERE length(trim(lines)) > 0')
        results = [r[0] for r in cursor.fetchall()]
        cursor.close()
        return results
class StableDiffusionLDM3DPipeline(metaclass=DummyObject):
    """Import-time placeholder that raises a helpful error (via
    requires_backends) when torch/transformers are not installed."""
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    # NOTE(review): upstream diffusers declares these two as @classmethod;
    # the decorators appear to have been stripped in this copy — confirm.
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def main():
    """Pre-train the speaker-ID softmax model on batches of audio features.

    Builds a CNN + softmax classifier over the unique speakers in the
    dataset catalog, then runs a fixed number of train_on_batch iterations,
    periodically evaluating and checkpointing.
    """
    batch_size = (c.BATCH_SIZE * c.TRIPLET_PER_BATCH)
    train_path = c.DATASET_DIR
    libri = data_catalog(train_path)
    files = list(libri['filename'])
    labels1 = list(libri['speaker_id'])
    # Bidirectional speaker-label <-> integer-id maps.
    labels_to_id = {}
    id_to_labels = {}
    i = 0
    for label in np.unique(labels1):
        labels_to_id[label] = i
        id_to_labels[i] = label
        i += 1
    no_of_speakers = len(np.unique(labels1))
    (train_data, test_data) = split_data(files, labels1, batch_size)
    batchloader = batchTrainingImageLoader(train_data, labels_to_id, no_of_speakers, batch_size=batch_size)
    testloader = batchTestImageLoader(test_data, labels_to_id, no_of_speakers, batch_size=batch_size)
    test_steps = int((len(test_data) / batch_size))
    # Pull one test batch up-front to discover the input shape.
    (x_test, y_test) = testloader.__next__()
    b = x_test[0]
    num_frames = b.shape[0]
    logging.info('num_frames = {}'.format(num_frames))
    logging.info('batch size: {}'.format(batch_size))
    logging.info('x_shape:{0}, y_shape:{1}'.format(x_test.shape, y_test.shape))
    base_model = convolutional_model(input_shape=x_test.shape[1:], batch_size=batch_size, num_frames=num_frames)
    # Softmax classification head on top of the embedding backbone.
    x = base_model.output
    x = Dense(no_of_speakers, activation='softmax', name='softmax_layer')(x)
    model = Model(base_model.input, x)
    logging.info(model.summary())
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    print('printing format per batch:', model.metrics_names)
    grad_steps = 0
    last_checkpoint = utils.get_last_checkpoint_if_any(c.PRE_CHECKPOINT_FOLDER)
    # NOTE(review): this assignment makes the resume branch below dead code —
    # it looks like a temporary debugging override; confirm intent.
    last_checkpoint = None
    if (last_checkpoint is not None):
        logging.info('Found checkpoint [{}]. Resume from here...'.format(last_checkpoint))
        model.load_weights(last_checkpoint)
        # Checkpoint filenames encode the step count as the 2nd-to-last '_' field.
        grad_steps = int(last_checkpoint.split('_')[(- 2)])
        logging.info('[DONE]')
    orig_time = time()
    Num_Iter = 100000
    current_iter = 0
    while (current_iter < Num_Iter):
        current_iter += 1
        orig_time = time()
        (x_train, y_train) = batchloader.__next__()
        [loss, acc] = model.train_on_batch(x_train, y_train)
        logging.info('Train Steps:{0}, Time:{1:.2f}s, Loss={2}, Accuracy={3}'.format(grad_steps, (time() - orig_time), loss, acc))
        with open((c.PRE_CHECKPOINT_FOLDER + '/train_loss_acc.txt'), 'a') as f:
            f.write('{0},{1},{2}\n'.format(grad_steps, loss, acc))
        # Periodic evaluation over the whole test split.
        if ((grad_steps % c.TEST_PER_EPOCHS) == 0):
            losses = []
            accs = []
            for ss in range(test_steps):
                [loss, acc] = model.test_on_batch(x_test, y_test)
                (x_test, y_test) = testloader.__next__()
                losses.append(loss)
                accs.append(acc)
            loss = np.mean(np.array(losses))
            acc = np.mean(np.array(accs))
            print('loss', loss, 'acc', acc)
            logging.info('Test the Data Steps:{0}, Loss={1}, Accuracy={2}, '.format(grad_steps, loss, acc))
            with open((c.PRE_CHECKPOINT_FOLDER + '/test_loss_acc.txt'), 'a') as f:
                f.write('{0},{1},{2}\n'.format(grad_steps, loss, acc))
        # Periodic checkpointing (old contents of the folder are cleared).
        if ((grad_steps % c.SAVE_PER_EPOCHS) == 0):
            utils.create_dir_and_delete_content(c.PRE_CHECKPOINT_FOLDER)
            model.save_weights('{0}/model_{1}_{2:.5f}.h5'.format(c.PRE_CHECKPOINT_FOLDER, grad_steps, loss))
        grad_steps += 1
def load_layer_wise_quantized_model(path):
    """Reassemble a model whose modules were quantized and saved one per file.

    Loads the architecture from 'model_arch.pt', then swaps in every module
    for which a matching '<module-name>.pt' exists in *path*. Returns the
    model in eval mode.
    """
    model = torch.load(os.path.join(path, 'model_arch.pt'))
    for name, _ in model.named_modules():
        candidate = name + '.pt'
        if candidate in os.listdir(path):
            replacement = torch.load(os.path.join(path, candidate))
            update_module(model, name, replacement)
    model.eval()
    return model
def get_ID_task_avg_score(result_dict):
    """Average the per-task scores in *result_dict*, skipping the
    'best_model_dir' bookkeeping entry.

    Raises ZeroDivisionError when there are no score entries (unchanged
    from the original behavior). Fixes the original's shadowing of the
    builtin `sum`.
    """
    scores = [v for k, v in result_dict.items() if k != 'best_model_dir']
    return sum(scores) / len(scores)
class LoadSaveStrategyTest(unittest.TestCase):
    """Tests for Strategy (de)serialization and cluster-aware adjustment."""

    # NOTE(review): this bare tuple looks like a mangled decorator — likely
    # "@unittest.skipIf(torch.cuda.is_available(), 'Skip on gpu as cpu test
    # covers it.')". As written it is a no-op expression; confirm upstream.
    (torch.cuda.is_available(), 'Skip on gpu as cpu test covers it.')

    def test_load_save_strategy(self):
        # Round-trip via file, via pickled bytes, and a bad-filename failure.
        pg_info = ([('data', 2)], None)
        strategy = Strategy([('parallel_mode', pg_info, False), ('amp_native', None, False)])
        (_, filename) = tempfile.mkstemp(suffix='st')
        save_strategy(strategy, filename)
        (status, loaded_strategy) = get_strategy(filename)
        self.assertTrue(status)
        self.assertEqual(loaded_strategy, strategy)
        data = pickle.dumps(strategy)
        (status, loaded_strategy) = get_strategy(data)
        self.assertTrue(status)
        self.assertEqual(loaded_strategy, strategy)
        (status, _) = get_strategy('_bad_filename.st')
        self.assertFalse(status)
        easydl_strategy = strategy.convert_strategy_to_easydl_format()
        self.assertEqual(len(easydl_strategy), 2)

    # NOTE(review): same apparent mangled skipIf decorator as above.
    (torch.cuda.is_available(), 'Skip on gpu as cpu test covers it.')

    def test_adjust_strategy(self):
        # Adjustment should rescale the data-parallel size to the cluster.
        pg_info = ([('model', 2), ('data', 2)], None)
        strategy = Strategy([('parallel_mode', pg_info, False), ('amp_native', None, False)])
        device_context = get_device_context()
        device_context._node_num = 2
        device_context._nproc_per_node = 8
        opt_lib = OptimizationLibrary()
        finetune_strategy = False
        (status, strategy) = adjust_strategy(strategy, device_context, finetune_strategy, opt_lib)
        self.assertTrue(status)
        data_parallel_size = 1
        (found, p_mode) = strategy.get_parallel_mode()
        self.assertTrue(found)
        for (name, size) in p_mode[0]:
            if (name == 'data'):
                data_parallel_size = size
        # 2 nodes x 8 procs / model-parallel 2 -> data-parallel 8.
        self.assertEqual(data_parallel_size, 8)
        # Too few devices for the model-parallel degree must fail.
        device_context._node_num = 1
        device_context._nproc_per_node = 1
        (status, _) = adjust_strategy(strategy, device_context, finetune_strategy, opt_lib)
        self.assertFalse(status)
        device_context._node_num = 10
        device_context._nproc_per_node = 2
        finetune_strategy = True
        # get_strategy also accepts a plain list of method names.
        strategy = ['parallel_mode', 'amp_native']
        (status, strategy) = get_strategy(strategy)
        self.assertTrue(status)
        (status, strategy) = adjust_strategy(strategy, device_context, finetune_strategy, opt_lib)
        self.assertTrue(status)
        data_parallel_size = 1
        (_, p_mode) = strategy.get_parallel_mode()
        for (name, size) in p_mode[0]:
            if (name == 'data'):
                data_parallel_size = size
        self.assertEqual(data_parallel_size, 20)
        # Distributed methods without extra config are removable...
        strategy = Strategy([('amp_native', None, False), ('module_replace', None, False), ('tensor_parallel', None, True), ('fsdp', None, False)])
        removed_items = strategy.remove_distributed_method(opt_lib)
        self.assertEqual(len(strategy), 2)
        self.assertEqual(len(removed_items), 2)
        # ...but fsdp with explicit config is retained.
        strategy = Strategy([('amp_native', None, False), ('module_replace', None, False), ('tensor_parallel', None, True), ('fsdp', {'cpu_offload': True}, False)])
        removed_items = strategy.remove_distributed_method(opt_lib)
        self.assertEqual(len(strategy), 3)
        self.assertEqual(len(removed_items), 1)

    def test_load_save_with_api(self):
        # Full save/load exercised in a distributed subprocess.
        run_dist_code('save_load')
def _extract_variable_from_kwargs(kwargs, name): variable_value = kwargs.get(name, None) if variable_value: kwargs[name] = None return (variable_value, kwargs)
class HernquistPotential(DehnenSphericalPotential):
    """Hernquist (1990) potential — the alpha=1 member of the Dehnen sphere
    family — with closed-form forces, second derivatives, surface density,
    and enclosed mass. Quantities are in internal (amplitude-free) units
    unless noted."""

    def __init__(self, amp=1.0, a=1.0, normalize=False, ro=None, vo=None):
        # Delegate to the Dehnen sphere with alpha pinned to 1.
        DehnenSphericalPotential.__init__(self, amp=amp, a=a, alpha=1, normalize=normalize, ro=ro, vo=vo)
        self._nemo_accname = 'Dehnen'
        # C implementations exist for the potential, its derivatives, and density.
        self.hasC = True
        self.hasC_dxdv = True
        self.hasC_dens = True
        return None

    def _evaluate(self, R, z, phi=0.0, t=0.0):
        # Phi(r) = -1 / (2 a (1 + r/a)).
        return ((((- 1.0) / (1.0 + (numpy.sqrt(((R ** 2.0) + (z ** 2.0))) / self.a))) / 2.0) / self.a)

    def _Rforce(self, R, z, phi=0.0, t=0.0):
        sqrtRz = numpy.sqrt(((R ** 2.0) + (z ** 2.0)))
        return ((((((- R) / self.a) / sqrtRz) / ((1.0 + (sqrtRz / self.a)) ** 2.0)) / 2.0) / self.a)

    def _zforce(self, R, z, phi=0.0, t=0.0):
        sqrtRz = numpy.sqrt(((R ** 2.0) + (z ** 2.0)))
        return ((((((- z) / self.a) / sqrtRz) / ((1.0 + (sqrtRz / self.a)) ** 2.0)) / 2.0) / self.a)

    def _rforce_jax(self, r):
        # Radial force with the amplitude applied explicitly (JAX code path).
        return (((- self._amp) / 2.0) / ((r + self.a) ** 2.0))

    def _R2deriv(self, R, z, phi=0.0, t=0.0):
        sqrtRz = numpy.sqrt(((R ** 2.0) + (z ** 2.0)))
        return (((((self.a * (z ** 2.0)) + (((z ** 2.0) - (2.0 * (R ** 2.0))) * sqrtRz)) / (sqrtRz ** 3.0)) / ((self.a + sqrtRz) ** 3.0)) / 2.0)

    def _Rzderiv(self, R, z, phi=0.0, t=0.0):
        sqrtRz = numpy.sqrt(((R ** 2.0) + (z ** 2.0)))
        return (((((- R) * z) * (self.a + (3.0 * sqrtRz))) * ((sqrtRz * (self.a + sqrtRz)) ** (- 3.0))) / 2.0)

    def _surfdens(self, R, z, phi=0.0, t=0.0):
        r = numpy.sqrt(((R ** 2.0) + (z ** 2.0)))
        # Complex sqrt keeps the R < a branch well defined (arctan of a
        # complex argument); the final .real discards the residual.
        Rma = numpy.sqrt((((R ** 2.0) - (self.a ** 2.0)) + 0j))
        if (Rma == 0.0):
            # Series limit at exactly R == a.
            return (((((((- 12.0) * (self.a ** 3)) - ((5.0 * self.a) * (z ** 2))) + (numpy.sqrt((1.0 + ((z ** 2) / (self.a ** 2)))) * (((12.0 * (self.a ** 3)) - (self.a * (z ** 2))) + ((2 / self.a) * (z ** 4))))) / 30.0) / numpy.pi) * (z ** (- 5.0)))
        else:
            return (((self.a * (((((2.0 * (self.a ** 2.0)) + (R ** 2.0)) * (Rma ** (- 5))) * (numpy.arctan((z / Rma)) - numpy.arctan((((self.a * z) / r) / Rma)))) + (((z * ((((((5.0 * (self.a ** 3.0)) * r) - (4.0 * (self.a ** 4))) + ((self.a ** 2) * ((2.0 * (r ** 2.0)) + (R ** 2)))) - ((self.a * r) * ((5.0 * (R ** 2.0)) + (3.0 * (z ** 2.0))))) + ((R ** 2.0) * (r ** 2.0)))) / (((self.a ** 2.0) - (R ** 2.0)) ** 2.0)) / (((r ** 2) - (self.a ** 2.0)) ** 2.0))).real) / 4.0) / numpy.pi)

    def _mass(self, R, z=None, t=0.0):
        # Spherical enclosed mass only; cylindrical z-cuts are unsupported.
        if (z is not None):
            raise AttributeError
        return ((1.0 / ((1.0 + (self.a / R)) ** 2.0)) / 2.0)

    # NOTE(review): bare name below looks like a mangled decorator (galpy's
    # kpc/Gyr unit-conversion decorator for _nemo_accpars) — confirm upstream.
    _to_kpcGyrDecorator
    def _nemo_accpars(self, vo, ro):
        """NEMO accpars string: GM and scale radius in physical units."""
        GM = (((self._amp * (vo ** 2.0)) * ro) / 2.0)
        return f'0,1,{GM},{(self.a * ro)},0'
class TestExtractVideoFrames(unittest.TestCase):
    """Tests for the video-frame extraction tool and its CLI parser."""

    def test_extract_video_frames(self):
        # Clean slate: drop any frames directory left over from a prior run.
        try:
            shutil.rmtree((TEST_FRAMES_DIR / 'video_frames'))
        except FileNotFoundError:
            pass
        extract_video_frames((TEST_FRAMES_DIR / 'video.mp4'))
        # Extraction should create a '<stem>_frames' sibling directory.
        self.assertTrue((TEST_FRAMES_DIR / 'video_frames').is_dir())
        shutil.rmtree((TEST_FRAMES_DIR / 'video_frames'))

    def test_parse_args(self):
        args = parse_args(['--input-path', TEST_FRAMES_DIR.as_posix()])
        self.assertIn('frames', args.input_path)
def parse_config_dict(args, config_dict):
    """Apply command-line overrides onto the loaded config dict.

    Only overrides that were actually provided (not None) are applied.
    Returns the (mutated) config_dict.
    """
    # (cli attribute, config section) pairs that may be overridden.
    overrides = (
        ('save_exp_code', 'exp_arguments'),
        ('overlap', 'patching_arguments'),
    )
    for attr, section in overrides:
        value = getattr(args, attr)
        if value is not None:
            config_dict[section][attr] = value
    return config_dict
def _restore_attributes_(gm: GraphModule, attributes: Dict[(str, Any)]):
    """Re-attach previously saved attributes onto the graph module in place."""
    for attr_name in attributes:
        setattr(gm, attr_name, attributes[attr_name])
class GroupedIterator(object):
    """Wrap a sized iterable, yielding fixed-size chunks (last may be short)."""

    def __init__(self, iterable, chunk_size):
        # Total number of chunks: ceil(len / chunk_size).
        self._len = int(math.ceil(len(iterable) / float(chunk_size)))
        # iter() is a no-op on iterators but makes plain sequences (lists,
        # tuples) work too — the original stored the iterable directly and
        # next() then failed on non-iterator inputs.
        self.itr = iter(iterable)
        self.chunk_size = chunk_size

    def __len__(self):
        return self._len

    def __iter__(self):
        return self

    def __next__(self):
        chunk = []
        try:
            for _ in range(self.chunk_size):
                chunk.append(next(self.itr))
        except StopIteration as e:
            # A partial final chunk is still yielded; only re-raise when the
            # underlying iterator was already exhausted.
            if len(chunk) == 0:
                raise e
        return chunk
def make_loss(cfg, num_classes):
    """Compose the re-ID training loss: weighted ID (cross-entropy) loss plus
    a weighted metric loss (triplet / contrastive / none) per config.

    Returns loss_func(score, feat, target).
    """
    if (cfg.MODEL.METRIC_LOSS_TYPE == 'triplet'):
        metric_loss_func = TripletLoss(cfg.SOLVER.MARGIN, cfg.SOLVER.HARD_EXAMPLE_MINING_METHOD)
    elif (cfg.MODEL.METRIC_LOSS_TYPE == 'contrastive'):
        metric_loss_func = ContrastiveLoss(cfg.SOLVER.MARGIN)
    elif (cfg.MODEL.METRIC_LOSS_TYPE == 'none'):
        def metric_loss_func(feat, target):
            # Disabled metric loss contributes zero.
            return 0
    else:
        # NOTE(review): unsupported types only print a warning, leaving
        # metric_loss_func undefined and causing a NameError when loss_func
        # first runs — confirm whether this should raise instead.
        print('got unsupported metric loss type {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
    if (cfg.MODEL.IF_LABELSMOOTH == 'on'):
        id_loss_func = CrossEntropyLabelSmooth(num_classes=num_classes)
        print('label smooth on, numclasses:', num_classes)
    else:
        id_loss_func = F.cross_entropy

    def loss_func(score, feat, target):
        # Weighted sum of classification and metric-learning objectives.
        return ((cfg.MODEL.ID_LOSS_WEIGHT * id_loss_func(score, target)) + (cfg.MODEL.TRIPLET_LOSS_WEIGHT * metric_loss_func(feat, target)))
    return loss_func
class CNN_4Layer(nn.Module):
    """Four stacked conv3x3 blocks used as an embedding backbone
    (few-shot-learning style: accepts arbitrary leading batch dims)."""

    def __init__(self, in_channels, out_channels=64, hidden_size=64):
        super(CNN_4Layer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_size = hidden_size
        self.encoder = nn.Sequential(conv3x3(in_channels, hidden_size), conv3x3(hidden_size, hidden_size), conv3x3(hidden_size, hidden_size), conv3x3(hidden_size, out_channels))

    def forward(self, inputs):
        # Collapse all leading dims into one batch axis of (C, H, W) images.
        embeddings = self.encoder(inputs.view((- 1), *inputs.shape[(- 3):]))
        # Restore leading dims; flatten channel/spatial dims to one vector.
        return embeddings.view(*inputs.shape[:(- 3)], (- 1))
def select_topk(indices, query, gallery, topk=10):
    """Build per-query retrieval records: [query id, top-k gallery ids].

    indices is a (num_query, num_gallery) ranking matrix; query/gallery are
    sequences whose element[0] is the record's identifier (e.g. image path).
    Fixes the original's shadowing of the builtin `id`.
    """
    results = []
    for i in range(indices.shape[0]):
        top_ids = indices[i][:topk]
        results.append([query[i][0]] + [gallery[idx][0] for idx in top_ids])
    return results
def check_onnx(model_path, dataloader):
    """Smoke-test an exported ONNX model by running one batch on CPU.

    Label entries are dropped from the feed dict; remaining entries are
    converted to numpy arrays. Returns True if the session runs without
    raising. Fixes the original's shadowing of the builtin `input`.
    """
    import onnxruntime as ort
    import numpy as np
    ort_session = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
    batch = next(iter(dataloader))
    # Snapshot the keys: the dict is mutated (pop) while looping.
    for key in list(batch.keys()):
        if 'label' in key:
            batch.pop(key)
        else:
            batch[key] = np.array(batch[key])
    ort_session.run(None, batch)
    return True
def download_model(url, model_name, retry_times=5):
    """Download and extract a model archive, retrying on failure.

    Returns model_name when it is already an extracted directory, True when
    an existing archive was (re)extracted, False on user interrupt, and
    otherwise whether the download succeeded within retry_times attempts.
    """
    if os.path.isdir(model_name):
        # Already extracted.
        return model_name
    elif os.path.exists(model_name) and is_tar_gz_file(model_name):
        print('file downloaded')
        extrafile(model_name)
        return True
    print('download model...')
    retries = 0
    while retries < retry_times:
        try:
            request.urlretrieve(url, model_name, schedule)
            extrafile(model_name)
            break
        except KeyboardInterrupt:
            # User abort: give up immediately.
            return False
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and other
            # BaseExceptions are no longer swallowed.
            retries += 1
            print(f"Download failed{(', Retry downloading...' if (retries < retry_times) else '!')}")
    return retries < retry_times
# NOTE(review): this bare call looks like a mangled registration decorator
# (likely "@BACKBONE_REGISTRY.register()") — confirm against upstream.
_REGISTRY.register()
def build_fcos_dla_fpn_backbone(cfg, input_shape: ShapeSpec):
    """Build a DLA-34 + FPN backbone for FCOS.

    cfg.MODEL.FCOS.TOP_LEVELS selects how many extra pyramid levels (P6/P7)
    are stacked on top of P5. Layer freezing is not supported for DLA.
    """
    assert (cfg.MODEL.BACKBONE.FREEZE_AT == (- 1)), 'Freezing layers does not be supported for DLA'
    depth_to_creator = {'DLA34': dla34}
    bottom_up = depth_to_creator[cfg.MODEL.DLA.CONV_BODY](cfg)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    top_levels = cfg.MODEL.FCOS.TOP_LEVELS
    in_channels_top = out_channels
    # Optional extra pyramid levels on top of P5.
    if (top_levels == 2):
        top_block = LastLevelP6P7(in_channels_top, out_channels, 'p5')
    elif (top_levels == 1):
        top_block = LastLevelP6(in_channels_top, out_channels, 'p5')
    elif (top_levels == 0):
        top_block = None
    else:
        raise NotImplementedError()
    backbone = FPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=top_block, fuse_type=cfg.MODEL.FPN.FUSE_TYPE)
    return backbone
class UniSpeechForCTC(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error (via
    requires_backends) when the torch backend is unavailable."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Trainer():
    """Drives AlphaNPI-style training: MCTS episodes fill a replay buffer,
    the policy trains on sampled batches, and validation episodes measure
    per-task reward.

    NOTE(review): trace tuple indices (7 = task reward, 3 = observations,
    10 = failed-program indices) mirror MCTS.sample_execution_trace's return
    layout -- confirm against that implementation.
    """

    def __init__(self, environment, policy, replay_buffer, curriculum_scheduler,
                 mcts_train_params, mcts_test_params, num_validation_episodes,
                 num_episodes_per_task, batch_size, num_updates_per_episode,
                 verbose=True):
        self.env = environment
        self.policy = policy
        self.buffer = replay_buffer
        self.curriculum_scheduler = curriculum_scheduler
        self.mcts_train_params = mcts_train_params
        self.mcts_test_params = mcts_test_params
        self.num_validation_episodes = num_validation_episodes
        self.num_episodes_per_task = num_episodes_per_task
        self.batch_size = batch_size
        self.num_updates_per_episode = num_updates_per_episode
        self.verbose = verbose

    def perform_validation_step(self, task_index):
        """Run validation episodes for one task.

        Returns (rewards, trace lengths, failed-program indices of the last
        episode -- kept as-is from the original behavior).
        """
        validation_rewards = []
        traces_lengths = []
        for _ in range(self.num_validation_episodes):
            mcts = MCTS(self.policy, self.env, task_index, **self.mcts_test_params)
            trace = mcts.sample_execution_trace()
            task_reward, trace_length, progs_failed_indices = trace[7], len(trace[3]), trace[10]
            validation_rewards.append(task_reward)
            traces_lengths.append(trace_length)
        return (validation_rewards, traces_lengths, progs_failed_indices)

    def play_iteration(self, task_index, verbose=False):
        """Play training episodes on one task, storing clean traces and
        running policy updates once the buffer holds enough samples."""
        task_name = self.env.get_program_from_index(task_index)
        if self.verbose:
            print('Attempt task {} for {} episodes'.format(task_name, self.num_episodes_per_task))
        for episode in range(self.num_episodes_per_task):
            mcts = MCTS(self.policy, self.env, task_index, **self.mcts_train_params)
            res = mcts.sample_execution_trace()
            (observations, prog_indices, previous_actions_indices, policy_labels,
             lstm_states, _, _, task_reward, clean_sub_execution, rewards,
             programs_failed_indices, programs_failed_initstates) = res
            if clean_sub_execution:
                # only traces whose sub-programs all succeeded are kept
                trace = list(zip(observations, prog_indices, lstm_states, policy_labels, rewards))
                self.buffer.append_trace(trace)
            elif self.verbose:
                print('Trace has not been stored in buffer.')
            if self.buffer.get_memory_length() > self.batch_size:
                for _ in range(self.num_updates_per_episode):
                    batch = self.buffer.sample_batch(self.batch_size)
                    if batch is not None:
                        self.policy.train_on_batch(batch)
            if verbose:
                print('Done episode {}/{}'.format(episode + 1, self.num_episodes_per_task))

    def perform_validation(self):
        """Validate every maximum-level task and feed rewards back to the
        curriculum scheduler."""
        if self.verbose:
            print('Start validation .....')
        for idx in self.curriculum_scheduler.get_tasks_of_maximum_level():
            v_rewards, v_lengths, programs_failed_indices = self.perform_validation_step(idx)
            self.curriculum_scheduler.update_statistics(idx, v_rewards)
class Net(nn.Module):
    """Small MLP binary classifier: 50 -> fc1 -> fc2 -> 1 with sigmoid output."""

    def __init__(self, dropout, fc1_size, fc2_size):
        super().__init__()
        self.fc1 = nn.Linear(50, fc1_size)
        self.relu1 = nn.ReLU()
        self.dout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(fc1_size, fc2_size)
        self.prelu = nn.PReLU(1)
        self.out = nn.Linear(fc2_size, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input_):
        hidden = self.dout(self.relu1(self.fc1(input_)))
        hidden = self.prelu(self.fc2(hidden))
        # sigmoid squashes the logit into (0, 1)
        return self.out_act(self.out(hidden))
def define_stochastic_G(nlatent, input_nc, output_nc, ngf, norm='instance',
                        which_model_netG='resnet', use_dropout=False, gpu_ids=[]):
    """Build the conditional-instance-norm ResNet generator and initialize its
    weights.

    NOTE(review): `norm` and `which_model_netG` are accepted but ignored here
    (CondInstanceNorm / CINResnetGenerator are always used) -- kept for
    interface compatibility with sibling factory functions.
    """
    use_gpu = len(gpu_ids) > 0
    if use_gpu:
        assert torch.cuda.is_available()
    # removed dead `netG = None` pre-assignment and the duplicated gpu check
    netG = CINResnetGenerator(nlatent, input_nc, output_nc, ngf,
                              norm_layer=CondInstanceNorm,
                              use_dropout=use_dropout, n_blocks=9,
                              gpu_ids=gpu_ids)
    if use_gpu:
        netG.cuda()
    netG.apply(weights_init)
    return netG
def find_traj_with_fix_length(start_index, time_stamp_list, time_stamp_pose_dict):
    """Walk forward from `start_index`, accumulating pairwise pose distances,
    and return the first index where the path length reaches the module-level
    TRAJ_LENGTH; -1 if the trajectory never gets that long."""
    accumulated = 0.0
    for idx in range(start_index, len(time_stamp_list) - 1):
        accumulated += distance(time_stamp_pose_dict[time_stamp_list[idx]],
                                time_stamp_pose_dict[time_stamp_list[idx + 1]])
        if accumulated >= TRAJ_LENGTH:
            return idx
    return -1
def make_kl_with_gaussian_prior(weight_decay, temperature=1.0):
    """Build a function computing the (negated, temperature-scaled) KL
    divergence between a mean-field Gaussian variational posterior and a
    zero-mean Gaussian prior with sigma = sqrt(1 / weight_decay).

    `params` must be a pytree dict with 'mean' and 'inv_softplus_std' leaves
    of matching structure. The sign convention (-KL * temperature) is kept
    from the original (suitable for adding to a log-likelihood objective).
    """
    def kl_fn(params):
        sigma_prior = jnp.sqrt(1 / weight_decay)
        mu_vi_tree = params['mean']
        sigma_vi_tree = jax.tree_util.tree_map(jax.nn.softplus, params['inv_softplus_std'])

        def get_parameter_kl(mu_vi, sigma_vi):
            # closed-form KL(N(mu, sigma) || N(0, sigma_prior)) per parameter
            return (jnp.log(sigma_prior / sigma_vi)
                    + ((sigma_vi ** 2 + mu_vi ** 2) / 2) / (sigma_prior ** 2)
                    - 1 / 2)

        # jax.tree_multimap was deprecated and removed; tree_map handles
        # multiple trees. Also dropped the unused n_params computation.
        kl_tree = jax.tree_util.tree_map(get_parameter_kl, mu_vi_tree, sigma_vi_tree)
        kl = sum(p_kl.sum() for p_kl in jax.tree_util.tree_leaves(kl_tree))
        return (- kl) * temperature
    return kl_fn
def is_module_wrapper(module):
    """Return True if `module` is an instance of any class registered in
    MODULE_WRAPPERS (e.g. DataParallel-style wrappers)."""
    wrapper_classes = tuple(MODULE_WRAPPERS.module_dict.values())
    return isinstance(module, wrapper_classes)
def kconv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1), activation=None):
    """Initial conv block: asymmetric zero-pad, bias-free conv, batch-norm,
    then the given activation (ReLU6 when `activation` is None).

    `filters` is scaled by the width multiplier `alpha` before use.
    """
    bn_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    scaled_filters = int(filters * alpha)
    x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
    x = layers.Conv2D(scaled_filters, kernel, padding='valid', use_bias=False,
                      strides=strides, name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='conv1_bn')(x)
    if activation is None:
        return layers.ReLU(6.0, name='conv1_relu')(x)
    return layers.Activation(activation=activation, name='conv1_act')(x)
def pad(x: Tensor, p: int = 2 ** (4 + 3)) -> Tuple[Tensor, Tuple[int, ...]]:
    """Replicate-pad an (N, C, H, W) tensor so both spatial dims become
    multiples of `p` (default 128), centering the original content.

    Returns the padded tensor and the (left, right, top, bottom) padding
    amounts, so the operation can be undone later.
    """
    h, w = x.size(2), x.size(3)
    # round each spatial dim up to the next multiple of p
    new_h = (h + p - 1) // p * p
    new_w = (w + p - 1) // p * p
    left = (new_w - w) // 2
    right = new_w - w - left
    top = (new_h - h) // 2
    bottom = new_h - h - top
    padding = (left, right, top, bottom)
    return F.pad(x, padding, mode='replicate'), padding
def mnasnet0_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MNASNet:
    """Construct an MNASNet with depth multiplier 0.5, optionally loading
    pretrained weights (progress bar controlled by `progress`)."""
    net = MNASNet(0.5, **kwargs)
    if pretrained:
        _load_pretrained('mnasnet0_5', net, progress)
    return net
class CompletionStreamResponse(BaseModel):
    """One streamed chunk of an OpenAI-style text-completion response."""

    # unique response id, generated lazily per instance
    id: str = Field(default_factory=(lambda: f'cmpl-{random_uuid()}'))
    object: str = 'text_completion'
    # unix timestamp taken at creation time
    created: int = Field(default_factory=(lambda: int(time.time())))
    model: str
    choices: List[CompletionResponseStreamChoice]
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Output length of a 1-D convolution axis.

    `pad` may be 'valid', 'full', 'same', or an explicit integer padding.
    Returns None when `input_length` is None (unknown dimension).

    Raises:
        ValueError: for any other `pad` value.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        length = input_length - filter_size + 1
    elif pad == 'full':
        length = input_length + filter_size - 1
    elif pad == 'same':
        length = input_length
    elif isinstance(pad, int):
        length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # ceil-divide by stride
    return (length + stride - 1) // stride
class Logger(object):
    """Rank-0 TensorBoard logger: scalars, audio, spectrogram images, config
    dumps and checkpoints. Non-zero ranks construct it cheaply and every
    logging method becomes a no-op for them."""

    def __init__(self, config, rank=0):
        self.rank = rank
        self.summary_writer = None
        self.continue_training = config.training_config.continue_training
        self.logdir = config.training_config.logdir
        self.sample_rate = config.data_config.sample_rate
        if self.rank == 0:
            # refuse to clobber an existing logdir on a fresh run
            if (not self.continue_training) and os.path.exists(self.logdir):
                raise RuntimeError(f"You're trying to run training from scratch, but logdir `{self.logdir} already exists. Remove it or specify new one.`")
            if not self.continue_training:
                os.makedirs(self.logdir)
            self.summary_writer = SummaryWriter(config.training_config.logdir)
            self.save_model_config(config)

    def _log_losses(self, iteration, loss_stats: dict):
        # one scalar entry per loss key
        for key, value in loss_stats.items():
            self.summary_writer.add_scalar(key, value, iteration)

    def log_training(self, iteration, stats, verbose=True):
        """Log training losses under the 'training/' namespace."""
        if self.rank != 0:
            return
        stats = {f'training/{key}': value for key, value in stats.items()}
        self._log_losses(iteration, loss_stats=stats)
        show_message(f'Iteration: {iteration} | Losses: {[value for value in stats.values()]}', verbose=verbose)

    def log_test(self, iteration, stats, verbose=True):
        """Log evaluation losses under the 'test/' namespace."""
        if self.rank != 0:
            return
        stats = {f'test/{key}': value for key, value in stats.items()}
        self._log_losses(iteration, loss_stats=stats)
        show_message(f'Iteration: {iteration} | Losses: {[value for value in stats.values()]}', verbose=verbose)

    def log_audios(self, iteration, audios: dict):
        if self.rank != 0:
            return
        for key, audio in audios.items():
            self.summary_writer.add_audio(key, audio, iteration, sample_rate=self.sample_rate)

    def log_specs(self, iteration, specs: dict):
        if self.rank != 0:
            return
        for key, image in specs.items():
            self.summary_writer.add_image(key, plot_tensor_to_numpy(image), iteration, dataformats='HWC')

    def save_model_config(self, config):
        """Dump the run configuration next to the TensorBoard events."""
        if self.rank != 0:
            return
        with open(f'{self.logdir}/config.json', 'w') as f:
            json.dump(config.to_dict_type(), f)

    def save_checkpoint(self, iteration, model, optimizer=None):
        """Persist model (and optionally optimizer) state for `iteration`."""
        if self.rank != 0:
            return
        state = {'iteration': iteration, 'model': model.state_dict()}
        if optimizer is not None:
            state['optimizer'] = optimizer.state_dict()
        torch.save(state, f'{self.summary_writer.log_dir}/checkpoint_{iteration}.pt')

    def load_latest_checkpoint(self, model, optimizer=None):
        """Restore the most recent checkpoint from the logdir (requires
        continue_training=true in the configuration)."""
        if not self.continue_training:
            raise RuntimeError(f'Trying to load the latest checkpoint from logdir {self.logdir}, but did not set `continue_training=true` in configuration.')
        model, optimizer, iteration = load_latest_checkpoint(self.logdir, model, optimizer)
        return model, optimizer, iteration
def get_dropout_layer(dropout=None):
    """Return a single-element list [nn.Dropout(p=dropout)] when a rate is
    given, else an empty list (handy for building Sequential layer lists)."""
    if dropout is None:
        return []
    return [nn.Dropout(p=dropout)]
def Tanh(data, name=None):
    """Apply an element-wise tanh symbol; auto-generates a layer name via
    GetLayerName when none is supplied."""
    layer_name = GetLayerName.get('tanh') if name is None else name
    return mx.sym.tanh(data, name=layer_name)
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[(Type[Dataset], List[List[Dict]])]:
    """Remove near-duplicate entries from `dataset` via MinHash clustering.

    Keeps one "extreme" representative per duplicate cluster, filters the
    rest, annotates each cluster element with is_extreme/copies, and prints
    summary statistics. Returns the filtered dataset and the clusters.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    # pick the representatives to keep for each cluster
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    # drop every duplicate that is not a kept representative
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element['is_extreme']:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')
    return (ds_filter, duplicate_clusters)
def test_generate_fangraphs_teams() -> None:
    """_generate_teams must write the CSV exactly once and map each franchise
    (through 2019) to a single FanGraphs team id."""
    with patch.object(pd.DataFrame, 'to_csv', MagicMock()) as to_csv_mock:
        result = _generate_teams()
        assert result is not None
        assert not result.empty
        result = result.query('yearID <= 2019')
        assert len(result.columns) == 7
        to_csv_mock.assert_called_once_with(_DATA_FILENAME)
        for franch_id in set(result['franchID'].values):
            teams = result.query(f"franchID == '{franch_id}'")
            franchises = teams.groupby('teamIDfg').size()
            # print diagnostics before failing so the offending rows are visible
            if len(franchises.index) > 1:
                print('franch_id', franch_id)
                print('franchises', franchises)
                print(teams)
            assert len(franchises.index) == 1
def test_ocr_reader_are_singletons():
    """Repeated construction must always yield the same singleton instance."""
    first = DummyOCRReader()
    second = DummyOCRReader()
    third = DummyOCRReader()
    assert first is second
    assert first is third
# NOTE(review): the original read `(deadline=None) (params=example_case_sampling())
# def test_c_eval(params):` -- invalid syntax. These are almost certainly hypothesis
# decorators whose `@settings`/`@given` heads were lost in transit; restored below.
# Confirm against version history.
@settings(deadline=None)
@given(params=example_case_sampling())
def test_c_eval(params):
    """Check the compiled polynomial evaluation (_eval_c) against the
    pure-Python evaluation on a sampled (poly, poly_h, x) case."""
    poly, poly_h, x = params
    res = poly(x)
    res_h = poly_h._eval_c(x)
    coefficients = poly.coefficients
    exponents = poly.exponents
    all_close(res, res_h, coefficients, exponents, x)
def realign(dir, iter, feat_dir, lang, prev_egs_dir, cur_egs_dir,
            prior_subset_size, num_archives, run_opts, online_ivector_dir=None):
    """Realignment stage for nnet3 training.

    Deliberately disabled: the first statement raises, so everything after it
    is intentionally unreachable scaffolding kept for a future implementation.
    """
    raise Exception('Realignment stage has not been implemented in nnet3')
    # --- unreachable below: retained as a sketch of the intended pipeline ---
    logger.info('Getting average posterior for purposes of adjusting the priors.')
    avg_post_vec_file = compute_average_posterior(
        dir=dir, iter=iter, egs_dir=prev_egs_dir, num_archives=num_archives,
        prior_subset_size=prior_subset_size, run_opts=run_opts)
    avg_post_vec_file = '{dir}/post.{iter}.vec'.format(dir=dir, iter=iter)
    logger.info('Re-adjusting priors based on computed posteriors')
    model = '{0}/{1}.mdl'.format(dir, iter)
    adjust_am_priors(dir, model, avg_post_vec_file, model, run_opts)
    alidir = align(dir, feat_dir, lang, run_opts, iter, online_ivector_dir)
    common_lib.execute_command(
        'steps/nnet3/relabel_egs.sh --cmd "{command}" --iter {iter} {alidir} {prev_egs_dir} {cur_egs_dir}'.format(
            command=run_opts.command, iter=iter, dir=dir, alidir=alidir,
            prev_egs_dir=prev_egs_dir, cur_egs_dir=cur_egs_dir))
def get_chunks(fpath, chunk_size):
    """Yield successive lists of `chunk_size` stripped lines from `fpath`.

    The final chunk may be shorter than `chunk_size`. Fixes two defects in
    the original: the file handle was never closed, and an empty trailing
    chunk was yielded whenever the line count was an exact multiple of
    `chunk_size`.
    """
    with open(fpath) as f:
        chunk = []
        for line in f:
            chunk.append(line.strip())
            if len(chunk) == chunk_size:
                yield chunk
                chunk = []
        if chunk:  # only emit a non-empty remainder
            yield chunk
def _get_tensorflow_version():
    """Return TensorFlow's major version as an int.

    Raises:
        ValueError: when the major version is neither 1 nor 2.
    """
    major = int(str(tensorflow.__version__).split('.')[0])
    if major not in (1, 2):
        raise ValueError('tensorflow version error')
    return major
def atom_to_feature_vector(atom):
    """Encode an RDKit atom as a list of categorical feature indices.

    Open-ended categories go through safe_index (unknown values map to the
    last slot); closed categories (chirality, aromaticity, ring membership)
    use plain list.index and will raise on unseen values.
    """
    return [
        safe_index(allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
        allowable_features['possible_chirality_list'].index(str(atom.GetChiralTag())),
        safe_index(allowable_features['possible_degree_list'], atom.GetTotalDegree()),
        safe_index(allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
        safe_index(allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
        safe_index(allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
        safe_index(allowable_features['possible_hybridization_list'], str(atom.GetHybridization())),
        allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
        allowable_features['possible_is_in_ring_list'].index(atom.IsInRing()),
    ]
class Model(Entity):
    """A scene model: an Entity plus its links, joints and plugins (each
    list is populated later by the loader)."""

    def __init__(self, name=None, pose=None):
        Entity.__init__(self, name, pose)
        self.links = []
        self.joints = []
        self.plugins = []
def noon(dim, parameter=1.1):
    """Generate the Noonburg neural-network polynomial system as strings.

    Equation i is: x_i*(sum of x_j^2 for j != i) - parameter*x_i + 1;
    rendered with 1-based variable names and a trailing semicolon.
    """
    polys = []
    for i in range(dim):
        squares = ' + '.join('x{0}^2'.format(j + 1) for j in range(dim) if j != i)
        polys.append('x{0}*({1}) - {2}*x{0} + 1;'.format(i + 1, squares, parameter))
    return polys
def get_transform(opt, for_val=False):
    """Build the torchvision preprocessing pipeline.

    Validation: resize, center-crop to loadSize, tensorize, normalize.
    Training: resize, center-crop to fineSize, augment, tensorize,
    normalize, then random erasing. The composed transform is printed and
    logged before being returned.
    """
    steps = [transforms.Resize(opt.loadSize, interpolation=PIL.Image.LANCZOS)]
    if for_val:
        steps.append(transforms.CenterCrop(opt.loadSize))
        steps.append(transforms.ToTensor())
    else:
        steps.append(transforms.CenterCrop(opt.fineSize))
        steps.append(AllAugmentations())
        steps.append(transforms.ToTensor())
    # ImageNet channel statistics
    steps.append(transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
    if not for_val:
        steps.append(transforms.RandomErasing())
    transform = transforms.Compose(steps)
    print(transform)
    logging.info(transform)
    return transform
class att_TDNN(nn.Module):
    """Frequency-channel attention (FCA) followed by a 1x1 TDNN projection.

    Input is (B, C, F, T); output is (B, CE, 1, T).
    """

    def __init__(self, C, F, CE):
        super().__init__()
        flat_dim = int(C * F)
        self.mlp = nn.Linear(flat_dim, flat_dim)
        self.TDNN = nn.Conv1d(flat_dim, CE, kernel_size=1)

    def FCA(self, x, B, C, F):
        """Scale the input by per-(channel, freq) attention weights computed
        from the time-averaged features."""
        residual = x
        pooled = torch.mean(x, dim=-1, keepdim=False).view(B, -1)
        weights = self.mlp(pooled).view(B, C, F, 1)
        return residual * weights

    def forward(self, x):
        B, C, F, T = x.shape
        attended = self.FCA(x, B, C, F).view(B, -1, T)
        return self.TDNN(attended).view(B, -1, 1, T)
class ResNetD(nn.Module):
    """ResNet-D classification backbone with dilated later stages.

    Stages 0-1 downsample by stride; stages >= 2 keep resolution and grow
    dilation instead. Stage 3 is flagged for auxiliary output so forward()
    can optionally return intermediate features (multi_output=True).
    """

    def __init__(self, channels, init_block_channels, bottleneck, conv1_stride,
                 ordinary_init=False, multi_output=False, in_channels=3,
                 in_size=(224, 224), num_classes=1000):
        super(ResNetD, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.multi_output = multi_output
        self.features = MultiOutputSequential()
        if ordinary_init:
            self.features.add_module('init_block', ResInitBlock(
                in_channels=in_channels, out_channels=init_block_channels))
        else:
            # the SE init block doubles the stem width
            init_block_channels = 2 * init_block_channels
            self.features.add_module('init_block', SEInitBlock(
                in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # stride-2 only at the start of stages 1..1; later stages dilate
                stride = 2 if (j == 0) and (i != 0) and (i < 2) else 1
                dilation = 2 ** max(0, i - 1 - int(j == 0))
                stage.add_module('unit{}'.format(j + 1), ResUnit(
                    in_channels=in_channels, out_channels=out_channels,
                    stride=stride, padding=dilation, dilation=dilation,
                    bottleneck=bottleneck, conv1_stride=conv1_stride))
                in_channels = out_channels
            if i == 2:
                stage.do_output = True
            self.features.add_module('stage{}'.format(i + 1), stage)
        self.features.add_module('final_pool', nn.AdaptiveAvgPool2d(output_size=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for every conv weight; zero the conv biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        outs = self.features(x)
        x = outs[0]
        x = x.view(x.size(0), -1)
        x = self.output(x)
        if self.multi_output:
            return [x] + outs[1:]
        return x
def get_next_batch(dataloader, device):
    """Fetch the next batch from `dataloader` and move its tensors to `device`.

    Returns the template dict from get_dict_template() with 'data',
    'time_steps' and 'mask' filled in; other template keys are left as-is.
    """
    data_dict = next(dataloader)  # idiomatic: next() instead of .__next__()
    batch_dict = get_dict_template()
    for key in ('data', 'time_steps', 'mask'):
        batch_dict[key] = data_dict[key].to(device)
    return batch_dict
class CocoDataset(CustomDataset):
    """COCO detection dataset: loads annotations via pycocotools, filters
    tiny/annotation-free images, and parses per-image boxes, labels and
    (optionally) masks."""

    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Read the COCO annotation file; returns per-image info dicts with a
        'filename' key added (labels are 1-based: 0 is background)."""
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        self.cat2label = {cat_id: i + 1 for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for img_id in self.img_ids:
            info = self.coco.loadImgs([img_id])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos

    def get_ann_info(self, idx):
        """Return the parsed annotation dict for image index `idx`."""
        img_id = self.img_infos[idx]['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        return self._parse_ann_info(ann_info, self.with_mask)

    def _filter_imgs(self, min_size=32):
        """Keep images that have at least one annotation and whose shorter
        side is at least `min_size` pixels."""
        valid_inds = []
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        for i, img_info in enumerate(self.img_infos):
            if self.img_ids[i] not in ids_with_ann:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, ann_info, with_mask=True):
        """Split raw COCO anns into bboxes/labels (crowd boxes go to the
        ignore list) plus, optionally, binary masks and polygon lists.

        NOTE(review): block nesting was reconstructed from a collapsed
        one-line source; the mask block sits at loop level, matching upstream
        mmdetection -- confirm against version history.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        if with_mask:
            gt_masks = []
            gt_mask_polys = []
            gt_poly_lens = []
        for ann in ann_info:
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
            if ann['iscrowd']:
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
            if with_mask:
                gt_masks.append(self.coco.annToMask(ann))
                # valid polygons have >= 3 points (6 coordinates)
                mask_polys = [p for p in ann['segmentation'] if len(p) >= 6]
                poly_lens = [len(p) for p in mask_polys]
                gt_mask_polys.append(mask_polys)
                gt_poly_lens.extend(poly_lens)
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
        if with_mask:
            ann['masks'] = gt_masks
            ann['mask_polys'] = gt_mask_polys
            ann['poly_lens'] = gt_poly_lens
        return ann
class PRExplanation(ExplanationMixin):
    """Precision-recall explanation: wraps a precomputed PR-curve data dict
    and knows how to render it."""

    explanation_type = None

    def __init__(self, explanation_type, internal_obj, feature_names=None,
                 feature_types=None, name=None, selector=None):
        self.explanation_type = explanation_type
        self._internal_obj = internal_obj
        self.feature_names = feature_names
        self.feature_types = feature_types
        self.name = name
        self.selector = selector

    def data(self, key=None):
        """Return the overall PR data dict; per-key lookups are unsupported
        and return None."""
        if key is None:
            return self._internal_obj['overall']
        return None

    def visualize(self, key=None):
        """Plot the PR curve, or return None when no data is available."""
        from ..visual.plot import plot_performance_curve
        data_dict = self.data(key)
        if data_dict is None:
            return None
        return plot_performance_curve(data_dict, xtitle='Recall', ytitle='Precision',
                                      baseline=False, title='PR Curve: ' + self.name,
                                      auc_prefix='Average Precision')
def _download_file(downloadPath, filePath, verbose=False, spider=False):
    """Download `downloadPath` to `filePath` via wget, retrying on failure.

    Downloads into a temp file and moves it into place on success. With
    spider=True only checks existence (no file is written). Retries up to
    the module-level _MAX_NTRIES; a wget "exit status 4" (network failure)
    retries silently, other failures mark the attempt interrupted.
    """
    downloadPath = downloadPath.replace(os.sep, '/')
    sys.stdout.write('\r' + ('Downloading file %s ...\r' % os.path.basename(filePath)))
    sys.stdout.flush()
    try:
        os.makedirs(os.path.dirname(filePath))
    except OSError:
        # directory already exists
        pass
    downloading = True
    interrupted = False
    fd, tmp_savefilename = tempfile.mkstemp()  # was `file`: shadowed the builtin
    os.close(fd)
    ntries = 1
    while downloading:
        try:
            cmd = ['wget', '%s' % downloadPath, '-O', '%s' % tmp_savefilename,
                   '--read-timeout=10', '--tries=3']
            if not verbose:
                cmd.append('-q')
            if spider:
                cmd.append('--spider')
            subprocess.check_call(cmd)
            if not spider:
                shutil.move(tmp_savefilename, filePath)
            downloading = False
            if interrupted:
                raise KeyboardInterrupt
        except subprocess.CalledProcessError as e:
            if not downloading:
                raise
            elif ntries > _MAX_NTRIES:
                raise IOError('File %s does not appear to exist on the server ...' % os.path.basename(filePath))
            elif not ('exit status 4' in str(e)):
                interrupted = True
                os.remove(tmp_savefilename)
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise OSError('Automagically downloading catalogs requires the wget program; please install wget and try again...')
            else:
                raise
        finally:
            if os.path.exists(tmp_savefilename):
                os.remove(tmp_savefilename)
            ntries += 1
    sys.stdout.write('\r' + _ERASESTR + '\r')
    sys.stdout.flush()
    return None
def test():
    """Solve the binomial system for a twisted-cubic variant, then repeat
    restricted to pure top-dimensional solution sets, printing each map."""
    twisted2 = ['x**2*y - z*x;', 'x**2*z - y**2*x;']
    for solmap in solve_binomials(3, twisted2, silent=False):
        print(solmap)
    print('looking only for expected pure dimensional sets ...')
    for solmap in solve_binomials(3, twisted2, puretopdim=True):
        print(solmap)
class TrainData(tx.data.DatasetBase[(Example, Example)]):
    """Texar dataset for masked-event contrastive training: each example is a
    (query, key, positive, frequency) tuple; one random non-special query
    token is masked and its original id recorded."""

    def __init__(self, hparams=None, device: Optional[torch.device] = None):
        self._hparams = HParams(hparams, self.default_hparams())
        data_source = TrainDataSource(
            self._hparams.dataset.files,
            compression_type=self._hparams.dataset.compression_type)
        self.vocab_size = tokenizer.vocab_size
        super().__init__(data_source, hparams, device=device)

    @staticmethod
    def default_hparams():
        # BUG FIX: this was declared without `self` yet invoked as
        # self.default_hparams() in __init__, which raises TypeError.
        # @staticmethod makes both self.- and class-level calls valid.
        return {**tx.data.DatasetBase.default_hparams(),
                'dataset': {'files': 'data.txt',
                            'compression_type': None,
                            'vocab_file': 'vocab.txt'}}

    def process(self, raw_example):
        """Tokenize query/key/positive events; mask one random position of
        the query (excluding the first and last tokens) and record its id."""
        evt_q, evt_k_str, evt_p, evt_freq = raw_example[0], raw_example[1], raw_example[2], raw_example[3]
        evt_q = map_evt_to_tokens(evt_q)
        evt_q_ids = tokenizer.map_text_to_id(evt_q)
        mask_pos = random.randint(1, len(evt_q_ids) - 2)
        mask_id = evt_q_ids[mask_pos]
        evt_q_ids[mask_pos] = tokenizer.map_token_to_id(tokenizer.mask_token)
        evt_k = map_evt_to_tokens(evt_k_str)
        evt_k_ids = tokenizer.map_text_to_id(evt_k)
        evt_p = map_evt_to_tokens(evt_p)
        evt_p_ids = tokenizer.map_text_to_id(evt_p)
        return {'evt_k': ' '.join(evt_k_str), 'evt_q_ids': evt_q_ids,
                'evt_k_ids': evt_k_ids, 'evt_p_ids': evt_p_ids,
                'mask_pos': mask_pos, 'mask_id': mask_id, 'evt_freq': evt_freq}

    def collate(self, examples: List[Example]) -> tx.data.Batch:
        """Pad the id sequences to the batch maximum and tensorize fields."""
        evt_q_ids, evt_q_lengths = tx.data.padded_batch(
            [ex['evt_q_ids'] for ex in examples], pad_value=pad_token_id)
        evt_k = [ex['evt_k'] for ex in examples]
        evt_k_ids, evt_k_lengths = tx.data.padded_batch(
            [ex['evt_k_ids'] for ex in examples], pad_value=pad_token_id)
        evt_p_ids, evt_p_lengths = tx.data.padded_batch(
            [ex['evt_p_ids'] for ex in examples], pad_value=pad_token_id)
        evt_freq = [ex['evt_freq'] for ex in examples]
        mask_id = [ex['mask_id'] for ex in examples]
        mask_pos = [ex['mask_pos'] for ex in examples]
        return tx.data.Batch(len(examples),
                             evt_q_ids=torch.from_numpy(evt_q_ids),
                             evt_q_lengths=torch.tensor(evt_q_lengths),
                             evt_k=evt_k,
                             evt_k_ids=torch.from_numpy(evt_k_ids),
                             evt_k_lengths=torch.tensor(evt_k_lengths),
                             evt_p_ids=torch.from_numpy(evt_p_ids),
                             evt_p_lengths=torch.tensor(evt_p_lengths),
                             evt_freq=torch.tensor(evt_freq),
                             mask_id=torch.tensor(mask_id),
                             mask_pos=torch.tensor(mask_pos))
class U_Net_F_v2(nn.Module):
    """U-Net variant: 5-level encoder (32-64-128-128-128 channels), decoder
    with skip concatenation at every level, 1x1 output conv and final ReLU."""

    def __init__(self, img_ch=3, output_ch=1):
        super(U_Net_F_v2, self).__init__()
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # encoder
        self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
        self.Conv2 = conv_block(ch_in=32, ch_out=64)
        self.Conv3 = conv_block(ch_in=64, ch_out=128)
        self.Conv4 = conv_block(ch_in=128, ch_out=128)
        self.Conv5 = conv_block(ch_in=128, ch_out=128)
        # decoder (input channels doubled by the skip concatenation)
        self.Up5 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv5 = conv_block(ch_in=256, ch_out=128)
        self.Up4 = up_conv(ch_in=128, ch_out=128)
        self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
        self.Up3 = up_conv(ch_in=128, ch_out=64)
        self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
        self.Up2 = up_conv(ch_in=64, ch_out=32)
        self.Up_conv2 = conv_block(ch_in=64, ch_out=32)
        self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
        # He-style initialization for convs; unit BN weights; zero biases
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.bias.data.zero_()

    def forward(self, x):
        # encoder path
        x1 = self.Conv1(x)
        x2 = self.Conv2(self.Maxpool(x1))
        x3 = self.Conv3(self.Maxpool(x2))
        x4 = self.Conv4(self.Maxpool(x3))
        x5 = self.Conv5(self.Maxpool(x4))
        # decoder path with skip connections
        d5 = self.Up_conv5(torch.cat((x4, self.Up5(x5)), dim=1))
        d4 = self.Up_conv4(torch.cat((x3, self.Up4(d5)), dim=1))
        d3 = self.Up_conv3(torch.cat((x2, self.Up3(d4)), dim=1))
        d2 = self.Up_conv2(torch.cat((x1, self.Up2(d3)), dim=1))
        return F.relu(self.Conv_1x1(d2))
def vis():
    """Interactively browse evaluation examples: show the input image, the
    ground-truth mesh (blue), and for each top-k prediction the deformed
    mesh (green) and its original vertices (red)."""
    def show_mesh(mesh, include_wireframe=False, **kwargs):
        # local helper: adapt dict-style meshes to the mayavi visualizer
        from util3d.mayavi_vis import vis_mesh as vm
        verts, faces = (np.array(mesh[k]) for k in ('vertices', 'faces'))
        vm(verts, faces, include_wireframe=include_wireframe, **kwargs)

    example_ids = list(get_example_ids(cat_id, 'eval'))
    random.shuffle(example_ids)
    with all_ds:
        for example_id in example_ids:
            print(example_id)
            image, gt_mesh, predictions = all_ds[example_id]
            meshes = top_k_mesh_fn(*(np.array(predictions[k]) for k in ('probs', 'dp')))
            plt.imshow(image)
            mlab.figure()
            show_mesh(gt_mesh, color=(0, 0, 1))
            for mesh in meshes:
                v, f, ov = (mesh[k] for k in ('vertices', 'faces', 'original_vertices'))
                mlab.figure()
                show_mesh({'vertices': v, 'faces': f}, color=(0, 1, 0))
                mlab.figure()
                show_mesh({'vertices': ov, 'faces': f}, color=(1, 0, 0))
            plt.show(block=False)
            mlab.show()
            plt.close()
class DatasetFactory(object):
    """Factory that instantiates datasets by registered name."""

    def __init__(self):
        pass

    @staticmethod
    def get_by_name(dataset_name, opt, is_for_train):
        """Create the dataset registered under `dataset_name`.

        BUG FIX: this was declared without `self` or @staticmethod, so calling
        it on an instance raised TypeError; @staticmethod supports both
        class-level and instance-level calls.

        Raises:
            ValueError: for an unrecognized dataset name.
        """
        if dataset_name == 'ProcessedVideo':
            from .processed_video_dataset import ProcessedVideoDataset
            dataset = ProcessedVideoDataset(opt, is_for_train)
        elif dataset_name == 'ProcessedVideo+Place2':
            from .concat_dataset import ProcessedVideoPlace2Dataset
            dataset = ProcessedVideoPlace2Dataset(opt, is_for_train)
        else:
            raise ValueError(f'Dataset {dataset_name} not recognized.')
        print(f'Dataset {dataset.name} was created.')
        return dataset
def parse_tuning_line(line, tmp):
    """Parse one log line from a quantization-tuning run and accumulate the
    extracted metrics into `tmp` (which must already contain
    'tuning_trials'). Mutates `tmp` in place; returns None.

    BUG FIX: tune_time and the model sizes were parsed with int() although
    their regexes admit decimals (e.g. '95.5s'), raising ValueError; they now
    go through int(float(...)).
    """
    tuning_strategy = re.search('Tuning strategy:\\s+([A-Za-z]+)', line)
    if tuning_strategy and tuning_strategy.group(1):
        tmp['strategy'] = tuning_strategy.group(1)
    baseline_acc = re.search('FP32 baseline is:\\s+\\[Accuracy:\\s(\\d+(\\.\\d+)?), Duration \\(seconds\\):\\s*(\\d+(\\.\\d+)?)\\]', line)
    if baseline_acc and baseline_acc.group(1):
        tmp['fp32_acc'] = float(baseline_acc.group(1))
    tuned_acc = re.search('Best tune result is:\\s+\\[Accuracy:\\s(\\d+(\\.\\d+)?), Duration \\(seconds\\):\\s(\\d+(\\.\\d+)?)\\]', line)
    if tuned_acc and tuned_acc.group(1):
        tmp['int8_acc'] = float(tuned_acc.group(1))
    tune_trial = re.search('Tune \\d*\\s*result is:', line)
    if tune_trial:
        tmp['tuning_trials'] += 1
    tune_time = re.search('Tuning time spend:\\s+(\\d+(\\.\\d+)?)s', line)
    if tune_time and tune_time.group(1):
        tmp['tune_time'] = int(float(tune_time.group(1)))
    fp32_model_size = re.search('The input model size is:\\s+(\\d+(\\.\\d+)?)', line)
    if fp32_model_size and fp32_model_size.group(1):
        tmp['fp32_model_size'] = int(float(fp32_model_size.group(1)))
    int8_model_size = re.search('The output model size is:\\s+(\\d+(\\.\\d+)?)', line)
    if int8_model_size and int8_model_size.group(1):
        tmp['int8_model_size'] = int(float(int8_model_size.group(1)))
    total_mem_size = re.search('Total resident size\\D*([0-9]+)', line)
    if total_mem_size and total_mem_size.group(1):
        tmp['total_mem_size'] = float(total_mem_size.group(1))
    max_mem_size = re.search('Maximum resident set size\\D*([0-9]+)', line)
    if max_mem_size and max_mem_size.group(1):
        tmp['max_mem_size'] = float(max_mem_size.group(1))
class CycleGANDataset(data.Dataset):
    """Image dataset whose integer labels are encoded as the leading
    '_'-separated token of each filename, matched by a glob pattern under
    `root`. `download` is accepted for interface compatibility but unused."""

    def __init__(self, root, regexp, transform=None, target_transform=None, download=False):
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.image_paths, self.labels = self.find_images(regexp)

    def find_images(self, regexp='*.png'):
        """Glob `root` for matching files; parse each label from the basename."""
        matches = sorted(glob.glob(join(self.root, regexp)))
        paths, labels = [], []
        for basename in matches:
            paths.append(os.path.join(self.root, basename))
            labels.append(int(basename.split('/')[-1].split('_')[0]))
        return (paths, labels)

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        target = self.labels[index]
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (image, target)

    def __len__(self):
        return len(self.image_paths)
def wave_feature_extraction(wav_file, sr):
    """Load a wav file at sample rate `sr` and trim leading/trailing silence
    below 20 dB; returns the trimmed waveform."""
    signal, sr = librosa.load(wav_file, sr)
    signal, _ = librosa.effects.trim(signal, top_db=20)
    return signal
class CamembertTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for CamemBERT with RoBERTa-style specials.

    The original fairseq checkpoint hard-codes ids 0-3 for
    '<s>NOTUSED', '<pad>', '</s>NOTUSED', '<unk>', so every SentencePiece id
    is shifted by `fairseq_offset`, and '<mask>' is appended after the
    SentencePiece vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Ids reserved by the fairseq checkpoint; they shadow the first
        # SentencePiece pieces.
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        # '<mask>' gets the first id after the shifted SentencePiece range.
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) + len(self.fairseq_tokens_to_ids))
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return ([self.cls_token_id] + token_ids_0) + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if token_ids_1 is None:
            return ([1] + ([0] * len(token_ids_0))) + [1]
        return ((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """CamemBERT does not use token types: return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len((cls + token_ids_0) + sep) * [0]
        return len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0]

    @property
    def vocab_size(self):
        # Fixed: this must be a property — get_vocab() below iterates
        # range(self.vocab_size), which raises TypeError on a bound method.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """Map a token string to its id, honoring the fairseq reserved ids."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # SentencePiece returns 0 for unknown pieces.
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, honoring the reserved ids."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload it on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def convert_tokens_to_string(self, tokens):
        """Join SentencePiece pieces back into plain text."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`.

        NOTE(review): returns None (not a tuple) when `save_directory` is not
        a directory — callers unpacking the result should be aware.
        """
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def progress(self, message, *args, **kws):
    """Log `message` at the custom PROGRESS level, if that level is enabled."""
    if not self.isEnabledFor(PROGRESS_LEVEL_NUM):
        return
    self._log(PROGRESS_LEVEL_NUM, message, args, **kws)
class ConcatDataset(_ConcatDataset):
    """Dataset concatenation that also exposes per-sample image metadata."""

    def get_idxs(self, idx):
        """Map a global sample index to (dataset_idx, local_sample_idx)."""
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        # Samples of dataset i start at the cumulative size of dataset i-1.
        offset = 0 if dataset_idx == 0 else self.cumulative_sizes[dataset_idx - 1]
        return (dataset_idx, idx - offset)

    def get_img_info(self, idx):
        """Fetch image metadata for a global index from the owning dataset."""
        dataset_idx, sample_idx = self.get_idxs(idx)
        return self.datasets[dataset_idx].get_img_info(sample_idx)
def autodoc_skip_member(app, what, name, obj, skip, options):
    """Sphinx `autodoc-skip-member` hook: hide flagged or deprecated members.

    Returns True to force-skip the member, or None to defer to Sphinx's
    default decision.
    """
    hidden = getattr(obj, '__HIDE_SPHINX_DOC__', False)
    if hidden or name in _DEPRECATED_NAMES:
        return True
    return None
def shard_selection(shards, distributed_info=None):
    """Select this node's slice of `shards`, then apply worker selection."""
    if distributed_info is not None:
        rank, world_size = distributed_info
        if len(shards) < world_size:
            # Fewer shards than nodes: shrink the effective world size so
            # every node gets data, at the cost of duplication.
            warnings.warn('There are not enough shards.')
            warnings.warn('Some data will be duplicated!')
            world_size = len(shards)
            rank = rank % len(shards)
        # Round-robin slice for this node.
        shards = list(shards[rank::world_size])
    return worker_shard_selection(shards)
def build_dictionary(text):
    """Build a word -> index dictionary from an iterable of sentences.

    Words are ranked by descending frequency; indices start at 2 so that
    0 and 1 stay free for special tokens.

    Fix: in Python 3, `dict.keys()` / `dict.values()` return views, which
    are not indexable — `words[sidx]` raised TypeError. Materialize them
    into lists before sorting/indexing.
    """
    wordcount = OrderedDict()
    for sentence in text:
        for w in sentence.split():
            if w not in wordcount:
                wordcount[w] = 0
            wordcount[w] += 1
    words = list(wordcount.keys())
    freqs = list(wordcount.values())
    # Indices of words sorted by descending frequency (stable within ties).
    sorted_idx = numpy.argsort(freqs)[::-1]
    worddict = OrderedDict()
    for idx, sidx in enumerate(sorted_idx):
        worddict[words[sidx]] = idx + 2
    return worddict
def learning_rate_decay(init_lr, global_step, warmup_steps=4000.0):
    """Inverse-sqrt learning-rate schedule with linear warmup (Noam-style).

    During the first `warmup_steps` the linear term dominates; afterwards
    the rate decays as step**-0.5.
    """
    step = tf.cast(global_step + 1, dtype=tf.float32)
    warmup_branch = step * (warmup_steps ** (-1.5))
    decay_branch = step ** (-0.5)
    return init_lr * (warmup_steps ** 0.5) * tf.minimum(warmup_branch, decay_branch)
class Cifar10SemiSupervisedDatasetInterface(SemiDataSetInterface):
    """Semi-supervised CIFAR-10 specialization of SemiDataSetInterface."""

    def __init__(self, data_root: str=DATA_PATH, labeled_sample_num: int=4000, seed: int=0, batch_size: int=10, labeled_batch_size: int=None, unlabeled_batch_size: int=None, val_batch_size: int=None, shuffle: bool=False, num_workers: int=1, pin_memory: bool=True, drop_last=False, verbose: bool=True) -> None:
        # Delegate everything to the base class, binding CIFAR10 as the
        # underlying dataset class.
        super().__init__(CIFAR10, data_root, labeled_sample_num, seed,
                         batch_size, labeled_batch_size, unlabeled_batch_size,
                         val_batch_size, shuffle, num_workers, pin_memory,
                         drop_last, verbose)

    def _init_train_val_sets(self) -> Tuple[(Dataset, Dataset)]:
        """Download (if needed) and return the (train, validation) datasets."""
        def make_split(is_train):
            return self.DataClass(self.data_root, train=is_train, download=True)
        return (make_split(True), make_split(False))
def _RCMatch_composeAll(self, *, maximum=False, verbose=False):
    """Patched composeAll that unwraps the original implementation's result.

    Fix: `maximum` and `verbose` are keyword-only here (and presumably in
    the wrapped original, which this signature mirrors), so they must be
    forwarded by name — forwarding them positionally raises TypeError.
    """
    return _unwrap(_RCMatch_composeAll_orig(self, maximum=maximum, verbose=verbose))
def parse_args():
    """Parse the known command-line arguments for the density-figure script.

    Unrecognized arguments are ignored rather than treated as errors.
    """
    arg_parser = argparse.ArgumentParser(description='Create density figure')
    arg_parser.add_argument('--datasets', nargs='+', required=True,
                            help='Datasets to use for density figure')
    arg_parser.add_argument('--output_file', required=True, type=Path,
                            help='The jpg file to save the plot')
    arg_parser.add_argument('--checkpoint', type=Path,
                            help='The checkpoint file to use for inference')
    arg_parser.add_argument('--gpu', type=int,
                            help='The index of the GPU to use for inference')
    known_args, _ = arg_parser.parse_known_args()
    return known_args
# NOTE(review): a stripped decorator marker ("_tf2") preceded this class in
# the extracted source — likely a tf2-only test gate; confirm upstream.
class TestSeq2Seq(TestCase):
    """End-to-end behavior tests for the LSTMSeq2Seq forecaster."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_seq2seq_fit_predict_evaluate(self):
        # Briefly fit, then verify the prediction tensor shape.
        train_data, test_data = create_data()
        horizon = test_data[-1].shape[1]
        model = model_creator(config={'input_feature_num': 10,
                                      'output_feature_num': 2,
                                      'future_seq_len': horizon,
                                      'lstm_hidden_dim': 32})
        model.fit(train_data[0], train_data[1], epochs=2, validation_data=test_data)
        yhat = model.predict(test_data[0])
        model.evaluate(test_data[0], test_data[1])
        assert yhat.shape == (400, train_data[-1].shape[1], 2)

    def test_seq2seq_save_load(self):
        # Saving then restoring must preserve evaluation results and class.
        train_data, test_data = create_data()
        horizon = test_data[-1].shape[1]
        model = model_creator(config={'input_feature_num': 10,
                                      'output_feature_num': 2,
                                      'future_seq_len': horizon,
                                      'lstm_hidden_dim': 32})
        model.fit(train_data[0], train_data[1], epochs=2, validation_data=test_data)
        with tempfile.TemporaryDirectory() as ckpt_dir:
            model.save(ckpt_dir)
            import keras
            restored = keras.models.load_model(
                ckpt_dir, custom_objects={'LSTMSeq2Seq': LSTMSeq2Seq})
            baseline_res = model.evaluate(test_data[0], test_data[1])
            restored_res = restored.evaluate(test_data[0], test_data[1])
            np.testing.assert_almost_equal(baseline_res, restored_res, decimal=5)
            expected_cls = LSTMSeq2Seq(input_feature_num=10, output_feature_num=2,
                                       future_seq_len=horizon,
                                       lstm_hidden_dim=32).__class__
            assert isinstance(restored, expected_cls)

    def test_seq2seq_freeze_training(self):
        # Inference mode (training=False) must be deterministic across calls;
        # training=True (dropout active) should differ between calls.
        train_data, test_data = create_data()
        model = model_creator(config={'input_feature_num': 10,
                                      'output_feature_num': 2,
                                      'future_seq_len': test_data[-1].shape[1],
                                      'lstm_hidden_dim': 32})
        frozen_a = model(test_data[0], training=False)
        frozen_b = model(test_data[0], training=False)
        assert np.all(frozen_b == frozen_a)
        unfrozen_a = model(test_data[0], training=True)
        unfrozen_b = model(test_data[0], training=True)
        assert np.any(unfrozen_b != unfrozen_a)
def nor_priors(priors):
    """Min-max normalize the three prior dicts in place.

    Parameters:
        priors: a 3-tuple of {key: score} dicts — presumably user-, review-
            and product-priors (TODO confirm naming against callers).

    Returns:
        (priors, [u_median, r_median, p_median]) — the same (now normalized)
        tuple plus each dict's median score measured before normalization.

    Fix: guard against a zero score range (all values equal), which
    previously raised ZeroDivisionError.
    """
    def _rank_stats(scores):
        # (max, median, min) of the scores, via a descending sort.
        ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
        return (ranked[0][1], ranked[len(ranked) // 2][1], ranked[-1][1])

    def _normalize_inplace(scores, lo, hi):
        # Rescale every score to [0, 1]; a degenerate range maps all to 0.
        span = (hi - lo) or 1
        for key, score in scores.items():
            scores[key] = (score - lo) / span

    (u_max, u_mean, u_min) = _rank_stats(priors[0])
    (r_max, r_mean, r_min) = _rank_stats(priors[1])
    (p_max, p_mean, p_min) = _rank_stats(priors[2])
    _normalize_inplace(priors[0], u_min, u_max)
    _normalize_inplace(priors[1], r_min, r_max)
    _normalize_inplace(priors[2], p_min, p_max)
    return (priors, [u_mean, r_mean, p_mean])
def compute_new_gpu_util(current_gpu_util, slo, arrival_rate, avg_latency, avg_throughput):
    """Adjust the GPU utilization target toward the latency/throughput SLOs.

    Keeps the current allocation when both metrics sit in a 0-10% headroom
    band; otherwise scales up (SLO violated) or down (ample headroom) by a
    factor proportional to the larger relative residual, clamped to
    [1, MAX_RESOURCE].
    """
    residual_latency = slo - avg_latency
    residual_throughput = avg_throughput - arrival_rate
    latency_headroom_pct = residual_latency * 100 / slo
    throughput_headroom_pct = residual_throughput * 100 / arrival_rate
    # Both metrics within the comfort band: no change.
    if 0 < latency_headroom_pct < 10 and 0 < throughput_headroom_pct < 10:
        return current_gpu_util
    change_factor = max(abs(residual_latency) / avg_latency,
                        abs(residual_throughput) / avg_throughput)
    delta = current_gpu_util * change_factor
    new_gpu_util = 0
    if residual_latency < 0 or residual_throughput < 0:
        # An SLO is violated: grow the allocation.
        new_gpu_util = current_gpu_util + delta
    if residual_latency > 0 and residual_throughput > 0:
        # Comfortable on both axes: shrink the allocation.
        new_gpu_util = current_gpu_util - delta
    # Clamp into [1, MAX_RESOURCE].
    if new_gpu_util > MAX_RESOURCE:
        return MAX_RESOURCE
    if new_gpu_util < 1:
        return 1
    return new_gpu_util
def to_ram(ale):
    """Read the emulator's RAM contents into a fresh uint8 numpy array."""
    buffer = np.zeros(ale.getRAMSize(), dtype=np.uint8)
    # ALE fills the provided buffer in place.
    ale.getRAM(buffer)
    return buffer
# NOTE(review): the decorator prefixes were stripped in the extracted source
# (bare ".parametrize" calls); restored as @pytest.mark.parametrize, which
# matches the call signatures. Confirm against the original file.
@pytest.mark.parametrize('workers_per_gpu', (0, 2))
@pytest.mark.parametrize(('valid', 'env_cfg'), [(True, dict(mp_start_method='fork', opencv_num_threads=0, omp_num_threads=1, mkl_num_threads=1)), (False, dict(mp_start_method=1, opencv_num_threads=0.1, omp_num_threads='s', mkl_num_threads='1'))])
def test_setup_multi_processes(workers_per_gpu, valid, env_cfg):
    """setup_multi_processes must apply a valid env config (start method and
    cv2/OMP/MKL thread counts) and leave the process untouched for an
    invalid one; process state is restored after every case."""
    # Snapshot current process state so it can be restored afterwards.
    # (Fixed typo: the original variable was named `sys_start_mehod`.)
    sys_start_method = mp.get_start_method(allow_none=True)
    sys_cv_threads = cv2.getNumThreads()
    sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
    sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
    config = dict(data=dict(workers_per_gpu=workers_per_gpu))
    config.update(env_cfg)
    cfg = Config(config)
    setup_multi_processes(cfg)
    if valid and workers_per_gpu > 0:
        # Thread-count env vars are only set when dataloader workers exist.
        assert os.getenv('OMP_NUM_THREADS') == str(env_cfg['omp_num_threads'])
        assert os.getenv('MKL_NUM_THREADS') == str(env_cfg['mkl_num_threads'])
        # NOTE(review): when opencv_num_threads <= 0 this assert is vacuously
        # true (it asserts the literal 1) — kept as-is to preserve behavior.
        assert ((cv2.getNumThreads() == env_cfg['opencv_num_threads']) if (env_cfg['opencv_num_threads'] > 0) else 1)
        if platform.system() != 'Windows':
            assert mp.get_start_method() == env_cfg['mp_start_method']
        # Restore process state.
        if sys_start_method:
            mp.set_start_method(sys_start_method, force=True)
        cv2.setNumThreads(sys_cv_threads)
        if sys_omp_threads:
            os.environ['OMP_NUM_THREADS'] = sys_omp_threads
        else:
            os.environ.pop('OMP_NUM_THREADS')
        if sys_mkl_threads:
            os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
        else:
            os.environ.pop('MKL_NUM_THREADS')
    elif valid and workers_per_gpu == 0:
        # With no workers, thread env vars must NOT have been set.
        if platform.system() != 'Windows':
            assert mp.get_start_method() == env_cfg['mp_start_method']
        assert ((cv2.getNumThreads() == env_cfg['opencv_num_threads']) if (env_cfg['opencv_num_threads'] > 0) else 1)
        assert 'OMP_NUM_THREADS' not in os.environ
        assert 'MKL_NUM_THREADS' not in os.environ
        if sys_start_method:
            mp.set_start_method(sys_start_method, force=True)
        cv2.setNumThreads(sys_cv_threads)
        if sys_omp_threads:
            os.environ['OMP_NUM_THREADS'] = sys_omp_threads
        if sys_mkl_threads:
            os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
    else:
        # Invalid config: nothing should have changed.
        assert mp.get_start_method() == sys_start_method
        assert cv2.getNumThreads() == sys_cv_threads
        assert 'OMP_NUM_THREADS' not in os.environ
        assert 'MKL_NUM_THREADS' not in os.environ
def run_asr(asr_dir, split, w2v_ckpt, w2v_label, res_dir):
    """Run wav2vec CTC/Viterbi inference on one split via fairseq's infer tool.

    All path-like arguments are resolved to absolute paths before being
    handed to the subprocess; raises CalledProcessError on failure.
    """
    cmd = [
        'python', '-m', 'examples.speech_recognition.infer',
        str(asr_dir.resolve()),
        '--task', 'audio_finetuning', '--nbest', '1', '--quiet',
        '--w2l-decoder', 'viterbi', '--criterion', 'ctc',
        '--post-process', 'letter', '--max-tokens', '4000000',
        '--path', str(w2v_ckpt.resolve()), '--labels', w2v_label,
        '--gen-subset', split, '--results-path', str(res_dir.resolve()),
    ]
    print(f"running cmd: {' '.join(cmd)}")
    subprocess.run(cmd, check=True)
def load_data(args):
    """Build the training DataLoader over 256px images from args.dataset_path.

    NOTE(review): shuffle=False on a training loader is unusual — presumably
    intentional here (e.g. for reproducibility); confirm upstream.
    """
    dataset = ImagePaths(args.dataset_path, size=256)
    return DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
class TestLinformerAttention():
    """Shape and normalization checks for LinformerAttention."""

    # NOTE(review): decorator prefixes were stripped in the extracted source
    # (bare ".parametrize" calls); restored as @pytest.mark.parametrize,
    # which matches the call signatures. Confirm against the original file.
    @pytest.mark.parametrize('device', ['cpu', 'cuda'])
    @pytest.mark.parametrize('softmax_temp', [None, 1.0, 0.235])
    @pytest.mark.parametrize('share_kv', [False, True])
    @pytest.mark.parametrize('proj_dim_k', [13, 47, 88])
    @pytest.mark.parametrize('seq_len', [127, 28, 468])
    def test_output(self, seq_len, proj_dim_k, share_kv, softmax_temp, device):
        seed = 2357
        embed_dim = 21
        v_dim = 17
        num_heads = 7
        batch_size = 18
        q_seqlen = 47
        k_seqlen = seq_len
        seed_cpu_cuda(seed)
        # Random per-sequence lengths exercise the key-padding-mask path.
        key_padding_mask = LengthMask(torch.randint(low=0, high=k_seqlen, size=(batch_size,), device=device), max_len=k_seqlen)
        lin_attn = LinformerAttention(seq_len, k=proj_dim_k, share_kv=share_kv, softmax_temp=softmax_temp, attention_dropout=0.0).to(device)
        q = torch.randn(batch_size, q_seqlen, num_heads, embed_dim, device=device)
        k = torch.randn(batch_size, k_seqlen, num_heads, embed_dim, device=device)
        v = torch.randn(batch_size, k_seqlen, num_heads, v_dim, device=device)
        (out_lin, A_lin) = lin_attn(q, k, v, key_padding_mask=key_padding_mask, need_weights=True)
        assert (out_lin.shape == (batch_size, q_seqlen, num_heads, v_dim))
        # Attention is over the k projected dimensions, not the full k_seqlen.
        assert (A_lin.shape == (batch_size, num_heads, q_seqlen, proj_dim_k))
        assert torch.all(A_lin >= 0)
        # Each attention row sums to 1, or to 0 for fully-masked rows.
        A_local_sum = A_lin.sum(dim=-1)
        assert torch.all(torch.isclose(A_local_sum, torch.ones_like(A_local_sum)) | torch.isclose(A_local_sum, torch.zeros_like(A_local_sum)))
def get_parser():
    """Build the argument parser for the Cumulative Reasoning runner.

    Boolean-ish flags use ast.literal_eval so users can pass True/False
    literals on the command line.
    """
    arg_parser = argparse.ArgumentParser(description='Cumulative Reasoning')
    # Sampling / voting configuration.
    arg_parser.add_argument('--temperature', type=float, default=0.0, help='temperature')
    arg_parser.add_argument('--majoritycnt', type=int, choices=range(1, 101), default=1, help='numbers of majority voting times')
    arg_parser.add_argument('--shots', type=int, choices=range(1, 101), default=8, help='numbers of few-shot examples')
    # Generation budgets.
    arg_parser.add_argument('--hintcnt', type=int, choices=range(0, 101), default=2, help='numbers of hints to generate')
    arg_parser.add_argument('--questioncnt', type=int, choices=range(0, 101), default=8, help='numbers of questions to generate')
    arg_parser.add_argument('--questiontrycnt', type=int, choices=range(0, 101), default=4, help='numbers of tries to generate questions')
    arg_parser.add_argument('--answertrycnt', type=int, choices=range(0, 101), default=4, help='numbers of tries to answer')
    # Runtime behavior.
    arg_parser.add_argument('--verbose', type=ast.literal_eval, default=True, help='verbose mode')
    arg_parser.add_argument('--model', type=str, default='gpt-3.5-turbo-16k-0613', help='model to use')
    arg_parser.add_argument('--withcode', type=ast.literal_eval, default='False', help='whether to use code to verify answers')
    # Dataset selection.
    arg_parser.add_argument('--dataset', type=str, default='data/test.jsonl', help='dataset to use')
    arg_parser.add_argument('--problem_level_lower_bound', type=int, default=1, help='lower bound of problem level [lower_bound, upper_bound]')
    arg_parser.add_argument('--problem_level_upper_bound', type=int, default=5, help='upper bound of problem level [lower_bound, upper_bound]')
    arg_parser.add_argument('--problem_interval_begin', type=int, default=0, help='problem interval begin [begin, end]')
    arg_parser.add_argument('--problem_interval_end', type=int, default=500, help='problem interval end [begin, end]')
    arg_parser.add_argument('--inverse_problem_order', type=ast.literal_eval, default=True, help='whether to inverse problem order')
    return arg_parser
def test_record_breaking_render_method():
    """A recorder over an env whose render breaks should flag itself broken,
    record nothing, and remove its output file on close."""
    recorder = VideoRecorder(BrokenRecordableEnv())
    recorder.capture_frame()
    recorder.close()
    assert recorder.empty
    assert recorder.broken
    assert not os.path.exists(recorder.path)
# NOTE(review): stripped decorator markers ("_torch _retrieval") preceded
# this class in the extracted source — presumably pytest gates such as
# @require_torch / @require_retrieval; confirm against the original file.
class RagModelSaveLoadTests(unittest.TestCase):
    """Check that RAG models built via from_pretrained_question_encoder_generator
    (and round-tripped through save_pretrained) yield the same loss as models
    constructed directly from the component question-encoder and generator."""

    def get_rag_config(self):
        # Combine the DPR question-encoder and BART generator configs; the
        # dummy wiki_dpr dataset keeps retrieval setup lightweight.
        question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator_config = AutoConfig.from_pretrained('facebook/bart-large-cnn')
        return RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, dataset='wiki_dpr', dataset_split='train', index_name='exact', index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8)

    def test_rag_sequence_from_pretrained(self):
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='pt').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            # Assemble from the two pretrained components, then round-trip
            # through save_pretrained.
            rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config).to(torch_device)
            rag_sequence.save_pretrained(tmp_dirname)
            # NOTE(review): the return value of from_pretrained is discarded
            # here, so the loss below is computed on the originally assembled
            # weights, not the reloaded ones — confirm this is intended.
            rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
            rag_sequence.to(torch_device)
            with torch.no_grad():
                output = rag_sequence(input_ids, labels=decoder_input_ids)
            loss_pretrained = output.loss
            del rag_sequence
        # Build the same model directly from the component models and compare.
        question_encoder = AutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator = AutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn')
        rag_sequence = RagSequenceForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
        rag_sequence.to(torch_device)
        with torch.no_grad():
            output = rag_sequence(input_ids, labels=decoder_input_ids)
        loss_init = output.loss
        # The two construction paths must agree (to 4 decimal places).
        self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)

    def test_rag_token_from_pretrained(self):
        # Same round-trip check as above, but for RagTokenForGeneration and
        # with per-component max_length overrides.
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='pt').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='pt').input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn', retriever=rag_retriever, config=rag_config, question_encoder_max_length=200, generator_max_length=200).to(torch_device)
            rag_token.save_pretrained(tmp_dirname)
            # NOTE(review): return value of from_pretrained discarded — see
            # the matching note in test_rag_sequence_from_pretrained.
            rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
            rag_token.to(torch_device)
            # The max_length overrides must have propagated to the sub-configs.
            self.assertTrue((rag_token.question_encoder.config.max_length == 200))
            self.assertTrue((rag_token.generator.config.max_length == 200))
            with torch.no_grad():
                output = rag_token(input_ids, labels=decoder_input_ids)
            loss_pretrained = output.loss
            del rag_token
        question_encoder = AutoModel.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator = AutoModelForSeq2SeqLM.from_pretrained('facebook/bart-large-cnn')
        rag_token = RagTokenForGeneration(config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever)
        rag_token.to(torch_device)
        with torch.no_grad():
            output = rag_token(input_ids, labels=decoder_input_ids)
        loss_init = output.loss
        self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
class BiLSTM(nn.Module): def __init__(self, in_channel=1, out_channel=10): super(BiLSTM, self).__init__() self.hidden_dim = 64 self.kernel_num = 16 self.num_layers = 2 self.V = 5 self.embed1 = nn.Sequential(nn.Conv2d(in_channel, self.kernel_num, kernel_size=3, padding=1), nn.BatchNorm2d(self.kernel_num), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2)) self.embed2 = nn.Sequential(nn.Conv2d(self.kernel_num, (self.kernel_num * 2), kernel_size=3, padding=1), nn.BatchNorm2d((self.kernel_num * 2)), nn.ReLU(inplace=True), nn.AdaptiveMaxPool2d(self.V)) self.hidden2label1 = nn.Sequential(nn.Linear((((self.V * self.V) * 2) * self.hidden_dim), (self.hidden_dim * 4)), nn.ReLU(), nn.Dropout()) self.hidden2label2 = nn.Linear((self.hidden_dim * 4), out_channel) self.bilstm = nn.LSTM((self.kernel_num * 2), self.hidden_dim, num_layers=self.num_layers, bidirectional=True, batch_first=True, bias=False) def forward(self, x): x = self.embed1(x) x = self.embed2(x) x = x.view((- 1), (self.kernel_num * 2), (self.V * self.V)) x = torch.transpose(x, 1, 2) (bilstm_out, _) = self.bilstm(x) bilstm_out = torch.tanh(bilstm_out) bilstm_out = bilstm_out.view(bilstm_out.size(0), (- 1)) logit = self.hidden2label1(bilstm_out) logit = self.hidden2label2(logit) return logit
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[(gym.Env, D4RLDataset)]:
    """Create a seeded, wrapped gym environment and its D4RL dataset.

    Fix: the normalization branches previously consulted the global
    FLAGS.env_name instead of the `env_name` argument, so calling this with
    a name different from the flag silently applied the wrong normalization.
    """
    env = gym.make(env_name)
    env = wrappers.EpisodeMonitor(env)
    env = wrappers.SinglePrecision(env)
    env.seed(seed)
    env.action_space.seed(seed)
    env.observation_space.seed(seed)
    dataset = D4RLDataset(env)
    if 'antmaze' in env_name:
        # antmaze rewards are used as-is.
        pass
    elif ('halfcheetah' in env_name) or ('walker2d' in env_name) or ('hopper' in env_name):
        normalize(dataset)
    return (env, dataset)
class TrialTreeMulti():
    """Wrap a trial function so the resulting contraction tree is promoted
    to ContractionTreeMulti and configured with the given variable
    multiplicities and number of configurations."""

    def __init__(self, trial_fn, varmults, numconfigs):
        self.trial_fn = trial_fn
        self.varmults = varmults
        self.numconfigs = numconfigs

    def __call__(self, *args, **kwargs):
        trial = self.trial_fn(*args, **kwargs)
        tree = trial['tree']
        # Promote the tree in place to the multi-contraction subclass.
        if not isinstance(tree, ContractionTreeMulti):
            tree.__class__ = ContractionTreeMulti
        tree.set_varmults(self.varmults)
        tree.set_numconfigs(self.numconfigs)
        return trial