import random

from PIL import ImageFilter


class GaussianBlur(object):
    """Blur a PIL image with a radius sampled uniformly from `sigma`."""

    def __init__(self, sigma=(0.1, 2.0)):
        self.sigma = sigma

    def __call__(self, x):
        sigma = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=sigma))
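# A usage sketch (not from the source): GaussianBlur above matches the
# SimCLR-style augmentation interface, so it composes with torchvision
# transforms; the crop size and probability here are illustrative assumptions.
from PIL import Image
from torchvision import transforms

simclr_augment = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomApply([GaussianBlur(sigma=(0.1, 2.0))], p=0.5),
    transforms.ToTensor(),
])
augmented = simclr_augment(Image.new('RGB', (256, 256)))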
class SetTaskUpdate(EnvUpdate):
    def __init__(self, env_constructor, task):
        self._env_constructor = env_constructor
        self._task = task

    def __call__(self, old_env=None):
        if old_env is None:
            old_env = self._env_constructor()
        old_env.set_task(self._task)
        return old_env
class OwlViTModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class XGLMConfig(PretrainedConfig):
    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024,
                 ffn_dim=4096, num_layers=24, attention_heads=16, activation_function='gelu',
                 dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0,
                 init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id,
                         decoder_start_token_id=decoder_start_token_id, **kwargs)
class STResUNetIN50(STResUNet50):
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BottleneckIN
@DATASETS.register_module()  # registry decorator; its name is truncated to '_module()' in the source
class SRAnnotationDataset(BaseSRDataset):
    def __init__(self, lq_folder, gt_folder, ann_file, pipeline, scale,
                 test_mode=False, filename_tmpl='{}'):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.filename_tmpl = filename_tmpl
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        data_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                gt_name = line.split(' ')[0]
                basename, ext = osp.splitext(osp.basename(gt_name))
                lq_name = f'{self.filename_tmpl.format(basename)}{ext}'
                data_infos.append(dict(lq_path=osp.join(self.lq_folder, lq_name),
                                       gt_path=osp.join(self.gt_folder, gt_name)))
        return data_infos
class VoiceActivityDetection(base.Pipeline):
    def __init__(self, config: VoiceActivityDetectionConfig | None = None):
        self._config = VoiceActivityDetectionConfig() if config is None else config
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert self._config.step <= self._config.latency <= self._config.duration, msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency,
                                                   strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency,
                                                    strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        self.timestamp_shift = 0
        self.chunk_buffer, self.pred_buffer = [], []

    # The next three methods take no self/cls, so they were @staticmethod, and
    # `config` is read as `self.config` below, so it was a @property.
    @staticmethod
    def get_config_class() -> type:
        return VoiceActivityDetectionConfig

    @staticmethod
    def suggest_metric() -> BaseMetric:
        return DetectionErrorRate(collar=0, skip_overlap=False)

    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        return [base.TauActive]

    @property
    def config(self) -> base.PipelineConfig:
        return self._config

    def reset(self):
        self.set_timestamp_shift(0)
        self.chunk_buffer, self.pred_buffer = [], []

    def set_timestamp_shift(self, shift: float):
        self.timestamp_shift = shift

    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[Annotation, SlidingWindowFeature]]:
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert batch_size >= 1, msg
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint(self.config.duration * self.config.sample_rate))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert batch.shape[1] == expected_num_samples, msg
        segmentations = self.segmentation(batch)
        voice_detection = torch.max(segmentations, dim=-1, keepdim=True)[0]
        seg_resolution = waveforms[0].extent.duration / segmentations.shape[1]
        outputs = []
        for wav, vad in zip(waveforms, voice_detection):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            vad = SlidingWindowFeature(vad.cpu().numpy(), sw)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(vad)
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction).get_timeline(copy=False)
            if self.timestamp_shift != 0:
                shifted_agg_prediction = Timeline(uri=agg_prediction.uri)
                for segment in agg_prediction:
                    new_segment = Segment(segment.start + self.timestamp_shift,
                                          segment.end + self.timestamp_shift)
                    shifted_agg_prediction.add(new_segment)
                agg_prediction = shifted_agg_prediction
            agg_prediction = agg_prediction.to_annotation(utils.repeat_label('speech'))
            outputs.append((agg_prediction, agg_waveform))
            if len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows:
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
class NMNIST(Dataset):
    def __init__(self, train, step, dt, path=None):
        super(NMNIST, self).__init__()
        self.step = step
        self.path = path
        self.train = train
        self.dt = dt
        self.win = step * dt
        self.len = 60000
        if train == False:
            self.len = 10000
        self.eventflow = np.zeros(shape=(self.len, 2, 34, 34, self.step))
        self.label = np.zeros(shape=(self.len, 10))
        if path is not None:
            self.eventflow = np.load(path + '/data.npy')
            self.label = np.load(path + '/label.npy')

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        x = self.eventflow[idx, ...].astype(np.float32)
        y = self.label[idx].astype(np.float32)
        return x, y

    def preprocessing(self, src_path, save_path=None):
        filenum = 0
        for num in range(10):
            dir = os.path.join(src_path, str(num))
            files = os.listdir(dir)
            for file in files:
                file_dir = os.path.join(dir, file)
                with open(file_dir, 'rb') as f:
                    raw_data = np.fromfile(f, dtype=np.uint8)
                raw_data = np.uint32(raw_data)
                # N-MNIST .bin layout: 5 bytes per event (x, y, polarity + timestamp bits).
                all_y = raw_data[1::5]
                all_x = raw_data[0::5]
                all_p = (raw_data[2::5] & 128) >> 7
                all_ts = ((raw_data[2::5] & 127) << 16) | (raw_data[3::5] << 8) | raw_data[4::5]
                all_ts = np.uint32(np.around(all_ts / 1000))  # microseconds -> milliseconds
                win_indices = np.where(all_ts < self.win)[0]
                for i in range(len(win_indices)):
                    index = int(win_indices[i])
                    polar = 0  # note: only channel 0 is filled; all_p is computed but unused
                    self.eventflow[filenum, polar, all_x[index], all_y[index],
                                   int(all_ts[index] / self.dt)] = 1
                self.label[filenum] = np.eye(10)[num]
                filenum += 1
            print('Done file:' + str(num))
        if save_path is not None:
            # note: save_path only acts as a flag; the output locations are hardcoded
            field = 'Train' if self.train else 'Test'
            np.save('./data/NMNIST_npy/' + field + '/data.npy', self.eventflow)
            np.save('./data/NMNIST_npy/' + field + '/label.npy', self.label)
def sent_len_and_num_deletion_portion(zip_inp, dic: dict):
    for sent, doc_list in zip_inp:
        sent_len = len(doc_list)
        if sent_len not in dic:
            dic[sent_len] = []
        dic[sent_len].append(len(sent['del_span']))
    return dic
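# A minimal usage sketch with toy data (an assumption about the expected input
# shape): each sentence dict carries a 'del_span' list, and the returned dict
# buckets deletion counts by sentence length.
toy_input = [({'del_span': [(0, 2)]}, ['w1', 'w2', 'w3']),
             ({'del_span': []}, ['w4', 'w5', 'w6'])]
buckets = sent_len_and_num_deletion_portion(toy_input, {})
assert buckets == {3: [1, 0]}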
class PredictionHead(nn.Module):
    def __init__(self, in_channels=128, out_channels=128, hidden_channels=256):
        nn.Module.__init__(self)
        # 'liner' in the source was a typo for 'linear'; renamed consistently
        self.linear1 = nn.Linear(in_channels, hidden_channels)
        self.bn1 = nn.BatchNorm1d(hidden_channels)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(hidden_channels, out_channels)

    def forward(self, x):
        z = self.linear1(x)
        z = self.bn1(z)
        z = self.relu(z)
        out = self.linear2(z)
        return out
def test_from_summaries_inertia(X):
    d = [Exponential([2.1, 0.3, 0.1]), Exponential([1.5, 3.1, 2.2])]
    model = DenseHMM(distributions=d, edges=[[0.1, 0.8], [0.3, 0.6]], starts=[0.2, 0.8],
                     ends=[0.1, 0.1], inertia=0.3)
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X)
    model.from_summaries()
    assert_array_almost_equal(model.starts, [-2.362523, -0.116391])
    assert_array_almost_equal(model.ends, [-1.496878, -1.99371])
    assert_array_almost_equal(model.edges, [[-7.16503, -0.333041], [-1.067542, -0.667059]], 4)
    assert_array_almost_equal(d1.scales, [1.949585, 1.046725, 0.002491])
    assert_array_almost_equal(d1._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d1._xw_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2.scales, [1.465946, 1.120429, 1.659203])
    assert_array_almost_equal(d2._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2._xw_sum, [0.0, 0.0, 0.0])

    d = [Exponential([2.1, 0.3, 0.1], inertia=0.25), Exponential([1.5, 3.1, 2.2], inertia=0.83)]
    model = DenseHMM(distributions=d, edges=[[0.1, 0.8], [0.3, 0.6]], starts=[0.2, 0.8],
                     ends=[0.1, 0.1], inertia=0.0)
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X)
    model.from_summaries()
    assert_array_almost_equal(model.starts, [-2.685274, -0.07064])
    assert_array_almost_equal(model.ends, [-1.151575, -1.861335])
    assert_array_almost_equal(model.edges, [[-9.248936, -0.38014], [-1.009071, -0.734016]], 4)
    assert_array_almost_equal(d1.scales, [1.987189, 0.860044, 0.026868])
    assert_array_almost_equal(d1._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d1._xw_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2.scales, [1.494211, 2.763473, 2.108064])
    assert_array_almost_equal(d2._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2._xw_sum, [0.0, 0.0, 0.0])
def resize_multiple(img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100):
    imgs = []
    for size in sizes:
        imgs.append(resize_and_convert(img, size, resample, quality))
    return imgs
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()  # decorator name truncated to '_grad()' in the source
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000,
                 generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
                 output_type: Optional[str] = 'pil', return_dict: bool = True,
                 **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction steps
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample,
                                                     generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == 'pil':
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
def write_dict_to_tb(writer, dict, prefix, step):
    if prefix[-1] != '/':
        prefix += '/'
    for key, value in dict.items():
        writer.add_scalar(f'{prefix}{key}', value, step)
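# A usage sketch (setup assumed, not from the source): pairs the helper with
# torch.utils.tensorboard.SummaryWriter; the log directory and metric names
# are illustrative.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='runs/example')
write_dict_to_tb(writer, {'loss': 0.42, 'acc': 0.91}, prefix='train', step=100)  # logs train/loss, train/acc
writer.close()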
class NormalizeIntermediate(object):
    def __init__(self, mean, std):
        self.__int_depth_mean = mean['int_depth']
        self.__int_depth_std = std['int_depth']
        self.__int_scales_mean = mean['int_scales']
        self.__int_scales_std = std['int_scales']

    def __call__(self, sample):
        if 'int_depth' in sample and sample['int_depth'] is not None:
            sample['int_depth'] = (sample['int_depth'] - self.__int_depth_mean) / self.__int_depth_std
        if 'int_scales' in sample and sample['int_scales'] is not None:
            sample['int_scales'] = (sample['int_scales'] - self.__int_scales_mean) / self.__int_scales_std
        return sample
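# A usage sketch with toy statistics (all numbers are assumptions): only the
# intermediate keys that are present and non-None get standardized.
import numpy as np

normalize = NormalizeIntermediate(mean={'int_depth': 5.0, 'int_scales': 1.0},
                                  std={'int_depth': 2.0, 'int_scales': 0.5})
sample = {'int_depth': np.array([7.0]), 'int_scales': None}
sample = normalize(sample)  # int_depth -> [1.0]; int_scales is left untouched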
@pytest.mark.parametrize('device', ['cpu', 'cuda'])  # decorator prefix truncated in the source
def test_basic_encoding(device):
    check_cuda(device)
    layer = rff.layers.BasicEncoding().to(device)
    v = rff.dataloader.rectangular_coordinates((256, 256)).to(device)
    gamma_v = layer(v)
    gamma_v_expected = rff.functional.basic_encoding(v)
    np.testing.assert_almost_equal(gamma_v.cpu().numpy(), gamma_v_expected.cpu().numpy(), decimal=5)
@register_model  # decorator name truncated to '_model' in the source
def gluon_seresnext101_64x4d(pretrained=False, **kwargs):
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4,
                      block_args=dict(attn_layer=SEModule), **kwargs)
    return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args)
def GaussianLinearPath(sigma, fold, l=5):
    def path_interpolation_func(cv_numpy_image):
        kernel = isotropic_gaussian_kernel(l, sigma)
        baseline_image = cv2.filter2D(cv_numpy_image, -1, kernel)
        image_interpolation = interpolation(cv_numpy_image, baseline_image, fold,
                                            mode='linear').astype(np.float32)
        lambda_derivative_interpolation = np.repeat(
            np.expand_dims(cv_numpy_image - baseline_image, axis=0), fold, axis=0)
        return (np.moveaxis(image_interpolation, 3, 1).astype(np.float32),
                np.moveaxis(lambda_derivative_interpolation, 3, 1).astype(np.float32))
    return path_interpolation_func
class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
from collections import defaultdict

import torch
from torch.optim import Optimizer


class Lookahead(Optimizer):
    def __init__(self, optimizer, k=5, alpha=0.5):
        self.optimizer = optimizer
        self.k = k
        self.alpha = alpha
        self.param_groups = self.optimizer.param_groups
        self.state = defaultdict(dict)
        self.fast_state = self.optimizer.state
        for group in self.param_groups:
            group['counter'] = 0

    def update(self, group):
        for fast in group['params']:
            param_state = self.state[fast]
            if 'slow_param' not in param_state:
                param_state['slow_param'] = torch.zeros_like(fast.data)
                param_state['slow_param'].copy_(fast.data)
            slow = param_state['slow_param']
            slow += (fast.data - slow) * self.alpha
            fast.data.copy_(slow)

    def update_lookahead(self):
        for group in self.param_groups:
            self.update(group)

    def step(self, closure=None):
        loss = self.optimizer.step(closure)
        for group in self.param_groups:
            if group['counter'] == 0:
                self.update(group)
            group['counter'] += 1
            if group['counter'] >= self.k:
                group['counter'] = 0
        return loss

    def state_dict(self):
        fast_state_dict = self.optimizer.state_dict()
        slow_state = {(id(k) if isinstance(k, torch.Tensor) else k): v
                      for k, v in self.state.items()}
        fast_state = fast_state_dict['state']
        param_groups = fast_state_dict['param_groups']
        return {'fast_state': fast_state, 'slow_state': slow_state, 'param_groups': param_groups}

    def load_state_dict(self, state_dict):
        slow_state_dict = {'state': state_dict['slow_state'],
                           'param_groups': state_dict['param_groups']}
        fast_state_dict = {'state': state_dict['fast_state'],
                           'param_groups': state_dict['param_groups']}
        super(Lookahead, self).load_state_dict(slow_state_dict)
        self.optimizer.load_state_dict(fast_state_dict)
        self.fast_state = self.optimizer.state

    def add_param_group(self, param_group):
        param_group['counter'] = 0
        self.optimizer.add_param_group(param_group)
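# A usage sketch (hyperparameters are illustrative): Lookahead wraps a base
# optimizer; step() advances the fast weights every call and syncs the slow
# weights every k steps.
import torch
from torch import nn

net = nn.Linear(4, 2)
base_optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
optimizer = Lookahead(base_optimizer, k=5, alpha=0.5)
loss = net(torch.randn(8, 4)).pow(2).mean()
loss.backward()
optimizer.step()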
def test_alias_delay_initialization2(capture):
    class B2(m.A2):
        def __init__(self):
            super(B2, self).__init__()

        def f(self):
            print('In python B2.f()')

    with capture:
        a2 = m.A2()
        m.call_f(a2)
        del a2
        pytest.gc_collect()
        a3 = m.A2(1)
        m.call_f(a3)
        del a3
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
        PyA2.PyA2()
        PyA2.f()
        A2.f()
        PyA2.~PyA2()
    """
    with capture:
        b2 = B2()
        m.call_f(b2)
        del b2
        pytest.gc_collect()
    assert capture == """
        PyA2.PyA2()
        PyA2.f()
        In python B2.f()
        PyA2.~PyA2()
    """
def dock(smile, unique_id, target='drd3', pythonsh=None, vina=None, parallel=True,
         exhaustiveness=16, mean=True, load=False):
    if load:
        try:
            score = load[smile]
            return score
        except KeyError:
            pass
    if pythonsh is None or vina is None:
        global PYTHONSH
        pythonsh = PYTHONSH
        global VINA
        vina = VINA
    tmp_path = os.path.join(script_dir, f'tmp/{unique_id}')
    soft_mkdir(tmp_path)
    RECEPTOR_PATH = os.path.join(script_dir, f'data_docking/{target}.pdbqt')
    CONF_PATH = os.path.join(script_dir, f'data_docking/{target}_conf.txt')
    try:
        mol = pybel.readstring('smi', smile)
        mol.addh()
        mol.make3D()
        dump_mol2_path = os.path.join(tmp_path, 'ligand.mol2')
        dump_pdbqt_path = os.path.join(tmp_path, 'ligand.pdbqt')
        mol.write('mol2', dump_mol2_path, overwrite=True)
        prepare_ligand = os.path.join(script_dir, 'prepare_ligand4.py')
        subprocess.run(
            f'{pythonsh} {prepare_ligand} -l {dump_mol2_path} -o {dump_pdbqt_path} -A hydrogens'.split(),
            timeout=100)
        start = time()
        cmd = (f'{vina} --receptor {RECEPTOR_PATH} --ligand {dump_pdbqt_path} '
               f'--config {CONF_PATH} --exhaustiveness {exhaustiveness} --log log.txt')
        if parallel:
            subprocess.run(cmd.split(), timeout=1200)
        else:
            cmd += ' --cpu 1'
            subprocess.run(cmd.split(), timeout=1200)
        delta_t = time() - start
        print('Docking time :', delta_t)
        with open(os.path.join(tmp_path, 'ligand_out.pdbqt'), 'r') as f:
            lines = f.readlines()
        slines = [l for l in lines if l.startswith('REMARK VINA RESULT')]
        values = [l.split() for l in slines]
        score = [float(v[3]) for v in values]
        if mean:
            score = np.mean(score)
    except:
        score = 0 if mean else [0] * 10
    try:
        shutil.rmtree(tmp_path)
    except FileNotFoundError:
        pass
    return score
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    params.is_master = (params.node_id == 0) and (params.local_rank == 0)
    params.multi_node = params.n_nodes > 1
    PREFIX = f'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank)
    logger.info(PREFIX + 'World size : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname())
    torch.cuda.set_device(params.local_rank)
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(init_method='env://', backend='nccl')
class Accumulator(object):
    def __init__(self):
        self.table = {}

    def record(self, key, val):
        if key not in self.table:
            self.table[key] = MeanAccumulator()
        self.table[key].record(val)

    def has(self, key):
        return key in self.table

    def get_mean(self, key, default=0):
        if key in self.table:
            val = self.table[key].val
        else:
            val = default
        return val

    def reset(self, key=None):
        if key is None:
            keys = list(self.table.keys())
        else:
            keys = [key]
        for key in keys:
            del self.table[key]
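# Accumulator depends on a MeanAccumulator class the source does not include; a
# minimal stand-in consistent with the .record()/.val usage above could be:
class MeanAccumulator(object):
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def record(self, val):
        self.sum += val
        self.count += 1

    @property
    def val(self):
        return self.sum / max(self.count, 1)


acc = Accumulator()
acc.record('loss', 2.0)
acc.record('loss', 4.0)
assert acc.get_mean('loss') == 3.0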
class RubiksCubeViewer(Viewer[State]):
    def __init__(self, sticker_colors: Optional[list], cube_size: int):
        self.cube_size = cube_size
        self.sticker_colors_cmap = matplotlib.colors.ListedColormap(sticker_colors)
        self.figure_name = f"{cube_size}x{cube_size}x{cube_size} Rubik's Cube"
        self.figure_size = (6.0, 6.0)

    def render(self, state: State) -> None:
        self._clear_display()
        fig, ax = self._get_fig_ax()
        self._draw(ax, state)
        self._update_display(fig)

    def animate(self, states: Sequence[State], interval: int,
                save_path: Optional[str]) -> matplotlib.animation.FuncAnimation:
        fig, ax = plt.subplots(nrows=3, ncols=2, figsize=self.figure_size)
        fig.suptitle(self.figure_name)
        plt.tight_layout()
        ax = ax.flatten()
        plt.close(fig)

        def make_frame(state_index: int) -> None:
            state = states[state_index]
            self._draw(ax, state)

        self._animation = matplotlib.animation.FuncAnimation(fig, make_frame,
                                                             frames=len(states), interval=interval)
        if save_path:
            self._animation.save(save_path)
        return self._animation

    def _get_fig_ax(self) -> Tuple[plt.Figure, List[plt.Axes]]:
        exists = plt.fignum_exists(self.figure_name)
        if exists:
            fig = plt.figure(self.figure_name)
            ax = fig.get_axes()
        else:
            fig, ax = plt.subplots(nrows=3, ncols=2, figsize=self.figure_size,
                                   num=self.figure_name)
            fig.suptitle(self.figure_name)
            ax = ax.flatten()
            plt.tight_layout()
            plt.axis('off')
            if not plt.isinteractive():
                fig.show()
        return fig, ax

    def _draw(self, ax: List[plt.Axes], state: State) -> None:
        i = 0
        for face in Face:
            ax[i].clear()
            ax[i].set_title(label=f'{face}')
            ax[i].set_xticks(jnp.arange(-0.5, self.cube_size - 1, 1))
            ax[i].set_yticks(jnp.arange(-0.5, self.cube_size - 1, 1))
            ax[i].tick_params(top=False, bottom=False, left=False, right=False,
                              labelleft=False, labelbottom=False, labeltop=False,
                              labelright=False)
            ax[i].imshow(state.cube[i], cmap=self.sticker_colors_cmap, vmin=0,
                         vmax=len(Face) - 1)
            ax[i].grid(color='black', linestyle='-', linewidth=2)
            i += 1

    def _update_display(self, fig: plt.Figure) -> None:
        if plt.isinteractive():
            fig.canvas.draw()
            if jumanji.environments.is_colab():
                plt.show(self.figure_name)
        else:
            fig.canvas.draw_idle()
            fig.canvas.flush_events()

    def _clear_display(self) -> None:
        if jumanji.environments.is_colab():
            import IPython.display
            IPython.display.clear_output(True)

    def close(self) -> None:
        plt.close(self.figure_name)
@registry.register_builder('music_qa')  # registry decorator; its name is truncated to "_builder" in the source
class MusicQABuilder(BaseDatasetBuilder):
    train_dataset_cls = AVQADataset
    eval_dataset_cls = AVQADataset
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/musicqa/defaults_qa.yaml'}

    def build(self):
        self.build_processors()
        build_info = self.config.build_info
        ann_info = build_info.annotations
        vis_info = build_info.get(self.data_type)
        aud_info = build_info.get('audios')
        datasets = dict()
        for split in ann_info.keys():
            if split not in ['train', 'val', 'test']:
                continue
            is_train = split == 'train'
            vis_processor = self.vis_processors['train'] if is_train else self.vis_processors['eval']
            text_processor = self.text_processors['train'] if is_train else self.text_processors['eval']
            aud_processor = self.aud_processors['train'] if is_train else self.aud_processors['eval']
            ann_paths = ann_info.get(split).storage
            if isinstance(ann_paths, str):
                ann_paths = [ann_paths]
            abs_ann_paths = []
            for ann_path in ann_paths:
                if not os.path.isabs(ann_path):
                    ann_path = utils.get_cache_path(ann_path)
                abs_ann_paths.append(ann_path)
            ann_paths = abs_ann_paths
            vis_path = vis_info.storage
            aud_path = aud_info.storage
            if not os.path.isabs(vis_path):
                vis_path = utils.get_cache_path(vis_path)
            if not os.path.exists(vis_path):
                warnings.warn('storage path {} does not exist.'.format(vis_path))
            dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
            datasets[split] = dataset_cls(vis_processor=vis_processor,
                                          text_processor=text_processor,
                                          aud_processor=aud_processor, ann_paths=ann_paths,
                                          vis_root=vis_path, aud_root=aud_path)
        return datasets
class SelectiveKernelBasic(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
                 sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None,
                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None,
                 drop_block=None, drop_path=None):
        super(SelectiveKernelBasic, self).__init__()
        sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer,
                           aa_layer=aa_layer)
        assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
        assert base_width == 64, 'BasicBlock does not support changing base width'
        first_planes = planes // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation
        self.conv1 = SelectiveKernelConv(inplanes, first_planes, stride=stride,
                                         dilation=first_dilation, **conv_kwargs, **sk_kwargs)
        conv_kwargs['act_layer'] = None
        self.conv2 = ConvBnAct(first_planes, outplanes, kernel_size=3, dilation=dilation,
                               **conv_kwargs)
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        nn.init.zeros_(self.conv2.bn.weight)

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            residual = self.downsample(residual)
        x += residual
        x = self.act(x)
        return x
def registerSceneProperties():
    bpy.types.Scene.zpy_gin_config_path = bpy.props.StringProperty(
        name='', description='Path to a gin config file.', default='',
        subtype='FILE_PATH', update=_load_gin_config)
    bpy.types.Scene.zpy_runpy_path = bpy.props.StringProperty(
        name='', description='Path to run.py file', default='',
        subtype='FILE_PATH', update=_load_runpy)
    bpy.types.Scene.zpy_template_dir = bpy.props.StringProperty(
        name='', description='Path to script template directory.',
        default=str(zpy.assets.script_template_dir()), subtype='DIR_PATH')
def run_scan_net(sn_loc, save_model, data_config='scan_net_ICO'):
    config = Config(article_sections='all', ico_encoder='CBoW', article_encoder='GRU',
                    attn=False, cond_attn=False, tokenwise_attention=False, batch_size=32,
                    attn_batch_size=32, epochs=25, attn_epochs=25, data_config=data_config,
                    pretrain_attention=False, tune_embeddings=False,
                    no_pretrained_word_embeddings=False, attention_acceptance='auc')
    article_sections = {
        'all': None,
        'abstract/results': {'abstract', 'results'},
        'results': {'results'},
        'abstracts': {'abstract'},
    }
    data_configs = {
        'vanilla': identity,
        'cheating': replace_articles_with_evidence_spans,
        'no_prompt': replace_prompts_with_empty,
        'no_article': replace_articles_with_empty,
        'double_training_trick': double_training_trick,
        'scan_net': scan_net_preprocess_create(sn_loc),
        'scan_net_ICO': scan_net_ICO_preprocess_create(sn_loc),
    }
    configs = [(data_configs[data_config], article_sections['all'], config)]
    os.makedirs(save_model, exist_ok=True)
    results = generate_paper_results(configs, mode='experiment', save_dir=save_model,
                                     determinize=False)
    if len(results) > 1:
        raise ValueError("Can't properly output more than one result file in this setting, FIXME")
    val_metrics, attn_metrics = results[0]
    df = results_to_csv(config, val_metrics, attn_metrics)
    print('<csvsnippet>')
    df.to_csv(sys.stdout, index=False, compression=None)
    print('</csvsnippet>')
class BernoulliDropoutDimensionPNGenerator(nn.Module):
    def __init__(self, dim_hidden, dropout: float = 0.1):
        super(BernoulliDropoutDimensionPNGenerator, self).__init__()
        self.dim_hidden = dim_hidden
        self.dropout = dropout
        dropout_dim = random.sample(list(range(self.dim_hidden)),
                                    math.floor(self.dropout * self.dim_hidden))
        self.dropout_mask = torch.ones((1, self.dim_hidden))
        self.dropout_mask[0, dropout_dim] = torch.zeros(self.dropout_mask[0, dropout_dim].shape)

    def update(self):
        dropout_dim = random.sample(list(range(self.dim_hidden)),
                                    math.floor(self.dropout * self.dim_hidden))
        self.dropout_mask = torch.ones((1, self.dim_hidden))
        self.dropout_mask[0, dropout_dim] = torch.zeros(self.dropout_mask[0, dropout_dim].shape)

    def forward(self, emb):
        return emb, emb * self.dropout_mask
def convex_hull(dim, points, checkin=True, checkout=True):
    if checkin:
        if not convex_hull_checkin(dim, points):
            return None
    from phcpy.phcpy2c3 import py2c_giftwrap_convex_hull
    from phcpy.phcpy2c3 import py2c_giftwrap_number_of_facets
    from phcpy.phcpy2c3 import py2c_giftwrap_retrieve_facet
    strpoints = str(points)
    fail = py2c_giftwrap_convex_hull(len(strpoints), strpoints)
    nbrfacets = py2c_giftwrap_number_of_facets(dim)
    print('computed', nbrfacets, 'facets')
    result = []
    for k in range(nbrfacets):
        strfacet = py2c_giftwrap_retrieve_facet(dim, k)
        facet = eval(strfacet)
        result.append(facet)
    if dim == 3:
        from phcpy.phcpy2c3 import py2c_giftwrap_clear_3d_facets
        fail = py2c_giftwrap_clear_3d_facets()
    if dim == 4:
        from phcpy.phcpy2c3 import py2c_giftwrap_clear_4d_facets
        fail = py2c_giftwrap_clear_4d_facets()
    if checkout:
        if not convex_hull_checkout(dim, points, result):
            print('the list of facets is not correct')
    return result
class NotStopShaperConfig(RewardConfig):
    def __init__(self, on_switch_value, other_value):
        self.on_switch_value = on_switch_value
        self.other_value = other_value

    def create_reward_shaper(self):
        return NoStopShaper(self.on_switch_value, self.other_value)
def spark_df_to_ray_dataset(df: 'SparkDataFrame') -> 'Dataset':
    spark_xshards = spark_df_to_pd_sparkxshards(df)
    ray_dataset = spark_xshards_to_ray_dataset(spark_xshards)
    return ray_dataset
def gen_updates_rmsprop(loss, all_parameters, learning_rate=1.0, rho=0.9, epsilon=1e-06):
    all_grads = [theano.grad(loss, param) for param in all_parameters]
    all_accumulators = [theano.shared(param.get_value() * 0.0) for param in all_parameters]
    updates = []
    for param_i, grad_i, acc_i in zip(all_parameters, all_grads, all_accumulators):
        acc_i_new = rho * acc_i + (1 - rho) * grad_i ** 2
        updates.append((acc_i, acc_i_new))
        updates.append((param_i,
                        param_i - learning_rate * grad_i / T.sqrt(acc_i_new + epsilon)))
    return updates
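# The same RMSProp accumulator update written in plain NumPy, as a parallel
# illustration for readers without Theano (not the source's code path):
import numpy as np

def rmsprop_step(param, grad, acc, learning_rate=1.0, rho=0.9, epsilon=1e-06):
    acc_new = rho * acc + (1 - rho) * grad ** 2
    param_new = param - learning_rate * grad / np.sqrt(acc_new + epsilon)
    return param_new, acc_new

p, a = np.array([1.0]), np.array([0.0])
p, a = rmsprop_step(p, np.array([0.5]), a)  # one update step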
class TransformerDecoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation=nn.LeakyReLU, normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = activation()
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos), value=memory,
                                   attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos), value=memory,
                                   attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def main(args, init_distributed=False):
    utils.import_user_module(args)
    assert (args.max_tokens is not None) or (args.max_sentences is not None), \
        'Must specify batch size either with --max-tokens or --max-sentences'
    metrics.reset()
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    logger.info(args)
    task = tasks.setup_task(args)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)
    if args.crd_weight > 0.0:
        for train_sub_split in args.train_subset.split(','):
            task.load_dataset(train_sub_split, combine=True, epoch=1)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    logger.info('num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    if 'distill' in args.criterion.lower() or 'crd' in args.criterion.lower():
        logger.info('In teacher model, num. model params: {} (num. trained: {})'.format(
            sum(p.numel() for p in criterion.teacher_model.parameters()),
            sum(p.numel() for p in criterion.teacher_model.parameters() if p.requires_grad)))
    if args.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(config_path=args.quantization_config_path,
                                                 max_epoch=args.max_epoch,
                                                 max_update=args.max_update)
    else:
        quantizer = None
    if args.model_parallel_size == 1:
        trainer = Trainer(args, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)
    logger.info('training on {} GPUs'.format(args.distributed_world_size))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens, args.max_sentences))
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
        valid_losses = train(args, trainer, task, epoch_itr, max_update)
        if should_stop_early(args, valid_losses[0]) or trainer.get_num_updates() >= max_update:
            break
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            load_dataset=(os.pathsep in getattr(args, 'data', '')))
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def save_for_huggingface_upstream(model, tokenizer, output_dir):
    tokenizer.save_pretrained(output_dir)
    torch.save(model.quantized_state_dict(), os.path.join(output_dir, WEIGHTS_NAME))
    model.model.config.architectures = [model.model.__class__.__name__]
    model.model.config.torch_dtype = 'int8'
    model.model.config.save_pretrained(output_dir)
class GPT3QueryExecutor():
    def __init__(self, sites):
        self.prompt = self._construct_prompt(sites)
        self.model = 'text-davinci-003'

    def _construct_prompt(self, sites):
        sites = deepcopy(sites)
        test_searches = [
            'Search(fields=[name], filters=[category == landmark])',
            'Search(fields=[name], filters=[category == concert])',
            'Search(fields=[name], text_query=live music)',
            'Search(fields=[name, price], text_query=live music, filters=[price <= 40])',
            'Search(fields=[name, price], filters=[category == restaurant, price <= 10], sort_by=[distance_to(The Mall)])',
            'Search(fields=[name, price, distance], filters=[category == restaurant], sort_by=[distance_to(The Mall), price])',
            'Search(fields=[name], text_query="good for kids", filters=[category == park], sort_by=[distance_to(Saul\'s)])',
            'Search(fields=[name], filters=[vegan == true])',
        ]
        static_search = StaticQueryExecutor(sites)

        def get_result_str(q):
            try:
                return static_search(q)
            except SearchError as e:
                return str(e)

        examples = [{'query': q, 'result': get_result_str(q)} for q in test_searches]
        for s in sites:
            del s['type']
            del s['id_']
            s['loc'] = [round(s['loc'][0], 2), round(s['loc'][1], 2)]
        prompt = QueryExecutorTemplate.render(sites=sites, example_queries=examples)
        return prompt

    def __call__(self, query_str):
        prompt = self.prompt + f'''Query: {query_str}
Result: '''
        response = openai.Completion.create(model=self.model, prompt=prompt, temperature=0.1,
                                            max_tokens=256, top_p=0.95, frequency_penalty=0,
                                            presence_penalty=0,
                                            stop=['\n\n', 'Query', 'Query:'])
        print(response)
        return response['choices'][0]['text']

    def distance(self, s1, s2) -> float:
        dist = np.linalg.norm(np.array(s1['loc']) - np.array(s2['loc']))
        dist *= 69  # rough miles per degree of latitude/longitude
        dist = round(dist * 10) / 10
        return dist
class CDilated(nn.Module):
    def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
        super().__init__()
        padding = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding,
                              bias=False, dilation=d, groups=groups)

    def forward(self, input):
        output = self.conv(input)
        return output
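# A sanity check of the padding arithmetic (toy shapes, an assumption about
# intent): padding = (kSize - 1) // 2 * d keeps the spatial size unchanged for
# odd kernels at stride 1, dilated or not.
import torch

layer = CDilated(nIn=8, nOut=8, kSize=3, stride=1, d=2)
out = layer(torch.randn(1, 8, 32, 32))
assert out.shape == (1, 8, 32, 32)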
def run_sequence(seq_name, seq_home, dataset_name, yaml_name, num_gpu=1, epoch=60, debug=0,
                 script_name='prompt'):
    try:
        worker_name = multiprocessing.current_process().name
        worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1
        gpu_id = worker_id % num_gpu
        torch.cuda.set_device(gpu_id)
    except:
        pass
    seq_txt = seq_name
    save_name = '{}'.format(yaml_name)
    save_path = f'./RGBE_workspace/results/{dataset_name}/' + save_name + '/' + seq_txt + '.txt'
    save_folder = f'./RGBE_workspace/results/{dataset_name}/' + save_name
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if os.path.exists(save_path):
        print(f'-1 {seq_name}')
        return
    if script_name == 'vipt':
        params = rgbe_prompt_params.parameters(yaml_name, epoch)
        ostrack = ViPTTrack(params)
        tracker = ViPT_RGBE(tracker=ostrack)
    seq_path = seq_home + '/' + seq_name
    print('Process sequence: ' + seq_name)
    RGB_img_list, E_img_list, RGB_gt, absent_label = genConfig(seq_path, dataset_name)
    if absent_label[0] == 0:
        first_present_idx = absent_label.argmax()
        RGB_img_list = RGB_img_list[first_present_idx:]
        E_img_list = E_img_list[first_present_idx:]
        RGB_gt = RGB_gt[first_present_idx:]
    if len(RGB_img_list) == len(RGB_gt):
        result = np.zeros_like(RGB_gt)
    else:
        result = np.zeros((len(RGB_img_list), 4), dtype=RGB_gt.dtype)
    result[0] = np.copy(RGB_gt[0])
    toc = 0
    for frame_idx, (rgb_path, E_path) in enumerate(zip(RGB_img_list, E_img_list)):
        tic = cv2.getTickCount()
        if frame_idx == 0:
            image = get_x_frame(rgb_path, E_path, dtype=getattr(params.cfg.DATA, 'XTYPE', 'rgbrgb'))
            tracker.initialize(image, RGB_gt[0].tolist())
        elif frame_idx > 0:
            image = get_x_frame(rgb_path, E_path, dtype=getattr(params.cfg.DATA, 'XTYPE', 'rgbrgb'))
            region, confidence = tracker.track(image)
            result[frame_idx] = np.array(region)
        toc += cv2.getTickCount() - tic
    toc /= cv2.getTickFrequency()
    np.savetxt(save_path, result, fmt='%.14f', delimiter=',')
    print('{} , fps:{}'.format(seq_name, frame_idx / toc))
class DensePoseTransformData(object):
    MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]
    POINT_LABEL_SYMMETRIES = [0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17,
                              20, 19, 22, 21, 24, 23]

    def __init__(self, uv_symmetries: Dict[str, torch.Tensor], device: torch.device):
        self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES
        self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES
        self.uv_symmetries = uv_symmetries
        self.device = torch.device('cpu')

    def to(self, device: torch.device, copy: bool = False) -> 'DensePoseTransformData':
        if self.device == device and not copy:
            return self
        uv_symmetry_map = {}
        for key in self.uv_symmetries:
            uv_symmetry_map[key] = self.uv_symmetries[key].to(device=device, copy=copy)
        return DensePoseTransformData(uv_symmetry_map, device)

    @staticmethod  # `load` takes no self, so it was a @staticmethod
    def load(io: Union[str, BinaryIO]):
        import scipy.io
        uv_symmetry_map = scipy.io.loadmat(io)
        uv_symmetry_map_torch = {}
        for key in ['U_transforms', 'V_transforms']:
            uv_symmetry_map_torch[key] = []
            map_src = uv_symmetry_map[key]
            map_dst = uv_symmetry_map_torch[key]
            for i in range(map_src.shape[1]):
                map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))
            uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0)
        transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device('cpu'))
        return transform_data
def test_automain_imported(ex):
    main_called = [False]
    with patch.object(sys, 'argv', ['test.py']):
        @ex.automain  # decorator lost in flattening; presumed from the assertions below
        def foo():
            main_called[0] = True
    assert 'foo' in ex.commands
    assert ex.commands['foo'] == foo
    assert ex.default_command == 'foo'
    assert main_called[0] is False
@registry.register_dataset(name='PointNav-v1')  # registry decorator; its name is truncated to "_dataset" in the source
class PointNavDatasetV1(Dataset):
    episodes: List[NavigationEpisode]
    content_scenes_path: str = '{data_path}/content/{scene}.json.gz'

    @staticmethod
    def check_config_paths_exist(config: Config) -> bool:
        return (os.path.exists(config.DATA_PATH.format(split=config.SPLIT))
                and os.path.exists(config.SCENES_DIR))

    @classmethod
    def get_scenes_to_load(cls, config: Config) -> List[str]:
        assert cls.check_config_paths_exist(config)
        dataset_dir = os.path.dirname(config.DATA_PATH.format(split=config.SPLIT))
        cfg = config.clone()
        cfg.defrost()
        cfg.CONTENT_SCENES = []
        dataset = cls(cfg)
        has_individual_scene_files = os.path.exists(
            dataset.content_scenes_path.split('{scene}')[0].format(data_path=dataset_dir))
        if has_individual_scene_files:
            return cls._get_scenes_from_folder(content_scenes_path=dataset.content_scenes_path,
                                               dataset_dir=dataset_dir)
        else:
            cfg.CONTENT_SCENES = [ALL_SCENES_MASK]
            dataset = cls(cfg)
            return list(map(cls.scene_from_scene_path, dataset.scene_ids))

    @staticmethod
    def _get_scenes_from_folder(content_scenes_path, dataset_dir):
        scenes = []
        content_dir = content_scenes_path.split('{scene}')[0]
        scene_dataset_ext = content_scenes_path.split('{scene}')[1]
        content_dir = content_dir.format(data_path=dataset_dir)
        if not os.path.exists(content_dir):
            return scenes
        for filename in os.listdir(content_dir):
            if filename.endswith(scene_dataset_ext):
                scene = filename[:-len(scene_dataset_ext)]
                scenes.append(scene)
        scenes.sort()
        return scenes

    def __init__(self, config: Optional[Config] = None) -> None:
        self.episodes = []
        if config is None:
            return
        datasetfile_path = config.DATA_PATH.format(split=config.SPLIT)
        with gzip.open(datasetfile_path, 'rt') as f:
            self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
        dataset_dir = os.path.dirname(datasetfile_path)
        has_individual_scene_files = os.path.exists(
            self.content_scenes_path.split('{scene}')[0].format(data_path=dataset_dir))
        if has_individual_scene_files:
            scenes = config.CONTENT_SCENES
            if ALL_SCENES_MASK in scenes:
                scenes = self._get_scenes_from_folder(
                    content_scenes_path=self.content_scenes_path, dataset_dir=dataset_dir)
            for scene in scenes:
                scene_filename = self.content_scenes_path.format(data_path=dataset_dir,
                                                                 scene=scene)
                with gzip.open(scene_filename, 'rt') as f:
                    self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
        else:
            self.episodes = list(filter(self.build_content_scenes_filter(config), self.episodes))

    def from_json(self, json_str: str, scenes_dir: Optional[str] = None) -> None:
        deserialized = json.loads(json_str)
        if CONTENT_SCENES_PATH_FIELD in deserialized:
            self.content_scenes_path = deserialized[CONTENT_SCENES_PATH_FIELD]
        for episode in deserialized['episodes']:
            episode = NavigationEpisode(**episode)
            if scenes_dir is not None:
                if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
                    episode.scene_id = episode.scene_id[len(DEFAULT_SCENE_PATH_PREFIX):]
                episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
            for g_index, goal in enumerate(episode.goals):
                episode.goals[g_index] = NavigationGoal(**goal)
            if episode.shortest_paths is not None:
                for path in episode.shortest_paths:
                    for p_index, point in enumerate(path):
                        path[p_index] = ShortestPathPoint(**point)
            self.episodes.append(episode)
def wrap_atari_dqn(env):
    from baselines.common.atari_wrappers import wrap_deepmind
    return wrap_deepmind(env, frame_stack=True, scale=True)
def load_dataset_indonesian(data_name='prosa', data_path=None, data_path_test=None):
    if data_name == 'prosa':
        train = pd.read_csv('../input/dataset-prosa/data_train_full.tsv', sep='\t', header=None)
        train = train.rename(columns={0: 'text', 1: 'label'})
        train = train[train['label'] != 'neutral']
        train['label'] = train['label'].apply(lambda x: 1 if x == 'positive' else 0)
        train['text'] = train['text'].apply(lambda x: preprocess_text(x))
        test = pd.read_csv('../input/dataset-prosa/data_testing_full.tsv', sep='\t', header=None)
        test = test.rename(columns={0: 'text', 1: 'label'})
        test = test[test['label'] != 'neutral']
        test['label'] = test['label'].apply(lambda x: 1 if x == 'positive' else 0)
        test['text'] = test['text'].apply(lambda x: preprocess_text(x))
    elif data_name == 'trip_advisor':
        if data_path is None:
            train = pd.read_csv('../input/dataset-tripadvisor/train_set.csv')
        else:
            train = pd.read_csv(data_path)
        train = train.rename(columns={'content': 'text', 'polarity': 'label'})
        train['label'] = train['label'].apply(lambda x: 1 if x == 'positive' else 0)
        train['text'] = train['text'].apply(lambda x: preprocess_text(x))
        if data_path_test is None:
            test = pd.read_csv('../input/dataset-tripadvisor/test_set.csv')
        else:
            test = pd.read_csv(data_path_test)
        test = test.rename(columns={'content': 'text', 'polarity': 'label'})
        test['label'] = test['label'].apply(lambda x: 1 if x == 'positive' else 0)
        test['text'] = test['text'].apply(lambda x: preprocess_text(x))
    elif data_name == 'toxic':
        if data_path is None:
            data = pd.read_csv('../input/simpler-preprocess-indonesian-hate-abusive-text/preprocessed_indonesian_toxic_tweet.csv')
        else:
            data = pd.read_csv(data_path)
        data['label'] = ((data['HS'] == 1) | (data['Abusive'] == 1)).apply(lambda x: int(x))
        data = data[['Tweet', 'label']]
        data = data.rename(columns={'Tweet': 'text'})
        X_train, X_test, y_train, y_test = train_test_split(data.text.values, data.label.values,
                                                            test_size=0.1,
                                                            random_state=RANDOM_SEED,
                                                            stratify=data.label.values)
        train = pd.DataFrame({'text': X_train, 'label': y_train})
        test = pd.DataFrame({'text': X_test, 'label': y_test})
    print('~~~Train Data~~~')
    print('Shape: ', train.shape)
    print(train[0:2])
    print('\nLabel:')
    print(train.label.value_counts())
    print('\n~~~Test Data~~~')
    print('Shape: ', test.shape)
    print(test[0:4])
    print('\nLabel:')
    print(test.label.value_counts())
    return train, test
class XLMProphetNetForConditionalGeneration(ProphetNetForConditionalGeneration):
    config_class = XLMProphetNetConfig
def _find_cuda_config(base_paths, required_version):
    def get_header_version(path):
        version = int(_get_header_version(path, 'CUDA_VERSION'))
        if not version:
            return None
        return '%d.%d' % (version // 1000, (version % 1000) // 10)

    cuda_header_path, header_version = _find_header(base_paths, 'cuda.h', required_version,
                                                    get_header_version)
    cuda_version = header_version
    cuda_library_path = _find_library(base_paths, 'cudart', cuda_version)

    def get_nvcc_version(path):
        pattern = 'Cuda compilation tools, release \\d+\\.\\d+, V(\\d+\\.\\d+\\.\\d+)'
        for line in subprocess.check_output([path, '--version']).splitlines():
            match = re.match(pattern, line.decode('ascii'))
            if match:
                return match.group(1)
        return None

    nvcc_name = 'nvcc.exe' if _is_windows() else 'nvcc'
    nvcc_path, nvcc_version = _find_versioned_file(base_paths, ['', 'bin', 'local/cuda/bin'],
                                                   nvcc_name, cuda_version, get_nvcc_version)
    nvvm_path = _find_file(base_paths,
                           ['nvvm/libdevice', 'share/cuda',
                            'lib/nvidia-cuda-toolkit/libdevice', 'local/cuda/nvvm/libdevice'],
                           'libdevice*.10.bc')
    cupti_header_path = _find_file(base_paths, _header_paths(), 'cupti.h')
    cupti_library_path = _find_library(base_paths, 'cupti', required_version)
    cuda_binary_dir = os.path.dirname(nvcc_path)
    nvvm_library_dir = os.path.dirname(nvvm_path)
    cuda_toolkit_paths = (os.path.normpath(os.path.join(cuda_binary_dir, '..')),
                          os.path.normpath(os.path.join(nvvm_library_dir, '../..')))
    if cuda_toolkit_paths[0] != cuda_toolkit_paths[1]:
        raise ConfigError('Inconsistent CUDA toolkit path: %s vs %s' % cuda_toolkit_paths)
    return {
        'cuda_version': cuda_version,
        'cuda_include_dir': os.path.dirname(cuda_header_path),
        'cuda_library_dir': os.path.dirname(cuda_library_path),
        'cuda_binary_dir': cuda_binary_dir,
        'nvvm_library_dir': nvvm_library_dir,
        'cupti_include_dir': os.path.dirname(cupti_header_path),
        'cupti_library_dir': os.path.dirname(cupti_library_path),
        'cuda_toolkit_path': cuda_toolkit_paths[0],
    }
def _encode(x, nums_filters: list, bn_train: bool = True):
    for num_filters in nums_filters:
        x = _conv_bn_relu(x, num_filters, bn_train=bn_train)
        pool = MemorizedMaxPooling2D(pool_size=(2, 2), strides=(2, 2))
        x = pool(x)
    return x, pool
class MLPAvg(BaseAvg):
    def __init__(self, in_channels, hidden_channels, n_mods, out_channels, **kwargs):
        super().__init__(**kwargs)
        in_channels *= 2
        mlp = []
        for _ in range(n_mods - 1):
            mlp.append(nn.Linear(in_channels, hidden_channels))
            mlp.append(nn.ReLU())
            in_channels = hidden_channels
        mlp.append(nn.Linear(in_channels, out_channels))
        self.mlp = nn.Sequential(*mlp)

    def _net_forward(self, feat, point_key, point_edges):
        return self.mlp(feat)
class Identity(codecs.Codec):
    _description = 'Identity.'

    def __init__(self, *args):
        super().__init__(args)

    def name(self):
        return 'identity'

    def batch_run(self, tensors, return_rec=True, return_metrics=True):
        batch, channel, height, width = tensors.shape
        bpp = torch.finfo(tensors[0].dtype).bits * channel
        n_bits = bpp * height * width
        out = {'bpp': bpp, 'n_bits': n_bits, 'rate': n_bits, 'sender_time': 0,
               'receiver_time': 0}
        if return_metrics:
            out['psnr'] = float('inf')
            out['ms-ssim'] = 1
        if return_rec:
            return out, tensors
        return out
def topk_accuracy(output, target, topk=(1,)):
    single_input = not isinstance(topk, (tuple, list))
    if single_input:
        topk = (topk,)
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape (not view): correct may be non-contiguous after the transpose above
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)[0]
        res.append(correct_k * 100.0 / batch_size)
    if single_input:
        return res[0]
    return res
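# A usage sketch with random tensors: topk_accuracy returns a scalar for a
# single k and a list when given a tuple of ks.
import torch

logits = torch.randn(16, 10)
targets = torch.randint(0, 10, (16,))
top1 = topk_accuracy(logits, targets, topk=1)             # single tensor
top1, top5 = topk_accuracy(logits, targets, topk=(1, 5))  # list of two tensors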
class AudioEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config['audio_encoder_args']['type'] == 'cnn':
            if config['audio_encoder_args']['model'] == 'ResNet38':
                self.audio_enc = ResNet38(config)
            elif config['audio_encoder_args']['model'] == 'Cnn14':
                self.audio_enc = Cnn14(config)
            if config['audio_encoder_args']['pretrained']:
                pretrained_cnn = torch.load('pretrained_models/audio_encoder/{}.pth'.format(
                    config['audio_encoder_args']['model']))['model']
                dict_new = self.audio_enc.state_dict().copy()
                trained_list = [i for i in pretrained_cnn.keys()
                                if not ('fc' in i or i.startswith('spec') or i.startswith('logmel'))]
                for i in range(len(trained_list)):
                    dict_new[trained_list[i]] = pretrained_cnn[trained_list[i]]
                self.audio_enc.load_state_dict(dict_new)
            self.audio_width = 2048
        elif config['audio_encoder_args']['type'] == 'transformer':
            self.audio_enc = HTSAT_Swin_Transformer(spec_size=256, patch_size=4,
                                                    patch_stride=(4, 4), num_classes=527,
                                                    embed_dim=96, depths=[2, 2, 6, 2],
                                                    num_heads=[4, 8, 16, 32], window_size=8,
                                                    config=config)
            if config['audio_encoder_args']['pretrained']:
                audio_ckpt = torch.load('pretrained_models/audio_encoder/HTSAT.ckpt',
                                        map_location='cpu')['state_dict']
                for key in list(audio_ckpt.keys()):
                    if key.startswith('sed_model') and ('spectrogram_extractor' not in key
                                                        and 'logmel_extractor' not in key):
                        v = audio_ckpt.pop(key)
                        audio_ckpt[key[10:]] = v
                self.audio_enc.load_state_dict(audio_ckpt, strict=False)
                param_names = [n for n, p in self.audio_enc.named_parameters()]
            self.audio_width = 768
        else:
            raise NotImplementedError('No such audio encoder network.')
        if config['audio_encoder_args']['freeze']:
            for name, param in self.audio_enc.named_parameters():
                param.requires_grad = False

    def forward(self, inputs):
        audio_encoded = self.audio_enc(inputs)
        return audio_encoded
import cv2


def recover_image(img, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    img[0] = (img[0] * std[0] + mean[0]) * 255.0
    img[1] = (img[1] * std[1] + mean[1]) * 255.0
    img[2] = (img[2] * std[2] + mean[2]) * 255.0
    img = img.permute(1, 2, 0)
    return cv2.cvtColor(img.numpy().astype('uint8'), cv2.COLOR_BGR2RGB)
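# A round-trip sketch with a toy tensor: recover_image undoes ImageNet
# normalization in place and returns an HWC uint8 array; clone first if the
# normalized tensor is still needed.
import torch

normalized = torch.zeros(3, 8, 8)  # stands in for a normalized CHW image
rgb = recover_image(normalized.clone())  # ndarray of shape (8, 8, 3), dtype uint8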
class OffloadOpManager():
    def __init__(self, config: OffloadOpManagerArgs):
        self.name: str = config.name
        self.offload_index: int = 0
        self.reload_index: int = 0
        self.index_to_offload: Set[int] = set(config.index_to_offload)
        self.cpu_storage: Deque[torch.Tensor] = deque()
        self.gpu_storage: Deque[torch.Tensor] = deque()
        self.shaped_cpu_cache: DefaultDict[torch.Size, Deque[torch.Tensor]] = defaultdict(deque)

    def reset(self):
        self.offload_index = self.reload_index = 0

    def offload(self, x, offload_event_queue, current_stream, copy_stream, is_last_layer=False):
        self.offload_index += 1
        if self.offload_index not in self.index_to_offload:
            return x
        if is_last_layer:
            self.gpu_storage.append(x)
            return x

        def _detach_to_cpu(x):
            if isinstance(x, torch.Tensor) and x.device.type == 'cuda':
                offload_event_queue.deque_event_and_synchronize()
                tensor = x.detach()
                copy_stream.wait_stream(current_stream)
                s = tensor.shape
                if not self.shaped_cpu_cache[s]:
                    packed = torch.empty_like(tensor, device='cpu', pin_memory=True)
                else:
                    packed = self.shaped_cpu_cache[s].popleft()
                with torch.cuda.stream(copy_stream):
                    packed.copy_(tensor, non_blocking=True)
                tensor.record_stream(copy_stream)
                offload_event_queue.enque_event()
                return packed

        out_detached_cpu = tree_map(_detach_to_cpu, x)
        self.cpu_storage.append(out_detached_cpu)

    def reload(self, x_gen, reload_event_queue, current_stream, copy_stream, is_last_layer=False):
        self.reload_index += 1
        if self.reload_index not in self.index_to_offload:
            return x_gen()
        if is_last_layer:
            return self.gpu_storage.popleft()

        def _to_cuda(x):
            if isinstance(x, torch.Tensor) and x.device.type == 'cpu':
                s = x.shape
                reload_event_queue.deque_event_and_synchronize()
                self.shaped_cpu_cache[s].append(x)
                with torch.cuda.stream(copy_stream):
                    unpacked = x.to(f'cuda:{atorch.local_rank()}', non_blocking=True)
                current_stream.wait_stream(copy_stream)
                unpacked.record_stream(current_stream)
                reload_event_queue.enque_event()
                return unpacked
            return x

        out = tree_map(_to_cuda, self.cpu_storage.popleft())
        return out
class StructuredSubnetLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=False, sparsity=0.5, trainable=True):
        super().__init__(in_features=in_features, out_features=out_features, bias=bias)
        self.sparsity = sparsity
        self.trainable = trainable
        self.w_m = nn.Parameter(torch.empty(out_features))
        self.weight_mask = None
        self.zeros_weight, self.ones_weight = torch.zeros(self.w_m.shape), torch.ones(self.w_m.shape)
        if bias:
            self.b_m = nn.Parameter(torch.empty(out_features))
            self.bias_mask = None
            self.zeros_bias, self.ones_bias = torch.zeros(self.b_m.shape), torch.ones(self.b_m.shape)
        else:
            self.register_parameter('bias', None)
        self.init_mask_parameters()
        if trainable == False:
            raise Exception('Non-trainable version is not yet implemented')

    def forward(self, x, weight_mask=None, bias_mask=None, mode='train'):
        w_pruned, b_pruned = None, None
        if mode == 'train':
            self.weight_mask = GetSubnetFaster.apply(self.w_m.abs(), self.zeros_weight,
                                                     self.ones_weight, self.sparsity)
            w_pruned = self.weight * self.weight_mask.view(-1, 1)
            b_pruned = None
            if self.bias is not None:
                self.bias_mask = GetSubnetFaster.apply(self.b_m.abs(), self.zeros_bias,
                                                       self.ones_bias, self.sparsity)
                b_pruned = self.bias_mask * self.bias
        elif mode == 'valid':
            w_pruned = self.weight * self.weight_mask.view(-1, 1)
            b_pruned = None
            if self.bias is not None:
                b_pruned = self.bias * self.bias_mask
        elif mode == 'test':
            w_pruned = self.weight * weight_mask.view(-1, 1)
            b_pruned = None
            if self.bias is not None:
                b_pruned = self.bias * bias_mask  # fixed typo: was 'b_runed', leaving b_pruned unset
        else:
            raise Exception('[ERROR] The mode ' + str(mode) + ' is not supported!')
        return F.linear(input=x, weight=w_pruned, bias=b_pruned)

    def init_mask_parameters(self):
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.w_m, -bound, bound)
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.w_m)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.b_m, -bound, bound)
def _late_fusion_fc_layers(num_layers, layer_sizes, input_rois, input_weights, fusion_method, l2_weight_decay, keep_prob, num_final_classes, box_rep, is_training): if (l2_weight_decay > 0): weights_regularizer = slim.l2_regularizer(l2_weight_decay) else: weights_regularizer = None num_branches = len(input_rois) branch_outputs = [] with slim.arg_scope([slim.fully_connected], weights_regularizer=weights_regularizer): for branch_idx in range(num_branches): branch_rois = input_rois[branch_idx] fc_drop = slim.flatten(branch_rois, scope='br{}_flatten'.format(branch_idx)) for layer_idx in range(num_layers): fc_name_idx = (6 + layer_idx) fc_layer = slim.fully_connected(fc_drop, layer_sizes[layer_idx], scope='br{}_fc{}'.format(branch_idx, fc_name_idx)) fc_drop = slim.dropout(fc_layer, keep_prob=keep_prob, is_training=is_training, scope='br{}_fc{}_drop'.format(branch_idx, fc_name_idx)) branch_outputs.append(fc_drop) fused_features = avod_fc_layer_utils.feature_fusion(fusion_method, branch_outputs, input_weights) output_layers = build_output_layers(fused_features, num_final_classes, box_rep) return output_layers
def read_vec_int_ark(file_or_fd): fd = open_or_fd(file_or_fd) try: key = read_key(fd) while key: ali = read_vec_int(fd) (yield (key, ali)) key = read_key(fd) finally: if (fd is not file_or_fd): fd.close()
def get_test_classes_for_model(test_file, model_class): test_classes = get_test_classes(test_file) target_test_classes = [] for test_class in test_classes: if (model_class in test_class.all_model_classes): target_test_classes.append(test_class) return sorted(target_test_classes, key=(lambda x: x.__name__))
def generate_point(base, index): (f, result, i) = (1.0, 0.0, index) while (i > 0.0): f /= base result += (f * (i % base)) i = math.floor((i / base)) return result
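This is the radical-inverse construction used by van der Corput/Halton sequences; a quick check of the first base-2 values, assuming the function above (and its math import) is in scope:

# indices 1..4 in base 2 give 0.5, 0.25, 0.75, 0.125
assert [generate_point(2, i) for i in range(1, 5)] == [0.5, 0.25, 0.75, 0.125]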
def getTrainLMDB(netCaffe): for i in netCaffe: try: if (i.include[0].phase == 0): return i.data_param.source except Exception: pass print('ERROR: cannot access TRAIN DB file.') exit(1)
_lazy_imports('av') def create_video_folder(root: Union[(str, pathlib.Path)], name: Union[(str, pathlib.Path)], file_name_fn: Callable[([int], str)], num_examples: int, size: Optional[Union[(Sequence[int], int, Callable[([int], Union[(Sequence[int], int)])])]]=None, fps=25, **kwargs) -> List[pathlib.Path]: if (size is None): def size(idx): num_frames = 1 num_channels = 3 (height, width) = (torch.randint(2, 6, size=(2,), dtype=torch.int) * 2).tolist() return (num_frames, num_channels, height, width) root = (pathlib.Path(root) / name) os.makedirs(root) return [create_video_file(root, file_name_fn(idx), size=(size(idx) if callable(size) else size), fps=fps, **kwargs) for idx in range(num_examples)]
def _test(): import torch pretrained = False models = [(nin_cifar10, 10), (nin_cifar100, 100), (nin_svhn, 10)] for (model, num_classes) in models: net = model(pretrained=pretrained) net.eval() weight_count = _calc_width(net) print('m={}, {}'.format(model.__name__, weight_count)) assert ((model != nin_cifar10) or (weight_count == 966986)) assert ((model != nin_cifar100) or (weight_count == 984356)) assert ((model != nin_svhn) or (weight_count == 966986)) x = torch.randn(1, 3, 32, 32) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, num_classes))
def _create_hrnet(variant, pretrained, **model_kwargs): model_cls = HighResolutionNet features_only = False if model_kwargs.pop('features_only', False): model_cls = HighResolutionNetFeatures model_kwargs['num_classes'] = 0 features_only = True model = build_model_with_cfg(model_cls, variant, pretrained, default_cfg=default_cfgs[variant], model_cfg=cfg_cls[variant], pretrained_strict=(not features_only), **model_kwargs) if features_only: model.default_cfg = default_cfg_for_features(model.default_cfg) return model
def global_scale(mesh: trimesh.base.Trimesh, scale: float, in_place: bool=True) -> trimesh.base.Trimesh: mesh_ = (mesh if in_place else copy.deepcopy(mesh)) mesh_.vertices *= scale return mesh_
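Usage sketch, assuming trimesh is installed and the function above (with its copy import) is in scope; with in_place=False the input mesh is left untouched:

import trimesh

box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
scaled = global_scale(box, scale=2.0, in_place=False)
assert box.extents.max() == 1.0 and scaled.extents.max() == 2.0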
def _collect_report_features(fields): src_features = collect_features(fields, side='src') tgt_features = collect_features(fields, side='tgt') return (src_features, tgt_features)
class LeNetMNIST(nn.Module): def __init__(self): super(LeNetMNIST, self).__init__() self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1) self.relu1 = nn.ReLU() self.pool1 = nn.MaxPool2d(kernel_size=2) self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1) self.relu2 = nn.ReLU() self.pool2 = nn.MaxPool2d(kernel_size=2) self.conv3 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1) self.relu3 = nn.ReLU() self.fc1 = nn.Linear(in_features=120, out_features=84) self.relu4 = nn.ReLU() self.fc2 = nn.Linear(in_features=84, out_features=10) def forward(self, x): out = self.conv1(x) out = self.relu1(out) out = self.pool1(out) out = self.conv2(out) out = self.relu2(out) out = self.pool2(out) out = self.conv3(out) out = self.relu3(out) out = torch.flatten(out, 1) out = self.fc1(out) out = self.relu4(out) out = self.fc2(out) return out
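Note the conv/pool chain only reduces to 1x1x120 for 32x32 inputs, so 28x28 MNIST images are assumed to be zero-padded by 2 pixels per side; a shape check, assuming the class above is in scope:

import torch

net = LeNetMNIST()
out = net(torch.randn(4, 1, 32, 32))  # 32 -> 28 -> 14 -> 10 -> 5 -> 1
assert out.shape == (4, 10)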
def test_truncnormalinit(): model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2)) func = TruncNormalInit(mean=100, std=1e-05, bias=200, a=0, b=200, layer=['Conv2d', 'Linear']) func(model) assert model[0].weight.allclose(torch.tensor(100.0)) assert model[2].weight.allclose(torch.tensor(100.0)) assert model[0].bias.allclose(torch.tensor(200.0)) assert model[2].bias.allclose(torch.tensor(200.0)) func = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer=['Conv2d', 'Linear']) res = bias_init_with_prob(0.01) func(model) assert model[0].weight.allclose(torch.tensor(300.0)) assert model[2].weight.allclose(torch.tensor(300.0)) assert model[0].bias.allclose(torch.tensor(res)) assert model[2].bias.allclose(torch.tensor(res)) model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1)) func = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer='_ConvNd') func(model) assert model[0].weight.allclose(torch.tensor(300.0)) assert model[2].weight.allclose(torch.tensor(300.0)) assert torch.all((model[0].bias == res)) assert torch.all((model[2].bias == res))
def tcsize_broadcast(*sizes) -> tc.Size: szfinal = tc.Size() for sz in sizes: (szlong, szshort) = ((szfinal, sz) if (len(szfinal) >= len(sz)) else (sz, szfinal)) szlong = list(szlong) for i in range(1, (1 + len(szshort))): if (szshort[(- i)] != 1): if (szlong[(- i)] == 1): szlong[(- i)] = szshort[(- i)] elif (szshort[(- i)] != szlong[(- i)]): raise ValueError('sizes do not match') szfinal = tc.Size(szlong) return szfinal
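A quick right-aligned broadcast check, assuming `tc` is torch and the function above is in scope:

import torch as tc

assert tcsize_broadcast(tc.Size([3, 1, 5]), tc.Size([4, 1])) == tc.Size([3, 4, 5])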
class jointTemplateResponseGenerator(nn.Module): def __init__(self, ev_generator, masker, masker_dropout, enc_embedding, dec_embedding, encoder_src, decoder, generator, bridge, fields): super(jointTemplateResponseGenerator, self).__init__() self.ev_generator = ev_generator self.masker = masker self.masker_dropout = masker_dropout self.enc_embedding = enc_embedding self.dec_embedding = dec_embedding self.encoder_src = encoder_src self.decoder = decoder self.generator = generator self.bridge = bridge self.fields = fields def forward(self, I_word, I_word_length, D_word, D_word_length, ref_tgt_inputs, ref_tgt_lengths, src_inputs, tgt_inputs, src_lengths): (ref_contexts, enc_hidden, ref_mask, dist, src_contexts, src_mask, preds) = self.encode(I_word, I_word_length, D_word, D_word_length, ref_tgt_inputs, ref_tgt_lengths, src_inputs, src_lengths) dec_init_hidden = self.init_decoder_state(enc_hidden, ref_contexts) (dec_outputs, dec_hiddens, attn) = self.decode(tgt_inputs, ref_contexts, dec_init_hidden, dist, ref_mask, src_contexts, src_mask) return (dec_outputs, attn, preds) def init_decoder_state(self, enc_hidden, context): return enc_hidden def encode(self, I_word, I_word_length, D_word, D_word_length, ref_tgt_inputs, ref_tgt_lengths, src_inputs, src_lengths): (ev, enc_outputs) = self.ev_generator(I_word, I_word_length, D_word, D_word_length, ref_tgt_inputs, ref_tgt_lengths) ev = self.masker_dropout(ev) ev_for_return = ev enc_outputs = self.masker_dropout(enc_outputs) (_, _dim) = ev.size() (_len, _batch, _) = enc_outputs.size() if (self.bridge is not None): dist = self.bridge(ev) else: dist = None ev = ev.unsqueeze(0) ev = ev.expand(_len, _batch, _dim) preds = self.masker(torch.cat([ev, enc_outputs], 2)) preds = preds.squeeze(2) emb_src = self.enc_embedding(src_inputs) (src_contexts, enc_hidden) = self.encoder_src(emb_src, src_lengths, None) ref_mask = sequence_mask(ref_tgt_lengths) src_mask = sequence_mask(src_lengths) return (enc_outputs, enc_hidden, ref_mask, dist, src_contexts, src_mask, preds) def decode(self, input, context, state, dist, context_mask, src_context, src_context_mask): emb = self.dec_embedding(input) if (dist is not None): (dec_outputs, dec_hiddens, attn) = self.decoder(emb, context, state, dist, context_mask, src_context, src_context_mask) else: (dec_outputs, dec_hiddens, attn) = self.decoder(emb, context, context, state, context_mask, src_context, src_context_mask) return (dec_outputs, dec_hiddens, attn) def save_checkpoint(self, epoch, opt, filename): ckpt = {'ev_generator_dict': self.ev_generator.state_dict(), 'masker_dict': self.masker.state_dict(), 'masker_dropout_dict': self.masker_dropout.state_dict(), 'enc_embedding_dict': self.enc_embedding.state_dict(), 'dec_embedding_dict': self.dec_embedding.state_dict(), 'encoder_src_dict': self.encoder_src.state_dict(), 'decoder_dict': self.decoder.state_dict(), 'generator_dict': self.generator.state_dict(), 'epoch': epoch, 'opt': opt} if (self.bridge is not None): ckpt['bridge_dict'] = self.bridge.state_dict() torch.save(ckpt, filename) def load_checkpoint(self, filename): ckpt = torch.load(filename) self.ev_generator.load_state_dict(ckpt['ev_generator_dict']) self.masker.load_state_dict(ckpt['masker_dict']) self.masker_dropout.load_state_dict(ckpt['masker_dropout_dict']) self.enc_embedding.load_state_dict(ckpt['enc_embedding_dict']) self.dec_embedding.load_state_dict(ckpt['dec_embedding_dict']) self.encoder_src.load_state_dict(ckpt['encoder_src_dict']) self.decoder.load_state_dict(ckpt['decoder_dict']) self.generator.load_state_dict(ckpt['generator_dict']) if (self.bridge is not None): self.bridge.load_state_dict(ckpt['bridge_dict']) epoch = ckpt['epoch'] return epoch
def VarRecurrent(reverse=False): def forward(input, hidden, cell, mask): output = [] steps = (range((input.size(0) - 1), (- 1), (- 1)) if reverse else range(input.size(0))) for i in steps: if ((mask is None) or (mask[i].data.min() > 0.5)): hidden = cell(input[i], hidden) elif (mask[i].data.max() > 0.5): hidden_next = cell(input[i], hidden) if isinstance(hidden, tuple): (hx, cx) = hidden (hp1, cp1) = hidden_next hidden = ((hx + ((hp1 - hx) * mask[i])), (cx + ((cp1 - cx) * mask[i]))) else: hidden = (hidden + ((hidden_next - hidden) * mask[i])) output.append((hidden[0] if isinstance(hidden, tuple) else hidden)) if reverse: output.reverse() output = torch.cat(output, 0).view(input.size(0), *output[0].size()) return (hidden, output) return forward
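Usage sketch with a padded batch and an LSTMCell, assuming VarRecurrent above is in scope; mask entries of 0 freeze (or blend) the hidden state at padded steps:

import torch
import torch.nn as nn

cell = nn.LSTMCell(8, 16)
seq = torch.randn(5, 3, 8)        # (time, batch, features)
mask = torch.ones(5, 3, 1)
mask[3:, 1:] = 0.0                # the last two sequences end at t=3
h0 = (torch.zeros(3, 16), torch.zeros(3, 16))
hidden, output = VarRecurrent(reverse=False)(seq, h0, cell, mask)
assert output.shape == (5, 3, 16)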
class SuppressStdout(object): def __init__(self, suppress=True): self.suppress = suppress self.sys_stdout_ref = None def __enter__(self): self.sys_stdout_ref = sys.stdout if self.suppress: sys.stdout = self return sys.stdout def __exit__(self, type, value, traceback): sys.stdout = self.sys_stdout_ref def write(self, *args, **kwargs): pass def flush(self): pass
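Usage sketch, assuming the class above (and its sys import) is in scope:

with SuppressStdout():
    print('this line is swallowed')
print('this line is printed normally')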
def clean_smiles(s): s2 = NeutraliseCharges(s) m = AllChem.MolFromSmiles(s2[0]) Chem.Kekulize(m) s = Chem.MolToSmiles(m, isomericSmiles=False, kekuleSmiles=True) return s
_torch class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ((BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()) all_generative_model_classes = ((BertGenerationDecoder,) if is_torch_available() else ()) def setUp(self): self.model_tester = BertGenerationEncoderTester(self) self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): (config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_model_from_pretrained(self): model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') self.assertIsNotNone(model)
class TFSpeech2TextModel(metaclass=DummyObject): _backends = ['tf'] def __init__(self, *args, **kwargs): requires_backends(self, ['tf'])
def sample_a_sorted_batch_from_numpy(numpyX, numpyY, batch_size, use_cuda): if (batch_size is not None): select_index = random.sample(range(len(numpyY)), batch_size) else: select_index = np.array(range(len(numpyY))) batch_x = copy.deepcopy(numpyX[select_index]) batch_y = copy.deepcopy(numpyY[select_index]) (index_decoder_X, index_decoder_Y) = get_decoder_index_XY(batch_y) all_lens = np.array([len(x) for x in batch_y]) maxL = np.max(all_lens) idx = np.argsort(all_lens) idx = idx[::(- 1)] batch_x = batch_x[idx] batch_y = batch_y[idx] all_lens = all_lens[idx] index_decoder_X = index_decoder_X[idx] index_decoder_Y = index_decoder_Y[idx] numpy_batch_x = batch_x batch_x = align_variable_numpy(batch_x, maxL, 0) batch_y = align_variable_numpy(batch_y, maxL, 2) batch_x = Variable(torch.from_numpy(batch_x.astype(np.int64))) if use_cuda: batch_x = batch_x.cuda() return (numpy_batch_x, batch_x, batch_y, index_decoder_X, index_decoder_Y, all_lens, maxL)
def cook_test(test, crefs, eff=None, n=4): (reflen, refmaxcounts) = (crefs[0], crefs[1]) (testlen, counts) = precook(test, n, True) result = {} if (eff == 'closest'): result['reflen'] = min(((abs((l - testlen)), l) for l in reflen))[1] else: result['reflen'] = reflen result['testlen'] = testlen result['guess'] = [max(0, ((testlen - k) + 1)) for k in range(1, (n + 1))] result['correct'] = ([0] * n) for (ngram, count) in counts.items(): result['correct'][(len(ngram) - 1)] += min(refmaxcounts.get(ngram, 0), count) return result
def model_factory(model_params: ModelParams): in_channels = 1 if (model_params.model == 'MinkLoc'): block_module = create_resnet_block(model_params.block) backbone = MinkFPN(in_channels=in_channels, out_channels=model_params.feature_size, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=block_module, layers=model_params.layers, planes=model_params.planes) pooling = PoolingWrapper(pool_method=model_params.pooling, in_dim=model_params.feature_size, output_dim=model_params.output_dim) model = MinkLoc(backbone=backbone, pooling=pooling, normalize_embeddings=model_params.normalize_embeddings) else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model
def compute_pwcca(acts1, acts2, epsilon=0.0): sresults = cca_core.get_cca_similarity(acts1, acts2, epsilon=epsilon, compute_dirns=False, compute_coefs=True, verbose=False) if (np.sum(sresults['x_idxs']) <= np.sum(sresults['y_idxs'])): dirns = (np.dot(sresults['coef_x'], (acts1[sresults['x_idxs']] - sresults['neuron_means1'][sresults['x_idxs']])) + sresults['neuron_means1'][sresults['x_idxs']]) coefs = sresults['cca_coef1'] acts = acts1 idxs = sresults['x_idxs'] else: dirns = (np.dot(sresults['coef_y'], (acts2[sresults['y_idxs']] - sresults['neuron_means2'][sresults['y_idxs']])) + sresults['neuron_means2'][sresults['y_idxs']]) coefs = sresults['cca_coef2'] acts = acts2 idxs = sresults['y_idxs'] (P, _) = np.linalg.qr(dirns.T) weights = np.sum(np.abs(np.dot(P.T, acts[idxs].T)), axis=1) weights = (weights / np.sum(weights)) return (np.sum((weights * coefs)), weights, coefs)
class GenELMClassifier(BaseELM, ClassifierMixin): def __init__(self, hidden_layer=MLPRandomLayer(random_state=0), binarizer=LabelBinarizer((- 1), 1), regressor=None): super(GenELMClassifier, self).__init__(hidden_layer, regressor) self.binarizer = binarizer self.classes_ = None self.genelm_regressor_ = GenELMRegressor(hidden_layer, regressor) def decision_function(self, X): return self.genelm_regressor_.predict(X) def fit(self, X, y): self.classes_ = np.unique(y) y_bin = self.binarizer.fit_transform(y) self.genelm_regressor_.fit(X, y_bin) return self def predict(self, X): raw_predictions = self.decision_function(X) class_predictions = self.binarizer.inverse_transform(raw_predictions) return class_predictions
_config def model_lifelong_sidetune_reverse_cifar(): n_channels_out = 3 cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN4Reshaped', 'base_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', 'base_kwargs': {'eval_only': True}, 'use_baked_encoding': False, 'side_class': 'ResnetiCifar44NoLinear', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}, 'transfer_weights_path': None, 'decoder_class': None, 'decoder_weights_path': None, 'decoder_kwargs': {}}}} del n_channels_out
def validate(val_loader, model, criterion, epoch): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() model.eval() end = time.time() for (i, (input, target)) in enumerate(val_loader): with torch.no_grad(): output = model(input) loss = criterion(output, target) prec1 = accuracy(output.data, target, topk=(1,))[0] losses.update(loss.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) batch_time.update((time.time() - end)) end = time.time() if ((i % args.print_freq) == 0): print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1)) print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1)) if args.tensorboard: log_value('val_loss', losses.avg, epoch) log_value('val_acc', top1.avg, epoch) return top1.avg
def predict(model: Model, data: Dataset, device: torch.device, n_gpu: int, compute_loss: bool=False, normalization: str='batch', num_workers: int=0, cfg: Dict=None, fp16: bool=False) -> Tuple[(Dict[(str, float)], List[str], List[str], List[List[str]], List[np.ndarray], List[np.ndarray])]: (eval_batch_size, eval_batch_type, max_output_length, min_output_length, eval_metrics, sacrebleu_cfg, beam_size, beam_alpha, n_best, return_attention, return_prob, generate_unk, repetition_penalty, no_repeat_ngram_size) = parse_test_args(cfg) if (return_prob == 'ref'): decoding_description = '' else: decoding_description = (' (Greedy decoding with ' if (beam_size < 2) else f' (Beam search with beam_size={beam_size}, beam_alpha={beam_alpha}, n_best={n_best}, ') decoding_description += f"min_output_length={min_output_length}, max_output_length={max_output_length}, return_prob='{return_prob}', generate_unk={generate_unk}, repetition_penalty={repetition_penalty}, no_repeat_ngram_size={no_repeat_ngram_size})" logger.info('Predicting %d example(s)...%s', len(data), decoding_description) assert (eval_batch_size >= n_gpu), '`batch_size` must be bigger than `n_gpu`.' valid_iter = data.make_iter(batch_size=eval_batch_size, batch_type=eval_batch_type, shuffle=False, num_workers=num_workers, pad_index=model.pad_index, device=device) model.eval() valid_scores = {'loss': float('nan'), 'acc': float('nan'), 'ppl': float('nan')} all_outputs = [] valid_attention_scores = [] valid_sequence_scores = [] total_loss = 0 total_nseqs = 0 total_ntokens = 0 total_n_correct = 0 (output, ref_scores, hyp_scores, attention_scores) = (None, None, None, None) disable_tqdm = isinstance(data, StreamDataset) gen_start_time = time.time() with tqdm(total=len(data), disable=disable_tqdm, desc='Predicting...') as pbar: for batch in valid_iter: total_nseqs += batch.nseqs reverse_index = batch.sort_by_src_length() sort_reverse_index = expand_reverse_index(reverse_index, n_best) batch_size = len(sort_reverse_index) if (compute_loss and batch.has_trg): assert (model.loss_function is not None) with torch.autocast(device_type=device.type, enabled=fp16): with torch.no_grad(): (batch_loss, log_probs, attn, n_correct) = model(return_type='loss', return_attention=return_attention, **vars(batch)) batch_loss = batch.normalize(batch_loss, 'sum', n_gpu=n_gpu) n_correct = batch.normalize(n_correct, 'sum', n_gpu=n_gpu) if (return_prob == 'ref'): ref_scores = batch.score(log_probs) attention_scores = attn.detach().cpu().float().numpy() output = batch.trg total_loss += batch_loss.item() total_n_correct += n_correct.item() total_ntokens += batch.ntokens if (return_prob != 'ref'): (output, hyp_scores, attention_scores) = search(model=model, batch=batch, beam_size=beam_size, beam_alpha=beam_alpha, max_output_length=max_output_length, n_best=n_best, return_attention=return_attention, return_prob=return_prob, generate_unk=generate_unk, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, fp16=fp16) all_outputs.extend(output[sort_reverse_index]) valid_attention_scores.extend((attention_scores[sort_reverse_index] if (attention_scores is not None) else [])) valid_sequence_scores.extend((ref_scores[sort_reverse_index] if ((ref_scores is not None) and (ref_scores.shape[0] == batch_size)) else (hyp_scores[sort_reverse_index] if ((hyp_scores is not None) and (hyp_scores.shape[0] == batch_size)) else []))) pbar.update(batch.nseqs) gen_duration = (time.time() - gen_start_time) assert (total_nseqs == len(data)), (total_nseqs, len(data)) assert (len(all_outputs) == (len(data) * n_best)), (len(all_outputs), len(data), n_best) if compute_loss: if (normalization == 'batch'): normalizer = total_nseqs elif (normalization == 'tokens'): normalizer = total_ntokens elif (normalization == 'none'): normalizer = 1 assert (normalizer > 0) assert (total_ntokens > 0) valid_scores['loss'] = (total_loss / normalizer) valid_scores['acc'] = (total_n_correct / total_ntokens) valid_scores['ppl'] = math.exp((total_loss / total_ntokens)) decoded_valid = model.trg_vocab.arrays_to_sentences(arrays=all_outputs, cut_at_eos=True) if (return_prob == 'ref'): logger.info('Evaluation result (scoring) %s, duration: %.4f[sec]', ', '.join([f'{eval_metric}: {valid_scores[eval_metric]:6.2f}' for eval_metric in ['loss', 'ppl', 'acc']]), gen_duration) return (valid_scores, None, None, decoded_valid, valid_sequence_scores, valid_attention_scores) valid_hyp = [data.tokenizer[data.trg_lang].post_process(s, generate_unk=generate_unk) for s in decoded_valid] valid_ref = [data.tokenizer[data.trg_lang].post_process(s) for s in data.trg] if data.has_trg: valid_hyp_1best = (valid_hyp if (n_best == 1) else [valid_hyp[i] for i in range(0, len(valid_hyp), n_best)]) assert (len(valid_hyp_1best) == len(valid_ref)), (valid_hyp_1best, valid_ref) eval_start_time = time.time() for eval_metric in eval_metrics: if (eval_metric == 'bleu'): valid_scores[eval_metric] = bleu(valid_hyp_1best, valid_ref, **sacrebleu_cfg) elif (eval_metric == 'chrf'): valid_scores[eval_metric] = chrf(valid_hyp_1best, valid_ref, **sacrebleu_cfg) elif (eval_metric == 'token_accuracy'): decoded_valid_1best = (decoded_valid if (n_best == 1) else [decoded_valid[i] for i in range(0, len(decoded_valid), n_best)]) valid_scores[eval_metric] = token_accuracy(decoded_valid_1best, data.get_list(lang=data.trg_lang, tokenized=True)) elif (eval_metric == 'sequence_accuracy'): valid_scores[eval_metric] = sequence_accuracy(valid_hyp_1best, valid_ref) eval_duration = (time.time() - eval_start_time) score_str = ', '.join([f'{eval_metric}: {valid_scores[eval_metric]:6.2f}' for eval_metric in (eval_metrics + ['loss', 'ppl', 'acc']) if (not math.isnan(valid_scores[eval_metric]))]) logger.info('Evaluation result (%s) %s, generation: %.4f[sec], evaluation: %.4f[sec]', ('beam search' if (beam_size > 1) else 'greedy'), score_str, gen_duration, eval_duration) else: logger.info('Generation took %.4f[sec]. (No references given)', gen_duration) return (valid_scores, valid_ref, valid_hyp, decoded_valid, valid_sequence_scores, valid_attention_scores)
def train_dmc_sac_aug(args): train_env = dc.envs.load_dmc(**vars(args)) test_env = dc.envs.load_dmc(**vars(args)) obs_shape = train_env.observation_space.shape action_shape = train_env.action_space.shape max_action = train_env.action_space.high[0] augmentation_lst = [aug(args.batch_size) for aug in eval(args.augmentations)] augmenter = dc.augmentations.AugmentationSequence(augmentation_lst) agent = dc.sac_aug.PixelSACAgent(obs_shape, action_shape[0], args.log_std_low, args.log_std_high) if args.prioritized_replay: buffer_t = dc.replay.PrioritizedReplayBuffer else: buffer_t = dc.replay.ReplayBuffer buffer = buffer_t(args.buffer_size, state_dtype=int, state_shape=train_env.observation_space.shape, action_shape=train_env.action_space.shape) agent = dc.sac_aug.sac_aug(agent=agent, train_env=train_env, test_env=test_env, buffer=buffer, augmenter=augmenter, **vars(args))
def get_config(level): if (not isinstance(level, int)): raise ValueError(f'level is {level}, but must be an integer.') return _get_config(num_obstacles=level, valid_step_range=(50, 150))
def fix_random_seed(seed=0): if (seed is not None): torch.manual_seed(seed) np.random.seed(seed) if torch.cuda.is_available(): torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True elif torch.cuda.is_available(): torch.backends.cudnn.benchmark = True
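Reproducibility check, assuming the function above (with its torch/numpy imports) is in scope:

import torch

fix_random_seed(123)
a = torch.randn(3)
fix_random_seed(123)
b = torch.randn(3)
assert torch.equal(a, b)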
def vis_trend(ctx: Context, train_ctx: Context, server: str, env: str, port: int=80) -> None: if (not ('vis' in ctx)): ctx.vis = Visdom(server=server, port=port, env=env) try: for (k, v) in train_ctx.metrics.items(): if isinstance(v, (int, float)): if ctx.vis.win_exists(k): ctx.vis.line(X=np.array([train_ctx.epoch_idx]), Y=np.array([v]), opts=dict(title=k, xlabel='epoch'), win=k, update='append') else: ctx.vis.line(X=np.array([train_ctx.epoch_idx]), Y=np.array([v]), opts=dict(title=k, xlabel='epoch'), win=k) ctx.vis.save([env]) except ConnectionError: train_ctx.logger.warning(('Could not connect to visdom server "%s".' % server))
def prepare_add_object_given_background(image, datum, verbose=False, prefix_plan=None): task = 'add_object_given_background' if verbose: print('Task: ', task) assert ('unnormalized_boxes' in datum), 'unnormalized_boxes not in datum' assert ('box_captions' in datum), 'box_captions not in datum' d = datum mask_img = image.copy().convert('L') mask_draw = ImageDraw.Draw(mask_img) context_img = image.copy().convert('RGB') context_draw = ImageDraw.Draw(context_img) text_tokens = [] mask_draw.rectangle([(0, 0), mask_img.size], fill=0) n_total_boxes = len(datum['unnormalized_boxes']) n_mask_objs = 1 n_context_objs = (n_total_boxes - n_mask_objs) mask_obj_indices = list(range(n_mask_objs)) if verbose: print('# total boxes: ', len(datum['unnormalized_boxes'])) print('# boxes to mask: ', n_mask_objs) print('# boxes to show: ', n_context_objs) for mask_obj_index in mask_obj_indices: box = d['unnormalized_boxes'][mask_obj_index] mask_draw.rectangle(box.long().tolist(), fill=255) context_draw.rectangle(box.long().tolist(), fill=(0, 0, 0)) box_caption = d['box_captions'][mask_obj_index] text_tokens += [f'Add {box_caption}'] target_image = image text = ' '.join(text_tokens) return {'text': text, 'target_image': target_image, 'context_image': context_img, 'mask_image': mask_img, 'step_caption': box_caption}
class Model: def __init__(self, bit_width=None, model_name=None, load=None): self.bit_width = bit_width self.load = load self.model_name = model_name self.model = keras.Sequential([SYQ(self.bit_width, 32, (3, 3), activation='relu', input_shape=(28, 28, 1)), SYQ(self.bit_width, 32, (3, 3), activation='relu'), Flatten(), SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu), SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu), Dense(10, activation=tf.nn.softmax)]) print(self.model.get_config()) def train_model(self): if (self.load is not None): self.model = load_model(self.load, custom_objects={'SYQ': SYQ, 'SYQ_Dense': SYQ_Dense}) assert (self.model_name is not None) self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) self.model.fit(train_images, train_labels, epochs=1) self.model.save((self.model_name + '.h5')) def evaluate_model(self): if (self.load is not None): self.model = load_model(self.load, custom_objects={'SYQ': SYQ, 'SYQ_Dense': SYQ_Dense}) (test_loss, test_acc) = self.model.evaluate(test_images, test_labels) print('Test accuracy:', test_acc) predictions = self.model.predict(test_images)
def compute_doc_freq(crefs): document_frequency = defaultdict(float) for refs in crefs: for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]): document_frequency[ngram] += 1 return document_frequency
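Usage sketch, assuming the function above (and its defaultdict import) is in scope; crefs is a list of images, each holding one n-gram-count mapping per reference:

from collections import Counter

def unigrams(sentence):
    return Counter((w,) for w in sentence.split())

crefs = [[unigrams('a cat sat')], [unigrams('a dog ran')]]
df = compute_doc_freq(crefs)
assert df[('a',)] == 2 and df[('cat',)] == 1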
def translate_x_rel(img, pct, **kwargs): pixels = (pct * img.size[0]) _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def load_adult_data(): df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', header=None) df.columns = ['Age', 'WorkClass', 'fnlwgt', 'Education', 'EducationNum', 'MaritalStatus', 'Occupation', 'Relationship', 'Race', 'Gender', 'CapitalGain', 'CapitalLoss', 'HoursPerWeek', 'NativeCountry', 'Income'] train_cols = df.columns[0:(- 1)] label = df.columns[(- 1)] X_df = df[train_cols] y_df = df[label] dataset = {'problem': 'classification', 'full': {'X': X_df, 'y': y_df}} return dataset
class BelPinRef(BBAStruct): bel: int pin: IdString def serialise_lists(self, context: str, bba: BBAWriter): pass def serialise(self, context: str, bba: BBAWriter): bba.u32(self.bel) bba.u32(self.pin.index)
def Split_On_Multiple_Dot(input_word): multiple_dot = '\\w*[.][.]+\\w*' multiple_dot_rule = re.compile(multiple_dot) words_with_multiple_dots = multiple_dot_rule.findall(input_word) new_tokens = [] for word in words_with_multiple_dots: splitter = '' for i in range(0, word.count('.')): splitter += '.' word_splitted = word.split(splitter) split_word_index = 0 for split_word in word_splitted: if (split_word == ''): continue if (split_word_index > 0): new_tokens.append(splitter) new_tokens.append(split_word) split_word_index += 1 if (split_word_index == 1): new_tokens.append(splitter) return new_tokens
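Behavioral check, assuming the function above (and its re import) is in scope; note that only the matched words are returned, not surrounding context:

assert Split_On_Multiple_Dot('wait...what') == ['wait', '...', 'what']
assert Split_On_Multiple_Dot('end...') == ['end', '...']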
def dobldobl_set_parameter_homotopy(hom, idx, verbose=False): from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_target_system from phcpy.interface import store_dobldobl_system from phcpy.solver import number_of_symbols dim = number_of_symbols(hom) store_dobldobl_system(hom, nbvar=dim) py2c_copy_dobldobl_container_to_target_system() from phcpy.phcpy2c3 import py2c_padcon_dobldobl_initialize_parameter_homotopy vrb = int(verbose) return py2c_padcon_dobldobl_initialize_parameter_homotopy(idx, vrb)
def binarize(dataset): binary_examples = [] for example in dataset.examples: if (example.label != 'neutral'): binary_examples.append(example) dataset.examples = binary_examples return dataset
def ibn_resnext50_32x4d(**kwargs): return get_ibnresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name='ibn_resnext50_32x4d', **kwargs)
def analytical_base_partition_function(numer, denom): if (not isinstance(numer, numbers.Integral)): raise ValueError('Expected `numer` of type int, but is of type {}'.format(type(numer))) if (not isinstance(denom, numbers.Integral)): raise ValueError('Expected `denom` of type int, but is of type {}'.format(type(denom))) if (not (numer >= 0)): raise ValueError('Expected `numer` >= 0, but is = {}'.format(numer)) if (not (denom > 0)): raise ValueError('Expected `denom` > 0, but is = {}'.format(denom)) alpha = (numer / denom) if (alpha == 0): return (np.pi * np.sqrt(2)) if (alpha == 2): return np.sqrt((2 * np.pi)) a_p = (np.arange(1, numer, dtype=np.float64) / numer).tolist() b_q = ((np.arange((- 0.5), (numer - 0.5), dtype=np.float64) / numer).tolist() + (np.arange(1, (2 * denom), dtype=np.float64) / (2 * denom)).tolist()) z = (((1.0 / numer) - (1.0 / (2 * denom))) ** (2 * denom)) mult = ((np.exp(np.abs((((2 * denom) / numer) - 1.0))) * np.sqrt(np.abs((((2 * denom) / numer) - 1.0)))) * ((2 * np.pi) ** (1 - denom))) return (mult * np.float64(mpmath.meijerg([[], a_p], [b_q, []], z)))
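Quick check of the two closed-form branches, assuming the function above is in scope: Z(0) = pi*sqrt(2) and Z(2) = sqrt(2*pi):

import numpy as np

assert np.isclose(analytical_base_partition_function(0, 1), np.pi * np.sqrt(2))
assert np.isclose(analytical_base_partition_function(2, 1), np.sqrt(2 * np.pi))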
def get_ror_cifar(num_classes, blocks, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs): assert (num_classes in [10, 100]) assert (((blocks - 8) % 6) == 0) layers = ([((blocks - 8) // 6)] * 3) channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [([ci] * li) for (ci, li) in zip(channels_per_layers, layers)] net = CIFARRoR(channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, **kwargs) if pretrained: if ((model_name is None) or (not model_name)): raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.') from .model_store import download_model download_model(net=net, model_name=model_name, local_model_store_dir_path=root) return net
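Worked example of the depth arithmetic above for blocks=56 (pure arithmetic, nothing else assumed):

blocks = 56
layers = [(blocks - 8) // 6] * 3                         # [8, 8, 8]
channels = [[c] * l for (c, l) in zip([16, 32, 64], layers)]
assert layers == [8, 8, 8] and channels[2] == [64] * 8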
class ASPPAvgBranch(nn.Module): def __init__(self, in_channels, out_channels, upscale_out_size): super(ASPPAvgBranch, self).__init__() self.upscale_out_size = upscale_out_size self.pool = nn.AdaptiveAvgPool2d(1) self.conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels) def forward(self, x): in_size = (self.upscale_out_size if (self.upscale_out_size is not None) else x.shape[2:]) x = self.pool(x) x = self.conv(x) x = F.interpolate(x, size=in_size, mode='bilinear', align_corners=True) return x
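Self-contained sketch of the same pool -> 1x1 conv -> bilinear upsample pattern, with a plain nn.Conv2d standing in for conv1x1_block:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(2, 64, 32, 32)
pooled = nn.AdaptiveAvgPool2d(1)(x)                      # (2, 64, 1, 1)
projected = nn.Conv2d(64, 16, kernel_size=1)(pooled)     # (2, 16, 1, 1)
out = F.interpolate(projected, size=x.shape[2:], mode='bilinear', align_corners=True)
assert out.shape == (2, 16, 32, 32)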