code
stringlengths
101
5.91M
class PushToHubMixin():
    """Mixin providing `push_to_hub` for uploading a saved object to the
    Hugging Face Hub.

    NOTE(review): `_get_repo_url_from_name` (no `self`) and
    `_create_or_get_repo` / `_push_to_hub` (first arg `cls`) are upstream a
    @staticmethod and @classmethods respectively — the decorators appear to
    have been stripped from this copy; confirm against the original source.
    """

    def push_to_hub(self, repo_path_or_name: Optional[str]=None, repo_url: Optional[str]=None, use_temp_dir: bool=False, commit_message: Optional[str]=None, organization: Optional[str]=None, private: Optional[bool]=None, use_auth_token: Optional[Union[(bool, str)]]=None, **model_card_kwargs) -> str:
        """Save the object into a (possibly temporary) local repo, commit and
        push it to the Hub; returns the URL of the pushed commit."""
        if use_temp_dir:
            # Resolve the remote repo URL from the name BEFORE replacing
            # repo_path_or_name with a temp dir.
            if (repo_url is None):
                if (use_auth_token is None):
                    use_auth_token = True
                repo_name = Path(repo_path_or_name).name
                repo_url = self._get_repo_url_from_name(repo_name, organization=organization, private=private, use_auth_token=use_auth_token)
            repo_path_or_name = tempfile.mkdtemp()
        repo = self._create_or_get_repo(repo_path_or_name=repo_path_or_name, repo_url=repo_url, organization=organization, private=private, use_auth_token=use_auth_token)
        self.save_pretrained(repo_path_or_name)
        if (hasattr(self, 'history') and hasattr(self, 'create_model_card')):
            # Objects with a training history also get an auto-generated model card.
            base_model_card_args = {'output_dir': repo_path_or_name, 'model_name': Path(repo_path_or_name).name}
            base_model_card_args.update(model_card_kwargs)
            self.create_model_card(**base_model_card_args)
        url = self._push_to_hub(repo, commit_message=commit_message)
        if use_temp_dir:
            # Clean up the temporary working copy.
            shutil.rmtree(repo_path_or_name)
        return url

    def _get_repo_url_from_name(repo_name: str, organization: Optional[str]=None, private: bool=None, use_auth_token: Optional[Union[(bool, str)]]=None) -> str:
        """Create (or reuse, exist_ok=True) the remote repo and return its URL."""
        if isinstance(use_auth_token, str):
            # An explicit token string wins.
            token = use_auth_token
        elif use_auth_token:
            token = HfFolder.get_token()
            if (token is None):
                # NOTE(review): this message spanned a scrubbed line break in the
                # original; rejoined here with a single space.
                raise ValueError('You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own token as the `use_auth_token` argument.')
        else:
            token = None
        return create_repo(token, repo_name, organization=organization, private=private, repo_type=None, exist_ok=True)

    def _create_or_get_repo(cls, repo_path_or_name: Optional[str]=None, repo_url: Optional[str]=None, organization: Optional[str]=None, private: bool=None, use_auth_token: Optional[Union[(bool, str)]]=None) -> Repository:
        """Clone (or open) the local git Repository backing the push."""
        if ((repo_path_or_name is None) and (repo_url is None)):
            raise ValueError('You need to specify a `repo_path_or_name` or a `repo_url`.')
        if ((use_auth_token is None) and (repo_url is None)):
            use_auth_token = True
        if (repo_path_or_name is None):
            # Derive the local folder name from the last URL segment.
            repo_path_or_name = repo_url.split('/')[(- 1)]
        if ((repo_url is None) and (not os.path.exists(repo_path_or_name))):
            repo_name = Path(repo_path_or_name).name
            repo_url = cls._get_repo_url_from_name(repo_name, organization=organization, private=private, use_auth_token=use_auth_token)
        if (not os.path.exists(repo_path_or_name)):
            os.makedirs(repo_path_or_name)
        repo = Repository(repo_path_or_name, clone_from=repo_url, use_auth_token=use_auth_token)
        # Make sure the local clone is up to date before new commits.
        repo.git_pull()
        return repo

    def _push_to_hub(cls, repo: Repository, commit_message: Optional[str]=None) -> str:
        """Commit & push; the default message depends on the object kind."""
        if (commit_message is None):
            if ('Tokenizer' in cls.__name__):
                commit_message = 'add tokenizer'
            elif ('Config' in cls.__name__):
                commit_message = 'add config'
            else:
                commit_message = 'add model'
        return repo.push_to_hub(commit_message=commit_message)
def fbresnet34(num_classes=1000):
    """Construct an FBResNet-34 (BasicBlock, layer config [3, 4, 6, 3])."""
    return FBResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
class MidBlockTemporalDecoder(nn.Module):
    """Mid block of a temporal VAE decoder: spatio-temporal ResNet blocks with
    self-attention interleaved between them.

    NOTE(review): the source arrived with flattened indentation; whether
    `attentions.append(...)` sits inside or after the layer loop is ambiguous
    here — upstream diffusers appends a single Attention after the loop, which
    is what forward()'s zip over `resnets[1:]` matches; confirm.
    """

    def __init__(self, in_channels: int, out_channels: int, attention_head_dim: int=512, num_layers: int=1, upcast_attention: bool=False):
        super().__init__()
        resnets = []
        attentions = []
        for i in range(num_layers):
            # Only the first resnet changes channel count (in -> out).
            input_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(SpatioTemporalResBlock(in_channels=input_channels, out_channels=out_channels, temb_channels=None, eps=1e-06, temporal_eps=1e-05, merge_factor=0.0, merge_strategy='learned', switch_spatial_to_temporal_mix=True))
        attentions.append(Attention(query_dim=in_channels, heads=(in_channels // attention_head_dim), dim_head=attention_head_dim, eps=1e-06, upcast_attention=upcast_attention, norm_num_groups=32, bias=True, residual_connection=True))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(self, hidden_states: torch.FloatTensor, image_only_indicator: torch.FloatTensor):
        """First resnet, then alternating attention/resnet over the remainder."""
        hidden_states = self.resnets[0](hidden_states, image_only_indicator=image_only_indicator)
        for (resnet, attn) in zip(self.resnets[1:], self.attentions):
            hidden_states = attn(hidden_states)
            hidden_states = resnet(hidden_states, image_only_indicator=image_only_indicator)
        return hidden_states
def loadPal(filename):
    """Load a protobuf AnnoList from *filename* (binary) and return it.

    Uses a context manager so the file handle is closed even when
    ParseFromString raises (the original open/read/close leaked the handle
    on error).
    """
    _annolist = AnnoList_pb2.AnnoList()
    with open(filename, 'rb') as f:
        _annolist.ParseFromString(f.read())
    return _annolist
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation, output_padding):
    """Per-dimension output sizes of a transposed convolution.

    Formula: (in - 1) * stride - 2 * padding + kernel + output_padding.
    `dilation` is accepted for signature compatibility but unused.
    """
    output_size = []
    for i, inp in enumerate(input_size):
        if kernel_size[i] == -1:
            raise ValueError("deconv don't support kernel_size < 0")
        size = (inp - 1) * stride[i] - 2 * padding[i] + kernel_size[i] + output_padding[i]
        output_size.append(size)
    return output_size
def linspace(start, stop, num, endpoint=True):
    """NumPy-style linspace built on tf.linspace (which always includes the endpoint)."""
    start = tf.convert_to_tensor(start)
    stop = tf.convert_to_tensor(stop, dtype=start.dtype)
    if endpoint:
        # num == 1 with endpoint: return the midpoint (tf.linspace would give `start`).
        if (num == 1):
            return tf.reduce_mean(tf.stack([start, stop], axis=0), axis=0, keepdims=True)
        return tf.linspace(start, stop, num)
    if (num > 1):
        # Exclude the endpoint by shrinking the range by one step.
        step = ((stop - start) / tf.cast(num, start.dtype))
        return tf.linspace(start, (stop - step), num)
    return tf.linspace(start, stop, num)
class ReSDPipeline(StableDiffusionPipeline):
    """Stable Diffusion variant supporting a mid-sampling prompt switch
    (`prompt2` after `prompt1_steps`) and "head start" latents (resume
    denoising from step `head_start_step`).

    NOTE(review): `_grad()` below is almost certainly a mangled
    `@torch.no_grad()` decorator on __call__ — restore from the original
    source. Loop-internal indentation was reconstructed from flattened text;
    the placement of the progress/callback updates relative to the head-start
    guard should be confirmed.
    """

    _grad()
    def __call__(self, prompt: Union[(str, List[str])], prompt1_steps: Optional[int]=None, prompt2: Optional[str]=None, head_start_latents: Optional[Union[(torch.FloatTensor, list)]]=None, head_start_step: Optional[int]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[(str, List[str])]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[torch.Generator]=None, latents: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[([int, int, torch.FloatTensor], None)]]=None, callback_steps: Optional[int]=1):
        """Run the denoising loop and return images (NSFW detection disabled)."""
        height = (height or (self.unet.config.sample_size * self.vae_scale_factor))
        width = (width or (self.unet.config.sample_size * self.vae_scale_factor))
        self.check_inputs(prompt, height, width, callback_steps)
        batch_size = (1 if isinstance(prompt, str) else len(prompt))
        device = self._execution_device
        # Classifier-free guidance doubles the batch (uncond + cond).
        do_classifier_free_guidance = (guidance_scale > 1.0)
        text_embeddings = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if (prompt2 is not None):
            # Second prompt used after `prompt1_steps` denoising steps.
            text_embeddings2 = self._encode_prompt(prompt2, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        if (head_start_latents is None):
            num_channels_latents = self.unet.in_channels
            latents = self.prepare_latents((batch_size * num_images_per_prompt), num_channels_latents, height, width, text_embeddings.dtype, device, generator, latents)
        elif (type(head_start_latents) == list):
            # Multi-step schedulers need solver_order historical latents.
            latents = head_start_latents[(- 1)]
            assert (len(head_start_latents) == self.scheduler.config.solver_order)
        else:
            latents = head_start_latents
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        num_warmup_steps = (len(timesteps) - (num_inference_steps * self.scheduler.order))
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for (i, t) in enumerate(timesteps):
                # Skip the first `head_start_step` steps when resuming.
                if ((not head_start_step) or (i >= head_start_step)):
                    latent_model_input = (torch.cat(([latents] * 2)) if do_classifier_free_guidance else latents)
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                    if ((prompt1_steps is None) or (i < prompt1_steps)):
                        noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                    else:
                        # Prompt switch point reached: condition on prompt2.
                        noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings2).sample
                    if do_classifier_free_guidance:
                        (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2)
                        noise_pred = (noise_pred_uncond + (guidance_scale * (noise_pred_text - noise_pred_uncond)))
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
                if (((i + 1) > num_warmup_steps) and (((i + 1) % self.scheduler.order) == 0)):
                    progress_bar.update()
                if ((callback is not None) and ((i % callback_steps) == 0)):
                    callback(i, t, latents)
        image = self.decode_latents(latents)
        # NSFW filtering deliberately disabled in this pipeline.
        has_nsfw_concept = False
        if (output_type == 'pil'):
            image = self.numpy_to_pil(image)
        if (not return_dict):
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
def cp_ckpt(remote_dir='data_wd/youtube_vos_jobs/result', curr_dir='backup'):
    """Mirror local .pth checkpoints from curr_dir/<exp>/<stage>/{ema_ckpt,ckpt}
    into the same layout under remote_dir, overwriting existing remote copies.

    NOTE(review): any OSError during a copy aborts the WHOLE sync silently
    (bare `return` in the except) — confirm this best-effort bail-out is
    intended rather than skip-and-continue.
    """
    exps = os.listdir(curr_dir)
    for exp in exps:
        exp_dir = os.path.join(curr_dir, exp)
        stages = os.listdir(exp_dir)
        for stage in stages:
            stage_dir = os.path.join(exp_dir, stage)
            finals = ['ema_ckpt', 'ckpt']
            for final in finals:
                final_dir = os.path.join(stage_dir, final)
                ckpts = os.listdir(final_dir)
                for ckpt in ckpts:
                    # Only checkpoint files are mirrored.
                    if ('.pth' not in ckpt):
                        continue
                    curr_ckpt_path = os.path.join(final_dir, ckpt)
                    remote_ckpt_path = os.path.join(remote_dir, exp, stage, final, ckpt)
                    if os.path.exists(remote_ckpt_path):
                        # Remove the stale remote copy before writing the new one.
                        os.system('rm {}'.format(remote_ckpt_path))
                    try:
                        shutil.copy(curr_ckpt_path, remote_ckpt_path)
                        print('Copy {} to {}.'.format(curr_ckpt_path, remote_ckpt_path))
                    except OSError as Inst:
                        return
def vgg16_bn(pretrained=False, **kwargs):
    """VGG-16 with batch normalization; optionally load pretrained weights
    from the local models directory."""
    if pretrained:
        # Pretrained checkpoints already carry initialized weights.
        kwargs['init_weights'] = False
    features = make_layers(cfg['D'], batch_norm=True)
    model = VGG(features, **kwargs)
    if pretrained:
        weights_path = os.path.join(models_dir, model_name['vgg16_bn'])
        model.load_state_dict(torch.load(weights_path))
    return model
def create_dir_struct():
    """Create the train/val/test split directories in the CWD if absent.

    Uses os.makedirs(..., exist_ok=True), which is atomic with respect to the
    existence check — the original isdir-then-mkdir pair could raise
    FileExistsError under a race.
    """
    for name in ('train', 'val', 'test'):
        os.makedirs(name, exist_ok=True)
def check_input_shape(input_shape, factor):
    """Validate that the spatial dims are >= and divisible by factor * 6.

    Raises ValueError for a None shape or for H/W that are too small or not
    divisible by the minimum size.
    """
    if input_shape is None:
        raise ValueError('Input shape should be a tuple of 3 integers, not None!')
    # Channel position depends on the backend's data format.
    if backend.image_data_format() == 'channels_last':
        h, w = input_shape[:2]
    else:
        h, w = input_shape[1:]
    min_size = factor * 6
    bad = h < min_size or w < min_size or h % min_size != 0 or w % min_size != 0
    if bad:
        raise ValueError(('Wrong shape {}, input H and W should '.format(input_shape) + 'be divisible by `{}`'.format(min_size)))
class MultipleOptimizer(object):
    """Wrap several optimizers and drive them as a single one."""

    def __init__(self, op):
        # `op` is the sequence of wrapped optimizers.
        self.optimizers = op

    def param_groups(self):
        """Concatenation of every wrapped optimizer's param groups."""
        groups = []
        for opt in self.optimizers:
            groups += opt.param_groups
        return groups

    def zero_grad(self):
        """Clear gradients on every wrapped optimizer."""
        for opt in self.optimizers:
            opt.zero_grad()

    def step(self):
        """Advance every wrapped optimizer by one step."""
        for opt in self.optimizers:
            opt.step()

    def state(self):
        """Merged state mapping across all wrapped optimizers (later keys win)."""
        merged = {}
        for opt in self.optimizers:
            merged.update(opt.state)
        return merged

    def state_dict(self):
        """One state_dict per wrapped optimizer, in wrapping order."""
        return [opt.state_dict() for opt in self.optimizers]

    def load_state_dict(self, state_dicts):
        """Restore each wrapped optimizer from its matching state_dict."""
        assert (len(state_dicts) == len(self.optimizers))
        for opt, sd in zip(self.optimizers, state_dicts):
            opt.load_state_dict(sd)
class Logger(object):
    """Combined text-file and TensorBoard logger rooted at ./results/<fn>/.

    (The original docstring was a bare 'Reference:' — the reference URL was
    scrubbed from this copy.)
    """

    def __init__(self, fn, ask=True):
        if (not os.path.exists('./results/')):
            os.mkdir('./results/')
        logdir = self._make_dir(fn)
        if (not os.path.exists(logdir)):
            os.mkdir(logdir)
        # A non-empty log dir is wiped only after interactive confirmation.
        if ((len(os.listdir(logdir)) != 0) and ask):
            ans = input('log_dir is not empty. All data inside log_dir will be deleted. Will you proceed [y/N]? ')
            if (ans in ['y', 'Y']):
                shutil.rmtree(logdir)
            else:
                exit(1)
        self.set_dir(logdir)

    def _make_dir(self, fn):
        # Layout: ./results/<fn>/
        logdir = f'./results/{fn}/'
        return logdir

    def set_dir(self, logdir, log_fn='log.txt'):
        """Point the logger at *logdir*, opening the text log in append mode."""
        self.logdir = logdir
        if (not os.path.exists(logdir)):
            os.mkdir(logdir)
        self.writer = SummaryWriter(logdir)
        self.log_file = open(os.path.join(logdir, log_fn), 'a')

    def log(self, string):
        """Write a timestamped message to both the log file and stdout."""
        self.log_file.write((('[%s] %s' % (datetime.now(), string)) + '\n'))
        self.log_file.flush()
        print(('[%s] %s' % (datetime.now(), string)))
        sys.stdout.flush()

    def log_dirname(self, string):
        """Log a message suffixed with the current log directory."""
        self.log_file.write((('%s (%s)' % (string, self.logdir)) + '\n'))
        self.log_file.flush()
        print(('%s (%s)' % (string, self.logdir)))
        sys.stdout.flush()

    def scalar_summary(self, tag, value, step):
        """Add a scalar to TensorBoard."""
        self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, images, step):
        """Add an image to TensorBoard."""
        self.writer.add_image(tag, images, step)

    def histo_summary(self, tag, values, step):
        """Add a histogram ('auto' binning) to TensorBoard."""
        self.writer.add_histogram(tag, values, step, bins='auto')
def test_registry():
    """Sanity-check that the loss registry maps 'disp_mask' to MaskReg."""
    key = 'disp_mask'
    assert key in LOSS_REG, 'Missing key from loss registry.'
    assert LOSS_REG[key] == MaskReg, 'Incorrect class in loss registry.'
def get_shared_folder() -> Path:
    """Return (creating it if needed) the per-user experiments directory under
    /checkpoint; raise RuntimeError when no shared storage is mounted."""
    user = os.getenv('USER')
    if not Path('/checkpoint/').is_dir():
        raise RuntimeError('No shared folder available')
    shared = Path(f'/checkpoint/{user}/experiments')
    shared.mkdir(exist_ok=True)
    return shared
def _check_executable(cmd): if (subprocess.call('which {}'.format(cmd), shell=True) != 0): return False else: return True
def parseAbsFileLinesInList(pathToListingFile):
    """Parse a listing file into a list of normalized paths.

    Per line: a literal '-' is kept as '-'; lines starting with '#' and blank
    lines are skipped; absolute paths are normalized as-is; relative paths are
    resolved against the listing file's directory.
    """
    pathToFolderContainingThisListFile = os.path.dirname(pathToListingFile)
    list1 = []
    with open(pathToListingFile, 'r') as inp:
        for line in inp:
            stripped = line.strip()
            if stripped == '-':
                list1.append('-')
            elif not line.startswith('#') and stripped != '':
                if os.path.isabs(stripped):
                    list1.append(os.path.normpath(stripped))
                else:
                    # os.path.join (instead of dir + '/' + path) keeps paths
                    # relative when the listing file is a bare filename
                    # (dirname == ''), where concatenation produced '/path'.
                    list1.append(os.path.normpath(os.path.join(pathToFolderContainingThisListFile, stripped)))
    return list1
class PoolFormerForImageClassification(metaclass=DummyObject):
    """Import-time placeholder: raises via requires_backends unless the
    `torch` backend is installed."""
    # Backends that must be present before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with a helpful message when torch is missing.
        requires_backends(self, ['torch'])
def _test():
    """Smoke-test the revnet variants: parameter counts, forward and backward."""
    import torch
    pretrained = False
    models = [revnet38, revnet110, revnet164]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        # _calc_width (defined elsewhere in this file) counts parameters.
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Expected parameter counts per variant.
        assert ((model != revnet38) or (weight_count == 685864))
        assert ((model != revnet110) or (weight_count == 1982600))
        assert ((model != revnet164) or (weight_count == 2491656))
        # Single ImageNet-sized input; check output shape and backprop.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
def which_algorithm(config: N2VConfig):
    """Classify an N2V configuration as StructN2V, N2V2 or plain N2V."""
    # A structure mask always means StructN2V.
    if config.structN2Vmask is not None:
        return Algorithm.StructN2V
    # N2V2 = median manipulator + no residual U-Net + blurpool + skip-skipone.
    is_n2v2 = (
        config.n2v_manipulator == PixelManipulator.MEDIAN.value
        and not config.unet_residual
        and config.blurpool
        and config.skip_skipone
    )
    return Algorithm.N2V2 if is_n2v2 else Algorithm.N2V
class SimpleSampler(BaseSampler):
    """Single-environment sampler: one env step per sample() call, with
    per-path bookkeeping and pool insertion at episode end.

    NOTE(review): relies on BaseSampler for env, policy, pool,
    _max_path_length, _last_n_paths and _batch_size — confirm.
    """

    def __init__(self, **kwargs):
        super(SimpleSampler, self).__init__(**kwargs)
        self._path_length = 0
        self._path_return = 0
        self._current_path = defaultdict(list)
        self._last_path_return = 0
        self._max_path_return = (- np.inf)
        self._n_episodes = 0
        self._current_observation = None
        self._total_samples = 0

    def _process_observations(self, observation, action, reward, terminal, next_observation, info):
        # Scalars are wrapped in lists so np.array stacking yields (T, 1) arrays.
        processed_observation = {'observations': observation, 'actions': action, 'rewards': [reward], 'terminals': [terminal], 'next_observations': next_observation, 'infos': info}
        return processed_observation

    def sample(self):
        """Take one environment step; on episode end flush the path to the pool."""
        if (self._current_observation is None):
            # Lazily reset at the start of each new episode.
            self._current_observation = self.env.reset()
        action = self.policy.actions_np([self.env.convert_to_active_observation(self._current_observation)[None]])[0]
        (next_observation, reward, terminal, info) = self.env.step(action)
        self._path_length += 1
        self._path_return += reward
        self._total_samples += 1
        processed_sample = self._process_observations(observation=self._current_observation, action=action, reward=reward, terminal=terminal, next_observation=next_observation, info=info)
        for (key, value) in processed_sample.items():
            self._current_path[key].append(value)
        if (terminal or (self._path_length >= self._max_path_length)):
            # Episode finished (or truncated): stack transitions and store.
            last_path = {field_name: np.array(values) for (field_name, values) in self._current_path.items()}
            self.pool.add_path(last_path)
            self._last_n_paths.appendleft(last_path)
            self._max_path_return = max(self._max_path_return, self._path_return)
            self._last_path_return = self._path_return
            self.policy.reset()
            # Reset per-path bookkeeping for the next episode.
            self._current_observation = None
            self._path_length = 0
            self._path_return = 0
            self._current_path = defaultdict(list)
            self._n_episodes += 1
        else:
            self._current_observation = next_observation
        return (next_observation, reward, terminal, info)

    def random_batch(self, batch_size=None, **kwargs):
        """Sample a batch from the pool (defaults to the configured batch size)."""
        batch_size = (batch_size or self._batch_size)
        observation_keys = getattr(self.env, 'observation_keys', None)
        return self.pool.random_batch(batch_size, observation_keys=observation_keys, **kwargs)

    def get_diagnostics(self):
        """Base diagnostics plus episode/return counters."""
        diagnostics = super(SimpleSampler, self).get_diagnostics()
        diagnostics.update({'max-path-return': self._max_path_return, 'last-path-return': self._last_path_return, 'episodes': self._n_episodes, 'total-samples': self._total_samples})
        return diagnostics
def load_rf1(as_frame: bool=False) -> Union[(np.ndarray, pd.DataFrame)]:
    """Load the bundled RF1 training split.

    Returns a pandas DataFrame when `as_frame` is True, otherwise a NumPy array.
    """
    with resources.path('pytorch_widedeep.datasets.data', 'rf1_train.parquet.brotli') as fpath:
        frame = pd.read_parquet(fpath)
    return frame if as_frame else frame.to_numpy()
def unflatten_values(vals, batch_size, n_samples):
    """Reshape each array in *vals* from (batch*samples, ...) to (batch, samples, ...).

    Generalized to arbitrary trailing data dimensions — the original handled
    only 0-D and 1-D data and ended in a bare `raise`, which surfaced as a
    confusing RuntimeError('No active exception to re-raise') for higher ranks.
    """
    data_dim = vals[0].ndim - 1
    # All inputs must share the same data rank.
    assert all(v.ndim == data_dim + 1 for v in vals)
    return [v.reshape((batch_size, n_samples) + v.shape[1:]) for v in vals]
class ShardOptim():
    """Reassemble a sharded (FSDP) optimizer state dict saved as safetensors
    slices plus a pickled meta file, and reshard it for a new model layout."""

    class ShardFlatManager():
        """Lazy view over one parameter's optimizer-state slices."""

        def __init__(self, param_name, fds, optim_slice):
            self.fds = fds
            self.param_name = param_name
            self.optim_slice = optim_slice

        def items(self):
            return self.optim_slice.items()

        def check_1d(self, state_name):
            # True iff every stored slice of this state is 1-D (flattened).
            for each_slice in self.optim_slice[state_name]:
                if (len(each_slice.get_shape()) != 1):
                    return False
            return True

        def load_tensor_by_param_and_state_name(self, state_name, target_start=None, target_end=None):
            """Load either the whole (scalar) state, or a [start, end) span
            stitched together from the flattened shards.

            NOTE(review): the `if self.check_1d(...): raise` guard reads
            inverted — it raises "should be scaler" precisely when the state IS
            1-D; confirm against the original source.
            """
            if ((target_start is None) and (target_end is None)):
                if self.check_1d(state_name):
                    raise ValueError(f'{self.param_name}, {state_name} should be scaler')
                return self.fds[state_name].get_tensor(f'{self.param_name}-{state_name}')
            if ((target_start is None) or (target_end is None)):
                raise ValueError('Both `target_start` and `target_end` must be None or number')
            optim_slice = self.optim_slice[state_name]
            shards = []
            for s in optim_slice:
                (seg_start, seg_end) = (0, s.get_shape()[0])
                if ((seg_start <= target_start) and (target_end <= seg_end)):
                    # Entire requested span lives inside this shard.
                    shards.append(s[target_start:target_end])
                    break
                if ((target_start <= seg_end) and (target_end >= seg_start)):
                    # Partial overlap: take the intersection.
                    shards.append(s[max(target_start, seg_start):min(target_end, seg_end)])
                # Shift the window into the next shard's local coordinates.
                target_start -= seg_end
                target_end -= seg_end
            return torch.cat(shards)

    def __init__(self, path):
        osd_param_files = sorted(glob.glob(f'{path}/optim_param*'))
        self.osd_param = []
        self.fds = defaultdict((lambda : defaultdict(dict)))
        with open(f'{path}/optim_meta', 'rb') as f:
            self.osd_meta = pickle.load(f)
        for i in osd_param_files:
            # NOTE(review): the opened handle `f` is unused — safe_open takes
            # the path itself; confirm intent.
            with open(i, 'rb') as f:
                self.osd_param.append(safetensors.safe_open(i, framework='pt'))
        self.state_dict = defaultdict((lambda : defaultdict(list)))
        # Safetensors keys are '<param_name>-<state_name>'.
        for fd in self.osd_param:
            for k in fd.keys():
                (param_name, state_name) = k.split('-')
                self.state_dict[param_name][state_name].append(fd.get_slice(k))
                self.fds[param_name][state_name] = fd
        self.state_dict['param_groups'] = []
        for meta in self.osd_meta:
            # param_groups entries mirror the meta minus the name list.
            self.state_dict['param_groups'].append({k: v for (k, v) in meta.items() if (k != 'params_names')})
        state = self.state_dict['state']
        for p in self.osd_meta:
            for param_name in p['params']:
                state[param_name] = ShardOptim.ShardFlatManager(param_name, self.fds[param_name], self.state_dict[param_name])

    def reshard_optim_state_dict(self, model):
        """Reshard the reconstructed full state dict for *model*, temporarily
        monkey-patching FSDP's per-param sharding hook with the lazy loader."""
        from torch.distributed.fsdp import _optim_utils
        orig_fn = _optim_utils._shard_orig_param_state
        _optim_utils._shard_orig_param_state = self._shard_orig_param_state
        shard_dict = FSDP.shard_full_optim_state_dict(self.state_dict, model)
        _optim_utils._shard_orig_param_state = orig_fn
        return shard_dict

    def load_tensor_by_param_and_state_name(self, param_name, state_name, target_start=None, target_end=None):
        """Delegate to the parameter's ShardFlatManager."""
        return self.state_dict['state'][param_name].load_tensor_by_param_and_state_name(state_name, target_start, target_end)

    def _shard_orig_param_state(fsdp_param_info, fqn, optim_state):
        # NOTE(review): signature takes no `self` — upstream this is a
        # @staticmethod; the decorator appears stripped in this copy.
        if (not optim_state):
            return {}
        flat_param = fsdp_param_info.handle.flat_param
        param_idx = fsdp_param_info.param_indices[fqn]
        shard_param_info = flat_param._shard_param_infos[param_idx]
        if (not shard_param_info.in_shard):
            return {}
        new_optim_state: Dict[(str, Any)] = {}
        intra_param_start_idx = shard_param_info.intra_param_start_idx
        intra_param_end_idx = shard_param_info.intra_param_end_idx
        for (state_name, _) in optim_state.items():
            if optim_state.check_1d(state_name):
                # Flattened state: load only this rank's span (end inclusive).
                value = optim_state.load_tensor_by_param_and_state_name(state_name, intra_param_start_idx, (intra_param_end_idx + 1))
            else:
                value = optim_state.load_tensor_by_param_and_state_name(state_name)
            new_optim_state[state_name] = value
        return new_optim_state
class _ReverseGrad(Function):
    """Gradient-reversal autograd Function: identity on forward, negated and
    scaled gradient on backward.

    NOTE(review): upstream autograd Functions decorate forward/backward with
    @staticmethod; the decorators appear stripped in this copy — confirm.
    """

    def forward(ctx, input, grad_scaling):
        # Stash the scale for backward; forward is the identity.
        ctx.grad_scaling = grad_scaling
        return input.view_as(input)

    def backward(ctx, grad_output):
        grad_scaling = ctx.grad_scaling
        # Negate & scale the incoming gradient; None for the grad_scaling input.
        return (((- grad_scaling) * grad_output), None)
class TestTorchOP(unittest.TestCase):
    """End-to-end check: quantize a Net, trace to TorchScript, compile with the
    project graph backend and compare the two outputs.

    NOTE(review): setUpClass/tearDownClass are normally @classmethod —
    decorators appear stripped in this copy.
    """

    def setUpClass(self):
        pass

    def tearDownClass(self):
        pass

    def test_1(self):
        from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig
        # Per-tensor int8 observers for both activations and weights.
        qconfig = QConfig(activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_affine, dtype=torch.qint8), weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))
        n = Net().eval()
        example_in = torch.rand(3, 256)
        example_in2 = torch.rand(256, 10)
        prepared_model = prepare(n, qconfig, example_inputs=(example_in, example_in2), inplace=False)
        # One calibration pass to populate the observers.
        prepared_model(example_in, example_in2)
        convert_model = convert(prepared_model)
        traced_model = torch.jit.trace(convert_model, (example_in, example_in2))
        print(traced_model.inlined_graph)
        torch.jit.freeze(traced_model.eval())
        torch.jit.save(traced_model, '{}.pt'.format(file_name))
        ref_out = traced_model(example_in, example_in2).detach().numpy()
        # Compile the TorchScript file with the project backend and reload it.
        graph = compile('{}.pt'.format(file_name))
        graph.save(file_name)
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'))
        out = newgraph.inference([example_in.numpy(), example_in2.numpy()])
        # cmpData is a project comparison metric — presumably a distance; must
        # be below 1e-3 for the outputs to be considered equal.
        self.assertTrue((cmpData(ref_out, [*out.values()][0]) < 0.001))
        # Clean up the serialized artifacts.
        os.remove('{}.pt'.format(file_name))
        shutil.rmtree(file_name)
class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    """Auto-class dispatching to the TF next-sentence-prediction model mapping."""
    # Mapping consumed by _BaseAutoModelClass to pick the concrete model.
    _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
def load_mauna():
    """Load ../data/mauna.mat and return its (X, y) arrays."""
    mat = scipy.io.loadmat('../data/mauna.mat')
    return (mat['X'], mat['y'])
def sample_stars_all_elements(weight, selection, elements, errors, nsample, random_seed=None):
    """Draw *nsample* stars from a weighted, selection-masked population and
    return a (len(elements), nsample) array of abundances with Gaussian noise.

    Bug fix: the seed guard is now `random_seed is not None` — the original
    `if random_seed:` silently ignored a seed of 0.
    """
    if random_seed is not None:
        np.random.seed(random_seed)
    # Cumulative selection-weighted distribution, normalized to end at 1.
    weight = np.cumsum((weight * selection))
    weight /= weight[(- 1)]
    sample = np.random.random(nsample)
    sample = np.sort(sample)
    # Count how many sampled quantiles land in each weight bin.
    stars = np.zeros_like(weight)
    for (i, item) in enumerate(weight):
        lower = 0.0 if i == 0 else weight[i - 1]
        stars[i] = len(sample[np.where(np.logical_and((sample > lower), (sample <= item)))])
    # Replicate each bin's abundances once per star drawn from it.
    abundances = np.zeros((len(elements), nsample))
    n = 0
    for i in range(len(weight)):
        if (stars[i] != 0):
            for j in range(int(stars[i])):
                for k in range(len(elements)):
                    abundances[k][n] = elements[k][i]
                n += 1
    abundances = np.array(abundances)
    # Add per-element Gaussian noise with the quoted observational errors.
    for (i, element) in enumerate(elements):
        perturbation = np.random.normal(0, errors[i], len(abundances[i]))
        abundances[i] += perturbation
    return abundances
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
    """Summarize latency results into final_results[name] and print one line.

    Returns result['mAP'] (scaled to percent) when a mAP was supplied in
    result_dict, else None — the original unconditionally indexed
    result['mAP'] and raised KeyError for runs without mAP.
    """
    percentiles = [50.0, 80.0, 90.0, 95.0, 99.0, 99.9]
    buckets = np.percentile(result_list, percentiles).tolist()
    buckets_str = ','.join(['{}:{:.4f}'.format(p, b) for (p, b) in zip(percentiles, buckets)])
    if (result_dict['total'] == 0):
        # Fall back to the number of recorded latencies.
        result_dict['total'] = len(result_list)
    result = {'took': took, 'mean': np.mean(result_list), 'percentiles': {str(k): v for (k, v) in zip(percentiles, buckets)}, 'qps': (len(result_list) / took), 'count': len(result_list), 'good_items': result_dict['good'], 'total_items': result_dict['total']}
    acc_str = ''
    if show_accuracy:
        result['accuracy'] = ((100.0 * result_dict['good']) / result_dict['total'])
        acc_str = ', acc={:.3f}%'.format(result['accuracy'])
    if ('mAP' in result_dict):
        result['mAP'] = (100.0 * result_dict['mAP'])
        acc_str += ', mAP={:.3f}%'.format(result['mAP'])
    final_results[name] = result
    print('{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}'.format(name, result['qps'], result['mean'], took, acc_str, len(result_list), buckets_str))
    # Bug fix: .get avoids KeyError when no mAP was computed.
    return result.get('mAP')
def make_focal_loss_evaluator(cfg):
    """Build a StereoFocalLoss from the focal-loss section of *cfg*."""
    focal_cfg = cfg.model.losses.focal_loss
    return StereoFocalLoss(
        max_disp=focal_cfg.get('max_disp', None),
        start_disp=focal_cfg.get('start_disp', 0),
        dilation=focal_cfg.get('dilation', 1),
        weights=focal_cfg.get('weights', None),
        focal_coefficient=focal_cfg.get('coefficient', 0.0),
        sparse=cfg.data.sparse,
    )
class StateDictType(enum.Enum):
    """Enumeration of supported state-dict key formats."""
    # Legacy diffusers serialization — presumably older key naming; confirm.
    DIFFUSERS_OLD = 'diffusers_old'
    # PEFT-style keys.
    PEFT = 'peft'
    # Current diffusers format.
    DIFFUSERS = 'diffusers'
class ConsistentMCDropout2d(_ConsistentMCDropout):
    """2D variant: the dropout mask keeps the leading dim and is broadcast
    over every remaining dim."""

    def _get_sample_mask_shape(self, sample_shape):
        # Keep the first dim; collapse all trailing dims to 1 for broadcasting.
        leading = sample_shape[0]
        trailing = len(sample_shape) - 1
        return [leading] + [1] * trailing
class StreamingEpochBatchIterator(EpochBatchIterating):
    """Epoch-batched iterator over a torch IterableDataset with optional
    read-ahead buffering.

    NOTE(review): `next_epoch_idx` is read as an attribute (no call) in
    next_epoch_itr(), so upstream it is a @property — the decorator appears
    stripped in this copy; confirm.
    """

    def __init__(self, dataset, max_sentences=1, collate_fn=None, epoch=1, num_workers=0, buffer_size=0, timeout=0, persistent_workers=False):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.max_sentences = max_sentences
        self.collate_fn = collate_fn
        # Epochs are 1-indexed.
        self.epoch = max(epoch, 1)
        self.num_workers = num_workers
        # Cap read-ahead to bound memory usage.
        self.buffer_size = min(buffer_size, 20)
        self.timeout = timeout
        self.persistent_workers = persistent_workers
        self._current_epoch_iterator = None

    def next_epoch_idx(self):
        """Index of the epoch the next next_epoch_itr() call will produce."""
        if ((self._current_epoch_iterator is not None) and self.end_of_epoch()):
            return (self.epoch + 1)
        else:
            return self.epoch

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True):
        """Advance to the next epoch and return its iterator."""
        self.epoch = self.next_epoch_idx
        if (set_dataset_epoch and hasattr(self.dataset, 'set_epoch')):
            self.dataset.set_epoch(self.epoch)
        self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        return (not self._current_epoch_iterator.has_next())

    def iterations_in_epoch(self) -> int:
        if (self._current_epoch_iterator is not None):
            return self._current_epoch_iterator.n
        return 0

    def state_dict(self):
        # Streaming datasets carry no intra-epoch position; only the epoch.
        return {'epoch': self.epoch}

    def load_state_dict(self, state_dict):
        self.epoch = state_dict['epoch']

    def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):
        if (self.num_workers > 0):
            # Silence noisy multiprocessing semaphore_tracker warnings in workers.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        worker_init_fn = getattr(self.dataset, 'worker_init_fn', None)
        itr = torch.utils.data.DataLoader(self.dataset, batch_size=self.max_sentences, collate_fn=self.collate_fn, num_workers=self.num_workers, timeout=self.timeout, worker_init_fn=worker_init_fn, pin_memory=True, persistent_workers=self.persistent_workers)
        if (self.buffer_size > 0):
            itr = BufferedIterator(self.buffer_size, itr)
        # Track iteration count (supports iterations_in_epoch).
        itr = CountingIterator(itr, start=offset)
        return itr
def CFNet(d, replace_mish=False):
    """Build a CFNet with a concat cost volume; optionally swap Mish for ReLU."""
    net = cfnet(d, use_concat_volume=True)
    if replace_mish:
        replace_layers(net, Mish, nn.ReLU(inplace=True))
        print('replacing', Mish(), '->', nn.ReLU())
    return net
def cross_entropy2d(input, target, weight=None, val=False):
    """2-D cross entropy over (n, c, h, w) logits with ignore handling.

    Pixels with target < 0 are dropped; ignore_index=250 is additionally
    ignored inside nll_loss. In training mode (val=False) the summed loss is
    normalized by the number of valid (target >= 0) pixels.

    Modernized: `reduction='sum'` replaces the deprecated
    `size_average=False` keyword, and `.data` access is removed.
    """
    size_average = not val
    n, c, h, w = input.size()
    log_p = F.log_softmax(input, dim=1)
    # (n, c, h, w) -> (n*h*w, c)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view((- 1), c)
    # Keep only rows whose target is valid (>= 0).
    log_p = log_p[(target.view(((n * h) * w), 1).repeat(1, c) >= 0)]
    log_p = log_p.view((- 1), c)
    mask = (target >= 0)
    target = target[mask]
    loss = F.nll_loss(log_p, target, ignore_index=250, weight=weight, reduction='sum')
    if size_average:
        # Normalize by valid-pixel count.
        loss = loss / mask.sum()
    return loss
('action')  # NOTE(review): mangled decorator — upstream this is an event
# registration such as @socketio.on('action'); restore from the original.
def action(ac_data):
    """Handle a client 'action' event: record it in the game state and relay
    it to the other players in the sender's room (virtual-room aware)."""
    room = state.get_room_for_client(request.sid)
    LOG.debug('Received action %s in room %s ', ac_data, room.id)
    game = room.game
    metadata = {'mturk_id': mturk_params(request.args)['workerId']}
    if ('virtual_room_id' in ac_data):
        vroom = ac_data['virtual_room_id']
        # Remember whose turn it was before the game processes the message.
        old_player = copy.deepcopy(game.turn_players[vroom])
        game.message(ac_data, vroom=vroom, metadata=metadata)
        for i in game.virtual_rooms[vroom]:
            io.emit('action', {'data': ac_data}, to=room.players[i])
        if TAKE_TURNS:
            io.emit('your_turn', {'vroom': vroom}, to=room.players[game.turn_players[vroom]])
            io.emit('end_turn', {'vroom': vroom}, to=room.players[old_player])
    else:
        # NOTE(review): `metadata` is passed positionally here but by keyword
        # above, and `vroom` below is undefined on this branch — this would
        # raise NameError when TAKE_TURNS is set; confirm against upstream.
        game.message(ac_data, metadata)
        for (i, client_id) in enumerate(room.players):
            io.emit('action', {'data': ac_data}, to=client_id)
        if TAKE_TURNS:
            io.emit('your_turn', to=room.players[game.turn_player])
            io.emit('end_turn', {'vroom': vroom}, to=room.players[(1 - game.turn_players[vroom])])
class Sigurbergsson2019(dataset.Dataset):
    """DKhate / OffensEval-DA (Sigurbergsson, 2019) dataset wrapper."""
    name = 'sigurbergsson2019'
    # TODO(review): the download URL was scrubbed from this copy — the source
    # arrived as the syntactically invalid merged line
    # "url = ' hash = 'fb5c...'". Restore the real URL from upstream.
    url = ''
    hash = 'fb5c41c385062af222f68c8ebb2f7a86da26c081f6822f0'
    files = [{'name': 'sigurbergsson2019da.csv', 'language': 'da', 'type': 'training', 'platform': 'unknown'}]
    license = 'UNKNOWN'

    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Extract the tarball, clean the TSV and copy it into dataset_folder.

        NOTE(review): first arg is `cls` — likely a stripped @classmethod.
        """
        helpers.untarbz_file(tmp_file_path)
        tmp_file_path = os.path.join(os.path.dirname(tmp_file_path), 'dkhate/oe20da_data/offenseval-da-training-v1.tsv')
        tmp_file_path = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'sigurbergsson2019da.csv'))

    def unify_row(cls, row):
        """Map a raw OffensEval row to the unified schema (text + labels)."""
        row['text'] = row['tweet']
        # Only OFF/NOT are valid labels; anything else yields no label.
        if ((row['subtask_a'] != 'OFF') and (row['subtask_a'] != 'NOT')):
            row['labels'] = []
        else:
            row['labels'] = [row['subtask_a']]
        row = row.drop(['id', 'tweet', 'subtask_a'])
        return row
def test_find_duplicates_dir_num_enc_workers(cnn, mocker):
    """num_enc_workers must be forwarded unchanged to encode_images."""
    num_enc_workers = 2
    cnn.encoding_map = data_encoding_map()
    ret_val_find_dup_dict = {'filename1.jpg': [('dup1.jpg', 0.82)], 'filename2.jpg': [('dup2.jpg', 0.9)]}
    # Stub out encoding and duplicate search so only argument plumbing is tested.
    encode_images_mocker = mocker.patch('imagededup.methods.cnn.CNN.encode_images')
    find_dup_dict_mocker = mocker.patch('imagededup.methods.cnn.CNN._find_duplicates_dict', return_value=ret_val_find_dup_dict)
    cnn._find_duplicates_dir(image_dir=TEST_IMAGE_DIR, min_similarity_threshold=0.9, scores=False, num_enc_workers=num_enc_workers, num_sim_workers=cpu_count())
    # The worker count must reach the encoder exactly once, unmodified.
    encode_images_mocker.assert_called_once_with(image_dir=TEST_IMAGE_DIR, recursive=False, num_enc_workers=num_enc_workers)
def set_grad_none(model, targets):
    """Reset .grad to None for every named parameter whose name is in *targets*."""
    for name, param in model.named_parameters():
        if name in targets:
            param.grad = None
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning: tokenized conversations loaded from
    a JSON file."""

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
        super(SupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        # Expected schema: a JSON list of dicts each with a 'conversations' entry.
        # NOTE(review): the file handle from open() is never closed — consider a
        # with-block.
        list_data_dict = json.load(open(data_path, 'r'))
        logging.warning('Formatting inputs...')
        sources = [example['conversations'] for example in list_data_dict]
        # `preprocess` (defined elsewhere) tokenizes and builds supervision labels.
        data_dict = preprocess(sources, tokenizer)
        self.input_ids = data_dict['input_ids']
        self.labels = data_dict['labels']

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[(str, torch.Tensor)]:
        return dict(input_ids=self.input_ids[i], labels=self.labels[i])
def write_latest_filename(output_label, latest_filename):
    """Record *latest_filename* (newline-terminated) in
    latest_filenames/latest_<output_label>.txt, creating parents as needed."""
    holder = os.path.join('latest_filenames', 'latest_{}.txt'.format(output_label))
    ensure_containing_directory_exists(holder)
    with open(holder, 'w') as stream:
        stream.write(latest_filename + '\n')
    _logger.info('Updated %r', holder)
class SECONDNet(Detector3DTemplate):
    """SECOND 3D detector: chains the built sub-modules over the batch dict.

    Training mode returns (loss dict, tensorboard dict, display dict);
    eval mode returns (prediction dicts, recall dicts) from post-processing.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if not self.training:
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {'loss': loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Total loss is the RPN (dense head) loss alone."""
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {'loss_rpn': loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, {}
class MultiScalePrior(Flow):
    """Per-scale prior block of a multi-scale flow.

    Forward pipeline: 1x1 conv flow -> NICE coupling -> channel split, with an
    ActNorm applied only to the factored-out half; the halves are re-joined
    before returning. ``backward`` inverts the exact same sequence.
    """

    def __init__(self, in_channels, hidden_channels, h_channels, factor, transform, alpha, inverse, coupling_type, h_type, activation, normalize, num_groups):
        super(MultiScalePrior, self).__init__(inverse)
        self.conv1x1 = Conv1x1Flow(in_channels, inverse=inverse)
        self.coupling = NICE2d(in_channels, hidden_channels=hidden_channels, h_channels=h_channels, transform=transform, alpha=alpha, inverse=inverse, factor=factor, type=coupling_type, h_type=h_type, split_type='continuous', order='up', activation=activation, normalize=normalize, num_groups=num_groups)
        # channels remaining after the coupling factors out its z1 part
        out_channels = (in_channels // factor)
        self.z1_channels = self.coupling.z1_channels
        # the split must exactly partition the channel dimension
        assert ((out_channels + self.z1_channels) == in_channels)
        self.actnorm = ActNorm2dFlow(out_channels, inverse=inverse)

    def sync(self):
        # propagate weight synchronization to the 1x1 convolution flow
        self.conv1x1.sync()

    def forward(self, input: torch.Tensor, h=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Transform *input*; returns (output, accumulated log-determinant)."""
        (out, logdet_accum) = self.conv1x1.forward(input)
        (out, logdet) = self.coupling.forward(out, h=h)
        logdet_accum = (logdet_accum + logdet)
        # actnorm only touches the non-z1 half; halves are re-joined afterwards
        (out1, out2) = split2d(out, self.z1_channels)
        (out2, logdet) = self.actnorm.forward(out2)
        logdet_accum = (logdet_accum + logdet)
        out = unsplit2d([out1, out2])
        return (out, logdet_accum)

    def backward(self, input: torch.Tensor, h=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Inverse of :meth:`forward` (actnorm -> coupling -> conv1x1, each inverted)."""
        (out1, out2) = split2d(input, self.z1_channels)
        (out2, logdet_accum) = self.actnorm.backward(out2)
        out = unsplit2d([out1, out2])
        (out, logdet) = self.coupling.backward(out, h=h)
        logdet_accum = (logdet_accum + logdet)
        (out, logdet) = self.conv1x1.backward(out)
        logdet_accum = (logdet_accum + logdet)
        return (out, logdet_accum)

    def init(self, data, h=None, init_scale=1.0) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Data-dependent initialization pass; mirrors forward using each sub-flow's init()."""
        (out, logdet_accum) = self.conv1x1.init(data, init_scale=init_scale)
        (out, logdet) = self.coupling.init(out, h=h, init_scale=init_scale)
        logdet_accum = (logdet_accum + logdet)
        (out1, out2) = split2d(out, self.z1_channels)
        (out2, logdet) = self.actnorm.init(out2, init_scale=init_scale)
        logdet_accum = (logdet_accum + logdet)
        out = unsplit2d([out1, out2])
        return (out, logdet_accum)
class ATCDataASR(datasets.GeneratorBasedBuilder):
    """HuggingFace datasets builder for Kaldi-style ATC speech data.

    Expects a data dir with Kaldi files: ``text`` (utt-id + transcript),
    ``wav.scp`` (wav-id + path) and ``segments`` (utt-id, wav-id, start, end).
    """

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = 'all'
    BUILDER_CONFIGS = [ATCDataASRConfig(name='train', description='ATC train dataset.'), ATCDataASRConfig(name='dev', description='ATC dev dataset.'), ATCDataASRConfig(name='test', description='ATC test dataset.'), ATCDataASRConfig(name='unsupervised', description='ATC unsupervised dataset.')]

    def _info(self):
        """Declare the example schema and the ASR task template."""
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'file': datasets.Value('string'), 'audio': datasets.features.Audio(sampling_rate=_SAMPLING_RATE), 'text': datasets.Value('string'), 'segment_start_time': datasets.Value('float'), 'segment_end_time': datasets.Value('float'), 'duration': datasets.Value('float')}), supervised_keys=('audio', 'text'), homepage=_HOMEPAGE, citation=_CITATION, task_templates=[AutomaticSpeechRecognition(audio_column='audio', transcription_column='text')])

    def _split_generators(self, dlmanager):
        """Map the config name onto a datasets.Split; data comes from config.data_dir (no download)."""
        split = self.config.name
        if ('unsupervised' in split):
            split_name = datasets.Split.TEST
        elif (('test' in split) or ('dev' in split) or ('dummy' in split)):
            split_name = datasets.Split.TEST
        else:
            split_name = datasets.Split.TRAIN
        filepath = self.config.data_dir
        return [datasets.SplitGenerator(name=split_name, gen_kwargs={'filepath': filepath, 'split': split})]

    def _generate_examples(self, filepath, split):
        """Yield (utt_id, example) pairs by joining text, wav.scp and segments."""
        logger.info('Generating examples located in: %s', filepath)
        text_file = os.path.join(filepath, 'text')
        wavscp = os.path.join(filepath, 'wav.scp')
        segments = os.path.join(filepath, 'segments')
        id_ = ''
        (text_dict, wav_dict) = ({}, {})
        (segments_dict, utt2wav_id) = ({}, {})
        line = 0
        # 1) transcripts: utt-id -> cleaned text (empty text only for unsupervised data)
        with open(text_file) as text_f:
            for line in text_f:
                if (len(line.split(' ')) > 1):
                    (id_, transcript) = line.split(' ', maxsplit=1)
                    transcript = _remove_special_characters(transcript)
                    if (len(transcript.split(' ')) == 0):
                        continue
                    if (len(transcript) < 2):
                        # too short to be a usable transcript
                        continue
                    text_dict[id_] = transcript
                else:
                    # id-only lines are valid only for the unsupervised subset
                    if (not ('test_unsup' in self.config.name)):
                        continue
                    id_ = line.rstrip().split(' ')[0]
                    text_dict[id_] = ''
        # 2) audio: wav-id -> (path, samples, sampling rate); whole file read eagerly
        with open(wavscp) as text_f:
            for line in text_f:
                if line:
                    if (len(line.split()) < 2):
                        continue
                    (id_, wavpath) = line.split(' ', maxsplit=1)
                    # keep only the token that looks like an audio file path
                    wavpath = [x for x in wavpath.split(' ') if (('.wav' in x) or ('.WAV' in x) or ('.flac' in x) or ('.sph' in x))][0].rstrip()
                    (segment, sampling_rate) = sf.read(wavpath, dtype=np.int16)
                    wav_dict[id_] = [wavpath.rstrip(), segment, sampling_rate]
        # 3) segments: utt-id -> (start, end) plus utt-id -> wav-id mapping
        with open(segments) as text_f:
            for line in text_f:
                if line:
                    if (len(line.split()) < 4):
                        continue
                    (id_, wavid_, start, end) = line.rstrip().split(' ')
                    segments_dict[id_] = (start.rstrip(), end.rstrip())
                    utt2wav_id[id_] = wavid_
        # join the three tables and cut the audio segment out of its recording
        for (rec_id, text) in text_dict.items():
            if ((rec_id in utt2wav_id) and (rec_id in segments_dict)):
                (wavpath, segment, sampling_rate) = wav_dict[utt2wav_id[rec_id]]
                (seg_start, seg_end) = segments_dict[rec_id]
                (seg_start, seg_end) = (float(seg_start), float(seg_end))
                duration = round((seg_end - seg_start), 3)
                samples = _extract_audio_segment(segment, sampling_rate, float(seg_start), float(seg_end))
                example = {'audio': {'path': wavpath, 'array': samples, 'sampling_rate': sampling_rate}, 'id': rec_id, 'file': wavpath, 'text': text, 'segment_start_time': format(float(seg_start), '.3f'), 'segment_end_time': format(float(seg_end), '.3f'), 'duration': format(float(duration), '.3f')}
                (yield (rec_id, example))
class Barrier(Node):
    """AST node representing an OPENQASM ``barrier`` statement."""

    def __init__(self, children):
        super().__init__('barrier', children, None)

    def qasm(self, prec=15):
        """Render this node as OPENQASM source text."""
        operands = self.children[0].qasm(prec)
        return 'barrier {};'.format(operands)
# NOTE(review): the stray '_module' token below is likely the tail of a stripped
# registration decorator (e.g. '@DATASETS.register_module'); as written it is not
# valid Python — restore the decorator from the original file.
_module
class XMLDataset(CustomDataset):
    """Pascal-VOC-style detection dataset read from per-image XML annotation files."""

    def __init__(self, min_size=None, **kwargs):
        super(XMLDataset, self).__init__(**kwargs)
        # labels start at 1; 0 is implicitly background
        self.cat2label = {cat: (i + 1) for (i, cat) in enumerate(self.CLASSES)}
        # boxes smaller than this (either side) are moved to the ignore set
        self.min_size = min_size

    def load_annotations(self, ann_file):
        """Read image ids from *ann_file* and collect filename/size for each image's XML."""
        img_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = 'JPEGImages/{}.jpg'.format(img_id)
            xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            img_infos.append(dict(id=img_id, filename=filename, width=width, height=height))
        return img_infos

    def get_ann_info(self, idx):
        """Parse the XML for image *idx* into bbox/label arrays.

        Difficult objects and (when ``min_size`` is set) too-small boxes go into
        the ``*_ignore`` arrays instead of the training arrays.
        """
        img_id = self.img_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            label = self.cat2label[name]
            difficult = int(obj.find('difficult').text)
            bnd_box = obj.find('bndbox')
            bbox = [int(bnd_box.find('xmin').text), int(bnd_box.find('ymin').text), int(bnd_box.find('xmax').text), int(bnd_box.find('ymax').text)]
            ignore = False
            if self.min_size:
                # size filtering is a training-only concept
                assert (not self.test_mode)
                w = (bbox[2] - bbox[0])
                h = (bbox[3] - bbox[1])
                if ((w < self.min_size) or (h < self.min_size)):
                    ignore = True
            if (difficult or ignore):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if (not bboxes):
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0,))
        else:
            # VOC coordinates are 1-based; shift to 0-based
            bboxes = (np.array(bboxes, ndmin=2) - 1)
            labels = np.array(labels)
        if (not bboxes_ignore):
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0,))
        else:
            bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
            labels_ignore = np.array(labels_ignore)
        ann = dict(bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64))
        return ann
class Normalize(tv_t.Normalize):
    """Thin passthrough over torchvision's Normalize so the project can reference
    its own transform class; adds no behavior."""

    def __init__(self, mean, std, inplace=False) -> None:
        super().__init__(mean, std, inplace)
class TimeInvariantMLSAFilter(object):
    """MLSA synthesis filter driven by a single, fixed mel-cepstral coefficient frame."""

    def __init__(self, coef, alpha, n_shift):
        self.coef = coef
        self.n_shift = n_shift
        filter_order = coef.shape[0] - 1
        self.mlsa_filter = pysptk.synthesis.Synthesizer(
            pysptk.synthesis.MLSADF(order=filter_order, alpha=alpha),
            hopsize=n_shift,
        )

    def __call__(self, y):
        """Filter the 1-D excitation *y*; the one coefficient frame is tiled over every hop."""
        assert len(y.shape) == 1
        excitation = np.float64(y)
        num_frames = int(len(excitation) / self.n_shift) + 1
        tiled_coef = np.tile(self.coef, [num_frames, 1])
        return self.mlsa_filter.synthesis(excitation, tiled_coef)
class II2S(nn.Module):
    """II2S GAN inversion: optimizes per-layer W+ latents so the generator
    reproduces each input image (high- and low-res reconstruction losses plus
    a p-norm latent regularizer)."""

    def __init__(self, opts):
        super(II2S, self).__init__()
        self.opts = opts
        self.net = Net(self.opts)
        self.load_downsampling()
        self.setup_loss_builder()
        self.set_up_face_predictor()

    def load_downsampling(self):
        """Bicubic downsampler from the generator resolution to 256px."""
        factor = (self.opts.size // 256)
        self.downsample = BicubicDownSample(factor=factor)

    def setup_optimizer(self):
        """Build the latent variables (one tensor per generator layer, or one
        shared tensor when tile_latent) and the configured optimizer over them."""
        opt_dict = {'sgd': torch.optim.SGD, 'adam': torch.optim.Adam, 'sgdm': partial(torch.optim.SGD, momentum=0.9), 'adamax': torch.optim.Adamax}
        latent = []
        if self.opts.tile_latent:
            # one shared latent repeated across layers; only that tensor is optimized
            tmp = self.net.latent_avg.clone().detach().cuda()
            tmp.requires_grad = True
            for i in range(self.net.layer_num):
                latent.append(tmp)
            optimizer = opt_dict[self.opts.opt_name]([tmp], lr=self.opts.learning_rate)
        else:
            for i in range(self.net.layer_num):
                tmp = self.net.latent_avg.clone().detach().cuda()
                tmp.requires_grad = True
                latent.append(tmp)
            optimizer = opt_dict[self.opts.opt_name](latent, lr=self.opts.learning_rate)
        return (optimizer, latent)

    def setup_dataloader(self, image_path=None, align_input=False):
        self.dataset = ImagesDataset(opts=self.opts, image_path=image_path, face_predictor=self.predictor, align_input=align_input)
        self.dataloader = DataLoader(self.dataset, batch_size=1, shuffle=False)
        print('Number of images: {}'.format(len(self.dataset)))

    def setup_loss_builder(self):
        self.loss_builder = LossBuilder(self.opts)

    def set_up_face_predictor(self):
        """Download (if needed) and load the dlib 68-landmark face predictor."""
        self.predictor = None
        predictor_weight = os.path.join('pretrained_models', 'shape_predictor_68_face_landmarks.dat')
        download_weight(predictor_weight)
        self.predictor = dlib.shape_predictor(predictor_weight)

    def invert_images(self, image_path=None, output_dir=None, return_latents=False, align_input=False, save_output=True):
        """Optimize latents for every image in the dataloader.

        Returns the list of final latents when *return_latents* is True, else None.
        """
        final_latents = None
        if return_latents:
            final_latents = []
        self.setup_dataloader(image_path=image_path, align_input=align_input)
        device = self.opts.device
        ibar = tqdm(self.dataloader, desc='Images')
        for (ref_im_H, ref_im_L, ref_name) in ibar:
            (optimizer, latent) = self.setup_optimizer()
            pbar = tqdm(range(self.opts.steps), desc='Embedding')
            for step in pbar:
                optimizer.zero_grad()
                latent_in = torch.stack(latent).unsqueeze(0)
                (gen_im, _) = self.net.generator([latent_in], input_is_latent=True, return_latents=False)
                im_dict = {'ref_im_H': ref_im_H.to(device), 'ref_im_L': ref_im_L.to(device), 'gen_im_H': gen_im, 'gen_im_L': self.downsample(gen_im)}
                (loss, loss_dic) = self.cal_loss(im_dict, latent_in)
                loss.backward()
                optimizer.step()
                if self.opts.verbose:
                    pbar.set_description('Embedding: Loss: {:.3f}, L2 loss: {:.3f}, Perceptual loss: {:.3f}, P-norm loss: {:.3f}'.format(loss, loss_dic['l2'], loss_dic['percep'], loss_dic['p-norm']))
                if (self.opts.save_intermediate and ((step % self.opts.save_interval) == 0) and save_output):
                    self.save_intermediate_results(ref_name, gen_im, latent_in, step, output_dir)
            if save_output:
                self.save_results(ref_name, gen_im, latent_in, output_dir)
            if return_latents:
                final_latents.append(latent_in)
        return final_latents

    def cal_loss(self, im_dict, latent_in):
        """Reconstruction losses plus the latent p-norm regularizer."""
        (loss, loss_dic) = self.loss_builder(**im_dict)
        p_norm_loss = self.net.cal_p_norm_loss(latent_in)
        loss_dic['p-norm'] = p_norm_loss
        loss += p_norm_loss
        return (loss, loss_dic)

    def save_results(self, ref_name, gen_im, latent_in, output_dir):
        """Save the final image (.png) and latent (.npy) for one input."""
        save_im = toPIL(((gen_im[0] + 1) / 2).detach().cpu().clamp(0, 1))
        save_latent = latent_in.detach().cpu().numpy()
        os.makedirs(output_dir, exist_ok=True)
        latent_path = os.path.join(output_dir, f'{ref_name[0]}.npy')
        image_path = os.path.join(output_dir, f'{ref_name[0]}.png')
        save_im.save(image_path)
        np.save(latent_path, save_latent)

    def save_intermediate_results(self, ref_name, gen_im, latent_in, step, output_dir):
        """Save a step-numbered snapshot into a per-image subfolder."""
        save_im = toPIL(((gen_im[0] + 1) / 2).detach().cpu().clamp(0, 1))
        save_latent = latent_in.detach().cpu().numpy()
        intermediate_folder = os.path.join(output_dir, ref_name[0])
        os.makedirs(intermediate_folder, exist_ok=True)
        latent_path = os.path.join(intermediate_folder, f'{ref_name[0]}_{step:04}.npy')
        image_path = os.path.join(intermediate_folder, f'{ref_name[0]}_{step:04}.png')
        save_im.save(image_path)
        np.save(latent_path, save_latent)

    def set_seed(self):
        """Seed torch/cuda RNGs for reproducibility when opts.seed is set."""
        # fix: previously read `self.opt`, which does not exist (the attribute
        # set in __init__ is `self.opts`) and raised AttributeError when called
        if self.opts.seed:
            torch.manual_seed(self.opts.seed)
            torch.cuda.manual_seed(self.opts.seed)
            torch.backends.cudnn.deterministic = True
def retrieve_info_for_model(model_type, frameworks: Optional[List[str]]=None):
    """Collect everything needed to work with a model type: config/tokenizer/
    feature-extractor/processor class names, file lists, available frameworks,
    model classes and naming patterns.

    Args:
        model_type: key into the auto-module mappings (e.g. 'bert').
        frameworks: optional subset of ['pt', 'tf', 'flax'] to restrict results to.

    Raises:
        ValueError: if *model_type* is not a registered model type.
    """
    if (model_type not in auto_module.MODEL_NAMES_MAPPING):
        raise ValueError(f'{model_type} is not a valid model type.')
    model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
    config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
    archive_map = auto_module.configuration_auto.CONFIG_ARCHIVE_MAP_MAPPING_NAMES.get(model_type, None)
    if (model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES):
        tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
        # prefer the first (slow) tokenizer entry, falling back to the second
        tokenizer_class = (tokenizer_classes[0] if (tokenizer_classes[0] is not None) else tokenizer_classes[1])
    else:
        tokenizer_class = None
    feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
    processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
    model_files = get_model_files(model_type, frameworks=frameworks)
    model_camel_cased = config_class.replace('Config', '')
    # determine which frameworks actually have a modeling file on disk
    available_frameworks = []
    for fname in model_files['model_files']:
        if ('modeling_tf' in str(fname)):
            available_frameworks.append('tf')
        elif ('modeling_flax' in str(fname)):
            available_frameworks.append('flax')
        elif ('modeling' in str(fname)):
            available_frameworks.append('pt')
    if (frameworks is None):
        frameworks = available_frameworks.copy()
    else:
        # keep only the requested frameworks that are really available
        frameworks = [f for f in frameworks if (f in available_frameworks)]
    model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
    if (archive_map is None):
        model_upper_cased = model_camel_cased.upper()
    else:
        # derive the upper-cased name from the archive-map constant, which is
        # shaped like '<MODEL_NAME>_PRETRAINED_...'
        parts = archive_map.split('_')
        idx = 0
        while ((idx < len(parts)) and (parts[idx] != 'PRETRAINED')):
            idx += 1
        if (idx < len(parts)):
            model_upper_cased = '_'.join(parts[:idx])
        else:
            # no 'PRETRAINED' marker found; fall back to the camel-cased name
            model_upper_cased = model_camel_cased.upper()
    model_patterns = ModelPatterns(model_name, checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), model_type=model_type, model_camel_cased=model_camel_cased, model_lower_cased=model_files['module_name'], model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class)
    return {'frameworks': frameworks, 'model_classes': model_classes, 'model_files': model_files, 'model_patterns': model_patterns}
class BiReLUFunction(InplaceFunction):
    """Bi-directional ReLU: along dim 1, the first half of the channels is
    clamped to >= 0 and the second half to <= 0."""

    def forward(ctx, input, inplace=False):
        if (input.size(1) % 2) != 0:
            raise RuntimeError('dimension 1 of input must be multiple of 2, but got {}'.format(input.size(1)))
        ctx.inplace = inplace
        if ctx.inplace:
            ctx.mark_dirty(input)
            output = input
        else:
            output = input.clone()
        # chunk() returns views aliasing `output`, so the in-place clamps
        # modify `output` directly
        (pos, neg) = output.chunk(2, dim=1)
        pos.clamp_(min=0)
        neg.clamp_(max=0)
        ctx.save_for_backward(output)
        return output

    def backward(ctx, grad_output):
        # fix: `ctx.saved_variables` was deprecated in torch 0.4 and later
        # removed; `ctx.saved_tensors` is the supported equivalent
        (output,) = ctx.saved_tensors
        # pass the gradient through wherever the clamp did not zero the output
        grad_input = grad_output.masked_fill(output.eq(0), 0)
        return (grad_input, None)
def deep_merge(base_item, new_item):
    """Recursively merge *new_item* onto *base_item* without mutating either.

    Dicts merge key-wise (recursing on shared keys), lists concatenate, and
    any other type combination resolves to *new_item*.
    """
    if isinstance(base_item, dict) and isinstance(new_item, dict):
        merged = deepcopy(base_item)
        for key, value in new_item.items():
            merged[key] = deep_merge(merged[key], value) if key in merged else value
        return merged
    if isinstance(base_item, list) and isinstance(new_item, list):
        return deepcopy(base_item) + new_item
    return new_item
def evaluate(config: DictConfig) -> None:
    """Load a trained model from a checkpoint and run validation/test with a fresh Trainer.

    Supports two checkpoint flavors selected by ``config.eval.checkpoint_type``:
    'lightning' (full LightningModule checkpoint) and 'pytorch' (raw state dict
    loaded non-strictly into ``model.model``).
    """
    # allow attribute additions/overrides on the config below
    OmegaConf.set_struct(config, False)
    checkpoint_type = config.eval.get('checkpoint_type', 'lightning')
    if (checkpoint_type not in ['lightning', 'pytorch']):
        raise NotImplementedError(f'checkpoint_type ${checkpoint_type} not supported')
    if (checkpoint_type == 'lightning'):
        cls = hydra.utils.get_class(config.task._target_)
        trained_model = cls.load_from_checkpoint(checkpoint_path=config.eval.ckpt)
    else:
        # instantiate the task first, then load raw weights (non-strict)
        trained_model: LightningModule = hydra.utils.instantiate(config.task, cfg=config, _recursive_=False)
        load_return = trained_model.model.load_state_dict(load_checkpoint(config.eval.ckpt, device=trained_model.device), strict=False)
        log.info(load_return)
    datamodule: LightningDataModule = trained_model._datamodule
    datamodule.prepare_data()
    datamodule.setup()
    log.info(f'Model hyperparameters: {trained_model.hparams}')
    # instantiate every callback/logger declared (with a _target_) in the config
    callbacks: List[Callback] = []
    if ('callbacks' in config):
        for (_, cb_conf) in config['callbacks'].items():
            if ('_target_' in cb_conf):
                log.info(f'Instantiating callback <{cb_conf._target_}>')
                callbacks.append(hydra.utils.instantiate(cb_conf))
    logger: List[LightningLoggerBase] = []
    if ('logger' in config):
        for (_, lg_conf) in config['logger'].items():
            if ('_target_' in lg_conf):
                log.info(f'Instantiating logger <{lg_conf._target_}>')
                logger.append(hydra.utils.instantiate(lg_conf))
    log.info(f'Instantiating trainer <{config.trainer._target_}>')
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger, _convert_='partial')
    log.info('Starting evaluation!')
    # both stages are on by default and individually switchable via config.eval
    if config.eval.get('run_val', True):
        trainer.validate(model=trained_model, datamodule=datamodule)
    if config.eval.get('run_test', True):
        trainer.test(model=trained_model, datamodule=datamodule)
    log.info('Finalizing!')
    utils.finish(config=config, model=trained_model, datamodule=datamodule, trainer=trainer, callbacks=callbacks, logger=logger)
class SENet_senti_attention_wise(nn.Module):
    """ResNet-101 backbone (truncated before avgpool) + sentiment spatial block
    + 3-way linear classifier head."""

    def __init__(self, C):
        super(SENet_senti_attention_wise, self).__init__()
        self.base = models.resnet101(pretrained=True)
        self.spatial = senti_block()
        self.fc = nn.Linear(2048, 3)

    def forward(self, x):
        # run the backbone layer by layer, stopping before global average pooling
        for layer_name, layer in self.base._modules.items():
            if layer_name == 'avgpool':
                break
            x = layer(x)
        logits = self.fc(self.spatial(x))
        # the same logits are returned three times to satisfy a triple-output interface
        return (logits, logits, logits)
class TestUtils(unittest.TestCase):
    """Tests for crop/flip/label-normalization image utilities.

    NOTE(review): the bare string expressions before the first two tests look
    like the arguments of stripped ``@mock.patch(...)`` decorators; without them
    the ``mock_np_random_randint`` parameters are never injected — restore the
    decorators from the original file.
    """

    ('numpy.random.randint')
    def test_random_crop(self, mock_np_random_randint):
        # with randint pinned to 1, the 1x1 crop starts at (1, 1) -> the 255 pixel
        mock_np_random_randint.return_value = 1
        test_img = np.expand_dims(np.array([[0, 255], [0, 255]]), axis=2)
        crop_dims = (1, 1)
        cropped_img = utils.random_crop(test_img, crop_dims)
        self.assertEqual([255], cropped_img)

    ('numpy.random.random')
    def test_random_flip(self, mock_np_random_randint):
        # random() pinned to 0 forces the horizontal-flip branch
        mock_np_random_randint.return_value = 0
        temp = np.array([[0, 255], [0, 255]])
        test_img = np.dstack((temp, temp, temp))
        temp = np.array([[255, 0], [255, 0]])
        expected = np.dstack((temp, temp, temp))
        flipped_img = utils.random_horizontal_flip(test_img)
        np.testing.assert_array_equal(expected, flipped_img)

    def test_normalize_label(self):
        # ten equal counts must normalize to a uniform 0.1 distribution
        labels = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        normed_label = utils.normalize_labels(labels)
        np.testing.assert_array_equal(np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]), normed_label)
# NOTE(review): the leading '.ml_torch_only' is the tail of a stripped decorator
# (likely something like '@mltest.parametrize...ml_torch_only'); as written this
# is not valid Python — restore the decorator from the original file.
.ml_torch_only
def test_ragged_to_dense(dtype, ml):
    """ragged_to_dense must pad/truncate each ragged row to out_col_size,
    filling missing entries with default_value."""
    values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype)
    # row boundaries: rows of length 2, 2, 0, 1, 7, 1
    row_splits = np.array([0, 2, 4, 4, 5, 12, 13], dtype=np.int64)
    out_col_size = 4
    default_value = np.array((- 1), dtype=dtype)
    ans = mltest.run_op(ml, ml.device, True, ml.ops.ragged_to_dense, values, row_splits, out_col_size, default_value)
    # reference implementation: copy each row, dropping elements beyond out_col_size
    expected = np.full(((row_splits.shape[0] - 1), out_col_size), default_value)
    for i in range((row_splits.shape[0] - 1)):
        for (j, value_idx) in enumerate(range(row_splits[i], row_splits[(i + 1)])):
            if (j < expected.shape[1]):
                expected[(i, j)] = values[value_idx]
    np.testing.assert_equal(ans, expected)
# NOTE(review): '_update_dense(default_config=ConfigDense())' appears to be the
# tail of a stripped parametrization decorator for this test — restore it from
# the original file.
_update_dense(default_config=ConfigDense())
def test_cg_dense(*args, **kwargs):
    """Run the CG-GPR sampler and return the empirical mean and covariance of its draws."""
    f = _test_cg_gpr(*args, **kwargs)
    # sample mean over the draws axis
    mf = tf.reduce_mean(f, axis=0)
    res = tf.squeeze((f - mf), axis=(- 1))
    # biased empirical covariance of the centered residuals
    Sff = (tf.matmul(res, res, transpose_a=True) / f.shape[0])
    return (mf, Sff)
def preprocess_image(image, is_training):
    """CIFAR-style preprocessing: pad/random-crop/random-flip augmentation when
    training, then per-image standardization in all cases."""
    if is_training:
        padded = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
        cropped = tf.random_crop(padded, [HEIGHT, WIDTH, DEPTH])
        image = tf.image.random_flip_left_right(cropped)
    return tf.image.per_image_standardization(image)
def rprecision(guess_item, gold_item, rank_keys):
    """Return the best R-precision of the guess against any gold annotation set."""
    gold_ids_list = _get_ids_list(gold_item, rank_keys)
    guess_ids = _get_ids_list(guess_item, rank_keys)[0]
    return max(_computeRprec(guess_ids, gold_ids) for gold_ids in gold_ids_list)
def resnet18_mpncov_160(pretrained=False, progress=True, **kwargs):
    """ResNet-18 with MPN-COV pooling at 160px input resolution.

    Args:
        pretrained: if True, load pretrained weights.
        progress: if True, show a progress bar while downloading weights.
        **kwargs: forwarded to the underlying builder.
    """
    return _resnet_mpncov_160('resnet18_mpncov_160', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
class ANYDataset():
    """Placeholder dataset with zero items, used when no concrete dataset is configured.

    Note: ``initialize`` must be called before ``__len__`` (framework convention).
    """

    def initialize(self, opt):
        # *opt* is accepted for interface compatibility but ignored
        self.data_size = 0

    def __len__(self):
        return self.data_size

    def name(self):
        return 'ANY'
# NOTE(review): the stray '_module()' call before the class is likely the tail of
# a stripped registration decorator (e.g. '@HOOKS.register_module()'); confirm
# against the original file.
_module()
class IterTimerHook(Hook):
    """Hook that records per-iteration data-loading time and total time into the
    runner's log buffer."""

    def before_epoch(self, runner):
        # restart the clock at the start of each epoch
        self.t = time.time()

    def before_iter(self, runner):
        # time spent fetching data since the previous iteration ended
        runner.log_buffer.update({'data_time': (time.time() - self.t)})

    def after_iter(self, runner):
        # total wall time of the iteration, then restart the clock
        runner.log_buffer.update({'time': (time.time() - self.t)})
        self.t = time.time()
class FrozenPbModel(TFModel):
    """TensorFlow model backed by a frozen protobuf graph."""

    # fix: declared without `self` but undecorated, so calling it on an
    # instance passed the instance as `path`; @staticmethod makes both
    # FrozenPbModel.supports_path(p) and instance.supports_path(p) work.
    @staticmethod
    def supports_path(path: str) -> bool:
        """Return True when *path* points at a frozen-pb model."""
        return ('frozen_pb' == get_model_type(path))

    def supports_profiling(self) -> bool:
        """Frozen-pb graphs support profiling."""
        return True
# NOTE(review): the two fragments below are the remains of stripped decorators
# (a hypothesis-style events-subset strategy plus a handler filter); this is not
# valid Python as written — restore the decorators from the original file.
(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
_events_with_registered_handlers_to_subset
def test_list_nested_in_dict(events):
    """Tracing ``x = {1: [2, 3, 4]}`` must fire events in source order."""
    assert (_RECORDED_EVENTS == [])
    run_cell('x = {1: [2, 3, 4]}')
    # expected order: module init, stmt begin, RHS begin, dict begin, key,
    # list begin, 3 elements, list end, value, dict end, RHS end, stmt end(s)
    throw_and_print_diff_if_recorded_not_equal_to(filter_events_to_subset([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_assign_rhs, TraceEvent.before_dict_literal, TraceEvent.dict_key, TraceEvent.before_list_literal, *([TraceEvent.list_elt] * 3), TraceEvent.after_list_literal, TraceEvent.dict_value, TraceEvent.after_dict_literal, TraceEvent.after_assign_rhs, TraceEvent.after_stmt, TraceEvent.after_module_stmt], events))
def train(max_walk_length, p, q, run):
    """Train a graph VAE on a synthetic planted-partition graph and pickle the results.

    Skips the run entirely if a results file for this parameter combination
    already exists. Relies on many module-level globals (n_communities,
    community_size, p_in, p_out, seeds_per_batch, ...).

    NOTE(review): identifiers like ``dim_``, ``n__samples`` and ``q__flat`` look
    like non-ASCII characters (e.g. Greek letters) stripped during extraction —
    confirm against the original file.
    """
    g = nx.planted_partition_graph(n_communities, community_size, p_in, p_out)
    # one-hot community membership labels
    labels = np.zeros(((n_communities * community_size), n_communities), dtype=np.float32)
    for c in range(n_communities):
        labels[(range((c * community_size), ((c + 1) * community_size)), c)] = 1
    # features = labels corrupted with half-normal noise
    features = (labels + np.abs(np.random.normal(loc=0.0, scale=features_noise_scale, size=((n_communities * community_size), n_communities)).astype(np.float32)))
    dim_data = n_communities
    dims = (dim_data, dim_l1, dim_)
    # encode every hyperparameter into the output filename
    DATA_PARAMETERS = 'n_communities={n_communities}-community_size={community_size}-p_in={p_in}-p_out={p_out}-fns={features_noise_scale}'.format(n_communities=n_communities, community_size=community_size, p_in=p_in, p_out=p_out, features_noise_scale=features_noise_scale)
    VAE_PARAMETERS = 'orth-adj_scaling-n__samples={n__samples}-dims={dims}-bias={use_bias}'.format(n__samples=n__samples, dims=dims, use_bias=use_bias)
    TRAINING_PARAMETERS = 'spb={seeds_per_batch}-WL={max_walk_length}-p={p}-q={q}-ns={neighbour_samples}-n_epochs={n_epochs}-run={run}'.format(seeds_per_batch=seeds_per_batch, max_walk_length=max_walk_length, p=p, q=q, neighbour_samples=neighbour_samples, n_epochs=n_epochs, run=run)
    MODEL_DATA = os.path.join(MODEL_PATH, ((((DATA_PARAMETERS + '---') + VAE_PARAMETERS) + '---') + TRAINING_PARAMETERS))
    MODEL_RESULTS = (MODEL_DATA + '.results.pkl')
    if os.path.exists(MODEL_RESULTS):
        # this parameter combination was already trained; don't redo the work
        warnings.warn('"{}" already exist, skipping.'.format(MODEL_RESULTS))
        return
    adj = nx.adjacency_matrix(g).astype(np.float32)

    def build_q(dims, use_bias=False):
        # encoder: graph-conv layer, then parallel mean / log-variance heads
        # concatenated into one flat output
        (dim_data, dim_l1, dim_) = dims
        q_input = keras.layers.Input(shape=(dim_data,), name='q_input')
        (q_layer1_placeholders, q_layer1) = ae.gc_layer_with_placeholders(dim_l1, 'q_layer1', {'use_bias': use_bias, 'activation': 'relu'}, q_input)
        (q__flat_placeholders, q__flat) = ae.gc_layer_with_placeholders(dim_, 'q_mu_flat', {'use_bias': use_bias, 'gather_mask': True}, q_layer1)
        (q_logD_flat_placeholders, q_logD_flat) = ae.gc_layer_with_placeholders(dim_, 'q_logD_flat', {'use_bias': use_bias, 'gather_mask': True}, q_layer1)
        q_logD_flat = keras.layers.Concatenate(name='q_mulogD_flat')([q__flat, q_logD_flat])
        q_model = ae.Model(inputs=((([q_input] + q_layer1_placeholders) + q__flat_placeholders) + q_logD_flat_placeholders), outputs=q_logD_flat)
        return (q_model, ('OrthogonalGaussian',))

    (q_model, q_codecs) = build_q(dims, use_bias=use_bias)

    def p_builder(p_input):
        # decoder: dense layer, then a bilinear adjacency head and a dense feature head
        p_layer1 = keras.layers.Dense(dim_l1, use_bias=use_bias, activation='relu', kernel_regularizer='l2', bias_regularizer='l2', name='p_layer1')(p_input)
        p_adj = layers.Bilinear(0, use_bias=use_bias, kernel_regularizer='l2', bias_regularizer='l2', name='p_adj')([p_layer1, p_layer1])
        p_v = keras.layers.Dense(dim_data, use_bias=use_bias, kernel_regularizer='l2', bias_regularizer='l2', name='p_v')(p_layer1)
        return ([p_adj, p_v], ('SigmoidBernoulliScaledAdjacency', 'SigmoidBernoulli'))

    (vae, vae_codecs) = ae.build_vae((q_model, q_codecs), p_builder, n__samples, [1.0, 1.0, 1.0])

    def target_func(batch_adj, required_nodes, final_nodes):
        # training targets: dummy KL target, self-looped adjacency tiled over
        # samples, and the node labels tiled over samples
        return [np.zeros(1), utils.expand_dims_tile(utils.expand_dims_tile((batch_adj + np.eye(batch_adj.shape[0])), 0, n__samples), 0, 1), utils.expand_dims_tile(labels[final_nodes], 1, n__samples)]

    steps_per_epoch = int(np.ceil((len(features) / seeds_per_batch)))
    history = vae.fit_generator_feed(batching.batches(vae, adj, labels, target_func, seeds_per_batch, max_walk_length, p=p, q=q, neighbour_samples=neighbour_samples), steps_per_epoch=steps_per_epoch, epochs=n_epochs, check_array_lengths=False, shuffle=False, verbose=0, callbacks=[ae.ModelBatchCheckpoint((MODEL_DATA + '.batch-checkpoint.h5'), monitor='loss', period=10, save_best_only=True), keras.callbacks.ModelCheckpoint((MODEL_DATA + '.epoch-checkpoint.h5'), monitor='loss', period=10, save_best_only=True), TQDMCallback()])
    # one full-graph batch to compute final embeddings and reconstructions
    (x, _, feeds) = next(batching.batches(vae, adj, features, target_func, adj.shape[0], 1, p=1, q=1, neighbour_samples=None))
    (embeddings, adj_pred, features_pred) = vae.predict_on_fed_batch(x, feeds=feeds)
    with open(MODEL_RESULTS, 'wb') as outfile:
        pickle.dump({'history': history.history, 'labels': labels, 'features': features, 'adj': adj, 'embeddings': embeddings, 'adj_pred': adj_pred, 'features_pred': features_pred}, outfile)
class GroundTruthDatasetFactory(Dataset):
    """Dataset of square, odd-sized ground-truth images with helpers to build a
    paired (observation, ground truth) tomography dataset through a
    parallel-beam ray transform."""

    def __init__(self, train_gt_images, val_gt_images, test_gt_images, inner_circle=True):
        self.train_gt_images = train_gt_images
        self.val_gt_images = val_gt_images
        self.test_gt_images = test_gt_images
        assert (self.train_gt_images.shape[1] == self.train_gt_images.shape[2]), 'Train images are not square.'
        assert ((self.train_gt_images.shape[1] % 2) == 1), 'Train image size has to be odd.'
        assert (self.val_gt_images.shape[1] == self.val_gt_images.shape[2]), 'Val images are not square.'
        assert ((self.val_gt_images.shape[1] % 2) == 1), 'Val image size has to be odd.'
        assert (self.test_gt_images.shape[1] == self.test_gt_images.shape[2]), 'Test images are not square.'
        assert ((self.test_gt_images.shape[1] % 2) == 1), 'Test image size has to be odd.'
        self.shape = (self.train_gt_images.shape[1], self.train_gt_images.shape[2])
        self.inner_circle = inner_circle
        # reconstruction space extent: either the square inscribed in the image's
        # inner circle, or the full image extent
        if self.inner_circle:
            circ_space = np.sqrt((((self.shape[0] / 2.0) ** 2) / 2.0))
            min_pt = [(- circ_space), (- circ_space)]
            max_pt = [circ_space, circ_space]
        else:
            min_pt = [((- self.shape[0]) / 2.0), ((- self.shape[1]) / 2.0)]
            max_pt = [(self.shape[0] / 2.0), (self.shape[1] / 2.0)]
        space = uniform_discr(min_pt, max_pt, self.shape, dtype=np.float32)
        self.train_len = self.train_gt_images.shape[0]
        self.validation_len = self.val_gt_images.shape[0]
        self.test_len = self.test_gt_images.shape[0]
        self.random_access = True
        super().__init__(space=space)

    def _create_pair_dataset(self, forward_op, post_processor=None, noise_type=None, noise_kwargs=None, noise_seeds=None):
        """Wrap this ground-truth generator in an (observation, gt) pair dataset."""
        dataset = ObservationGroundTruthPairDataset(self.generator, forward_op, post_processor=post_processor, train_len=self.train_len, validation_len=self.validation_len, test_len=self.test_len, noise_type=noise_type, noise_kwargs=noise_kwargs, noise_seeds=noise_seeds)
        return dataset

    def build_projection_dataset(self, num_angles, upscale_shape=70, impl='astra_cpu'):
        """Build a noiseless projection-pair dataset and attach the reconstruction ray transform."""
        (forward_op, get_reco_ray_trafo, reco_ray_trafo) = self._build_forward_op(upscale_shape, impl, num_angles)
        ds = self._create_pair_dataset(forward_op=forward_op, noise_type=None)
        ds.get_ray_trafo = get_reco_ray_trafo
        ds.ray_trafo = reco_ray_trafo
        return ds

    def _build_forward_op(self, upscale_shape, impl, num_angles):
        """Construct (forward_op, get_reco_ray_trafo, reco_ray_trafo).

        The forward operator upsamples to *upscale_shape* (to avoid the inverse
        crime) before applying the parallel-beam ray transform.
        """
        reco_space = self.space
        if self.inner_circle:
            space = odl.uniform_discr(min_pt=reco_space.min_pt, max_pt=reco_space.max_pt, shape=(upscale_shape, upscale_shape), dtype=np.float32)
            min_pt = reco_space.min_pt
            max_pt = reco_space.max_pt
            proj_space = odl.uniform_discr(min_pt, max_pt, (2 * (((2 * int(reco_space.max_pt[0])) - 1),)), dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            det_partition = odl.uniform_partition((- np.sqrt((((reco_space.shape[0] / 2.0) ** 2) / 2))), np.sqrt((((reco_space.shape[0] / 2.0) ** 2) / 2)), detector_length)
        else:
            space = odl.uniform_discr(min_pt=reco_space.min_pt, max_pt=reco_space.max_pt, shape=(upscale_shape, upscale_shape), dtype=np.float32)
            min_pt = reco_space.min_pt
            max_pt = reco_space.max_pt
            proj_space = odl.uniform_discr(min_pt, max_pt, (2 * (reco_space.shape[0],)), dtype=np.float32)
            detector_length = get_detector_length(proj_space)
            det_partition = odl.uniform_partition(((- reco_space.shape[0]) / 2.0), (reco_space.shape[0] / 2.0), detector_length)
        angle_partition = odl.uniform_partition(0, np.pi, num_angles)
        reco_geometry = odl.tomo.Parallel2dGeometry(angle_partition, det_partition)
        ray_trafo = odl.tomo.RayTransform(space, reco_geometry, impl=impl)

        def get_reco_ray_trafo(**kwargs):
            # ray transform on the (non-upscaled) reconstruction space
            return odl.tomo.RayTransform(reco_space, reco_geometry, **kwargs)
        reco_ray_trafo = get_reco_ray_trafo(impl=impl)

        class _ResizeOperator(odl.Operator):
            """Bilinear upsampling from the reconstruction space to the forward space."""

            def __init__(self):
                super().__init__(reco_space, space)

            def _call(self, x, out, **kwargs):
                out.assign(space.element(resize(x, (upscale_shape, upscale_shape), order=1)))
        resize_op = _ResizeOperator()
        forward_op = (ray_trafo * resize_op)
        return (forward_op, get_reco_ray_trafo, reco_ray_trafo)

    def generator(self, part='train'):
        """Yield float32 ground-truth images for the requested data part."""
        if (part == 'train'):
            gen = self._train_generator()
        elif (part == 'validation'):
            gen = self._val_generator()
        elif (part == 'test'):
            gen = self._test_generator()
        else:
            raise NotImplementedError
        for gt in gen:
            (yield gt)

    def _train_generator(self):
        for i in range(self.train_len):
            (yield self.train_gt_images[i].type(torch.float32))

    def _test_generator(self):
        for i in range(self.test_len):
            (yield self.test_gt_images[i].type(torch.float32))

    def _val_generator(self):
        for i in range(self.validation_len):
            (yield self.val_gt_images[i].type(torch.float32))

    def get_sample(self, index, part='train', out=None):
        """Return the float32 ground-truth image at *index* for *part*."""
        # fix: compare the None sentinel by identity, not equality
        if (out is None):
            if (part == 'train'):
                return self.train_gt_images[index].type(torch.float32)
            elif (part == 'validation'):
                return self.val_gt_images[index].type(torch.float32)
            elif (part == 'test'):
                return self.test_gt_images[index].type(torch.float32)
            else:
                raise NotImplementedError
        # NOTE(review): these branches rebind the *local* name `out`, which has
        # no effect for the caller; writing into the supplied buffer (e.g.
        # out[:] = ...) was probably intended — confirm against the Dataset
        # get_sample contract before changing.
        elif (part == 'train'):
            out = self.train_gt_images[index].type(torch.float32)
        elif (part == 'validation'):
            out = self.val_gt_images[index].type(torch.float32)
        elif (part == 'test'):
            out = self.test_gt_images[index].type(torch.float32)
        else:
            raise NotImplementedError
class PoolerEndLogits(nn.Module): def __init__(self, hidden_size, num_classes): super(PoolerEndLogits, self).__init__() self.dense_0 = nn.Linear(hidden_size, hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(hidden_size) self.dense_1 = nn.Linear(hidden_size, num_classes) def forward(self, hidden_states, start_positions=None, p_mask=None): x = self.dense_0(torch.cat([hidden_states, start_positions], dim=(- 1))) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x) return x
def log_args(args): logging.info('\n+ Hyperpixel Flow Arguments +') for arg_key in args.__dict__: logging.info(('| %20s: %-24s |' % (arg_key, str(args.__dict__[arg_key])))) logging.info('++\n')
class FurthestPointSampling(Function): def forward(ctx, points_xyz: torch.Tensor, num_points: int) -> torch.Tensor: assert points_xyz.is_contiguous() (B, N) = points_xyz.size()[:2] output = torch.cuda.IntTensor(B, num_points) temp = torch.cuda.FloatTensor(B, N).fill_(.0) ext_module.furthest_point_sampling_forward(points_xyz, temp, output, b=B, n=N, m=num_points) if (torch.__version__ != 'parrots'): ctx.mark_non_differentiable(output) return output def backward(xyz, a=None): return (None, None)
class TestNoiseAdaptiveLayout(QiskitTestCase):
    """Tests for the NoiseAdaptiveLayout transpiler pass.

    Each test constructs synthetic BackendProperties (readout and CX gate
    errors) and checks that the pass avoids mapping virtual qubits onto the
    noisy physical qubits/edges.
    """

    def test_on_linear_topology(self):
        """On a 3-qubit line, a single CX should avoid the high-error 0-1 edge."""
        calib_time = datetime(year=2019, month=2, day=1, hour=0, minute=0, second=0)
        qr = QuantumRegister(2, name='q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        dag = circuit_to_dag(circuit)
        qubit_list = []
        # All qubits share the same (good) readout error.
        ro_errors = [0.01, 0.01, 0.01]
        for ro_error in ro_errors:
            qubit_list.append(make_qubit_with_error(ro_error))
        # Edge 0-1 is bad (0.9 gate error), edge 1-2 is good (0.1).
        p01 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.9)]
        g01 = Gate(name='CX0_1', gate='cx', parameters=p01, qubits=[0, 1])
        p12 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        g12 = Gate(name='CX1_2', gate='cx', parameters=p12, qubits=[1, 2])
        gate_list = [g01, g12]
        bprop = BackendProperties(last_update_date=calib_time, backend_name='test_backend', qubits=qubit_list, backend_version='1.0.0', gates=gate_list, general=[])
        nalayout = NoiseAdaptiveLayout(bprop)
        nalayout.run(dag)
        initial_layout = nalayout.property_set['layout']
        # Physical qubit 0 only touches the bad edge, so neither virtual
        # qubit should be placed there.
        self.assertNotEqual(initial_layout[qr[0]], 0)
        self.assertNotEqual(initial_layout[qr[1]], 0)

    def test_bad_readout(self):
        """A qubit with terrible readout (0.8) should be avoided even when
        its CX edges are as good as the alternatives."""
        calib_time = datetime(year=2019, month=2, day=1, hour=0, minute=0, second=0)
        qr = QuantumRegister(2, name='q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        dag = circuit_to_dag(circuit)
        qubit_list = []
        # Qubit 2 has very poor readout.
        ro_errors = [0.01, 0.01, 0.8]
        for ro_error in ro_errors:
            qubit_list.append(make_qubit_with_error(ro_error))
        # Both CX edges are equally good; readout must break the tie.
        p01 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        g01 = Gate(name='CX0_1', gate='cx', parameters=p01, qubits=[0, 1])
        p12 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        g12 = Gate(name='CX1_2', gate='cx', parameters=p12, qubits=[1, 2])
        gate_list = [g01, g12]
        bprop = BackendProperties(last_update_date=calib_time, backend_name='test_backend', qubits=qubit_list, backend_version='1.0.0', gates=gate_list, general=[])
        nalayout = NoiseAdaptiveLayout(bprop)
        nalayout.run(dag)
        initial_layout = nalayout.property_set['layout']
        # Neither virtual qubit should be assigned to noisy physical qubit 2.
        self.assertNotEqual(initial_layout[qr[0]], 2)
        self.assertNotEqual(initial_layout[qr[1]], 2)

    def test_grid_layout(self):
        """On a 2x3 grid, a 4-qubit star circuit should land on the low-error
        (0.1) sub-grid {1, 3, 4, 5} and avoid qubits 0 and 2."""
        calib_time = datetime(year=2019, month=2, day=1, hour=0, minute=0, second=0)
        qr = QuantumRegister(4, name='q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[3])
        circuit.cx(qr[1], qr[3])
        circuit.cx(qr[2], qr[3])
        dag = circuit_to_dag(circuit)
        qubit_list = []
        # Uniform good readout on all six physical qubits.
        ro_errors = ([0.01] * 6)
        for ro_error in ro_errors:
            qubit_list.append(make_qubit_with_error(ro_error))
        # Edges touching qubits 0 and 2 are worse (0.3) than the 1-4-3/4-5
        # cluster (0.1).
        p01 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.3)]
        p03 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.3)]
        p12 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.3)]
        p14 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        p34 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        p45 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.1)]
        p25 = [Nduv(date=calib_time, name='gate_error', unit='', value=0.3)]
        g01 = Gate(name='CX0_1', gate='cx', parameters=p01, qubits=[0, 1])
        g03 = Gate(name='CX0_3', gate='cx', parameters=p03, qubits=[0, 3])
        g12 = Gate(name='CX1_2', gate='cx', parameters=p12, qubits=[1, 2])
        g14 = Gate(name='CX1_4', gate='cx', parameters=p14, qubits=[1, 4])
        g34 = Gate(name='CX3_4', gate='cx', parameters=p34, qubits=[3, 4])
        g45 = Gate(name='CX4_5', gate='cx', parameters=p45, qubits=[4, 5])
        g25 = Gate(name='CX2_5', gate='cx', parameters=p25, qubits=[2, 5])
        gate_list = [g01, g03, g12, g14, g34, g45, g25]
        bprop = BackendProperties(last_update_date=calib_time, backend_name='test_backend', qubits=qubit_list, backend_version='1.0.0', gates=gate_list, general=[])
        nalayout = NoiseAdaptiveLayout(bprop)
        nalayout.run(dag)
        initial_layout = nalayout.property_set['layout']
        # No virtual qubit may be mapped to physical qubit 0 or 2.
        for qid in range(4):
            for qloc in [0, 2]:
                self.assertNotEqual(initial_layout[qr[qid]], qloc)
def line_on_mask(line, mask, width=2, iou_threshold=0.6): iou = compute_iou_mask_and_line(line, mask, width) return (iou > iou_threshold)
class _pure_kv_variable_scope():
    """Context manager mirroring ``tf.variable_scope`` for KV variables.

    Accepts either a scope *name* (str) or an existing ``VariableScope``
    object. On entry it installs a ``KvVariableScope`` as the current scope
    in the KV variable-scope store; on exit it restores the previous scope
    and per-scope bookkeeping. Explicit keyword arguments (initializer,
    regularizer, ...) override the values inherited from the parent or the
    passed-in scope.
    """

    def __init__(self, name_or_scope, reuse=None, initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, old_name_scope=None, dtype=dtypes.float32, use_resource=None, constraint=None):
        self._name_or_scope = name_or_scope
        self._reuse = reuse
        self._initializer = initializer
        self._regularizer = regularizer
        self._caching_device = caching_device
        self._partitioner = partitioner
        self._custom_getter = custom_getter
        self._old_name_scope = old_name_scope
        self._dtype = dtype
        self._use_resource = use_resource
        self._constraint = constraint
        self._var_store = _get_default_kv_variable_store()
        self._var_scope_store = get_kv_variable_scope_store()
        if isinstance(self._name_or_scope, tf_variable_scope.VariableScope):
            # Re-entering an existing scope: pre-build the KvVariableScope now
            # and cache it; __enter__ will only do store bookkeeping.
            self._new_name = self._name_or_scope.name
            name_scope = self._name_or_scope._name_scope
            variable_scope_object = KvVariableScope((self._name_or_scope.reuse if (not self._reuse) else self._reuse), name=self._new_name, initializer=self._name_or_scope.initializer, regularizer=self._name_or_scope.regularizer, caching_device=self._name_or_scope.caching_device, partitioner=self._name_or_scope.partitioner, dtype=self._name_or_scope.dtype, custom_getter=self._name_or_scope.custom_getter, name_scope=name_scope, use_resource=self._name_or_scope.use_resource, constraint=self._constraint)
            # Explicit keyword overrides beat values copied from the scope.
            if (self._initializer is not None):
                variable_scope_object.set_initializer(self._initializer)
            if (self._regularizer is not None):
                variable_scope_object.set_regularizer(self._regularizer)
            if (self._caching_device is not None):
                variable_scope_object.set_caching_device(self._caching_device)
            if (self._partitioner is not None):
                variable_scope_object.set_partitioner(self._partitioner)
            if (self._custom_getter is not None):
                # Chain the new getter around the scope's existing one.
                variable_scope_object.set_custom_getter(tf_variable_scope._maybe_wrap_custom_getter(self._custom_getter, self._name_or_scope.custom_getter))
            if (self._dtype is not None):
                variable_scope_object.set_dtype(self._dtype)
            if (self._use_resource is not None):
                variable_scope_object.set_use_resource(self._use_resource)
            self._cached_variable_scope_object = variable_scope_object

    def __enter__(self):
        # Remember the scope that was current so __exit__ can restore it.
        self._old = self._var_scope_store.current_scope
        if isinstance(self._name_or_scope, tf_variable_scope.VariableScope):
            self._var_scope_store.open_variable_scope(self._new_name)
            # Snapshot sub-scope counters; restored wholesale on exit.
            self._old_subscopes = copy.copy(self._var_scope_store.variable_scopes_count)
            variable_scope_object = self._cached_variable_scope_object
        else:
            # Entering by name: derive the child scope from the current one.
            self._new_name = (((self._old.name + '/') + self._name_or_scope) if self._old.name else self._name_or_scope)
            self._reuse = (self._reuse or self._old.reuse)
            if (self._old_name_scope is None):
                name_scope = self._name_or_scope
            else:
                name_scope = self._old_name_scope
            variable_scope_object = KvVariableScope(self._reuse, name=self._new_name, initializer=self._old.initializer, regularizer=self._old.regularizer, caching_device=self._old.caching_device, partitioner=self._old.partitioner, dtype=self._old.dtype, use_resource=self._old.use_resource, custom_getter=self._old.custom_getter, name_scope=name_scope, constraint=self._constraint)
            # Explicit keyword overrides beat values inherited from parent.
            if (self._initializer is not None):
                variable_scope_object.set_initializer(self._initializer)
            if (self._regularizer is not None):
                variable_scope_object.set_regularizer(self._regularizer)
            if (self._caching_device is not None):
                variable_scope_object.set_caching_device(self._caching_device)
            if (self._partitioner is not None):
                variable_scope_object.set_partitioner(self._partitioner)
            if (self._custom_getter is not None):
                variable_scope_object.set_custom_getter(tf_variable_scope._maybe_wrap_custom_getter(self._custom_getter, self._old.custom_getter))
            if (self._dtype is not None):
                variable_scope_object.set_dtype(self._dtype)
            if (self._use_resource is not None):
                variable_scope_object.set_use_resource(self._use_resource)
            self._var_scope_store.open_variable_scope(self._new_name)
        self._var_scope_store.current_scope = variable_scope_object
        return variable_scope_object

    def __exit__(self, type_arg, value_arg, traceback_arg):
        # Undo the bookkeeping done in __enter__ and restore the old scope.
        if isinstance(self._name_or_scope, tf_variable_scope.VariableScope):
            self._var_scope_store.variable_scopes_count = self._old_subscopes
        else:
            self._var_scope_store.close_variable_subscopes(self._new_name)
        self._var_scope_store.current_scope = self._old
class ILPolicy(Policy, metaclass=abc.ABCMeta): def __init__(self, net, dim_actions): super(Policy, self).__init__() self.net = net self.dim_actions = dim_actions self.action_distribution = CategoricalNet(self.net.output_size, self.dim_actions) def forward(self, *x): raise NotImplementedError def act(self, observations, rnn_states, prev_actions, masks, deterministic=False): (features, rnn_states) = self.net(observations, rnn_states, prev_actions, masks) distribution = self.action_distribution(features) if deterministic: action = distribution.mode() else: action = distribution.sample() return (action, rnn_states) def get_value(self, *args: Any, **kwargs: Any): raise NotImplementedError def evaluate_actions(self, *args: Any, **kwargs: Any): raise NotImplementedError def build_distribution(self, observations, rnn_states, prev_actions, masks) -> CustomFixedCategorical: (features, rnn_states) = self.net(observations, rnn_states, prev_actions, masks) return self.action_distribution(features)
def main(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--env', help='environment ID (0==sine, 1==stand, 2=reset, 3=overheat)', type=int, default=0) args = parser.parse_args() print(('--env=' + str(args.env))) if (args.env == 0): SinePolicyExample() if (args.env == 1): SineStandExample() if (args.env == 2): ResetPoseExample() if (args.env == 3): MotorOverheatExample()
def merging_lora_with_base(pipe, ckpt_dir, adapter_name='default'): unet_sub_dir = os.path.join(ckpt_dir, 'unet') text_encoder_sub_dir = os.path.join(ckpt_dir, 'text_encoder') if isinstance(pipe.unet, PeftModel): pipe.unet.set_adapter(adapter_name) else: pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) pipe.unet = pipe.unet.merge_and_unload() if os.path.exists(text_encoder_sub_dir): if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.set_adapter(adapter_name) else: pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name) pipe.text_encoder = pipe.text_encoder.merge_and_unload() return pipe
def flip_first_two_dim(inp): if (len(inp.size()) == 2): return inp.permute(1, 0).contiguous() elif (len(inp.size()) == 3): return inp.permute(1, 0, 2).contiguous()
.dataclass class DDIMSchedulerState(): common: CommonSchedulerState final_alpha_cumprod: jnp.ndarray init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None def create(cls, common: CommonSchedulerState, final_alpha_cumprod: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
def get_icd9_descript_dict(path): lines = _read_file(path) icd9_descript_dict = {} for l in lines[1:]: elems = l.split('\t') try: assert (len(elems) == 8) except: print('Problem with following line while loading icd9_descript_dict:') print(l) raise icd9 = elems[0] descript = elems[1] is_category = (len(icd9.split('.')) == 1) if is_category: descript = (descript + ' (category)') icd9_descript_dict[icd9] = descript return icd9_descript_dict
def convert_layer(layer, mode, copy_weights, layer_config=None, output_dim=None): (layer_bias, bias_weight) = (False, None) if (('weight' in layer.__dict__['_parameters']) and copy_weights): weight = layer.weight if (('bias' in layer.__dict__['_parameters']) and (layer.bias is not None)): bias_weight = layer.bias layer_bias = True new_layer = None if (layer_config is None): layer_config = {} layer_config['type'] = mode if isinstance(layer, nn.Conv2d): if (mode in ['fa', 'usf', 'brsf', 'frsf']): new_layer = fa_constructor.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config) elif (mode == 'dfa'): new_layer = dfa_layers.Conv2d(layer.in_channels, layer.out_channels, output_dim, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config) elif (mode == 'backpropagation'): new_layer = bp_layers.Conv2d(layer.in_channels, layer.out_channels, layer.kernel_size, layer.stride, layer.padding, layer.dilation, layer.groups, layer_bias, layer.padding_mode, layer_config) elif isinstance(layer, nn.Linear): if (mode in ['fa', 'usf', 'brsf', 'frsf']): new_layer = fa_constructor.Linear(layer.in_features, layer.out_features, layer_bias, layer_config) elif (mode == 'dfa'): new_layer = dfa_layers.Linear(layer.in_features, layer.out_features, output_dim, layer_bias, layer_config) elif (mode == 'backpropagation'): new_layer = bp_layers.Linear(layer.in_features, layer.out_features, layer_bias, layer_config) if ((new_layer is not None) and copy_weights): new_layer.weight = weight new_layer.bias = bias_weight return new_layer
def get_tfrecord_by_location(tfrecord: str, location: Tuple[(int, int)], decode: bool=True, *, locations_array: Optional[List[Tuple[(int, int)]]]=None, index_array: Optional[np.ndarray]=None) -> Any: if isinstance(location, list): location = tuple(location) if ((not isinstance(location, tuple)) or (len(location) != 2) or (not isinstance(location[0], (int, np.integer))) or (not isinstance(location[1], (int, np.integer)))): raise IndexError(f'index must be a tuple of two ints. Got: {location}') index = tfrecord2idx.find_index(tfrecord) if ((locations_array is not None) or (index and tfrecord2idx.index_has_locations(index))): if (locations_array is None): locations = tfrecord2idx.get_locations_from_index(index) else: locations = locations_array try: idx = locations.index(location) except ValueError: log.error(f'Unable to find record with location {location} in {tfrecord}') return (False, False) record = tfrecord2idx.get_tfrecord_by_index(tfrecord, idx, index_array=index_array) slide = record['slide'] image = (sf.io.decode_image(record['image_raw']) if decode else record['image_raw']) return (slide, image) else: parser = get_tfrecord_parser(tfrecord, ('slide', 'image_raw', 'loc_x', 'loc_y'), decode_images=decode) dataset = TFRecordDataset(tfrecord) for (i, record) in enumerate(dataset): (slide, image, loc_x, loc_y) = parser(record) if ((loc_x, loc_y) == location): if decode: return (slide, image) else: slide = bytes(record['slide']).decode('utf-8') images = bytes(record['image_raw']) return (slide, images) log.error(f'Unable to find record with location {location} in {tfrecord}') return (False, False)
def require_onnx(test_case): if (not is_onnx_available()): return unittest.skip('test requires ONNX')(test_case) else: return test_case
def wrn28_10(num_classes=10, dropout=False): model = Wide_ResNet(depth=28, widen_factor=10, num_classes=num_classes) return model
def finalize_config(cfg, cfg_file_path, cfg_cmd_string): if (cfg_file_path is not None): __merge_config_from_file(cfg, cfg_file_path) if (cfg_cmd_string is not None): __merge_config_from_cmdline(cfg, cfg_cmd_string) cfg.immutable(True)
def parse_device_type(str): mace_check((str in DEVICE_MAP), ('unknown device %s' % str)) return DEVICE_MAP[str]
class Constants(ConstantsBase): def __init__(self, **kwargs): self.RUN = 'test' w = 1e-10 self.P = problems.Sin1D_2(w=w, A=0, B=((- 1) / w)) self.SUBDOMAIN_XS = get_subdomain_xs([np.array([2, 3, 2, 4, 3])], [((2 * np.pi) / self.P.w)]) self.SUBDOMAIN_WS = get_subdomain_ws(self.SUBDOMAIN_XS, 0.7) self.BOUNDARY_N = ((1 / self.P.w),) self.Y_N = (0, (1 / (self.P.w ** 2))) self.ACTIVE_SCHEDULER = active_schedulers.PointActiveSchedulerND self.ACTIVE_SCHEDULER_ARGS = (np.array([0]),) self.DEVICE = 0 self.MODEL = models.FCN self.N_HIDDEN = 16 self.N_LAYERS = 2 self.BATCH_SIZE = (500,) self.RANDOM = False self.LRATE = 0.001 self.BOUNDARY_WEIGHT = 100 self.BOUNDARY_BATCH_SIZE = 50 self.DATALOSS_WEIGHT = 10 self.N_STEPS = 50000 self.SEED = 123 self.BATCH_SIZE_TEST = (5000,) self.BOUNDARY_BATCH_SIZE_TEST = 100 self.PLOT_LIMS = (1, False) self.SUMMARY_FREQ = 250 self.TEST_FREQ = 5000 self.MODEL_SAVE_FREQ = 10000 self.SHOW_FIGURES = False self.SAVE_FIGURES = True self.CLEAR_OUTPUT = False for key in kwargs.keys(): self[key] = kwargs[key] self.SUMMARY_OUT_DIR = ('results/summaries/%s/' % self.RUN) self.MODEL_OUT_DIR = ('results/models/%s/' % self.RUN) self.HOSTNAME = socket.gethostname().lower()
class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_channels, out_channels, stride=1): super().__init__() self.residual_function = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), nn.Conv2d(out_channels, (out_channels * BasicBlock.expansion), kernel_size=3, padding=1, bias=False), nn.BatchNorm2d((out_channels * BasicBlock.expansion))) self.shortcut = nn.Sequential() if ((stride != 1) or (in_channels != (BasicBlock.expansion * out_channels))): self.shortcut = nn.Sequential(nn.Conv2d(in_channels, (out_channels * BasicBlock.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((out_channels * BasicBlock.expansion))) def forward(self, x): return nn.ReLU(inplace=True)((self.residual_function(x) + self.shortcut(x)))
def test_inv_link_logit(): scores = np.array([[np.inf, (- np.inf), 999.0, (- 999.0), 0.0, 1.0986123, np.nan]]) expected = np.array([[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.25, 0.75], [np.nan, np.nan]]]) result = inv_link(scores, 'logit') np.testing.assert_almost_equal(result, expected)
def main_upper(x_minus, x_plus, y_minus, y_plus, plot=False, num=0, print_info=True): if print_info: print('14 orthant upper: using 23 orthant lower function') x_minus_new = (- x_plus) x_plus_new = (- x_minus) (a, b, c) = lower.main_lower(x_minus_new, x_plus_new, y_minus, y_plus, print_info=print_info) b = (- b) c = (- c) if plot: utils.plot_surface(x_minus[num], x_plus[num], y_minus[num], y_plus[num], a[num], b[num], c[num]) return (a.detach(), b.detach(), c.detach())
def generate_dataset(seed, in_file, tokenizer_name, out_dir, eval_ratio): tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name, use_fast=False) with open(in_file, 'r') as f: conversations = json.load(f) random.seed(seed) random.shuffle(conversations) eval_num = int((eval_ratio * len(conversations))) train_conversations = conversations[eval_num:] eval_conversations = conversations[:eval_num] generate_split(train_conversations, tokenizer, 'train', out_dir) generate_split(eval_conversations, tokenizer, 'eval', out_dir)
class ParserFileManager(object):
    """Manage generated tatsu parser modules on disk and an in-memory cache.

    Parser modules are named ``parser_<md5>_<timestamp>.py``. Two special
    parsers ('init' and 'default') ship with the package under
    ``la_local_parsers`` and are copied into the user cache directory; all
    other parsers are generated on demand from a grammar, cached in
    ``self.parser_dict`` keyed by the grammar's md5 hash, and evicted
    oldest-first when ``max_size`` is exceeded.
    """

    def __init__(self, grammar_dir):
        self.grammar_dir = Path(grammar_dir)
        # Maximum number of cached parsers before eviction kicks in.
        self.max_size = 12
        self.logger = LaLogger.getInstance().get_logger(LoggerTypeEnum.DEFAULT)
        # hash -> instantiated parser object.
        self.parser_dict = {}
        self.prefix = 'parser'
        self.module_dir = 'iheartla'
        self.default_hash_value = hashlib.md5('default'.encode()).hexdigest()
        self.init_hash_value = hashlib.md5('init'.encode()).hexdigest()
        # hash -> newest shipped timestamp for the two bundled parsers.
        self.default_parsers_dict = {self.init_hash_value: 0, self.default_hash_value: 0}
        for f in (self.grammar_dir.parent / 'la_local_parsers').glob('parser*.py'):
            (name, hash_value, t) = self.separate_parser_file(f.name)
            if (hash_value in self.default_parsers_dict):
                self.default_parsers_dict[hash_value] = t
                if (hash_value == self.default_hash_value):
                    # Keep the default parser's source as the template used
                    # by gen_parser_code().
                    default_file = open((grammar_dir / f))
                    self.default_parser_content = default_file.read()
                    default_file.close()
        self.save_threads = []
        self.cache_dir = os.path.join(user_cache_dir(), self.module_dir)
        self.init_cache()
        self.load_parsers()

    def reload(self):
        """Drop the in-memory cache and reload everything from disk."""
        self.parser_dict = {}
        self.init_cache()
        self.load_parsers()

    def set_test_mode(self):
        # Effectively disable eviction while testing.
        self.max_size = 1000

    def separate_parser_file(self, parser_file):
        """Split 'parser_<hash>_<timestamp>.py' into (name, hash, epoch-time)."""
        name = parser_file.split('.')[0]
        sep_list = name.split('_')
        timestamp = time.mktime(datetime.strptime(sep_list[2], '%Y-%m-%d-%H-%M-%S').timetuple())
        return (name, sep_list[1], timestamp)

    def merge_default_parsers(self):
        """Copy bundled parsers into the cache unless a newer copy exists."""
        copy_from_default = True
        for f in listdir(self.cache_dir):
            if self.valid_parser_file(f):
                (name, hash_value, t) = self.separate_parser_file(f)
                if (hash_value in self.default_parsers_dict):
                    if (self.default_parsers_dict[hash_value] <= t):
                        # Cached copy is at least as new: no refresh needed.
                        copy_from_default = False
                        break
        if copy_from_default:
            self.clean_parsers()
            dir_path = Path(self.cache_dir)
            for f in (self.grammar_dir.parent / 'la_local_parsers').glob('parser*.py'):
                if (not (dir_path / f.name).exists()):
                    shutil.copy(f, dir_path)

    def valid_parser_file(self, parser_file):
        """True for files that look like generated parser modules."""
        return ((self.prefix in parser_file) and ('.py' in parser_file) and ('_' in parser_file))

    def init_cache(self):
        """Make sure the user cache directory exists and holds the defaults."""
        if (not Path(user_cache_dir()).exists()):
            Path(user_cache_dir()).mkdir()
        dir_path = Path(self.cache_dir)
        if (not dir_path.exists()):
            dir_path.mkdir()
        self.merge_default_parsers()

    def clean_parsers(self):
        """Wipe and recreate the cache directory."""
        dir_path = Path(self.cache_dir)
        if dir_path.exists():
            shutil.rmtree(dir_path)
        dir_path.mkdir()

    def load_from_pickle(self):
        # NOTE(review): self.cache_file is never assigned in __init__ —
        # confirm where it is set before relying on this method.
        if self.cache_file.exists():
            try:
                with open(self.cache_file, 'rb') as f:
                    self.parser_dict = pickle.load(f)
            except Exception as e:
                print('IO error:{}'.format(e))

    def load_parsers(self):
        """Import every parser module in the cache dir into parser_dict."""
        for f in listdir(self.cache_dir):
            if self.valid_parser_file(f):
                (name, hash_value, t) = self.separate_parser_file(f)
                module_name = '{}.{}'.format(self.module_dir, name)
                path_to_file = os.path.join(self.cache_dir, '{}.py'.format(name))
                # Dynamic import of the generated module by file path.
                spec = importlib.util.spec_from_file_location(module_name, path_to_file)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
                parser_a = getattr(module, 'grammar{}Parser'.format(hash_value))
                parser_semantic = getattr(module, 'grammar{}ModelBuilderSemantics'.format(hash_value))
                parser = parser_a(semantics=parser_semantic())
                self.parser_dict[hash_value] = parser
        self.logger.debug('After loading, self.parser_dict:{}'.format(self.parser_dict))
        if (len(self.parser_dict) > 1):
            print('{} parsers loaded'.format(len(self.parser_dict)))
        else:
            print('{} parser loaded'.format(len(self.parser_dict)))

    def get_parser(self, key, grammar, extra_dict={}):
        """Return a parser for ``grammar``, generating and caching it if new.

        ``key`` is hashed to identify the parser; ``extra_dict`` customizes
        the generated grammar code (identifiers, functions, packages).
        NOTE(review): ``extra_dict={}`` is a mutable default argument; it is
        only read here, but confirm no caller mutates it.
        """
        hash_value = hashlib.md5(key.encode()).hexdigest()
        if (hash_value in self.parser_dict):
            return self.parser_dict[hash_value]
        if (not DEBUG_PARSER):
            # Fast path: derive source code from the default parser template,
            # write it to the cache and import it immediately.
            rule_content = self.gen_parser_code(hash_value, extra_dict)
            module_name = '{}_{}_{}'.format(self.prefix, hash_value, datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
            new_file_name = os.path.join(self.cache_dir, '{}.py'.format(module_name))
            save_to_file(rule_content, new_file_name)
            spec = importlib.util.spec_from_file_location(module_name, new_file_name)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            parser_a = getattr(module, 'grammar{}Parser'.format(hash_value))
            parser_semantic = getattr(module, 'grammar{}ModelBuilderSemantics'.format(hash_value))
            parser = parser_a(semantics=parser_semantic())
            self.parser_dict[hash_value] = parser
            return parser
        else:
            # Debug path: compile the grammar with tatsu at runtime and save
            # the generated source on a background thread.
            parser = tatsu.compile(grammar, asmodel=True)
            self.parser_dict[hash_value] = parser
            try:
                save_thread = threading.Thread(target=self.save_grammar, args=(hash_value, grammar))
                save_thread.start()
                self.save_threads.append(save_thread)
            except:
                self.save_grammar(hash_value, grammar)
            return parser

    def save_grammar(self, hash_value, grammar):
        """Generate parser + model source for ``grammar`` and save to cache."""
        self.check_parser_cnt()
        code = tatsu.to_python_sourcecode(grammar, name='grammar{}'.format(hash_value), filename=os.path.join('la_grammar', 'here'))
        code_model = tatsu.to_python_model(grammar, name='grammar{}'.format(hash_value), filename=os.path.join('la_grammar', 'here'))
        # Strip the duplicate __future__ import before concatenating.
        code_model = code_model.replace('from __future__ import print_function, division, absolute_import, unicode_literals', '')
        code += code_model
        save_to_file(code, os.path.join(self.cache_dir, '{}_{}_{}.py'.format(self.prefix, hash_value, datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))))

    def check_parser_cnt(self):
        """Evict oldest non-default parsers until at most max_size remain."""
        parser_size = len(self.parser_dict)
        self.logger.debug('check_parser_cnt, self.parser_dict:{}, max:{}'.format(self.parser_dict, self.max_size))
        while (parser_size > self.max_size):
            earliest_time = time.time()
            earliest_file = None
            earliest_hash = None
            # Find the least-recently-modified generated parser file.
            for f in listdir(self.cache_dir):
                if self.valid_parser_file(f):
                    (name, hash_value, t) = self.separate_parser_file(f)
                    if (hash_value not in self.default_parsers_dict):
                        cur_time = os.path.getmtime(os.path.join(self.cache_dir, f))
                        if (cur_time < earliest_time):
                            earliest_time = cur_time
                            earliest_file = f
                            earliest_hash = hash_value
            if ((earliest_file is not None) and (earliest_hash in self.parser_dict)):
                del self.parser_dict[earliest_hash]
                os.remove(os.path.join(self.cache_dir, earliest_file))
                parser_size = len(self.parser_dict)
            else:
                # Nothing evictable: stop rather than loop forever.
                break
        self.logger.debug('check_parser_cnt, self.parser_dict:{}'.format(self.parser_dict))

    def save_dict(self):
        """Pickle the parser cache to self.cache_file (see load_from_pickle note)."""
        self.logger.debug('self.parser_dict:{}'.format(self.parser_dict))
        try:
            with open(self.cache_file, 'wb') as f:
                pickle.dump(self.parser_dict, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('IO error:{}'.format(e))

    def gen_parser_code(self, hash_value, extra_dict):
        """Derive new parser source from the default template via textual
        surgery: rename the hash, and splice extra identifier / function /
        package alternatives into the relevant grammar rules."""
        cur_content = self.default_parser_content.replace(self.default_hash_value, hash_value)
        if ('ids' in extra_dict):
            # Inject user-declared identifiers as extra IdentifierAlone options.
            id_list = extra_dict['ids']
            id_alone_original_rule = 'class IdentifierAlone(ModelBase):\n id = None\n value = None'
            id_alone_cur_rule = 'class IdentifierAlone(ModelBase):\n const = None\n id = None\n value = None'
            cur_content = cur_content.replace(id_alone_original_rule, id_alone_cur_rule)
            id_original_rule = "('IdentifierAlone')\n def _identifier_alone_(self): # noqa\n with self._ifnot():\n self._KEYWORDS_()\n with self._group():\n with self._choice():\n with self._option():\n self._pattern('[A-Za-z\\\\p{Ll}\\\\p{Lu}\\\\p{Lo}]\\\\p{M}*')\n self.name_last_node('value')\n with self._option():\n self._token('`')\n self._pattern('[^`]*')\n self.name_last_node('id')\n self._token('`')\n self._error('no available options')\n self.ast._define(\n ['id', 'value'],\n []\n )"
            choice_list = [f''' with self._option(): self._pattern('{item}')'''.format(item)[1:] for item in id_list]
            option_list = [f''' with self._option(): self._pattern('{item}')'''.format(item)[1:] for item in id_list]
            id_rule = (((((("('IdentifierAlone')\n def _identifier_alone_(self): # noqa\n with self._choice():\n with self._option():\n with self._group():\n with self._choice():" + '\n') + '\n'.join(choice_list)) + "\n self._error('no available options')\n self.name_last_node('const')\n with self._option():\n with self._group():\n with self._choice():\n with self._option():\n with self._ifnot():\n with self._group():\n with self._choice():\n with self._option():\n self._KEYWORDS_()") + '\n') + '\n'.join(option_list)) + "\n self._error('no available options')\n self._pattern('[A-Za-z\\\\p{Ll}\\\\p{Lu}\\\\p{Lo}]\\\\p{M}*')\n self.name_last_node('value')\n with self._option():\n self._token('`')\n self._pattern('[^`]*')\n self.name_last_node('id')\n self._token('`')\n self._error('no available options')\n self._error('no available options')\n self.ast._define(\n ['const', 'id', 'value'],\n []\n )")
            cur_content = cur_content.replace(id_original_rule, id_rule)
        if ('funcs' in extra_dict):
            # Inject user-declared function names into the func_id rule.
            funcs_list = extra_dict['funcs']
            choice_list = [f''' with self._option(): self._pattern('{item}')'''.format(item)[1:] for item in funcs_list]
            funcs_original_rule = "()\n def _func_id_(self): # noqa\n self._token('!!!')"
            funcs_rule = ((('()\n def _func_id_(self): # noqa\n with self._choice():' + '\n') + '\n'.join(choice_list)) + "\n self._error('no available options')")
            cur_content = cur_content.replace(funcs_original_rule, funcs_rule)
        if ('pkg' in extra_dict):
            # Package-provided builtin operators; 'e' additionally enables
            # Euler's constant in the constant/KEYWORDS rules.
            funcs_list = extra_dict['pkg']
            if ('e' in funcs_list):
                funcs_list.remove('e')
                constant_original = '()\n def _constant_(self): # noqa\n self._pi_()'
                constant_new = "()\n def _constant_(self): # noqa\n with self._choice():\n with self._option():\n self._pi_()\n with self._option():\n self._e_()\n self._error('no available options')"
                cur_content = cur_content.replace(constant_original, constant_new)
                keywords_original = '()\n def _KEYWORDS_(self): # noqa\n self._BUILTIN_KEYWORDS_()'
                keywords_new = "()\n def _KEYWORDS_(self): # noqa\n with self._choice():\n with self._option():\n self._BUILTIN_KEYWORDS_()\n with self._option():\n self._e_()\n self._error('no available options')"
                cur_content = cur_content.replace(keywords_original, keywords_new)
            builtin_original_rule = '()\n def _builtin_operators_(self): # noqa\n self._predefined_built_operators_()'
            choice_list = [f''' with self._option(): self._{item}_()'''.format(item)[1:] for item in funcs_list]
            funcs_rule = ((('()\n def _builtin_operators_(self): # noqa\n with self._choice():' + '\n') + '\n'.join(choice_list)) + "\n with self._option():\n self._predefined_built_operators_()\n self._error('no available options')")
            cur_content = cur_content.replace(builtin_original_rule, funcs_rule)
        return cur_content

    def generate_new_parser_files(self):
        """Regenerate the shipped 'init' and 'default' parser files, patching
        the default parser with runtime-extensible identifier/function/builtin
        hooks (new_id_list, new_func_list, builtin_list, const_e)."""
        la_local_parsers = (self.grammar_dir.parent / 'la_local_parsers')
        for f in listdir(la_local_parsers):
            if (self.init_hash_value in f):
                init_parser = read_from_file((la_local_parsers / f))
                init_parser = init_parser.replace(self.init_hash_value, 'init')
                save_to_file(init_parser, os.path.join(la_local_parsers, 'init_parser.py'))
            if (self.default_hash_value in f):
                def_parser = read_from_file((la_local_parsers / f))
                def_parser = def_parser.replace(self.default_hash_value, 'default')
                # Add the runtime-mutable lists to the parser constructor.
                original_class = 'super(grammardefaultParser, self).__init__(\n whitespace=whitespace,\n nameguard=nameguard,\n comments_re=comments_re,\n eol_comments_re=eol_comments_re,\n ignorecase=ignorecase,\n left_recursion=left_recursion,\n parseinfo=parseinfo,\n keywords=keywords,\n namechars=namechars,\n buffer_class=buffer_class,\n **kwargs\n )'
                new_class = 'super(grammardefaultParser, self).__init__(\n whitespace=whitespace,\n nameguard=nameguard,\n comments_re=comments_re,\n eol_comments_re=eol_comments_re,\n ignorecase=ignorecase,\n left_recursion=left_recursion,\n parseinfo=parseinfo,\n keywords=keywords,\n namechars=namechars,\n buffer_class=buffer_class,\n **kwargs\n )\n self.new_id_list = []\n self.new_func_list = []\n self.builtin_list = []\n self.const_e = False'
                def_parser = def_parser.replace(original_class, new_class)
                id_alone_original_rule = 'class IdentifierAlone(ModelBase):\n id = None\n value = None'
                id_alone_cur_rule = 'class IdentifierAlone(ModelBase):\n id = None\n value = None\n const = None'
                def_parser = def_parser.replace(id_alone_original_rule, id_alone_cur_rule)
                id_original_rule = "('IdentifierAlone')\n def _identifier_alone_(self): # noqa\n with self._ifnot():\n self._KEYWORDS_()\n with self._group():\n with self._choice():\n with self._option():\n self._pattern('[A-Za-z\\\\p{Ll}\\\\p{Lu}\\\\p{Lo}]\\\\p{M}*')\n self.name_last_node('value')\n with self._option():\n self._token('`')\n self._pattern('[^`]*')\n self.name_last_node('id')\n self._token('`')\n self._error('no available options')\n self.ast._define(\n ['id', 'value'],\n []\n )"
                id_rule = "('IdentifierAlone')\n def _identifier_alone_(self): # noqa\n if len(self.new_id_list) > 0:\n with self._choice():\n with self._option():\n with self._group():\n with self._choice():\n for new_id in self.new_id_list:\n with self._option():\n self._pattern(new_id)\n self._error('no available options')\n self.name_last_node('const')\n with self._option():\n with self._group():\n with self._choice():\n with self._option():\n with self._ifnot():\n with self._group():\n with self._choice():\n with self._option():\n self._KEYWORDS_()\n for new_id in self.new_id_list:\n with self._option():\n self._pattern(new_id)\n self._error('no available options')\n self._pattern('[A-Za-z\\\\p{Ll}\\\\p{Lu}\\\\p{Lo}]\\\\p{M}*')\n self.name_last_node('value')\n with self._option():\n self._token('`')\n self._pattern('[^`]*')\n self.name_last_node('id')\n self._token('`')\n self._error('no available options')\n self._error('no available options')\n self.ast._define(\n ['const', 'id', 'value'],\n []\n )\n else:\n # default\n with self._ifnot():\n self._KEYWORDS_()\n with self._group():\n with self._choice():\n with self._option():\n self._pattern('[A-Za-z\\\\p{Ll}\\\\p{Lu}\\\\p{Lo}]\\\\p{M}*')\n self.name_last_node('value')\n with self._option():\n self._token('`')\n self._pattern('[^`]*')\n self.name_last_node('id')\n self._token('`')\n self._error('no available options')\n self.ast._define(\n ['id', 'value'],\n []\n )"
                def_parser = def_parser.replace(id_original_rule, id_rule)
                funcs_original_rule = "()\n def _func_id_(self): # noqa\n self._token('!!!')"
                funcs_rule = "()\n def _func_id_(self): # noqa\n if len(self.new_func_list) > 0:\n with self._choice():\n for new_id in self.new_func_list:\n with self._option():\n self._pattern(new_id)\n self._error('no available options')\n else:\n # default\n self._token('!!!')"
                def_parser = def_parser.replace(funcs_original_rule, funcs_rule)
                builtin_original_rule = '()\n def _builtin_operators_(self): # noqa\n self._predefined_built_operators_()'
                funcs_rule = '()\n def _builtin_operators_(self): # noqa\n if len(self.builtin_list) > 0:\n with self._choice():\n for new_builtin in self.builtin_list:\n with self._option():\n func = getattr(self, "_{}_".format(new_builtin), None)\n func()\n with self._option():\n self._predefined_built_operators_()\n self._error(\'no available options\')\n else:\n self._predefined_built_operators_()'
                def_parser = def_parser.replace(builtin_original_rule, funcs_rule)
                constant_original = '()\n def _constant_(self): # noqa\n self._pi_()'
                constant_new = "()\n def _constant_(self): # noqa\n if self.const_e:\n with self._choice():\n with self._option():\n self._pi_()\n with self._option():\n self._e_()\n self._error('no available options')\n else:\n self._pi_()"
                def_parser = def_parser.replace(constant_original, constant_new)
                keywords_original = '()\n def _KEYWORDS_(self): # noqa\n self._BUILTIN_KEYWORDS_()'
                keywords_new = "()\n def _KEYWORDS_(self): # noqa\n if self.const_e:\n with self._choice():\n with self._option():\n self._BUILTIN_KEYWORDS_()\n with self._option():\n self._e_()\n self._error('no available options')\n else:\n self._BUILTIN_KEYWORDS_()"
                def_parser = def_parser.replace(keywords_original, keywords_new)
                # NOTE(review): builtin_keys / builtin_keys_new are assigned
                # but never used in a replace() — confirm whether a
                # def_parser.replace(builtin_keys, builtin_keys_new) call is
                # missing here.
                builtin_keys = "\n with self._option():\n self._pattern('R')\n with self._option():\n self._pattern('Z')\n with self._option():\n self._pattern('T')"
                builtin_keys_new = "\n with self._option():\n self._pattern('R')\n with self._option():\n self._pattern('Z')\n with self._option():\n self._pattern('T')\n for new_id in self.new_func_list:\n with self._option():\n self._pattern(new_id)"
                save_to_file(def_parser, os.path.join(la_local_parsers, 'default_parser.py'))
def save_vocab(count=None, name='vocab.txt'):
    """Write a word-frequency vocabulary to ``name`` in the current directory.

    Each entry is written as one ``"<word> <frequency>"`` line.

    Args:
        count: sequence of ``(word, frequency)`` pairs; ``None`` (the default)
            is treated as an empty vocabulary.  The original signature used the
            mutable default ``count=[]`` — a classic Python pitfall (the list is
            shared across calls).
        name: output file name, created in ``os.getcwd()``.
    """
    if count is None:
        count = []
    pwd = os.getcwd()
    vocabulary_size = len(count)
    with open(os.path.join(pwd, name), 'w') as f:
        # BUG FIX: original used Python-2-only ``xrange``; iterate the pairs
        # directly instead of indexing.
        for word, freq in count:
            # tf.compat.as_text normalizes bytes -> str before writing.
            f.write('%s %d\n' % (tf.compat.as_text(word), freq))
    print('%d vocab saved to %s in %s' % (vocabulary_size, name, pwd))
def compare_dataframes(gts, ts):
    """Compare tracker outputs against ground truth with motmetrics.

    Args:
        gts: mapping of sequence name -> ground-truth accumulator data.
        ts: mapping of sequence name -> tracker result accumulator data.

    Returns:
        ``(accs, names)`` — per-sequence IoU comparison accumulators (distance
        threshold 0.5) and the matching sequence names; sequences missing from
        ``gts`` are skipped with a warning.
    """
    accs = []
    names = []
    for seq_name, tracker_acc in ts.items():
        if seq_name not in gts:
            logging.warning('No ground truth for {}, skipping.'.format(seq_name))
            continue
        logging.info('Comparing {}...'.format(seq_name))
        accs.append(
            mm.utils.compare_to_groundtruth(gts[seq_name], tracker_acc, 'iou', distth=0.5))
        names.append(seq_name)
    return (accs, names)
class StickPlot():
    """3D stick-figure plot of body-joint coordinates on a matplotlib 3D axis.

    Draws one line segment per skeleton edge; optionally draws a second grey,
    dashed "ghost" skeleton for side-by-side comparison.
    """

    def __init__(self, title, stick_figure_edges, ax, elev=17, azim=47, rang=800):
        """
        Args:
            title: axis title.
            stick_figure_edges: iterable of ``(start_index, end_index)`` joint
                index pairs describing the skeleton connectivity.
            ax: a matplotlib 3D axes to draw on.
            elev, azim: initial camera elevation/azimuth for ``view_init``.
            rang: half-extent of the symmetric x/y/z axis limits.
        """
        self.lines = []
        self.ghost_lines = []
        self.initialized = False
        self.ax = ax
        self.ax.set_title(title)
        self.ax.view_init(elev=elev, azim=azim)
        self.ax.set_xlim3d(-rang, rang)
        self.ax.set_ylim3d(-rang, rang)
        self.ax.set_zlim3d(-rang, rang)
        self.stick_figure_edges = stick_figure_edges

    def initialize(self, coords, ghost_coords=None):
        """Create the line artists once; later updates mutate them in place."""
        for i_start, i_end in self.stick_figure_edges:
            if ghost_coords is not None:
                (line,) = self.ax.plot(*zip(ghost_coords[i_start], ghost_coords[i_end]),
                                       color='grey', linestyle='--', marker='o', markersize=2)
                self.ghost_lines.append(line)
            (line,) = self.ax.plot(*zip(coords[i_start], coords[i_end]),
                                   marker='o', markersize=2)
            self.lines.append(line)
        self.initialized = True

    def _prepare_coords(self, coords):
        """Map camera-space joint coords to world space, rooted at the last joint.

        BUG FIX: the original was declared ``def _prepare_coords(coords)`` (no
        ``self``), so every call site ``self._prepare_coords(coords)`` raised
        TypeError; it is now a proper instance method.
        """
        # Append an origin point and shift everything 3000 units along z
        # before the camera-to-world transform — presumably the H3.6M camera
        # depth offset; TODO confirm against the dataset conventions.
        coords = np.vstack([coords, [0, 0, 0]])
        coords += [0, 0, 3000]
        # NOTE(review): reloads camera metadata on every call — consider
        # caching if this runs per frame.  Uses camera [0][0] only.
        cameras = data.h36m.get_cameras(f'{paths.DATA_ROOT}/h36m/Release-v1.2/metadata.xml')
        coords = cameras[0][0].camera_to_world(coords)
        # Root the skeleton at the appended origin point.
        coords -= coords[-1]
        return coords

    def update(self, coords, ghost_coords=None):
        """Transform raw (camera-space) coords and redraw the figure."""
        coords = self._prepare_coords(coords)
        if ghost_coords is not None:
            ghost_coords = self._prepare_coords(ghost_coords)
        self.update_raw(coords, ghost_coords)

    def update_raw(self, coords, ghost_coords):
        """Redraw with already-prepared (world-space) coordinates."""
        if not self.initialized:
            # First call: create the artists instead of updating them.
            return self.initialize(coords, ghost_coords)
        if ghost_coords is not None:
            for (i_start, i_end), line in zip(self.stick_figure_edges, self.ghost_lines):
                x, y, z = tuple(zip(ghost_coords[i_start], ghost_coords[i_end]))
                line.set_data(x, y)
                line.set_3d_properties(z)
        for (i_start, i_end), line in zip(self.stick_figure_edges, self.lines):
            x, y, z = tuple(zip(coords[i_start], coords[i_end]))
            line.set_data(x, y)
            line.set_3d_properties(z)
def use_gpu(compute_device_type='CUDA', use_cpu=True) -> None:
    """Enable GPU rendering for Blender's Cycles engine.

    Args:
        compute_device_type: Cycles backend name (e.g. ``'CUDA'``); must be one
            of the device types Cycles reports as available.
        use_cpu: whether CPU devices stay enabled alongside the GPU(s).

    Raises:
        RuntimeError: if ``compute_device_type`` is not available.
    """
    context = bpy.context
    cycles_prefs = context.preferences.addons['cycles'].preferences
    available_types = [entry[0] for entry in cycles_prefs.get_device_types(context)]
    # Guard clause replaces the original if/else around the whole body.
    if compute_device_type not in available_types:
        raise RuntimeError('Non-existing device type')
    cycles_prefs.compute_device_type = compute_device_type
    devices = cycles_prefs.get_devices_for_type(compute_device_type)
    if len(devices) > 0:
        for c in devices:
            # Enable every device; CPU devices follow the use_cpu flag.
            c.use = True
            if c.type == 'CPU':
                c.use = use_cpu
            log.info(f'Using devices {c} {c.type} {c.use}')
    context.scene.cycles.device = 'GPU'
    log.info(f'Using gpu type:{compute_device_type} cpu:{use_cpu}')
class Writer():
    """Logs training losses and test accuracy to text files and, when
    available, to TensorBoard via ``SummaryWriter``.

    Also keeps a running correct/total counter for computing test accuracy.
    """

    def __init__(self, opt):
        # opt is the experiment options object; assumes it carries name,
        # checkpoints_dir, is_train, no_vis and arch — TODO confirm against
        # the options definition elsewhere in the project.
        self.name = opt.name
        self.opt = opt
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.log_name = os.path.join(self.save_dir, 'loss_log.txt')
        self.testacc_log = os.path.join(self.save_dir, 'testacc_log.txt')
        self.start_logs()
        # Running counters for accuracy computation (see update_counter/acc).
        self.nexamples = 0
        self.confidence_acc = 0
        self.ncorrect = 0
        # TensorBoard writer only in training mode, when visualization is not
        # disabled and the SummaryWriter import succeeded.
        if (opt.is_train and (not opt.no_vis) and (SummaryWriter is not None)):
            self.display = SummaryWriter(logdir=(os.path.join(self.opt.checkpoints_dir, self.opt.name) + '/tensorboard'))
        else:
            self.display = None

    def start_logs(self):
        """Append a timestamped session header to the relevant log file."""
        if self.opt.is_train:
            with open(self.log_name, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write((' Training Loss (%s) \n' % now))
        else:
            with open(self.testacc_log, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write((' Testing Acc (%s) \n' % now))

    def print_current_losses(self, epoch, i, losses, t, t_data, loss_types='total_loss'):
        """Print and append a formatted loss line for the current iteration.

        ``losses`` may be a single tensor-like value or a list of them; in the
        list case it is zipped with ``loss_types``.  NOTE(review): the default
        ``loss_types='total_loss'`` is a *string*, so zipping against it would
        iterate characters — presumably callers always pass a list of names
        when ``losses`` is a list; verify at the call sites.
        """
        if (type(losses) == list):
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f)' % (epoch, i, t, t_data))
            for (loss_type, loss_value) in zip(loss_types, losses):
                message += (' %s: %.3f' % (loss_type, loss_value.item()))
        else:
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f) loss: %.3f ' % (epoch, i, t, t_data, losses.item()))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def plot_loss(self, losses, epoch, i, n, loss_types):
        """Send training loss scalar(s) to TensorBoard at a global iteration."""
        # Global step: iterations within epoch plus n iterations per past epoch.
        iters = (i + ((epoch - 1) * n))
        if self.display:
            if (type(losses) == list):
                for (loss_type, loss_value) in zip(loss_types, losses):
                    self.display.add_scalar(('data/train_loss/' + loss_type), loss_value, iters)
            else:
                self.display.add_scalar('data/train_loss', losses, iters)

    def plot_model_wts(self, model, epoch):
        """Log per-parameter weight histograms to TensorBoard (train only)."""
        if (self.opt.is_train and self.display):
            for (name, param) in model.net.named_parameters():
                self.display.add_histogram(name, param.clone().cpu().data.numpy(), epoch)

    def print_acc(self, epoch, acc):
        """Print and append the test metric: accuracy for the evaluator
        architecture, reconstruction loss otherwise."""
        if (self.opt.arch == 'evaluator'):
            message = 'epoch: {}, TEST ACC: [{:.5} %]\n'.format(epoch, (acc * 100))
        else:
            message = 'epoch: {}, TEST REC LOSS: [{:.5}]\n'.format(epoch, acc)
        print(message)
        with open(self.testacc_log, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def plot_acc(self, acc, epoch):
        """Send the per-epoch test metric to TensorBoard under an
        architecture-specific tag."""
        if self.display:
            if (self.opt.arch == 'evaluator'):
                self.display.add_scalar('data/test_acc/grasp_prediction', acc, epoch)
            else:
                self.display.add_scalar('data/test_loss/grasp_reconstruction', acc, epoch)

    def reset_counter(self):
        """Zero the running correct/total counters (call before each eval)."""
        self.ncorrect = 0
        self.nexamples = 0

    def update_counter(self, ncorrect, nexamples):
        """Accumulate batch results into the running counters."""
        self.nexamples += nexamples
        self.ncorrect += ncorrect

    def acc(self):
        """Return accumulated accuracy; raises ZeroDivisionError if no
        examples were counted."""
        return (float(self.ncorrect) / self.nexamples)

    def close(self):
        """Flush and close the TensorBoard writer, if one was created."""
        if (self.display is not None):
            self.display.close()
# NOTE(review): the original line began with a dangling ``_module()`` fragment
# — almost certainly a truncated registry decorator such as
# ``@PIPELINES.register_module()``.  The fragment alone is a NameError at
# import time, so it is removed here; restore the full decorator from the
# surrounding project before use.
class DavarLoadImageFromFile():
    """Load an image from disk for a detection/OCR pipeline.

    Supports an alternative mode where ``results['img_info']`` arrives as a
    numpy byte array encoding ``"filename&&height&&width"``.

    Args:
        decode_from_array: if True, decode ``results['img_info']`` from a
            numpy byte array instead of expecting a dict.
        to_float32: if True, cast the loaded image to float32.
    """

    def __init__(self, decode_from_array=False, to_float32=False):
        self.decode_from_array = decode_from_array
        self.to_float32 = to_float32

    def __call__(self, results):
        """Read the image named by ``results`` and populate image fields.

        Returns the updated ``results`` dict, or None if the read fails.
        """
        if self.decode_from_array:
            # img_info is a numpy array of bytes: "filename&&height&&width".
            data_array = results['img_info']
            assert isinstance(data_array, np.ndarray)
            data_list = [data_array[i] for i in range(data_array.size)]
            data_str = bytes(data_list)
            data_str = data_str.decode()
            data_list = data_str.split('&&')
            results['img_info'] = dict()
            results['img_info']['filename'] = data_list[0]
            results['img_info']['height'] = int(data_list[1])
            results['img_info']['width'] = int(data_list[2])
        # Resolve the actual path: prefix + filename, bare filename, or a
        # path passed directly under 'img'.
        if ('img_prefix' in results):
            filename = osp.join(results['img_prefix'], results['img_info']['filename'])
        elif ('img_info' in results):
            filename = results['img_info']['filename']
        else:
            filename = results['img']
        # IGNORE_ORIENTATION: do not auto-rotate via EXIF.
        img = mmcv.imread(filename, (cv2.IMREAD_IGNORE_ORIENTATION + cv2.IMREAD_COLOR))
        if (not isinstance(img, np.ndarray)):
            # mmcv.imread returns None on failure; keep best-effort behavior.
            print('Reading Error at {}'.format(filename))
            return None
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results

    def __repr__(self):
        # FIX: include decode_from_array, which the original repr omitted.
        return (self.__class__.__name__ +
                '(decode_from_array={}, to_float32={})'.format(
                    self.decode_from_array, self.to_float32))
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the model's
    input and output tensors (batch and sequence dimensions vary)."""

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic axes for the model inputs."""
        dynamic_axes = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            (name, dict(dynamic_axes)) for name in ('input_ids', 'attention_mask'))

    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic axes for the model outputs; pooler_output has no
        sequence dimension."""
        return OrderedDict(
            [('last_hidden_state', {0: 'batch', 1: 'sequence'}),
             ('pooler_output', {0: 'batch'})])
def search_by_batch(model, beams, mem_dict):
    """Run batched incremental beam-search decoding until every beam completes.

    Gathers the live hypotheses of all unfinished beams into one batch per
    step, calls ``model.decode_step`` once, then splits the results back out
    to each beam.  Assumes state tensors with >= 3 dims are batched along
    dim 1 and the rest along dim 0 — TODO confirm against the model's state
    layout.  Requires CUDA (``.cuda()`` below).
    """
    def ready_to_submit(hypotheses):
        # Build the model input from each hypothesis's last token only
        # (incremental decoding), and concatenate per-hypothesis decoder
        # state tensors into one batch.
        inp = model.prepare_incremental_input([hyp.seq[(- 1):] for hyp in hypotheses])
        concat_hyps = dict()
        for hyp in hypotheses:
            for (k, v) in hyp.state_dict.items():
                concat_hyps[k] = (concat_hyps.get(k, []) + [v])
        for (k, v) in concat_hyps.items():
            # Batch axis convention: dim 1 for rank>=3 tensors, else dim 0.
            if (len(v[0].size()) >= 3):
                concat_hyps[k] = torch.cat(v, 1)
            else:
                concat_hyps[k] = torch.cat(v, 0)
        return (concat_hyps, inp)
    while True:
        # Collect all hypotheses from beams that are still decoding, and
        # remember which beam each one came from.
        hypotheses = []
        indices = []
        offset = (- 1)
        for (idx, beam) in enumerate(beams):
            if (not beam.completed()):
                for hyp in beam.hypotheses:
                    hypotheses.append(hyp)
                    indices.append(idx)
                    # Current decoding position; presumably all live
                    # hypotheses share the same length — verify upstream.
                    offset = (len(hyp.seq) - 1)
        if (not hypotheses):
            # Every beam finished.
            break
        (state_dict, inp) = ready_to_submit(hypotheses)
        # Select the encoder memory rows matching the surviving hypotheses.
        cur_mem_dict = dict()
        indices = torch.tensor(indices).cuda()
        for (k, v) in mem_dict.items():
            if isinstance(v, list):
                cur_mem_dict[k] = [v[i] for i in indices]
            else:
                # Memory tensors are batched along dim 1.
                cur_mem_dict[k] = v.index_select(1, indices)
        (state_dict, results) = model.decode_step(inp, state_dict, cur_mem_dict, offset, beams[0].beam_size)
        # Split the batched new state back into one dict per unfinished beam,
        # using the same rank-dependent batch-axis convention as above.
        _len_each_beam = [len(beam.hypotheses) for beam in beams if (not beam.completed())]
        _state_dict_each_beam = [dict() for _ in _len_each_beam]
        for (k, v) in state_dict.items():
            split_dim = (1 if (len(v.size()) >= 3) else 0)
            for (i, x) in enumerate(v.split(_len_each_beam, dim=split_dim)):
                _state_dict_each_beam[i][k] = x
        # Hand each beam its slice of states and results; _pos walks the
        # flat results, _idx walks the per-beam state dicts.
        _pos = 0
        _idx = 0
        for beam in beams:
            if (not beam.completed()):
                _len = len(beam.hypotheses)
                beam.update(_state_dict_each_beam[_idx], results[_pos:(_pos + _len)])
                _pos += _len
                _idx += 1
class SensorSuite():
    """An ordered collection of Sensors indexed by their uuid.

    Builds a combined observation space from the member sensors and can
    produce an Observations object by polling all of them.
    """
    sensors: Dict[(str, Sensor)]
    observation_spaces: SpaceDict

    def __init__(self, sensors: Iterable[Sensor]) -> None:
        """Register each sensor under its uuid; duplicate uuids are an error."""
        self.sensors = OrderedDict()
        spaces = OrderedDict()
        for sensor in sensors:
            uuid = sensor.uuid
            assert (uuid not in self.sensors), "'{}' is duplicated sensor uuid".format(uuid)
            self.sensors[uuid] = sensor
            spaces[uuid] = sensor.observation_space
        self.observation_spaces = SpaceDict(spaces=spaces)

    def get(self, uuid: str) -> Sensor:
        """Look up a sensor by uuid (KeyError if absent)."""
        return self.sensors[uuid]

    def get_observations(self, *args: Any, **kwargs: Any) -> Observations:
        """Poll every sensor and wrap the results in an Observations object."""
        return Observations(self.sensors, *args, **kwargs)