code
stringlengths
101
5.91M
def foreground_mean(filename):
    """Recompute the 'mean' entry of a results JSON over foreground classes only.

    Foreground excludes class ids 0, -1 and 99; any stray '99' entry is also
    removed from the file before the per-metric nan-mean is written back
    in place (indent=4, sorted keys).
    """
    with open(filename, 'r') as handle:
        res = json.load(handle)
    mean_block = res['results']['mean']
    fg_ids = np.array([int(key) for key in mean_block.keys() if key != 'mean'])
    for excluded in (0, -1, 99):
        fg_ids = fg_ids[fg_ids != excluded]
    # Drop a leftover '99' entry so it never contaminates later reads.
    if mean_block.get('99') is not None:
        mean_block.pop('99')
    mean_block['mean'] = OrderedDict()
    for metric in mean_block['1'].keys():
        foreground_values = [mean_block[str(cid)][metric] for cid in fg_ids]
        mean_block['mean'][metric] = np.nanmean(foreground_values)
    with open(filename, 'w') as handle:
        json.dump(res, handle, indent=4, sort_keys=True)
def export_sentence_embedding():
    """Embed the PHEME source-tweet corpus with the default ELMo model.

    Reads the plain-text corpus from output/elmo/ and writes sentence-averaged
    embeddings next to it.
    """
    import os
    base = os.path.join(os.path.dirname(__file__), '..', '..', 'output', 'elmo')
    corpus_path = os.path.join(base, 'pheme_source_tweet_corpus.txt')
    embedding_path = os.path.join(base, 'pheme_source_tweet_corpus_elmo_embedding.txt')
    with open(corpus_path, mode='r', encoding='utf-8') as corpus_input:
        default_elmo.embed_file(input_file=corpus_input,
                                output_file_path=embedding_path,
                                output_format='average')
def setup_dist(rank, world_size, master_port=None):
    """Initialise a single-node NCCL process group and pin this rank's GPU.

    Args:
        rank: this process's rank (also used as the CUDA device index).
        world_size: total number of participating processes.
        master_port: rendezvous port; defaults to 12354 so all ranks agree
            without extra configuration.
    """
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = str(master_port) if master_port is not None else '12354'
    dist.init_process_group('nccl', rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
    """Copy (source, destination) file pairs, creating destination dirs.

    Args:
        files: iterable of (src_path, dst_path) tuples.
    """
    for src, dst in files:
        target_dir = os.path.dirname(dst)
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard; skip the call entirely for bare filenames.
        if target_dir:
            os.makedirs(target_dir, exist_ok=True)
        shutil.copyfile(src, dst)
def load_cifar10_data(datadir):
    """Load CIFAR10 train/test splits as numpy-style (X_train, y_train, X_test, y_test)."""
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=to_tensor)
    test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=to_tensor)
    return (train_ds.data, train_ds.target, test_ds.data, test_ds.target)
def _hash_file(fpath, algorithm='sha256', chunk_size=65535): if ((algorithm == 'sha256') or ((algorithm == 'auto') and (len(hash) == 64))): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, 'rb') as fpath_file: for chunk in iter((lambda : fpath_file.read(chunk_size)), b''): hasher.update(chunk) return hasher.hexdigest()
def deform_conv_function(input, offset, weight, stride=1, padding=0, dilation=1, deform_groups=1, im2col_step=64):
    """Apply a deformable convolution via DeformConvFunction.

    Raises:
        ValueError: if `input` is given but is not a 4D (NCHW) tensor.
    """
    if input is not None and input.dim() != 4:
        raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
    func = DeformConvFunction(_pair(stride), _pair(padding), _pair(dilation),
                              deform_groups, im2col_step)
    return func(input, offset, weight)
class SparsityStats(object):
    """Interface for objects that can report their own sparsity."""

    # Attribute names excluded from sparsity accounting.
    __sparsity_ignore__ = ()

    def sparsity(self, **kwargs):
        """Return a sparsity estimate; must be overridden by subclasses."""
        raise NotImplementedError('Derived classes must implement a method to estimate sparsity.')
def load_image_single(f_path):
    """Load one RGB image: vertically center-crop to a (width x width/2) strip,
    resize to `image_size`, and return as uint8 CHW with a leading batch axis."""
    img = Image.open(f_path).convert('RGB')
    width = img.size[0]
    half_crop = (width / 2) / 2
    center_y = img.size[1] / 2
    img = img.crop((0, center_y - half_crop, width, center_y + half_crop))
    img = img.resize((image_size[0], image_size[1]), Image.LANCZOS)
    arr = np.array(img, dtype=np.uint8).transpose((2, 0, 1))
    return arr[np.newaxis, ...]
class Screen():
    """Pygame window showing keyboard-controller usage text and status messages."""

    screen = None
    font = None
    y_pos = 0
    x_pos = 0

    def setup_screen(self):
        """Create the window and render the usage() help text line by line.

        Lines containing ':' are split into a blue key part and a black
        description; other lines are drawn in red.
        """
        pygame.display.set_caption('OpenBot keyboard controller')
        self.font = pygame.font.Font(None, 32)
        self.screen = pygame.display.set_mode([1280, 760], pygame.RESIZABLE)
        self.screen.fill(white)
        text = usage()
        print(text)
        lines = text.strip().split('\r')
        self.x_pos = 50
        self.y_pos = 50
        delimiter = ':'
        for line in lines:
            if delimiter in line:
                # Tab-prefixed entries get a leading space for slight indent.
                space = ' ' if '\t' in line else ''
                elements = line.strip().split(delimiter)
                rendered = self.font.render(space + elements[0].strip() + delimiter, True, blue)
                self.screen.blit(rendered, (self.x_pos, self.y_pos))
                rendered = self.font.render(elements[1].strip(), True, black)
                self.screen.blit(rendered, (self.x_pos + 200, self.y_pos))
            else:
                rendered = self.font.render(line, True, red)
                self.screen.blit(rendered, (self.x_pos, self.y_pos))
            pygame.display.update()
            self.y_pos += 40

    def add_text(self, message):
        """Append a red status message below the previously drawn content."""
        rendered = self.font.render(message, True, red)
        self.y_pos += 20
        self.screen.blit(rendered, (self.x_pos, self.y_pos))
        pygame.display.update()
def collate_to_max_length_with_id(batch: List[List[torch.Tensor]], max_len: int=None, fill_values: List[float]=None) -> List[torch.Tensor]:
    """Pad variable-length samples to a common length, keeping ids/srcs/token sizes.

    Each sample is [field_0, ..., field_{k-1}, id, src, tokens_size]. Tensor
    fields are padded to (batch, max_len) except the last one, which carries
    3 values per position and becomes (batch, max_len, 3).

    Returns:
        Padded field tensors followed by the ids, srcs and tokens_size lists.
    """
    tokens_size = [sample[-1] for sample in batch]
    srcs = [sample[-2] for sample in batch]
    ids = [sample[-3] for sample in batch]
    batch = [sample[:-3] for sample in batch]
    lengths = np.array([[len(field) for field in sample] for sample in batch])
    batch_size, num_fields = lengths.shape
    fill_values = fill_values or [0.0] * num_fields
    max_lengths = lengths.max(axis=0)
    if max_len:
        assert max_lengths.max() <= max_len
        max_lengths = np.ones_like(max_lengths) * max_len
    output = [
        torch.full((batch_size, max_lengths[idx]), fill_value=fill_values[idx], dtype=batch[0][idx].dtype)
        for idx in range(num_fields - 1)
    ]
    # The final tensor field carries 3 values per position.
    output.append(torch.full((batch_size, max_lengths[-1], 3), fill_value=fill_values[-1], dtype=batch[0][-1].dtype))
    for row, sample in enumerate(batch):
        for col in range(num_fields):
            data = sample[col]
            output[col][row][:data.shape[0]] = data
    output.extend([ids, srcs, tokens_size])
    return output
def get_scheduler(config):
    """Pick the pruning scheduler: 'oneshot' when start and end steps coincide, otherwise 'iterative'."""
    name = 'oneshot' if config.start_step == config.end_step else 'iterative'
    return SCHEDULERS[name](config)
def _log_parameters(logger: Callable, params: dict): logger(('\n\t' + ', \n\t'.join([f'{x[0]}: {x[1]}' for x in params.items()])))
def load_svhn(dataset_dir, split='train'):
    """Read the SVHN image list for `split`, capped at 25k train / 9k other images."""
    data_dir = osp.join(dataset_dir, SVHN[split])
    cap = 25000 if split == 'train' else 9000
    return read_image_list(data_dir, n_max=cap)
class SubMConvFunction(Function):
    """Autograd wrapper around submanifold sparse convolution (ops.indice_conv)."""

    def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out):
        # Stash everything the backward pass needs.
        ctx.save_for_backward(indice_pairs, indice_pair_num, features, filters)
        return ops.indice_conv(features, filters, indice_pairs, indice_pair_num,
                               num_activate_out, False, True)

    def backward(ctx, grad_output):
        indice_pairs, indice_pair_num, features, filters = ctx.saved_tensors
        input_bp, filters_bp = ops.indice_conv_backward(
            features, filters, grad_output, indice_pairs, indice_pair_num, False, True)
        # No gradients for the three index/count arguments.
        return input_bp, filters_bp, None, None, None
def _weights_init(m): if isinstance(m, nn.Conv2d): torch.nn.init.xavier_uniform_(m.weight) if (m.bias is not None): torch.nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) m.bias.data.zero_()
def get_few_shot_cot_prompt(dataset_key) -> str:
    """Return the few-shot chain-of-thought prompt for `dataset_key`.

    Raises:
        KeyError: when no prompt is available for the dataset.
    """
    prompts = load_few_shot_cot_prompts()
    if dataset_key not in prompts:
        raise KeyError('Few-shot-CoT prompts are not available for dataset `{}`'.format(dataset_key))
    return prompts[dataset_key]['prompt']
class Hypothesis(object):
    """One beam-search hypothesis: a token sequence plus decoder state."""

    def __init__(self, tokens, log_probs, hidden_state, cell_state, coverage):
        self.tokens = tokens              # token ids decoded so far
        self.log_probs = log_probs        # per-token log probabilities
        self.hidden_state = hidden_state  # decoder hidden state
        self.cell_state = cell_state      # decoder cell state
        self.coverage = coverage          # attention coverage

    def extend(self, token, log_prob, hidden_state, cell_state, coverage):
        """Return a NEW hypothesis with one more decoded token appended."""
        return Hypothesis(tokens=self.tokens + [token],
                          log_probs=self.log_probs + [log_prob],
                          hidden_state=hidden_state,
                          cell_state=cell_state,
                          coverage=coverage)

    def latest_token(self):
        """Most recently decoded token."""
        return self.tokens[-1]

    def avg_log_prob(self):
        """Sequence log-probability normalised by token count."""
        return sum(self.log_probs) / len(self.tokens)
class ComputeTDErrorMixin():
    """Adds `compute_td_error`, which re-runs the actor-critic loss on a batch
    to refresh `self.td_error`."""

    def __init__(self):
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights):
            # Build a SampleBatch-shaped tensor dict, run the loss purely for
            # its side effect of setting self.td_error, then return it.
            input_dict = self._lazy_tensor_dict({
                SampleBatch.CUR_OBS: obs_t,
                SampleBatch.ACTIONS: act_t,
                SampleBatch.REWARDS: rew_t,
                SampleBatch.NEXT_OBS: obs_tp1,
                SampleBatch.DONES: done_mask,
                PRIO_WEIGHTS: importance_weights,
            })
            actor_critic_loss(self, self.model, None, input_dict)
            return self.td_error

        self.compute_td_error = compute_td_error
class BasicBlockNoSkip(nn.Module):
    """ResNet basic block WITHOUT the residual/skip connection."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super(BasicBlockNoSkip, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride=1)
        self.bn2 = nn.BatchNorm2d(planes)

    def forward(self, x):
        """conv-bn-relu, conv-bn, relu — no identity addition."""
        hidden = F.relu(self.bn1(self.conv1(x)))
        hidden = self.bn2(self.conv2(hidden))
        return F.relu(hidden)
def fs_cothub_bbh_match_answer(task_data, response):
    """Extract a BBH answer from a chain-of-thought response.

    Splits on 'answer is '; for multiple-choice tasks returns the first
    '(A)'-'(Z)' option found in the tail, otherwise the trailing text with a
    final '.' stripped.

    Returns:
        (matched: bool, answer: str)
    """
    parts = response.split('answer is ')
    if len(parts) == 1:
        # Phrase not present at all.
        return (False, response)
    ans = parts[-1].strip()
    if task_data['options']:
        for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            option = '({})'.format(letter)
            if option in ans:
                return (True, option)
        return (False, ans)
    # BUG FIX: guard against an empty answer (response ending in
    # 'answer is ') before indexing ans[-1], which raised IndexError.
    if not ans:
        return (False, ans)
    if ans[-1] == '.':
        ans = ans[:-1]
    return (True, ans)
def iresnet50(pretrained=False, **kwargs):
    """Build an iResNet-50; optionally download and load cached pretrained weights."""
    model = iResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        os.makedirs(default_cache_path, exist_ok=True)
        weights_path = download_from_url(model_urls['iresnet50'], root=default_cache_path)
        model.load_state_dict(torch.load(weights_path))
    return model
class TestCommon(unittest.TestCase):
    """Unit tests for common.move_element_to_front."""

    def test_move_element_to_front(self):
        move = common.move_element_to_front
        # Values absent from the list leave it unchanged.
        self.assertEqual(move([1, 2, 3, 4], 0), [1, 2, 3, 4])
        self.assertEqual(move([1, 2, 3, 4], 'a'), [1, 2, 3, 4])
        # The matching element moves to index 0; relative order of the rest is kept.
        self.assertEqual(move([1, 2, 3, 4], 1), [1, 2, 3, 4])
        self.assertEqual(move([1, 2, 3, 4], 2), [2, 1, 3, 4])
        self.assertEqual(move([1, 2, 3, 4], 3), [3, 1, 2, 4])
        self.assertEqual(move([1, 2, 3, 4], 4), [4, 1, 2, 3])
        self.assertEqual(move(['a', 'b', 'c', 'd'], 'd'), ['d', 'a', 'b', 'c'])
        # Exact match only: 'a' moves 'a', not strings merely containing 'a'.
        self.assertEqual(move(['ab', 'a', 'ac', 'ad'], 'a'), ['a', 'ab', 'ac', 'ad'])
def save_load_code(data_size, batch_size):
    """End-to-end check that auto_accelerate strategies round-trip.

    Verifies a strategy can be saved to file, broadcast to all ranks, loaded
    back (both from a file path and from pickled bytes), and that the loaded
    strategy reproduces the same `best_strategy` and a full training pass.
    """
    backend = ('nccl' if torch.cuda.is_available() else 'gloo')
    res = atorch.init_distributed(backend, set_cuda_device_using_local_rank=True)
    if (not res):
        raise Exception('init failed')
    model_context = create_model_context(data_size=data_size, batch_size=batch_size)
    b_data = [None, None]
    if (atorch.rank() == 0):
        # Rank 0 writes a 1-way data-parallel strategy to a temp file and
        # allocates a second temp file for auto_accelerate to save into.
        pg_info = ([('data', 1)], None)
        strategy = Strategy([('parallel_mode', pg_info, False)])
        (_, filename) = tempfile.mkstemp(suffix='st')
        save_strategy(strategy, filename)
        (_, save_filename) = tempfile.mkstemp(suffix='st')
        b_data = [filename, save_filename]
    # Share both filenames with every rank.
    torch.distributed.broadcast_object_list(b_data, src=0)
    (status, res, best_strategy) = auto_accelerate(model_context.model, model_context.optim_func, model_context.dataset, loss_func=model_context.loss_func, prepare_input=model_context.prepare_input, optim_args=model_context.optim_args, dataloader_args=model_context.dataloader_args, load_strategy=b_data[0], save_strategy_to_file=b_data[1])
    assert status
    assert (len(best_strategy) == 1)
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    num = run_train(res.model, res.dataloader, res.optim, res.prepare_input, res.loss_func, device)
    # One optimizer step per batch is expected.
    assert (num == (data_size // batch_size)), f'num={num}'
    # The strategy auto_accelerate saved must round-trip identically.
    (status, strategy) = get_strategy(b_data[1])
    assert status
    assert (strategy == best_strategy)
    # Second pass: load the strategy from pickled bytes instead of a file.
    pg_info = ([('data', 8)], None)
    strategy = Strategy([('parallel_mode', pg_info, False)])
    s_data = pickle.dumps(strategy)
    (status, res, strategy) = auto_accelerate(model_context.model, model_context.optim_func, model_context.dataset, loss_func=model_context.loss_func, prepare_input=model_context.prepare_input, optim_args=model_context.optim_args, dataloader_args=model_context.dataloader_args, load_strategy=s_data)
    assert status
    assert (len(strategy) == 1)
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    num = run_train(res.model, res.dataloader, res.optim, res.prepare_input, res.loss_func, device)
    assert (num == (data_size // batch_size)), f'num={num}'
    assert (strategy == best_strategy)
    atorch.reset_distributed()
class ReplayBuffer(metaclass=abc.ABCMeta):
    """Abstract episode-based replay buffer.

    Transitions accumulate per-episode; once `time_horizon` transitions are
    collected the episode is written into a fixed-size ring buffer holding
    `size_in_transitions // time_horizon` episode slots.
    """

    def __init__(self, env_spec, size_in_transitions, time_horizon):
        del env_spec  # accepted for API compatibility, unused here
        self._current_size = 0   # number of episode slots written so far
        self._current_ptr = 0    # next slot to overwrite once the buffer is full
        self._n_transitions_stored = 0
        self._time_horizon = time_horizon
        self._size_in_transitions = size_in_transitions
        self._size = (size_in_transitions // time_horizon)  # capacity in episodes
        self._initialized_buffer = False
        self._buffer = {}          # key -> ndarray of shape (size, horizon, *feat)
        self._episode_buffer = {}  # key -> list of in-progress transitions

    def store_episode(self):
        """Write the finished episode(s) into the ring buffer."""
        episode_buffer = self._convert_episode_to_batch_major()
        rollout_batch_size = len(episode_buffer['observation'])
        idx = self._get_storage_idx(rollout_batch_size)
        for key in self._buffer:
            self._buffer[key][idx] = episode_buffer[key]
        # Transition count saturates at the configured capacity.
        self._n_transitions_stored = min(self._size_in_transitions, (self._n_transitions_stored + (self._time_horizon * rollout_batch_size)))

    def sample(self, batch_size):
        """Return a batch of transitions; subclasses must implement."""
        raise NotImplementedError

    def add_transition(self, **kwargs):
        """Add a single transition (each value wrapped in a length-1 list)."""
        transition = {k: [v] for (k, v) in kwargs.items()}
        self.add_transitions(**transition)

    def add_transitions(self, **kwargs):
        """Append transitions to the current episode; flush once the horizon is reached."""
        if (not self._initialized_buffer):
            self._initialize_buffer(**kwargs)
        for (key, value) in kwargs.items():
            self._episode_buffer[key].append(value)
        if (len(self._episode_buffer['observation']) == self._time_horizon):
            self.store_episode()
            for key in self._episode_buffer:
                self._episode_buffer[key].clear()

    def _initialize_buffer(self, **kwargs):
        # Lazily size the numpy storage from the first transition's shapes/dtypes.
        for (key, value) in kwargs.items():
            self._episode_buffer[key] = list()
            values = np.array(value)
            self._buffer[key] = np.zeros([self._size, self._time_horizon, *values.shape[1:]], dtype=values.dtype)
        self._initialized_buffer = True

    def _get_storage_idx(self, size_increment=1):
        """Pick episode slot indices to (over)write, wrapping when full."""
        if ((self._current_size + size_increment) <= self._size):
            # Still filling: append at the end.
            idx = np.arange(self._current_size, (self._current_size + size_increment))
        elif (self._current_size < self._size):
            # Request partially wraps: fill the tail, then restart at 0.
            overflow = (size_increment - (self._size - self._current_size))
            idx_a = np.arange(self._current_size, self._size)
            idx_b = np.arange(0, overflow)
            idx = np.concatenate([idx_a, idx_b])
            self._current_ptr = overflow
        elif ((self._current_ptr + size_increment) <= self._size):
            # Buffer full: overwrite the oldest entries at the rolling pointer.
            idx = np.arange(self._current_ptr, (self._current_ptr + size_increment))
            self._current_ptr += size_increment
        else:
            # Full and wrapping past the end.
            # NOTE(review): overflow is derived from _current_size (== _size in
            # this branch), not _current_ptr — verify this is the intended math.
            overflow = (size_increment - (self._size - self._current_size))
            idx_a = np.arange(self._current_ptr, self._size)
            idx_b = np.arange(0, overflow)
            idx = np.concatenate([idx_a, idx_b])
            self._current_ptr = overflow
        self._current_size = min(self._size, (self._current_size + size_increment))
        if (size_increment == 1):
            idx = idx[0]  # scalar index for the common single-episode case
        return idx

    def _convert_episode_to_batch_major(self):
        # (time, batch, ...) lists -> (batch, time, ...) arrays.
        transitions = {}
        for key in self._episode_buffer:
            val = np.array(self._episode_buffer[key])
            transitions[key] = val.swapaxes(0, 1)
        return transitions

    def full(self):
        # True once every episode slot has been written at least once.
        return (self._current_size == self._size)

    def n_transitions_stored(self):
        # Total transitions currently stored (capped at capacity).
        return self._n_transitions_stored
class MicroCodeGen():
    """Renders micro-runtime C/C++ sources, headers and build files from jinja2 templates."""

    def __init__(self):
        pass

    def _make_env(self, keep_trailing_newline=True):
        # Templates live next to this module.
        cwd = os.path.dirname(__file__)
        return Environment(loader=FileSystemLoader(cwd), trim_blocks=True,
                           keep_trailing_newline=keep_trailing_newline)

    def _write(self, output_path, source):
        with open(output_path, 'w') as f:
            f.write(source)

    def gen_micro_ops_list_from_bytes(self, model_tag, op_src_path_list, op_class_name_list, jinja_file_name, output_path):
        """Render the operator-list template for the given op sources/classes."""
        template = self._make_env().get_template(JINJA2_DIR + jinja_file_name)
        source = template.render(model_tag=model_tag,
                                 op_src_path_list=op_src_path_list,
                                 op_class_name_list=op_class_name_list,
                                 op_class_name_list_size=len(op_class_name_list))
        self._write(output_path, source)

    def gen_micro_source_from_bytes(self, model_tag, embed_data, jinja_file_name, output_path):
        """Render a template that embeds `embed_data` verbatim."""
        template = self._make_env().get_template(JINJA2_DIR + jinja_file_name)
        source = template.render(model_tag=model_tag, embed_data=embed_data,
                                 data_size=len(embed_data))
        self._write(output_path, source)

    def gen_micro_source_from_array(self, model_tag, embed_data, jinja_file_name, output_path):
        """Render a template embedding `embed_data` as comma-separated hex bytes."""
        template = self._make_env().get_template(JINJA2_DIR + jinja_file_name)
        hex_bytes_string = ', '.join(map(hex, embed_data))
        source = template.render(model_tag=model_tag,
                                 hex_bytes_string=hex_bytes_string,
                                 data_size=len(embed_data))
        self._write(output_path, source)

    def gen_net_def_data(self, model_tag, model_def_data, output_path):
        """Embed the serialized net definition as a byte-array header."""
        embed_data = np.frombuffer(model_def_data, dtype=np.uint8)
        self.gen_micro_source_from_array(model_tag, embed_data, 'micro_net_def.h.jinja2', output_path)

    def gen_graph_data(self, model_tag, graph_data, output_path):
        """Embed the serialized graph as a byte-array header."""
        embed_data = np.frombuffer(graph_data, dtype=np.uint8)
        self.gen_micro_source_from_array(model_tag, embed_data, 'micro_graph_data.h.jinja2', output_path)

    def gen_ops_data(self, model_tag, op_src_path_list, op_class_name_list, output_path):
        """Generate the operator registry header."""
        self.gen_micro_ops_list_from_bytes(model_tag, op_src_path_list, op_class_name_list, 'micro_ops_list.h.jinja2', output_path)

    def gen_engin_config(self, model_tag, config_data, output_path):
        """Generate the engine-config source file."""
        self.gen_micro_source_from_bytes(model_tag, config_data, 'micro_engine_config.cc.jinja2', output_path)

    def gen_model_data(self, model_tag, model_param_data, output_path):
        """Embed the model parameters as a byte-array header."""
        embed_data = np.frombuffer(model_param_data, dtype=np.uint8)
        self.gen_micro_source_from_array(model_tag, embed_data, 'micro_model_data.h.jinja2', output_path)

    def gen_engine_factory(self, model_tag, output_path_h, output_path_cc):
        """Generate the engine-factory header/source pair."""
        self.gen_micro_source_from_bytes(model_tag, '', 'micro_engine_factory.h.jinja2', output_path_h)
        self.gen_micro_source_from_bytes(model_tag, '', 'micro_engine_factory.cc.jinja2', output_path_cc)

    def gen_engine_c_interface(self, model_tag, output_path_h, output_path_cc):
        """Generate the C-interface header/source pair."""
        self.gen_micro_source_from_bytes(model_tag, '', 'micro_engine_c_interface.h.jinja2', output_path_h)
        self.gen_micro_source_from_bytes(model_tag, '', 'micro_engine_c_interface.cc.jinja2', output_path_cc)

    def gen_cmake_file(self, model_tag, output_path):
        """Generate the CMakeLists.txt (this template does NOT keep trailing newlines)."""
        template = self._make_env(keep_trailing_newline=False).get_template(JINJA2_DIR + 'CMakeLists.txt.jinja2')
        self._write(output_path, template.render(model_tag=model_tag))
def analyze(df):
    """Print a per-column summary of `df`.

    High-cardinality columns (>100 uniques) show the distinct count and its
    share of rows; other columns show expanded category percentages.
    """
    print()
    total = float(len(df))
    print('{} rows'.format(int(total)))
    for col in df.columns.values:
        unique_count = len(df[col].unique())
        if unique_count > 100:
            # Too many distinct values to enumerate: show count and share.
            print('** {}:{} ({}%)'.format(col, unique_count, int((unique_count / total) * 100)))
        else:
            # BUG FIX: the original called expand_categories(df[col]) a second
            # time after printing and discarded the result — dead call removed.
            print('** {}:{}'.format(col, expand_categories(df[col])))
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """Cosine schedule with hard restarts where every cycle repeats the warmup phase."""

    def __init__(self, warmup=0.002, t_total=(- 1), cycles=1.0, **kw):
        # The warmup fraction is per-cycle, so the summed warmup across all
        # cycles must stay below the full schedule length.
        assert (warmup * cycles) < 1.0
        if warmup >= 0:
            warmup = warmup * cycles
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        """LR multiplier at `progress` in [0, 1): per-cycle linear warmup then cosine decay."""
        progress = (progress * self.cycles) % 1.0
        if progress < self.warmup:
            return progress / self.warmup
        progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1.0 + math.cos(math.pi * progress))
class ColorJitter(object):
    """Randomly jitter the brightness, contrast, saturation and hue of a PIL image."""

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Compose the enabled jitter ops (shuffled) with freshly drawn factors.

        BUG FIX: this must be a @staticmethod — it was previously called as
        self.get_params(b, c, s, h), which passed `self` as `brightness` and
        raised TypeError for the surplus fifth argument.
        """
        transforms = []
        if brightness > 0:
            brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
        if contrast > 0:
            contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
        if saturation > 0:
            saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
        if hue > 0:
            hue_factor = np.random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
        np.random.shuffle(transforms)
        return Compose(transforms)

    def __call__(self, img):
        """Apply a freshly sampled jitter transform to `img`."""
        transform = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
        return transform(img)
class LogitsProcessorList(metaclass=DummyObject):
    """Import-time placeholder that raises unless the torch backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class MABlock(nn.Module):
    """Modulated attention block: pools a global context map from the input,
    refines it with a small 1-channel conv body, and returns it scaled by the
    learned scalar `k1` (zero-initialised, so the block starts as a no-op)."""

    def __init__(self, conv, kernel_size=3, bias=True, act=nn.ReLU(True)):
        super(MABlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.k1 = Parameter(torch.zeros(1))  # learned output scale, starts at 0
        self.softmax = nn.Softmax(dim=1)     # softmax over the channel axis
        m = []
        m.append(conv(1, 1, kernel_size, bias=bias))
        m.append(act)
        m.append(conv(1, 1, kernel_size, bias=bias))
        self.body = nn.Sequential(*m)
        self.reset_parameters()

    def reset_parameters(self):
        # Zero-init the last conv of the body so the initial output is zero.
        last_zero_init(self.body)

    def NCA(self, x):
        """Build a (batch, 1, H, W) context map by attending over channels.

        NOTE(review): `input_x.view(batch, height * width, channel)`
        reinterprets the NCHW memory layout rather than permuting axes —
        confirm this is intended and not a missing `permute(0, 2, 3, 1)`.
        """
        (batch, channel, height, width) = x.size()
        input_x = x
        Apx = self.avg_pool(input_x)  # (batch, channel, 1, 1) per-channel means
        px = Apx
        px = self.softmax(px)  # channel-attention weights
        px = px.view(batch, 1, channel, 1)
        input_x = input_x.view(batch, (height * width), channel)
        input_x = input_x.unsqueeze(1)
        # (batch, 1, H*W, C) @ (batch, 1, C, 1) -> (batch, 1, H*W, 1)
        context = torch.matmul(input_x, px)
        context = context.view(batch, 1, height, width)
        return context

    def forward(self, x):
        context = self.NCA(x)
        context = self.body(context)
        return (self.k1 * context)
def prepare_validation_features(examples):
    """Tokenize SQuAD-style validation examples into overlapping features.

    Long contexts are split into several features (stride 128); each feature
    records its source example id, and offset-mapping positions outside the
    context are nulled so span postprocessing can map predictions back to the
    original text. Relies on module-level `tokenizer`, `pad_on_right`,
    `max_seq_length` and the *_column_name globals.
    """
    # Some questions carry leading whitespace that would skew truncation.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    tokenized_examples = tokenizer(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=max_seq_length, stride=128, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length')
    # Maps each overflow feature back to the example it came from.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        sequence_ids = tokenized_examples.sequence_ids(i)
        # The context occupies sequence 1 when the question comes first.
        context_index = (1 if pad_on_right else 0)
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Null out offsets for tokens that are not part of the context.
        tokenized_examples['offset_mapping'][i] = [(o if (sequence_ids[k] == context_index) else None) for (k, o) in enumerate(tokenized_examples['offset_mapping'][i])]
    return tokenized_examples
def merge_semantic_and_instance(sem_seg, ins_seg, semantic_thing_seg, label_divisor, thing_ids, stuff_area, void_label):
    """Fuse semantic and instance segmentation into a panoptic label map.

    Thing pixels become `majority_class * label_divisor + instance_number`;
    stuff classes covering at least `stuff_area` pixels become
    `class * label_divisor`; everything else stays `void_label`.
    """
    pan_seg = torch.zeros_like(sem_seg) + void_label
    is_thing = (ins_seg > 0) & (semantic_thing_seg > 0)
    class_id_tracker = Counter()
    # Paint thing instances, numbering them per semantic class.
    for ins_id in torch.unique(ins_seg):
        if ins_id == 0:
            continue  # 0 marks "no instance"
        thing_mask = (ins_seg == ins_id) & is_thing
        if torch.nonzero(thing_mask).size(0) == 0:
            continue
        # Majority vote over the mask decides the instance's semantic class.
        class_id, _ = torch.mode(sem_seg[thing_mask].view(-1))
        class_id_tracker[class_id.item()] += 1
        new_ins_id = class_id_tracker[class_id.item()]
        pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
    # Paint stuff regions that are large enough.
    for class_id in torch.unique(sem_seg):
        if class_id.item() in thing_ids:
            continue
        stuff_mask = (sem_seg == class_id) & (ins_seg == 0)
        if stuff_mask.sum().item() >= stuff_area:
            pan_seg[stuff_mask] = class_id * label_divisor
    return pan_seg
def check_linear_binning(delta):
    """Decide whether `delta.log_lambda` is binned linearly in wavelength or in log-wavelength.

    Returns:
        (linear_binning, pixel_step): detection flag plus the minimum bin
        width in the detected space.

    Raises:
        ValueError: when neither binning scheme can be identified.
    """
    diff_lambda = np.diff(10 ** delta.log_lambda)
    diff_log_lambda = np.diff(delta.log_lambda)
    q5_lambda, q25_lambda = np.percentile(diff_lambda, [5, 25])
    q5_log_lambda, q25_log_lambda = np.percentile(diff_log_lambda, [5, 25])
    # Nearly constant wavelength steps -> linear binning.
    if (q25_lambda - q5_lambda) < 1e-06:
        return (True, np.min(diff_lambda))
    # Nearly constant (and small) log-wavelength steps -> log binning.
    if ((q25_log_lambda - q5_log_lambda) < 1e-06) and (q5_log_lambda < 0.01):
        return (False, np.min(diff_log_lambda))
    if q5_log_lambda >= 0.01:
        raise ValueError('Could not figure out if linear or log wavelength binning was used, probably submitted lambda as log_lambda')
    raise ValueError('Could not figure out if linear or log wavelength binning was used')
_module()  # NOTE(review): residue of a registration decorator (likely `@MODELS.register_module()`) lost in extraction — restore from upstream
class PanopticFPN(TwoStagePanopticSegmentor):
    """Implementation of `Panoptic Feature Pyramid Networks`.

    Thin wrapper that forwards every config to TwoStagePanopticSegmentor; the
    semantic head and panoptic fusion head carry the panoptic-specific logic.
    (The original docstring was truncated by extraction.)
    """

    def __init__(self, backbone: ConfigType, neck: OptConfigType=None, rpn_head: OptConfigType=None, roi_head: OptConfigType=None, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None, semantic_head: OptConfigType=None, panoptic_fusion_head: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg, semantic_head=semantic_head, panoptic_fusion_head=panoptic_fusion_head)
class Baseline(abc.ABC):
    """Base class for trajectory-prediction baselines sampled at 2 Hz."""

    def __init__(self, sec_from_now: float, helper: PredictHelper):
        """
        Args:
            sec_from_now: prediction horizon in seconds; must be a multiple of 0.5.
            helper: data accessor used to construct predictions.
        """
        assert sec_from_now % 0.5 == 0, f'Parameter sec from now must be divisible by 0.5. Received {sec_from_now}.'
        self.helper = helper
        self.sec_from_now = sec_from_now
        self.sampled_at = 2  # predictions are sampled at 2 Hz

    def __call__(self, token: str) -> Prediction:
        """Produce a Prediction for the given instance/sample token; subclasses override."""
        pass
def _create_socket_server(path): server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) path_dir = os.path.dirname(path) os.makedirs(path_dir, exist_ok=True) if os.path.exists(path): os.unlink(path) server.bind(path) server.listen(0) return server
def register(image: ValidImage, /, template: typing.Optional[ValidImage]=None, *, type_of_transform: str='Affine', interpolator: str='bSpline', metric: str='mattes', initial_rigid: bool=True, template_mask: typing.Optional[ValidImage]=None) -> (nib.nifti1.Nifti1Image | ants.ANTsImage):
    """Register `image` to `template` with ANTs, returning it in the input's format.

    Falls back to the bundled MNI template when none is given; optionally runs
    a rigid pre-registration whose forward transform seeds the main one.
    """
    if template is None:
        template = ants.image_read(ants.get_ants_data('mni'))
    else:
        template = to_ants(template)
    # Remember whether to convert the result back to nibabel at the end.
    is_nibabel = isinstance(image, nib.nifti1.Nifti1Image)
    image = to_ants(image)
    rigid_transform = None
    if initial_rigid:
        logger.debug('Doing initial rigid registration.')
        transforms = ants.registration(fixed=template, moving=image, type_of_transform='Rigid', aff_metric=metric, syn_metric=metric)
        rigid_transform = transforms['fwdtransforms'][0]
    logger.debug(f'Doing {type_of_transform} registration.')
    transform = ants.registration(fixed=template, moving=image, initial_transform=rigid_transform, type_of_transform=type_of_transform, mask=template_mask, aff_metric=metric, syn_metric=metric)['fwdtransforms']
    logger.debug('Applying transformations.')
    registered = ants.apply_transforms(template, image, transform, interpolator=interpolator)
    return registered.to_nibabel() if is_nibabel else registered
class TraceMalloc(threading.Thread):
    """Background thread that periodically logs top memory allocations via tracemalloc."""

    def __init__(self) -> None:
        super().__init__(name=self.__class__.__name__)

    def run(self):
        """Trace allocations forever, logging totals and per-filename diffs every _INTERVAL seconds."""
        process = psutil.Process()
        tracemalloc.start(_NUM_FRAMES)
        log.info(f'Started tracing memory allocations for {_NUM_FRAMES} frames.')
        snapshot = tracemalloc.take_snapshot()
        while True:
            # Keep the previous snapshot so growth can be diffed against it.
            snapshot_prev = snapshot
            snapshot = tracemalloc.take_snapshot()
            (traced_current, _traced_peak) = tracemalloc.get_traced_memory()
            stats_by_filename = _printable_stats(snapshot.statistics(key_type='filename'))
            # Only report filenames whose footprint actually grew.
            stats_diff_by_filename = _printable_stats([d for d in snapshot.compare_to(snapshot_prev, key_type='filename') if (d.size_diff > 0)])
            del snapshot_prev  # free the old snapshot before sleeping
            log.info(f'''Process is using {humanize_bytes(process.memory_info().rss)}. Memory tracing is using {humanize_bytes(tracemalloc.get_tracemalloc_memory())} and tracing {humanize_bytes(traced_current)}. The current top memory allocations by filename are: {stats_by_filename} The current top memory allocation diffs by filename are: {stats_diff_by_filename}''')
            time.sleep(_INTERVAL)
_torchsde  # NOTE(review): residue of a decorator (likely `@require_torchsde`) lost in extraction — restore from upstream
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Config sweeps and full denoising-loop checks for DPMSolverSDEScheduler."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs, overridable per test."""
        config = {'num_train_timesteps': 1100, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'noise_sampler_seed': 0}
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([1e-05, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'scaled_linear']:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def _denoise(self, scheduler, sample):
        """Run the full denoising loop; return (sum, mean) of |final sample|."""
        model = self.dummy_model()
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            sample = scheduler.step(model_output, t, sample).prev_sample
        return torch.sum(torch.abs(sample)), torch.mean(torch.abs(sample))

    def _assert_close(self, result_sum, result_mean, expected_by_device, tol_mean=0.001):
        """Compare |sample| statistics to the device-specific reference sums."""
        expected = expected_by_device.get(torch_device, expected_by_device['default'])
        assert abs(result_sum.item() - expected) < 0.01
        assert abs(result_mean.item() - 0.0) < tol_mean

    def test_full_loop_no_noise(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        scheduler.set_timesteps(self.num_inference_steps)
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma).to(torch_device)
        result_sum, result_mean = self._denoise(scheduler, sample)
        self._assert_close(result_sum, result_mean, {'mps': 167., 'cuda': 171., 'default': 162.})

    def test_full_loop_with_v_prediction(self):
        config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = self.scheduler_classes[0](**config)
        scheduler.set_timesteps(self.num_inference_steps)
        sample = (self.dummy_sample_deter * scheduler.init_noise_sigma).to(torch_device)
        result_sum, result_mean = self._denoise(scheduler, sample)
        self._assert_close(result_sum, result_mean, {'mps': 124., 'cuda': 128., 'default': 119.})

    def test_full_loop_device(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        result_sum, result_mean = self._denoise(scheduler, sample)
        self._assert_close(result_sum, result_mean, {'mps': 167., 'cuda': 171., 'default': 162.})

    def test_full_loop_device_karras_sigmas(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config(), use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)  # kept from the original (redundant second move)
        result_sum, result_mean = self._denoise(scheduler, sample)
        # Karras sigmas use a looser mean tolerance, matching the original.
        self._assert_close(result_sum, result_mean, {'mps': 176., 'cuda': 177., 'default': 170.}, tol_mean=0.01)
class Discriminator(nn.Module):
    """Conditional MLP discriminator for flattened 28x28 images with 10 classes.

    The class label is embedded and concatenated onto the image vector before
    passing through three LeakyReLU hidden layers and a sigmoid output.
    """

    def __init__(self):
        super().__init__()
        # 10-dim embedding per class label, appended to the 784-dim image.
        self.embedding = nn.Embedding(10, 10)
        self.layer1 = nn.Sequential(
            nn.Linear(in_features=28 * 28 + 10, out_features=1024),
            nn.LeakyReLU(),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(in_features=1024, out_features=512),
            nn.LeakyReLU(),
        )
        self.layer3 = nn.Sequential(
            nn.Linear(in_features=512, out_features=256),
            nn.LeakyReLU(),
        )
        self.output = nn.Sequential(
            nn.Linear(in_features=256, out_features=1),
            nn.Sigmoid(),
        )

    def forward(self, x, y):
        """Score batch `x` (flattened images) conditioned on integer labels `y`.

        Returns a (batch, 1) tensor of real/fake probabilities in [0, 1].
        """
        features = torch.cat([x, self.embedding(y)], dim=-1)
        for block in (self.layer1, self.layer2, self.layer3, self.output):
            features = block(features)
        return features
def gen_vqa_texts(annotation):
    """Yield the question strings from a VQA annotation JSON file.

    Parameters
    ----------
    annotation : str
        Path to a JSON file with a top-level "questions" list whose items
        each carry a "question" key.

    Yields
    ------
    str
        Each question text, in file order.
    """
    # BUGFIX: the original called open() without closing the handle; use a
    # context manager so the file is released promptly.
    with open(annotation, 'r', encoding='utf-8') as fp:
        questions = json.load(fp)['questions']
    for q in questions:
        yield q['question']
@_cache()
def statcast_pitcher_pitch_movement(year: int, minP: Union[(int, str)]='q', pitch_type: str='FF') -> pd.DataFrame:
    """Fetch the Baseball Savant pitch-movement leaderboard as a DataFrame.

    Parameters
    ----------
    year : int
        Season to query.
    minP : int or str
        Minimum pitches thrown; 'q' means "qualified".
    pitch_type : str
        Statcast pitch code (e.g. 'FF'); normalised via norm_pitch_code.

    Returns
    -------
    pd.DataFrame
        Sanitised leaderboard rows.
    """
    pitch_type = norm_pitch_code(pitch_type)
    # NOTE(review): the URL literal was lost in the original source; this is
    # reconstructed from the public Baseball Savant CSV endpoint — confirm the
    # query parameters against the upstream project.
    url = f'https://baseballsavant.mlb.com/leaderboard/pitch-movement?year={year}&team=&min={minP}&pitch_type={pitch_type}&hand=&csv=true'
    res = requests.get(url, timeout=None).content
    data = pd.read_csv(io.StringIO(res.decode('utf-8')))
    data = sanitize_statcast_columns(data)
    return data
def resnet_l123():
    """Return a pretrained ResNet-101 truncated after layer3.

    The classification head (layer4, avgpool, fc) is removed so the module
    serves as a convolutional feature backbone only.
    """
    backbone = resnet101(pretrained=True)
    # Strip the last residual stage and the classification head.
    for attr in ('layer4', 'avgpool', 'fc'):
        delattr(backbone, attr)
    return backbone
class MultiHeadDotProductAttention(nn.Module):
    """Multi-head scaled dot-product attention with separate Q/K/V projections.

    Projects queries, keys and values into `num_heads` subspaces, applies
    DotProductAttention per head, concatenates the attended values and maps
    them to `d_out`, optionally followed by dropout and layer normalisation.
    """

    def __init__(self, d_q_in: int, d_k_in: int, d_v_in: int, d_qk: int, d_v: int, num_heads: int, d_out: int, normalize: bool=True, dropout_p: float=0.0) -> None:
        # d_q_in/d_k_in/d_v_in: input feature sizes for Q/K/V.
        # d_qk: per-head query/key size; d_v: per-head value size.
        super().__init__()
        self.num_heads = num_heads
        self.normalize = normalize
        self.q_linear = nn.Linear(d_q_in, (d_qk * num_heads), bias=False)
        self.k_linear = nn.Linear(d_k_in, (d_qk * num_heads), bias=False)
        self.v_linear = nn.Linear(d_v_in, (d_v * num_heads), bias=False)
        self.attn = DotProductAttention(d_qk)
        self.final_linear = nn.Linear((d_v * num_heads), d_out, bias=False)
        self.dropout = None
        if (dropout_p > 0.0):
            self.dropout = nn.Dropout(dropout_p)
        if self.normalize:
            self.layer_norm = nn.LayerNorm(d_out, eps=1e-06)

    def forward(self, Q: Tensor, K: Tensor, V: Tensor, mask: None=None) -> Tensor:
        # assumes Q is (batch, d_q_in) and K/V are (batch, channels, length)
        # with matching lengths -- TODO confirm against DotProductAttention.
        assert (K.shape[2] == V.shape[2]), 'keys must be the same size as values'
        Q = self.q_linear(Q)
        # Permute so the linear layers act on the channel dimension, then
        # restore the original (batch, channels, length) layout.
        K = self.k_linear(K.permute(0, 2, 1)).permute(0, 2, 1).contiguous()
        V = self.v_linear(V.permute(0, 2, 1)).permute(0, 2, 1).contiguous()
        # Fold the head dimension into the batch dimension for per-head attention.
        Q = Q.view((Q.shape[0] * self.num_heads), (Q.shape[1] // self.num_heads))
        K = K.view((K.shape[0] * self.num_heads), (K.shape[1] // self.num_heads), K.shape[2])
        V = V.view((V.shape[0] * self.num_heads), (V.shape[1] // self.num_heads), V.shape[2])
        attended_V = self.attn(Q, K, V, mask=mask)
        # Unfold the heads and concatenate their outputs feature-wise.
        attended_V = attended_V.view((attended_V.shape[0] // self.num_heads), self.num_heads, attended_V.shape[1])
        attended_V = attended_V.view(attended_V.shape[0], (attended_V.shape[1] * attended_V.shape[2]))
        out = self.final_linear(attended_V)
        if (self.dropout is not None):
            out = self.dropout(out)
        if self.normalize:
            out = self.layer_norm(out)
        return out
class Joiner(nn.Sequential):
    """Pairs a backbone with a position-embedding module.

    self[0] is the backbone, self[1] the position encoder; forward returns
    the backbone feature maps together with their position encodings.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        feature_maps = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        # Names are unused; iterate the feature maps directly.
        for feature in feature_maps.values():
            out.append(feature)
            # Position encodings are cast to the feature dtype.
            pos.append(self[1](feature).to(feature.tensors.dtype))
        return (out, pos)
class Decoder(layers.Layer):
    """VQGAN-style decoder: maps latent codes back to images.

    Architecture: conv_in -> mid (ResNet/Attn/ResNet) -> upsampling trunk
    (doubling resolution per level) -> norm/activation/conv_out.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks, attn_resolutions, in_channels, image_size, z_channels, give_pre_end=False, name=None, **ignorekwargs):
        super().__init__(name=name)
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = image_size
        self.in_channels = in_channels
        # When True, `call` returns features before the final norm/conv stage.
        self.give_pre_end = give_pre_end
        # Channel count and spatial size at the lowest (latent) resolution.
        block_in = (ch * ch_mult[(self.num_resolutions - 1)])
        curr_res = (image_size // (2 ** (self.num_resolutions - 1)))
        self.z_shape = (1, z_channels, curr_res, curr_res)
        self.conv_in = tf.keras.Sequential([layers.ZeroPadding2D(), layers.Conv2D(block_in, 3, name=f'{self.name}/conv_in')])
        self.mid = tf.keras.Sequential()
        self.mid.add(ResnetBlock(in_channels=block_in, out_channels=block_in, name=f'{self.name}/mid/block_1'))
        self.mid.add(AttnBlock(block_in, name=f'{self.name}/mid/attn_1'))
        self.mid.add(ResnetBlock(in_channels=block_in, out_channels=block_in, name=f'{self.name}/mid/block_2'))
        self.up = tf.keras.Sequential()
        # Upsampling trunk: highest ch_mult first, doubling resolution per level.
        for i_level in reversed(range(self.num_resolutions)):
            block_out = (ch * ch_mult[i_level])
            for i_block in range((self.num_res_blocks + 1)):
                self.up.add(ResnetBlock(in_channels=block_in, out_channels=block_out, name=f'{self.name}/up.{i_level}/block.{i_block}'))
                block_in = block_out
                if (curr_res in attn_resolutions):
                    self.up.add(AttnBlock(block_in, name=f'{self.name}/up.{i_level}/attn.{i_block}'))
            if (i_level != 0):
                self.up.add(Upsample(block_in, name=f'{self.name}/up.{i_level}/upsample'))
                curr_res = (curr_res * 2)
        self.end = tf.keras.Sequential([Normalize(name=f'{self.name}/norm_out'), layers.Activation(nonlinearity), layers.ZeroPadding2D(), layers.Conv2D(out_ch, 3, name=f'{self.name}/conv_out')])

    def call(self, z, training=False):
        """Decode latents `z` to an image, or pre-end features when give_pre_end."""
        h = self.conv_in(z, training=training)
        h = self.mid(h, training=training)
        h = self.up(h, training=training)
        if self.give_pre_end:
            return h
        return self.end(h, training=training)

    def last_layer(self):
        # The final Conv2D layer (e.g. for adaptive loss weighting).
        return self.end.layers[(- 1)]
def vis_predictions(model, inputs, targets, instructions, save_path, prefix=''):
    """Run `model` on `inputs` and save one prediction-vs-target map per sample.

    Each sample `i` is rendered by vis_value_map into
    `save_path/<prefix><i>.png`, titled with its instruction string.
    """
    wrapped = (Variable(t.contiguous()) for t in inputs)
    preds = model(wrapped).data.cpu().numpy()
    gts = targets.cpu().numpy()
    sample_count = inputs[0].size(0)
    for idx in tqdm(range(sample_count)):
        out_file = os.path.join(save_path, '{}{}.png'.format(prefix, idx))
        vis_value_map(preds[idx], gts[idx], out_file, title=instructions[idx], share=False)
def _get_base_voxel_dataset(model_id, edge_length_threshold=0.1, voxel_config=None, filled=False, auto_save=True):
    """Open the read-only binvox dataset for `model_id`.

    When `auto_save` is True, the voxel data is created on disk first
    (existing data is never overwritten).
    """
    params = dict(
        model_id=model_id,
        edge_length_threshold=edge_length_threshold,
        voxel_config=voxel_config,
        filled=filled,
    )
    subdir = get_voxel_subdir(**params)
    if auto_save:
        # Materialise the voxel data if it does not exist yet.
        create_voxel_data(overwrite=False, **params)
    return bvd.BinvoxDataset(subdir, mode='r')
def load_dataset(args=None, dataset=None):
    """Resolve and load a graph dataset by name.

    Parameters
    ----------
    args : Namespace, optional
        Parsed CLI arguments; its `dataset` attribute names the dataset.
    dataset : str, optional
        Dataset name, used when `args` is not given.

    Returns
    -------
    The loaded dataset object.

    Raises
    ------
    ValueError
        If neither `args` nor `dataset` is provided (the original crashed
        with AttributeError on None in that case).
    NameError
        If the name matches no known dataset.
    """
    if args is not None:
        dataset_name = args.dataset.lower()
    elif dataset is not None:
        dataset_name = dataset.lower()
    else:
        raise ValueError('either `args` or `dataset` must be provided')
    if dataset_name == 'synthetic_graph_cls':
        return load_synthetic_graph_cls(args)
    if dataset_name == 'zinc':
        return MoleculeDatasetDGL()
    if 'sbms' in dataset_name:
        # e.g. 'SBMs_CLUSTER' -> 'cluster'
        name = dataset_name.split('_')[-1]
        return SBMsDatasetDGL(name)
    # BUGFIX: message typo 'unknow' -> 'unknown'.
    raise NameError(f'unknown dataset name {dataset_name}')
def resume_model(base_model, args, logger=None):
    """Restore `base_model` weights from the experiment's last checkpoint.

    Returns (start_epoch, best_metrics); returns (0, 0) when no checkpoint
    file exists under `args.experiment_path`.
    """
    ckpt_path = os.path.join(args.experiment_path, 'ckpt-last.pth')
    if (not os.path.exists(ckpt_path)):
        print_log(f'[RESUME INFO] no checkpoint file from path {ckpt_path}...', logger=logger)
        return (0, 0)
    print_log(f'[RESUME INFO] Loading model weights from {ckpt_path}...', logger=logger)
    # Remap tensors saved from cuda:0 onto this process's local GPU.
    map_location = {('cuda:%d' % 0): ('cuda:%d' % args.local_rank)}
    state_dict = torch.load(ckpt_path, map_location=map_location)
    # Strip DistributedDataParallel's 'module.' prefix before loading.
    base_ckpt = {k.replace('module.', ''): v for (k, v) in state_dict['base_model'].items()}
    base_model.load_state_dict(base_ckpt, strict=True)
    start_epoch = (state_dict['epoch'] + 1)
    best_metrics = state_dict['best_metrics']
    if (not isinstance(best_metrics, dict)):
        # NOTE(review): presumably best_metrics is a metrics object exposing
        # state_dict(); confirm against the checkpoint writer.
        best_metrics = best_metrics.state_dict()
    print_log(f'[RESUME INFO] resume ckpts {(start_epoch - 1)} epoch( best_metrics = {str(best_metrics):s})', logger=logger)
    return (start_epoch, best_metrics)
def main(output_dir, palette=sns.color_palette('light:#5A9', as_cmap=True), annot=False, output_dpi=600, linewidth=2.0, context='paper', fig_scale=1.5):
    """Plot layer-similarity heatmaps for a saved transformer checkpoint.

    Loads the checkpoint, L2-normalises each parameter tensor, buckets the
    flattened parameters per encoder/decoder layer, computes layer-by-layer
    similarity matrices via compute_sim, and writes one heatmap PNG per
    matrix into `output_dir`.
    """
    sns.set_context(context, font_scale=3.0)
    common_kwargs = dict(cmap=palette, annot=annot, annot_kws={'fontsize': 18}, fmt='.1f', cbar=True, xticklabels=True, yticklabels=True, square=True)
    # NOTE(review): hard-coded absolute checkpoint path; parameterise if reused.
    state_dict = torch.load('/home/jiahuei/Documents/1_TF_files/relation_trans/mscoco_v1/RTrans__baseline/model_best.pth')
    # One bucket per transformer layer; 6 encoder + 6 decoder layers assumed.
    encoder_params = [[] for _ in range(6)]
    decoder_params = [[] for _ in range(6)]
    encoder_layer_sizes = ([0] * 6)
    decoder_layer_sizes = ([0] * 6)
    for (k, v) in state_dict.items():
        # L2-normalise along the last axis, then flatten to a vector.
        v = torch.nn.functional.normalize(v, p=2.0, dim=(- 1), eps=1e-12, out=None)
        v = v.cpu().numpy().reshape((- 1))
        if k.startswith('model.encoder.layers'):
            i = int(k.split('.')[3])  # layer index parsed from the param name
            encoder_layer_sizes[i] += v.size
            encoder_params[i].append(v)
        elif k.startswith('model.decoder.layers'):
            i = int(k.split('.')[3])
            decoder_layer_sizes[i] += v.size
            decoder_params[i].append(v)
    print(encoder_layer_sizes)
    print(decoder_layer_sizes)
    encoder_params = np.array([np.concatenate(_) for _ in encoder_params])
    decoder_params = np.array([np.concatenate(_) for _ in decoder_params])
    print(encoder_params.shape)
    print(decoder_params.shape)
    encoder = compute_sim(encoder_params)
    decoder = compute_sim(decoder_params)
    matrices = dict(encoder=encoder, decoder=decoder)
    for (name, mat) in tqdm(matrices.items()):
        (fig, ax) = plt.subplots(nrows=1, ncols=1, figsize=((4.5 * fig_scale), (3 * fig_scale)))
        # vmin is the second-smallest distinct value, skipping the floor value.
        ax = sns.heatmap(mat, vmin=np.unique(mat)[1], ax=ax, **common_kwargs)
        ax.xaxis.tick_top()
        ax.xaxis.set_label_position('top')
        ax.tick_params(length=0)
        plt.tight_layout(pad=0)
        fname = f'layer-sim norm-last {name} (light:#5A9)'
        if annot:
            fname += '_annot'
        plt.savefig(f'{os.path.join(output_dir, fname)}.png', dpi=output_dpi)
        plt.clf()
        plt.close('all')
def convert_all_files(dataset='uea'):
    """Convert every UEA/UCR arff dataset folder into pickled torch tensors.

    For each dataset folder: read the TRAIN/TEST arff files, concatenate them,
    and save data/labels plus the original train/test index split under
    DATA_DIR/processed. Folders already processed or missing their arff files
    are skipped.
    """
    assert (dataset in ['uea', 'ucr'])
    if (dataset == 'uea'):
        folder = 'UEA'
    elif (dataset == 'ucr'):
        folder = 'UCR'
    arff_folder = (DATA_DIR + '/raw/{}/Multivariate_arff'.format(folder))
    for ds_name in tqdm([x for x in os.listdir(arff_folder) if os.path.isdir(((arff_folder + '/') + x))]):
        train_file = (arff_folder + '/{}/{}_TRAIN.arff'.format(ds_name, ds_name))
        test_file = (arff_folder + '/{}/{}_TEST.arff'.format(ds_name, ds_name))
        save_dir = (DATA_DIR + '/processed/{}/{}'.format(folder, ds_name))
        # Detect missing arff files by comparing basenames to the dir listing.
        if any([(x.split('/')[(- 1)] not in os.listdir((arff_folder + '/{}'.format(ds_name)))) for x in (train_file, test_file)]):
            if (ds_name not in ['Images', 'Descriptions']):
                print('No files found for folder: {}'.format(ds_name))
            continue
        elif os.path.isdir(save_dir):
            print('Files already exist for: {}'.format(ds_name))
            continue
        else:
            (train_data, test_data, train_labels, test_labels) = create_torch_data(train_file, test_file)
            data = torch.cat([train_data, test_data])
            labels = torch.cat([train_labels, test_labels])
            # Remember which rows were train vs test so the split is recoverable.
            original_idxs = (np.arange(0, train_data.size(0)), np.arange(train_data.size(0), data.size(0)))
            save_pickle(data, (save_dir + '/data.pkl'))
            save_pickle(labels, (save_dir + '/labels.pkl'))
            save_pickle(original_idxs, (save_dir + '/original_idxs.pkl'))
def get_format_custom(logger, level):
    """Build a logging.Formatter tagged with the distributed rank.

    When the RANK env var is set and `level` is INFO, also installs a filter
    on `logger` so only rank 0 emits INFO records.
    """
    if ('RANK' in os.environ):
        rank = int(os.environ['RANK'])
        if (level == logging.INFO):
            # Filter(...) takes a keep/drop flag: only rank 0 keeps INFO records.
            logger.addFilter(Filter((rank == 0)))
    else:
        rank = 0
    # NOTE(review): the '[' is never closed in this format string -- looks
    # like a typo for '[%(asctime)s-rk{}]-%(message)s'; confirm before fixing
    # since downstream log parsers may depend on the current output.
    format_str = '[%(asctime)s-rk{}-%(message)s'.format(rank)
    formatter = logging.Formatter(format_str)
    return formatter
def chunked(n: int, data: Iterable[T]) -> Iterator[list[T]]:
    """Yield successive lists of at most `n` items from `data`.

    The final chunk may be shorter than `n`; an exhausted (or empty) iterable
    yields nothing. With n <= 0 nothing is yielded.
    """
    source = iter(data)
    while True:
        chunk: list[T] = []
        for _ in range(n):
            try:
                chunk.append(next(source))
            except StopIteration:
                break
        if not chunk:
            return
        yield chunk
def quaternionProduct(qx, qy):
    """Hamilton product of two quaternions.

    Both operands are (w, x, y, z) sequences; the product is returned as a
    4-tuple in the same order. Quaternion multiplication is non-commutative.
    """
    a, b, c, d = qx[0], qx[1], qx[2], qx[3]
    e, f, g, h = qy[0], qy[1], qy[2], qy[3]
    return (
        a * e - b * f - c * g - d * h,  # scalar part
        a * f + b * e + c * h - d * g,  # i
        a * g - b * h + c * e + d * f,  # j
        a * h + b * g - c * f + d * e,  # k
    )
def main():
    """Stale-bot pass over open issues in huggingface/transformers.

    An issue whose most recent comment is the bot's, idle for more than 7 days
    and at least 30 days old, is closed. An issue idle for more than 23 days
    and at least 30 days old gets a stale-warning comment. Issues carrying an
    exempt label are skipped in both cases.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=(lambda i: i.created_at), reverse=True)
        last_comment = (comments[0] if (len(comments) > 0) else None)
        if ((last_comment is not None) and (last_comment.user.login == 'github-actions[bot]') and ((dt.utcnow() - issue.updated_at).days > 7) and ((dt.utcnow() - issue.created_at).days >= 30) and (not any(((label.name.lower() in LABELS_TO_EXEMPT) for label in issue.get_labels())))):
            # The bot already warned and nobody replied: close the issue.
            issue.edit(state='closed')
        elif (((dt.utcnow() - issue.updated_at).days > 23) and ((dt.utcnow() - issue.created_at).days >= 30) and (not any(((label.name.lower() in LABELS_TO_EXEMPT) for label in issue.get_labels())))):
            issue.create_comment('This issue has been automatically marked as stale because it has not had recent activity. If you think this still needs to be addressed please comment on this thread.\n\nPlease note that issues that do not follow the [contributing guidelines]( are likely to be ignored.')
class Deconv2DNoBiasLayerGuidedBackProp(Deconv2DNoBiasLayer):
    """Deconvolution layer variant implementing guided backpropagation.

    Masks the deconvolved output so only positions that are positive both in
    the backward signal and in the mirrored forward activation survive --
    the guided-backprop rule.
    """

    def output(self, input=None, dropout_active=True, *args, **kwargs):
        if (input is None):
            input = self.input_layer.output(*args, dropout_active=dropout_active, **kwargs)
        if (dropout_active and (self.dropout > 0.0)):
            # Inverted dropout: scale kept units so expectations match test time.
            retain_prob = (1 - self.dropout)
            mask = layers.srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
            input = ((input / retain_prob) * mask)
        contiguous_input = gpu_contiguous(input)
        contiguous_filters = gpu_contiguous(self.W)
        if (self.stride == 1):
            deconved = self.image_acts_op(contiguous_input, contiguous_filters)
        else:
            # Strided deconvolution needs the target spatial size explicitly.
            (_, x, y, _) = self.get_output_shape()
            deconved = self.image_acts_op(contiguous_input, contiguous_filters, as_tensor_variable((x, y)))
        # Guided backprop: zero out positions where either the backward signal
        # or the mirror layer's forward input activation is non-positive.
        mask = ((deconved > 0.0) * (self.mirror_layer.input_layer.output() > 0.0))
        return (mask * deconved)
def ismember(a_vec, b_vec):
    """Return a boolean mask flagging which elements of `a_vec` occur in `b_vec`.

    Mirrors the single-output form of MATLAB's ``ismember``.

    Parameters
    ----------
    a_vec, b_vec : array_like
        Arrays of comparable elements.

    Returns
    -------
    np.ndarray
        Boolean array the same shape as `a_vec`.
    """
    # The original also computed unique/index arrays (the second MATLAB
    # output) but never returned them; that dead code is removed.
    return np.isin(a_vec, b_vec)
def rescale_attributions_to_tokens(attributions: OneOrMoreAttributionSequences, tokens: OneOrMoreTokenSequences) -> OneOrMoreAttributionSequences:
    """Truncate each attribution sequence to its token count.

    Sequences consisting entirely of NaN are replaced by an empty list.
    Sequences are paired positionally with their token lists.
    """
    result = []
    for attr, toks in zip(attributions, tokens):
        if all(math.isnan(value) for value in attr):
            # Entirely-NaN rows carry no signal.
            result.append([])
        else:
            result.append(attr[:len(toks)])
    return result
class TFElectraPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is not installed.

    Instantiating it raises an informative error via requires_backends
    instead of failing with an opaque ImportError at import time.
    """
    # Backends required by the real implementation.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class PolyConvSeqBranch(nn.Module):
    """A chain of PolyConv blocks, each parameterised per poly-branch index.

    The per-layer output channels, kernel sizes, strides and paddings are
    given as parallel lists of equal length.
    """

    def __init__(self, in_channels, out_channels_list, kernel_size_list, strides_list, padding_list, num_blocks):
        super(PolyConvSeqBranch, self).__init__()
        # All per-layer spec lists must line up.
        assert len(out_channels_list) == len(kernel_size_list)
        assert len(out_channels_list) == len(strides_list)
        assert len(out_channels_list) == len(padding_list)
        self.conv_list = ParametricSequential()
        layer_specs = zip(out_channels_list, kernel_size_list, strides_list, padding_list)
        for idx, (out_ch, ksize, stride, pad) in enumerate(layer_specs, start=1):
            self.conv_list.add_module(
                'conv{}'.format(idx),
                PolyConv(in_channels=in_channels, out_channels=out_ch, kernel_size=ksize, stride=stride, padding=pad, num_blocks=num_blocks),
            )
            # Each layer feeds the next.
            in_channels = out_ch

    def forward(self, x, index):
        """Apply the conv chain using the poly-branch selected by `index`."""
        return self.conv_list(x, index=index)
class FP16_Optimizer(object):
    """Wraps an optimizer for mixed-precision (fp16) training.

    Keeps an fp32 "master" copy of every fp16 parameter group, scales the
    loss to avoid gradient underflow, and shuttles gradients/parameters
    between the fp16 model weights and the fp32 master weights around each
    `step`.
    """

    def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):
        # BUGFIX: `torch.cuda.is_available` was referenced without calling it,
        # so the (always-truthy) function object made this check a no-op.
        if not torch.cuda.is_available():
            raise SystemError('Cannot use fp16 without CUDA')
        self.fp16_param_groups = []
        self.fp32_param_groups = []
        self.fp32_flattened_groups = []
        for (i, param_group) in enumerate(optimizer.param_groups):
            print('FP16_Optimizer processing param group {}:'.format(i))
            fp16_params_this_group = []
            fp32_params_this_group = []
            for param in param_group['params']:
                if param.requires_grad:
                    if (param.type() == 'torch.cuda.HalfTensor'):
                        print('FP16_Optimizer received torch.cuda.HalfTensor with {}'.format(param.size()))
                        fp16_params_this_group.append(param)
                    elif (param.type() == 'torch.cuda.FloatTensor'):
                        print('FP16_Optimizer received torch.cuda.FloatTensor with {}'.format(param.size()))
                        fp32_params_this_group.append(param)
                    else:
                        raise TypeError('Wrapped parameters must be either torch.cuda.FloatTensor or torch.cuda.HalfTensor. Received {}'.format(param.type()))
            # Build one flattened fp32 master tensor per fp16 group.
            fp32_flattened_this_group = None
            if (len(fp16_params_this_group) > 0):
                fp32_flattened_this_group = _flatten_dense_tensors([param.detach().data.clone().float() for param in fp16_params_this_group])
                fp32_flattened_this_group = Variable(fp32_flattened_this_group, requires_grad=True)
                fp32_flattened_this_group.grad = fp32_flattened_this_group.new(*fp32_flattened_this_group.size())
            # The wrapped optimizer steps only on fp32 (master) weights.
            if (fp32_flattened_this_group is not None):
                param_group['params'] = ([fp32_flattened_this_group] + fp32_params_this_group)
            else:
                param_group['params'] = fp32_params_this_group
            self.fp16_param_groups.append(fp16_params_this_group)
            self.fp32_param_groups.append(fp32_params_this_group)
            self.fp32_flattened_groups.append(fp32_flattened_this_group)
        self.optimizer = optimizer.__class__(optimizer.param_groups)
        self.param_groups = self.optimizer.param_groups
        if dynamic_loss_scale:
            self.dynamic_loss_scale = True
            self.loss_scaler = DynamicLossScaler()
        else:
            self.dynamic_loss_scale = False
            self.loss_scaler = LossScaler(static_loss_scale)
        self.overflow = False
        self.first_closure_call_this_step = True

    def zero_grad(self):
        """Zero grads on the wrapped optimizer and on the fp16 model params."""
        self.optimizer.zero_grad()
        for fp16_group in self.fp16_param_groups:
            for param in fp16_group:
                if (param.grad is not None):
                    param.grad.detach_()
                    param.grad.zero_()

    def _check_overflow(self):
        # Collect every managed param and ask the scaler for inf/nan grads.
        params = []
        for group in self.fp16_param_groups:
            for param in group:
                params.append(param)
        for group in self.fp32_param_groups:
            for param in group:
                params.append(param)
        self.overflow = self.loss_scaler.has_overflow(params)

    def _update_scale(self, has_overflow=False):
        self.loss_scaler.update_scale(has_overflow)

    def _copy_grads_fp16_to_fp32(self):
        # Flatten fp16 grads into the master fp32 grad buffer, per group.
        for (fp32_group, fp16_group) in zip(self.fp32_flattened_groups, self.fp16_param_groups):
            if (len(fp16_group) > 0):
                fp32_group.grad.data.copy_(_flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))

    def _downscale_fp32(self):
        # Undo the loss scaling on master grads before the optimizer step.
        # (Works now that `loss_scale` is a property returning a number.)
        if (self.loss_scale != 1.0):
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    param.grad.data.mul_((1.0 / self.loss_scale))

    def clip_fp32_grads(self, clip=(- 1)):
        """Clip the master-grad norm to `clip`; no-op when clip <= 0 or on overflow."""
        if (not self.overflow):
            fp32_params = []
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    fp32_params.append(param)
            if (clip > 0):
                return torch.nn.utils.clip_grad_norm(fp32_params, clip)

    def _copy_params_fp32_to_fp16(self):
        # Push updated master weights back into the fp16 model params.
        for (fp16_group, fp32_group) in zip(self.fp16_param_groups, self.fp32_flattened_groups):
            if (len(fp16_group) > 0):
                for (fp16_param, fp32_data) in zip(fp16_group, _unflatten_dense_tensors(fp32_group.data, fp16_group)):
                    fp16_param.data.copy_(fp32_data)

    def state_dict(self):
        """Return state for checkpointing (scaler, flags, wrapped optimizer)."""
        state_dict = {}
        state_dict['loss_scaler'] = self.loss_scaler
        state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
        state_dict['overflow'] = self.overflow
        state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
        state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore state produced by `state_dict`."""
        self.loss_scaler = state_dict['loss_scaler']
        self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
        self.overflow = state_dict['overflow']
        self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
        self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])

    def step(self, closure=None):
        """Step the wrapped optimizer and refresh the fp16 model weights.

        Skips the update entirely (and shrinks the scale) when an overflow
        was detected in the gradients.
        """
        if ((closure is not None) and isinstance(self.loss_scaler, DynamicLossScaler)):
            raise TypeError('Using step with a closure is currently not compatible with dynamic loss scaling.')
        scale = self.loss_scaler.loss_scale
        self._update_scale(self.overflow)
        if self.overflow:
            print('OVERFLOW! Skipping step. Attempted loss scale: {}'.format(scale))
            return
        if (closure is not None):
            self._step_with_closure(closure)
        else:
            self.optimizer.step()
        self._copy_params_fp32_to_fp16()
        return

    def _step_with_closure(self, closure):
        def wrapped_closure():
            # On re-evaluations within one step(), the fp16 weights must first
            # be refreshed from the fp32 master copy; on the first call they
            # are already in sync.
            if self.first_closure_call_this_step:
                self.first_closure_call_this_step = False
            else:
                self._copy_params_fp32_to_fp16()
            # The API expects users to call optimizer.backward(loss) instead
            # of loss.backward(), including inside the closure; the closure
            # therefore leaves fresh grads on the fp32 master params.
            temp_loss = closure()
            return temp_loss
        self.optimizer.step(wrapped_closure)
        self.first_closure_call_this_step = True

    def backward(self, loss, update_fp32_grads=True):
        """Scale `loss` and backprop; optionally sync grads to fp32 masters."""
        self.loss_scaler.backward(loss.float())
        if update_fp32_grads:
            self.update_fp32_grads()

    def update_fp32_grads(self):
        """Copy fp16 grads into the fp32 masters and undo the loss scaling."""
        if self.dynamic_loss_scale:
            self._check_overflow()
            if self.overflow:
                return
        self._copy_grads_fp16_to_fp32()
        self._downscale_fp32()

    @property
    def loss_scale(self):
        """Current loss scale.

        BUGFIX: exposed as a property; `_downscale_fp32` reads
        `self.loss_scale` as a number, which failed while this was a plain
        method (comparing/dividing by a bound method).
        """
        return self.loss_scaler.loss_scale
def apply_delta_low_cpu_mem(base_model_path, target_model_path, delta_path):
    """Apply a weight delta to a base model while keeping peak CPU memory low.

    Splits both base and delta checkpoints into <=4GB shards in temporary
    directories, adds delta tensors to base tensors shard by shard, and
    writes the merged weights (plus tokenizer/config and a weight index)
    to `target_model_path`. Any existing target directory is removed first.
    """
    base_tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False)
    base_config = AutoConfig.from_pretrained(base_model_path)
    if os.path.exists(target_model_path):
        shutil.rmtree(target_model_path)
    target_model_path = os.path.abspath(target_model_path)
    if (not os.path.exists(target_model_path)):
        os.makedirs(target_model_path)
    split_size = (4 * GB)
    with tempfile.TemporaryDirectory() as tmp_base_path, tempfile.TemporaryDirectory() as tmp_delta_path:
        print(f'Split files for the base model to {tmp_base_path}')
        split_files(base_model_path, tmp_base_path, split_size)
        print(f'Split files for the delta model to {tmp_delta_path}')
        split_files(delta_path, tmp_delta_path, split_size)
        base_pattern = os.path.join(tmp_base_path, 'pytorch_model-*.bin')
        base_files = glob.glob(base_pattern)
        delta_pattern = os.path.join(tmp_delta_path, 'pytorch_model-*.bin')
        delta_files = glob.glob(delta_pattern)
        # Keep only one delta shard resident at a time; swap shards on demand.
        delta_state_dict = torch.load(delta_files[0])
        print('Applying the delta')
        weight_map = {}
        total_size = 0
        for (i, base_file) in tqdm(enumerate(base_files)):
            state_dict = torch.load(base_file)
            file_name = f'pytorch_model-{i}.bin'
            for (name, param) in state_dict.items():
                if (name not in delta_state_dict):
                    # Search the other delta shards for this tensor.
                    # NOTE(review): if no shard contains `name`, the addition
                    # below raises KeyError -- presumably deltas always cover
                    # every base tensor; confirm.
                    for delta_file in delta_files:
                        delta_state_dict = torch.load(delta_file)
                        gc.collect()
                        if (name in delta_state_dict):
                            break
                state_dict[name] += delta_state_dict[name]
                weight_map[name] = file_name
                total_size += (param.numel() * param.element_size())
            gc.collect()
            torch.save(state_dict, os.path.join(target_model_path, file_name))
        # Index file so sharded weights can be loaded back by name.
        with open(os.path.join(target_model_path, 'pytorch_model.bin.index.json'), 'w') as f:
            json.dump({'weight_map': weight_map, 'metadata': {'total_size': total_size}}, f)
    print(f'Saving the target model to {target_model_path}')
    base_tokenizer.save_pretrained(target_model_path)
    base_config.save_pretrained(target_model_path)
class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT.

    BUGFIX: `vocab_size` is now a property. `get_vocab` iterates
    `range(self.vocab_size)`, which raised TypeError while `vocab_size` was a
    plain method (a bound method is not an int).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Split `text` into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Map a piece string to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its piece string."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize a list of pieces into plain text."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Format as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`.

        Returns a 1-tuple with the output path, or None when the directory
        is invalid (an error is logged).
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def assert_params_all_zeros(module) -> bool:
    """Report whether `module`'s weight (and bias, when present) are all zero.

    A module without a bias attribute, or with bias set to None, is judged
    on its weight alone.
    """
    weight = module.weight.data
    weight_is_zero = weight.allclose(weight.new_zeros(weight.size()))
    bias = getattr(module, 'bias', None)
    if bias is None:
        return weight_is_zero
    bias_values = bias.data
    bias_is_zero = bias_values.allclose(bias_values.new_zeros(bias_values.size()))
    return weight_is_zero and bias_is_zero
def main():
    """CLI entry point: convert a detectron checkpoint's keys and save it."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    opts = parser.parse_args()
    convert(opts.src, opts.dst)
class TestMetrics(unittest.TestCase):
    """Smoke test for the tensorflow preprocess transform registry."""

    def test_tensorflow_2(self):
        """Resize a 256x256x1 image to 224, random-crop to 128, check the shape."""
        image = np.ones([256, 256, 1])
        resize_kwargs = {'size': [224, 224]}
        transforms = TRANSFORMS(framework='tensorflow', process='preprocess')
        resize = transforms['Resize'](**resize_kwargs)
        random_crop_kwargs = {'size': 128}
        random_crop = transforms['RandomCrop'](**random_crop_kwargs)
        transform_list = [resize, random_crop]
        compose = transforms['Compose'](transform_list)
        # Transforms operate on (image, label) pairs; the label is unused here.
        image_result = compose((image, None))
        self.assertEqual(image_result[0].shape, (128, 128))
def virno():
    """Build the 'virno' colormap by joining viridis with reversed inferno.

    The first 128 viridis samples (trimmed and reversed) are stacked on top
    of a trimmed, reversed slice of inferno_r.
    """
    lower = pl.cm.viridis(np.linspace(0.0, 1, 128))
    upper = pl.cm.inferno_r(np.linspace(0.0, 1, 128))
    # Trim the extremes of each half before joining so the seam is smooth.
    stacked = np.vstack((lower[5:][::-1], upper[12:99][::-1]))
    return mcolors.LinearSegmentedColormap.from_list('virno', stacked)
def set_logger(ckpt_dir, seed, log_dir=None):
    """Configure and return a per-seed logger writing to console and a file.

    The log file is `<seed>.log` under `ckpt_dir` (or `ckpt_dir/log_dir`,
    which is created if needed). Existing handlers for this seed are removed
    first so repeated calls do not duplicate output.
    """
    logger = logging.getLogger(str(seed))
    logger.propagate = False
    # Drop stale handlers left over from earlier runs with the same seed.
    for stale in list(logger.handlers):
        logger.removeHandler(stale)
    console = logging.StreamHandler()
    console.setFormatter(LoggingFormatter())
    logger.addHandler(console)
    if log_dir is None:
        target_dir = ckpt_dir
    else:
        target_dir = os.path.join(ckpt_dir, log_dir)
        os.makedirs(target_dir, exist_ok=True)
    logger.addHandler(logging.FileHandler(os.path.join(target_dir, '{}.log'.format(seed)), mode='a+'))
    logger.setLevel(logging.DEBUG)
    addLoggingLevel('SETTINGS', levelNum=logging.DEBUG)
    return logger
def learn(env, policy_fn, *, timesteps_per_actorbatch, clip_param, entcoeff, optim_epochs, optim_stepsize, optim_batchsize, gamma, lam, max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, callback=None, adam_epsilon=1e-05, schedule='constant'):
    """Train a policy on `env` with PPO (clipped surrogate objective).

    Exactly one of max_timesteps / max_episodes / max_iters / max_seconds
    must be positive; training stops when that budget is exhausted.
    Returns the trained policy `pi`.
    """
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_fn('pi', ob_space, ac_space)
    # Frozen snapshot of the policy used in the PPO probability ratio.
    oldpi = policy_fn('oldpi', ob_space, ac_space)
    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # advantage estimates
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # empirical returns
    # Multiplier that anneals the learning rate and clip range over training.
    lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[])
    clip_param = (clip_param * lrmult)
    ob = U.get_placeholder_cached(name='ob')
    ac = pi.pdtype.sample_placeholder([None])
    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    pol_entpen = ((- entcoeff) * meanent)  # entropy bonus, as a penalty term
    # PPO clipped surrogate objective.
    ratio = tf.exp((pi.pd.logp(ac) - oldpi.pd.logp(ac)))
    surr1 = (ratio * atarg)
    surr2 = (tf.clip_by_value(ratio, (1.0 - clip_param), (1.0 + clip_param)) * atarg)
    pol_surr = (- tf.reduce_mean(tf.minimum(surr1, surr2)))
    vf_loss = tf.reduce_mean(tf.square((pi.vpred - ret)))
    total_loss = ((pol_surr + pol_entpen) + vf_loss)
    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
    loss_names = ['pol_surr', 'pol_entpen', 'vf_loss', 'kl', 'ent']
    var_list = pi.get_trainable_variables()
    lossandgrad = U.function([ob, ac, atarg, ret, lrmult], (losses + [U.flatgrad(total_loss, var_list)]))
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    # Op that copies the current policy weights into oldpi.
    assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
    compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
    U.initialize()
    adam.sync()
    seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True)
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=100)  # rolling window of episode lengths
    rewbuffer = deque(maxlen=100)  # rolling window of episode rewards
    assert (sum([(max_iters > 0), (max_timesteps > 0), (max_episodes > 0), (max_seconds > 0)]) == 1), 'Only one time constraint permitted'
    while True:
        if callback:
            callback(locals(), globals())
        if (max_timesteps and (timesteps_so_far >= max_timesteps)):
            break
        elif (max_episodes and (episodes_so_far >= max_episodes)):
            break
        elif (max_iters and (iters_so_far >= max_iters)):
            break
        elif (max_seconds and ((time.time() - tstart) >= max_seconds)):
            break
        if (schedule == 'constant'):
            cur_lrmult = 1.0
        elif (schedule == 'linear'):
            cur_lrmult = max((1.0 - (float(timesteps_so_far) / max_timesteps)), 0)
        else:
            raise NotImplementedError
        logger.log((' Iteration %i ' % iters_so_far))
        seg = seg_gen.__next__()
        add_vtarg_and_adv(seg, gamma, lam)
        (ob, ac, atarg, tdlamret) = (seg['ob'], seg['ac'], seg['adv'], seg['tdlamret'])
        vpredbefore = seg['vpred']  # value predictions before this update
        # Standardise advantages within the batch.
        atarg = ((atarg - atarg.mean()) / atarg.std())
        d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=(not pi.recurrent))
        optim_batchsize = (optim_batchsize or ob.shape[0])
        if hasattr(pi, 'ob_rms'):
            pi.ob_rms.update(ob)  # refresh observation-normalisation stats
        assign_old_eq_new()  # snapshot pi into oldpi before optimising
        logger.log('Optimizing...')
        logger.log(fmt_row(13, loss_names))
        for _ in range(optim_epochs):
            losses = []
            for batch in d.iterate_once(optim_batchsize):
                (*newlosses, g) = lossandgrad(batch['ob'], batch['ac'], batch['atarg'], batch['vtarg'], cur_lrmult)
                adam.update(g, (optim_stepsize * cur_lrmult))
                losses.append(newlosses)
            logger.log(fmt_row(13, np.mean(losses, axis=0)))
        logger.log('Evaluating losses...')
        losses = []
        for batch in d.iterate_once(optim_batchsize):
            newlosses = compute_losses(batch['ob'], batch['ac'], batch['atarg'], batch['vtarg'], cur_lrmult)
            losses.append(newlosses)
        (meanlosses, _, _) = mpi_moments(losses, axis=0)
        logger.log(fmt_row(13, meanlosses))
        for (lossval, name) in zipsame(meanlosses, loss_names):
            logger.record_tabular(('loss_' + name), lossval)
        logger.record_tabular('ev_tdlam_before', explained_variance(vpredbefore, tdlamret))
        # Aggregate episode stats across MPI workers.
        lrlocal = (seg['ep_lens'], seg['ep_rets'])
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
        (lens, rews) = map(flatten_lists, zip(*listoflrpairs))
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)
        logger.record_tabular('EpLenMean', np.mean(lenbuffer))
        logger.record_tabular('EpRewMean', np.mean(rewbuffer))
        logger.record_tabular('EpThisIter', len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1
        logger.record_tabular('EpisodesSoFar', episodes_so_far)
        logger.record_tabular('TimestepsSoFar', timesteps_so_far)
        logger.record_tabular('TimeElapsed', (time.time() - tstart))
        if (MPI.COMM_WORLD.Get_rank() == 0):
            logger.dump_tabular()
    return pi
def one_hot_from_int(int_or_list, batch_size=1):
    """Build a (batch_size, NUM_CLASSES) one-hot float32 array.

    Accepts a single class index or a list of indices; a lone index (or a
    one-element list) is broadcast across the whole batch when batch_size > 1.
    """
    if isinstance(int_or_list, int):
        int_or_list = [int_or_list]
    if len(int_or_list) == 1 and batch_size > 1:
        # Broadcast the single index to every row of the batch.
        int_or_list = [int_or_list[0]] * batch_size
    assert batch_size == len(int_or_list)
    one_hot = np.zeros((batch_size, NUM_CLASSES), dtype=np.float32)
    # Vectorized fill: row i gets a 1 at column int_or_list[i].
    one_hot[np.arange(batch_size), int_or_list] = 1.0
    return one_hot
def get_enum_name_by_value():
    """Map every caffe LayerType enum number to its symbolic name."""
    descriptor = caffe_pb2.LayerParameter.LayerType.DESCRIPTOR
    return {value.number: name for name, value in descriptor.values_by_name.items()}
def _build_q_model_and_distribution(policy: Policy, obs_space: gym.spaces.Space, action_space: gym.spaces.Space, config: TrainerConfigDict) -> Tuple[ModelV2, TorchDistributionWrapper]:
    """Create the Q-model for `policy` paired with a categorical action distribution."""
    model = _build_q_models(policy, obs_space, action_space, config)
    return model, TorchCategorical
def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
    """Worker process: round-trip a torch.compile'd model through save/load.

    Reads (init_dict, model_class) from `in_queue`, saves the compiled model to
    a temporary directory, reloads it, and reports any traceback (or None on
    success) via `out_queue`.
    """
    error = None
    try:
        init_dict, model_class = in_queue.get(timeout=timeout)
        model = model_class(**init_dict)
        model.to(torch_device)
        model = torch.compile(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            reloaded = model_class.from_pretrained(tmpdirname)
            reloaded.to(torch_device)
            # The reload must produce the original (uncompiled) class.
            assert reloaded.__class__ == model_class
    except Exception:
        error = f'{traceback.format_exc()}'
    out_queue.put({'error': error}, timeout=timeout)
    out_queue.join()
class UIScreenClassifier(pl.LightningModule):
    """Lightning classifier for UI screen categories.

    Supported backbones (``arch``):
      * ``'resnet50'``      -- ResNet-50 with a Dropout+Linear head.
      * ``'resnet50_conv'`` -- same trunk, classified by a conv head whose
                               spatial logits are mean-pooled.
      * ``'vgg16'``         -- VGG-16 (BN) with its last classifier layer replaced.

    Labels may be 1-D hard class indices or 2-D targets (soft distributions or
    multi-hot); each step method branches on ``labels.shape``.
    """

    def __init__(self, num_classes=20, dropout_block=0.0, dropout=0.2, lr=5e-05, soft_labels=True, stochastic_depth_p=0.2, arch='resnet50'):
        # num_classes: size of the classifier output.
        # dropout_block: dropout injected into the backbone's norm blocks.
        # dropout: dropout applied right before the final linear/conv head.
        # lr: AdamW learning rate (read back via self.hparams).
        # soft_labels: with 2-D targets, use soft cross-entropy instead of BCE.
        # stochastic_depth_p: residual-block drop probability (resnet only).
        # arch: one of 'resnet50', 'resnet50_conv', 'vgg16'.
        super(UIScreenClassifier, self).__init__()
        self.save_hyperparameters()
        if ((arch == 'resnet50') or (arch == 'resnet50_conv')):
            model = models.resnet50(pretrained=False)
            # Project helpers: swap default BN for a dropout-capable variant and
            # make residual blocks stochastic-depth capable.
            replace_default_bn_with_custom(model, dropout=dropout_block)
            replace_res_blocks_with_stochastic(model, stochastic_depth_p=stochastic_depth_p)
            model.fc = nn.Sequential(nn.Dropout(dropout), nn.Linear(model.fc.in_features, num_classes))
            self.model = model
            # Conv head used only by the 'resnet50_conv' forward path; 2048 is
            # the channel width of ResNet-50's last stage.
            self.conv_cls = nn.Sequential(nn.InstanceNorm2d(2048), nn.Dropout2d(dropout), nn.Conv2d(2048, num_classes, 3, stride=1, padding=1))
        elif (arch == 'vgg16'):
            model = models.vgg16_bn(pretrained=False, dropout=dropout)
            replace_default_bn_with_custom(model, dropout=dropout_block)
            model.classifier[(- 1)] = nn.Linear(4096, num_classes)
            self.model = model

    def forward(self, image):
        """Return class logits of shape (batch, num_classes)."""
        if ((self.hparams.arch == 'resnet50') or (self.hparams.arch == 'vgg16')):
            return self.model(image)
        elif (self.hparams.arch == 'resnet50_conv'):
            # Run the ResNet trunk stage by stage so we can branch into the
            # conv head before global pooling / fc.
            x = self.model.conv1(image)
            x = self.model.bn1(x)
            x = self.model.relu(x)
            x = self.model.maxpool(x)
            x = self.model.layer1(x)
            x = self.model.layer2(x)
            x = self.model.layer3(x)
            x = self.model.layer4(x)
            x = self.conv_cls(x)
            batch_size = x.shape[0]
            # Mean-pool the per-location logits over the spatial grid.
            res = x.view(batch_size, self.hparams.num_classes, (- 1)).mean(dim=(- 1))
            return res

    def training_step(self, batch, batch_idx):
        image = batch['image']
        labels = batch['label']
        # Samples are forwarded one at a time -- presumably because images in a
        # batch can differ in spatial size; TODO confirm with the dataloader.
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if (len(labels.shape) == 2):
            # 2-D targets: soft cross-entropy over distributions, or BCE.
            if self.hparams.soft_labels:
                loss = F.cross_entropy(out, labels.float())
            else:
                loss = F.binary_cross_entropy_with_logits(out, labels)
        else:
            loss = F.cross_entropy(out, labels)
        return loss

    def validation_step(self, batch, batch_idx):
        image = batch['image']
        labels = batch['label']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if (len(labels.shape) == 2):
            # 2-D targets: keep raw logits; scored with BCE at epoch end.
            return (out, labels)
        else:
            # Hard targets: keep only the argmax prediction.
            (_, inds) = out.max(dim=(- 1))
            return (inds, labels)

    def validation_epoch_end(self, outputs):
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        if (len(all_labels.shape) == 2):
            bce_score = F.binary_cross_entropy_with_logits(all_outs, all_labels)
            score_dict = {'bce': bce_score}
            print(score_dict)
            self.log_dict(score_dict)
        else:
            all_outs = all_outs.detach().cpu().long().numpy()
            all_labels = all_labels.detach().cpu().long().numpy()
            # Hard-label case: report all three F1 aggregations.
            macro_score = f1_score(all_labels, all_outs, average='macro')
            micro_score = f1_score(all_labels, all_outs, average='micro')
            weighted_score = f1_score(all_labels, all_outs, average='weighted')
            score_dict = {'f1_macro': macro_score, 'f1_micro': micro_score, 'f1_weighted': weighted_score}
            print(score_dict)
            self.log_dict(score_dict)

    def test_step(self, batch, batch_idx):
        # Same protocol as validation_step.
        image = batch['image']
        labels = batch['label']
        outs = [self.forward(image[i].unsqueeze(0)) for i in range(len(image))]
        out = torch.cat(outs, dim=0)
        if (len(labels.shape) == 2):
            return (out, labels)
        else:
            (_, inds) = out.max(dim=(- 1))
            return (inds, labels)

    def test_epoch_end(self, outputs):
        # Same scoring as validation_epoch_end, but in the hard-label case the
        # scores are returned instead of logged.
        all_outs = torch.cat([o[0] for o in outputs], dim=0)
        all_labels = torch.cat([o[1] for o in outputs], dim=0)
        if (len(all_labels.shape) == 2):
            bce_score = F.binary_cross_entropy_with_logits(all_outs, all_labels)
            score_dict = {'bce': bce_score}
            print(score_dict)
            self.log_dict(score_dict)
        else:
            all_outs = all_outs.detach().cpu().long().numpy()
            all_labels = all_labels.detach().cpu().long().numpy()
            macro_score = f1_score(all_labels, all_outs, average='macro')
            micro_score = f1_score(all_labels, all_outs, average='micro')
            weighted_score = f1_score(all_labels, all_outs, average='weighted')
            score_dict = {'f1_macro': macro_score, 'f1_micro': micro_score, 'f1_weighted': weighted_score}
            print(score_dict)
            return score_dict

    def configure_optimizers(self):
        # AdamW over trainable parameters only, at the stored learning rate.
        optimizer = torch.optim.AdamW([p for p in self.parameters() if p.requires_grad], lr=self.hparams.lr)
        return optimizer
class LambdaMap(LambdaBase):
    """Applies the stored `lambda_func` element-wise to the prepared inputs."""

    def forward(self, input):
        # Prepare the raw input first, then map the callable over each element.
        prepared = self.forward_prepare(input)
        return [self.lambda_func(item) for item in prepared]
def load_sub_model(library_name: str, class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, pipeline_class: Any, torch_dtype: torch.dtype, provider: Any, sess_options: Any, device_map: Optional[Union[(Dict[(str, torch.device)], str)]], max_memory: Optional[Dict[(Union[(int, str)], Union[(int, str)])]], offload_folder: Optional[Union[(str, os.PathLike)]], offload_state_dict: bool, model_variants: Dict[(str, str)], name: str, from_flax: bool, variant: str, low_cpu_mem_usage: bool, cached_folder: Union[(str, os.PathLike)]):
    """Load one pipeline sub-model (e.g. unet, text encoder, scheduler).

    Resolves the component's class and its `from_pretrained`-style loading
    method, assembles the loading kwargs (dtype, ONNX provider, device map,
    offloading, variant, ...), and loads the component from
    `cached_folder/name` (or `cached_folder` itself when no subfolder exists).

    Raises:
        ValueError: when no loading method can be resolved for the class.
        ImportError: when a weight variant is requested with a too-old
            `transformers` version.
    """
    (class_obj, class_candidates) = get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module)
    load_method_name = None
    # Find which importable base class this component subclasses; that base
    # determines the name of its loading method (importable_classes maps
    # class name -> (class, load_method_name)).
    for (class_name, class_candidate) in class_candidates.items():
        if ((class_candidate is not None) and issubclass(class_obj, class_candidate)):
            load_method_name = importable_classes[class_name][1]
    if (load_method_name is None):
        # Dummy placeholder modules exist when optional deps are missing;
        # instantiating one raises its own, more helpful, import error.
        none_module = class_obj.__module__
        is_dummy_path = (none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(TRANSFORMERS_DUMMY_MODULES_FOLDER))
        if (is_dummy_path and ('dummy' in none_module)):
            class_obj()
        raise ValueError(f'The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}.')
    load_method = getattr(class_obj, load_method_name)
    loading_kwargs = {}
    if issubclass(class_obj, torch.nn.Module):
        loading_kwargs['torch_dtype'] = torch_dtype
    if issubclass(class_obj, diffusers.OnnxRuntimeModel):
        # ONNX models take an execution provider and session options instead.
        loading_kwargs['provider'] = provider
        loading_kwargs['sess_options'] = sess_options
    is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
    if is_transformers_available():
        transformers_version = version.parse(version.parse(transformers.__version__).base_version)
    else:
        transformers_version = 'N/A'
    # device_map/offloading kwargs require transformers >= 4.20.0.
    is_transformers_model = (is_transformers_available() and issubclass(class_obj, PreTrainedModel) and (transformers_version >= version.parse('4.20.0')))
    if (is_diffusers_model or is_transformers_model):
        loading_kwargs['device_map'] = device_map
        loading_kwargs['max_memory'] = max_memory
        loading_kwargs['offload_folder'] = offload_folder
        loading_kwargs['offload_state_dict'] = offload_state_dict
        # Variant was collected per-component earlier; pop() consumes it.
        loading_kwargs['variant'] = model_variants.pop(name, None)
        if from_flax:
            loading_kwargs['from_flax'] = True
        if (is_transformers_model and (loading_kwargs['variant'] is not None) and (transformers_version < version.parse('4.27.0'))):
            raise ImportError(f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0")
        elif (is_transformers_model and (loading_kwargs['variant'] is None)):
            # Older transformers do not accept variant=None; drop the key.
            loading_kwargs.pop('variant')
        if (not (from_flax and is_transformers_model)):
            loading_kwargs['low_cpu_mem_usage'] = low_cpu_mem_usage
        else:
            # Flax->PyTorch conversion in transformers does not support
            # low_cpu_mem_usage.
            loading_kwargs['low_cpu_mem_usage'] = False
    if os.path.isdir(os.path.join(cached_folder, name)):
        loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
    else:
        loaded_sub_model = load_method(cached_folder, **loading_kwargs)
    return loaded_sub_model
# NOTE(review): the decorators below were garbled by extraction into bare
# tuples ("() ('--load_model', default=False) ..."), which is a SyntaxError.
# They are reconstructed as click options (the option args match click.option's
# signature exactly); assumes `click` is imported at the top of this file --
# confirm against the original module header.
@click.command()
@click.option('--load_model', default=False)
@click.option('--data_path', default='./data/diabetes-vfl-1.csv')
def run_client(load_model, data_path):
    """Vertical-FL client: PSI-align the local table, then fit/predict.

    load_model: when True, resume from the saved client/server checkpoints.
    data_path: CSV with an 'ID' column and an 'Outcome' label column.
    """
    init_fl_context(1)
    df_train = pd.read_csv(data_path)
    df_train['ID'] = df_train['ID'].astype(str)
    psi = PSI()
    # Keep only rows whose IDs exist on all FL parties.
    intersection = psi.get_intersection(list(df_train['ID']))
    df_train = df_train[df_train['ID'].isin(intersection)]
    # `axis` must be passed by keyword: positional axis for DataFrame.drop was
    # deprecated in pandas 1.x and removed in 2.0.
    df_x = df_train.drop('Outcome', axis=1)
    df_y = df_train['Outcome']
    x = df_x.to_numpy(dtype='float32')
    y = np.expand_dims(df_y.to_numpy(dtype='float32'), axis=1)
    loss_fn = nn.BCELoss()
    if load_model:
        # Resume: reload the client model and the saved server model.
        model = torch.load('/tmp/pytorch_client_model_1.pt')
        ppl = Estimator.from_torch(client_model=model, loss_fn=loss_fn, optimizer_cls=torch.optim.SGD, optimizer_args={'lr': 0.0001}, server_model_path='/tmp/pytorch_server_model', client_model_path='/tmp/pytorch_client_model_1.pt')
        ppl.load_server_model('/tmp/pytorch_server_model')
        response = ppl.fit(x, y, 5)
    else:
        # Fresh run: build both halves of the split model.
        model = LocalModel(len(df_x.columns))
        server_model = ServerModel()
        ppl = Estimator.from_torch(client_model=model, loss_fn=loss_fn, optimizer_cls=torch.optim.SGD, optimizer_args={'lr': 0.0001}, server_model=server_model, server_model_path='/tmp/pytorch_server_model', client_model_path='/tmp/pytorch_client_model_1.pt')
        response = ppl.fit(x, y, 5)
    result = ppl.predict(x)
    print(result[:5])
def test_generate_motion_primitives():
    """Smoke-test motion-primitive generation for the default car, then visualize."""
    vehicle_params = VehicleParameters.default_car()
    geometry = VehicleGeometry.default_car()
    # 3x3 grid over velocity [0, 50] and steering [-delta_max, delta_max].
    gen_params = MPGParam(
        dt=Decimal('.2'),
        n_steps=3,
        velocity=(0, 50, 3),
        steering=(-vehicle_params.delta_max, vehicle_params.delta_max, 3),
    )
    dynamics = BicycleDynamics(vg=geometry, vp=vehicle_params)
    generator = MotionPrimitivesGenerator(
        param=gen_params,
        vehicle_dynamics=dynamics.successor_ivp,
        vehicle_param=vehicle_params,
    )
    trajectories = generator.generate()
    _viz(trajectories, 'MotionPrimitivesGenerator')
class Extra(Component):
    """Component that records its constructor arguments in `self.fields`."""

    def __init__(self, display_data=None, output_names=None, output_indexes=None, main_effects=None, hierarchical_values=None, clustering=None):
        # Snapshot every argument exactly as passed, minus `self`.
        captured = dict(locals())
        captured.pop('self')
        self.fields = captured
class LookupTable(Layer):
    """BigDL embedding-lookup layer: maps integer indices to dense vectors.

    n_index: vocabulary size (number of distinct indices).
    n_output: embedding dimension.
    padding_value / max_norm / norm_type / should_scale_grad_by_freq /
    wRegularizer: forwarded verbatim to the JVM-side constructor.
    NOTE(review): parameter semantics inferred from names and BigDL
    conventions -- confirm against the BigDL LookupTable documentation.
    """

    def __init__(self, n_index, n_output, padding_value=0.0, max_norm=DOUBLEMAX, norm_type=2.0, should_scale_grad_by_freq=False, wRegularizer=None, bigdl_type='float'):
        # Arguments are marshalled positionally to the JVM constructor; do not
        # reorder them.
        super(LookupTable, self).__init__(None, bigdl_type, n_index, n_output, padding_value, max_norm, norm_type, should_scale_grad_by_freq, wRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        # Sets the weight/bias initializers on the JVM layer; returns self so
        # calls can be chained.
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
def union(x, y, parents, sizes):
    """Union-by-size merge of the disjoint-set trees containing x and y."""
    root_a = find(x, parents)
    root_b = find(y, parents)
    if root_a == root_b:
        return  # already in the same set
    # Ensure root_a is the larger tree (ties attach x's root under y's root,
    # matching the original branch order).
    if sizes[root_a] <= sizes[root_b]:
        root_a, root_b = root_b, root_a
    parents[root_b] = root_a
    sizes[root_a] += sizes[root_b]
class FSMTTokenizer(PreTrainedTokenizer):
    """FSMT (FairSeq MT) tokenizer: Moses pre-processing followed by BPE.

    Holds two vocabularies -- a source-language encoder and a target-language
    decoder -- plus a BPE merges table, and caches per-language Moses
    normalizer/tokenizer/detokenizer instances.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, langs=None, src_vocab_file=None, tgt_vocab_file=None, merges_file=None, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, **kwargs)
        self.src_vocab_file = src_vocab_file
        self.tgt_vocab_file = tgt_vocab_file
        self.merges_file = merges_file
        # Per-language caches for the (slow to construct) Moses components.
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        self.cache_moses_detokenizer = dict()
        if (langs and (len(langs) == 2)):
            (self.src_lang, self.tgt_lang) = langs
        else:
            raise ValueError(f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. Usually that means that tokenizer can't find a mapping for the given model path in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer.")
        with open(src_vocab_file, encoding='utf-8') as src_vocab_handle:
            self.encoder = json.load(src_vocab_handle)
        with open(tgt_vocab_file, encoding='utf-8') as tgt_vocab_handle:
            tgt_vocab = json.load(tgt_vocab_handle)
            # Decoder maps id -> token (inverse of the target vocab file).
            self.decoder = {v: k for (k, v) in tgt_vocab.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Lower rank == merge applied earlier.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def get_vocab(self) -> Dict[str, int]:
        return self.get_src_vocab()

    @property
    def vocab_size(self) -> int:
        # NOTE(review): @property restored (dropped during extraction); without
        # it this returned a bound method instead of an int.
        return self.src_vocab_size

    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with a cached MosesPunctNormalizer."""
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        return self.cache_moses_punct_normalizer[lang].normalize(text)

    def moses_tokenize(self, text, lang):
        """Tokenize with a cached MosesTokenizer for `lang`."""
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        return self.cache_moses_tokenizer[lang].tokenize(text, aggressive_dash_splits=True, return_str=False, escape=True)

    def moses_detokenize(self, tokens, lang):
        """Detokenize with a cached MosesDetokenizer for `lang`.

        Fixed: the original checked/populated `cache_moses_tokenizer` (the
        wrong cache -- `cache_moses_detokenizer` created in __init__ was never
        used) and always built the detokenizer for `self.tgt_lang` regardless
        of the `lang` argument.
        """
        if (lang not in self.cache_moses_detokenizer):
            self.cache_moses_detokenizer[lang] = sm.MosesDetokenizer(lang=lang)
        return self.cache_moses_detokenizer[lang].detokenize(tokens)

    def moses_pipeline(self, text, lang):
        """Full Moses cleanup: unicode punct, normalization, non-printing chars."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    @property
    def src_vocab_size(self):
        # NOTE(review): @property restored (see vocab_size).
        return len(self.encoder)

    @property
    def tgt_vocab_size(self):
        # NOTE(review): @property restored (see vocab_size).
        return len(self.decoder)

    def get_src_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def get_tgt_vocab(self):
        return dict(self.decoder, **self.added_tokens_decoder)

    def bpe(self, token):
        """Apply byte-pair merges to `token`, returning space-joined BPE units."""
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Merge the lowest-ranked (earliest learned) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if (word == '\n </w>'):
            # Special-case newline so it round-trips as a single unit.
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        # The `lang` argument is deliberately overridden: tokenization always
        # uses the configured source language.
        lang = self.src_lang
        if bypass_tokenizer:
            text = text.split()
        else:
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE ('</w>' markers) and Moses-detokenize into plain text."""
        tokens = [t.replace(' ', '').replace('</w>', ' ') for t in tokens]
        tokens = ''.join(tokens).split()
        text = self.moses_detokenize(tokens, self.tgt_lang)
        return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Append </s> after each sequence: `A </s>` or `A </s> B </s>`."""
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (token_ids_0 + sep)
        return (((token_ids_0 + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask with 1 marking special tokens."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([0] * len(token_ids_0)) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep)."""
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + sep)) * [0])
        return ((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    # NOTE(review): '@' restored -- extraction left this decorator as a bare call.
    @_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, return_tensors: str='pt', truncation=True, padding='longest', **unused) -> BatchEncoding:
        """Tokenize source (and optionally target) texts into a model-ready batch."""
        if (type(src_texts) is not list):
            raise ValueError('src_texts is expected to be a list')
        if ('' in src_texts):
            raise ValueError(f'found empty string in src_texts: {src_texts}')
        tokenizer_kwargs = dict(add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding)
        model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is not None):
            tokenizer_kwargs['max_length'] = max_target_length
        model_inputs['labels'] = self(tgt_texts, **tokenizer_kwargs)['input_ids']
        return model_inputs

    def save_vocabulary(self, save_directory):
        """Write src/tgt vocab JSONs and the merges file into `save_directory`."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        src_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['src_vocab_file'])
        tgt_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['tgt_vocab_file'])
        merges_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(src_vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
            # Re-invert the decoder back to token -> id before saving.
            tgt_vocab = {v: k for (k, v) in self.decoder.items()}
            f.write(json.dumps(tgt_vocab, ensure_ascii=False))
        index = 0
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merges_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (src_vocab_file, tgt_vocab_file, merges_file)
class baseline(nn.Module):
    """Encoder/decoder baseline built around a named backbone."""

    def __init__(self, backbone, c=64):
        super(baseline, self).__init__()
        self.name = backbone
        self.encoder = Encoder(backbone, c)
        self.decoder = baseU(backbone, c)

    def forward(self, X, phase='te'):
        # Encode first, then let the decoder build the phase-dependent outputs.
        features = self.encoder(X)
        return self.decoder(features, phase)
class PDF(DocumentParser):
    """Extracts text (or HTML) from a PDF file via pdfminer."""

    def __init__(self, path, output='txt'):
        # output: 'txt' for plain text, anything ending in 'html' for HTML.
        self.content = self._parse(path, format=output)

    def _parse(self, path, *args, **kwargs):
        # pdfminer is imported lazily so the dependency is only required when
        # a PDF is actually parsed.
        from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
        from pdfminer.pdfpage import PDFPage
        from pdfminer.converter import TextConverter, HTMLConverter
        from pdfminer.layout import LAParams
        f = None
        try:
            m = PDFResourceManager()
            s = StringIO()
            converter_cls = HTMLConverter if kwargs.get('format', 'txt').endswith('html') else TextConverter
            p = converter_cls(m, s, codec='utf-8', laparams=LAParams())
            interpreter = PDFPageInterpreter(m, p)
            f = self._open(path)
            for page in PDFPage.get_pages(f, maxpages=0, password=''):
                interpreter.process_page(page)
        except Exception as e:
            # Chain the original error so the traceback stays diagnosable.
            raise PDFError(str(e)) from e
        finally:
            # The original leaked the input stream when parsing raised; always
            # close it once we are done (or failed).
            if f is not None:
                f.close()
        s = s.getvalue()
        s = decode_utf8(s)
        s = s.strip()
        # Re-join words hyphenated across line breaks.
        s = re.sub('([a-z])\\-\\n', '\\1', s)
        # Preserve paragraph breaks while flattening intra-paragraph newlines.
        s = s.replace('\n\n', '<!-- #p -->')
        s = s.replace('\n', ' ')
        s = s.replace('<!-- #p -->', '\n\n')
        s = collapse_spaces(s)
        return s
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Assemble the train dataset and collator for supervised fine-tuning.

    Returns a dict with 'train_dataset', 'eval_dataset' (always None here) and
    'data_collator', matching the HF Trainer's expected kwargs.
    """
    return {
        'train_dataset': SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path),
        'eval_dataset': None,
        'data_collator': DataCollatorForSupervisedDataset(tokenizer=tokenizer),
    }
class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three chained dense blocks with a scaled skip."""

    def __init__(self, num_feat, num_grow_ch=32):
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x):
        residual = self.rdb3(self.rdb2(self.rdb1(x)))
        # Scale the residual branch by 0.2 before the identity skip.
        return x + residual * 0.2
def subset2dataset(subset):
    """Return the base dataset beneath any chain of Subset wrappers.

    Non-Subset inputs are returned unchanged.
    """
    if isinstance(subset, Subset):
        # Subsets may wrap other Subsets; recurse until we hit the real dataset.
        return subset2dataset(subset.dataset)
    return subset
def prologue_init(args, input_args, args_dict):
    """Prepare the experiment's output folders and snapshot code/config.

    Builds a unique tag from the run's hyper-parameters, creates the output
    directory tree (fresh run) or reuses `args.fd_exp` (evaluation run), then
    for fresh runs dumps the yaml config, the launch command, and an archive
    of the code.

    Returns:
        (OUTD, OUTD_TLB, SELFLEARNEDD): experiment dir, its 'tlb' subdir path,
        and its 'self_learned' subdir.
    """
    output = dict()  # NOTE(review): unused in this function.
    subset_target = 'train'  # NOTE(review): unused in this function.
    (placement_scr, parent, exp_name) = (None, None, None)
    placement_node = None
    # Evaluation mode iff a previous experiment folder was supplied.
    # NOTE(review): `eval` shadows the builtin; kept byte-identical here.
    eval = (not (args.fd_exp in [None, '']))
    # Compact tag encoding the main hyper-parameters of the run.
    tag = [('id', args.exp_id), ('tsk', args.task), ('ds', args.dataset), ('sd', args.MYSEED), ('ecd', args.model['encoder_name']), ('st', args.split), ('fl', args.fold), ('epx', args.max_epochs), ('bsz', args.batch_size), ('lr', args.optimizer['opt__lr']), ('scale_in', args.model['scale_in']), ('pxl_sup', args.pxl_sup)]
    tag = [(el[0], str(el[1])) for el in tag]
    tag = '-'.join(['_'.join(el) for el in tag])
    if (args.task == constants.F_CL):
        # For the F_CL task, append which auxiliary losses/constraints are on.
        tag2 = []
        if args.sl_fc:
            tag2.append(('sl_fc', 'yes'))
        if args.crf_fc:
            tag2.append(('crf_fc', 'yes'))
        if args.entropy_fc:
            tag2.append(('entropy_fc', 'yes'))
        if args.partuncertentro_lc:
            tag2.append(('partuncertentro_lc', 'yes'))
        if args.partcert_lc:
            tag2.append(('partcert_lc', 'yes'))
        if args.partcert_lc_elb:
            tag2.append(('partcert_lc_elb', 'yes'))
        if args.partcert_lc_logit:
            tag2.append(('partcert_lc_logit', 'yes'))
        if args.min_sizeneg_lc:
            tag2.append(('min_sizeneg_lc', 'yes'))
        if args.max_sizepos_lc:
            tag2.append(('max_sizepos_lc', 'yes'))
        if args.max_sizepos_fc:
            tag2.append(('max_sizepos_fc', 'yes'))
        if tag2:
            tag2 = [(el[0], str(el[1])) for el in tag2]
            tag2 = '-'.join(['_'.join(el) for el in tag2])
            tag = '{}-{}'.format(tag, tag2)
    parent_lv = 'exps'
    if (args.debug_subfolder != ''):
        parent_lv = join(parent_lv, args.debug_subfolder)
    if (not eval):
        OUTD = join(root_dir, parent_lv, tag)
    else:
        # Evaluation: reuse the previously created experiment folder.
        OUTD = join(root_dir, args.fd_exp)
    OUTD = expanduser(OUTD)
    SELFLEARNEDD = join(OUTD, 'self_learned')
    lfolders = [OUTD, SELFLEARNEDD]
    for fdxx in lfolders:
        if (not os.path.exists(fdxx)):
            os.makedirs(fdxx)
    OUTD_TLB = join(OUTD, 'tlb')
    if (not eval):
        # Fresh run: snapshot config, launch command, and the code itself.
        if (not os.path.exists(join(OUTD, 'code/'))):
            os.makedirs(join(OUTD, 'code/'))
        with open(join(OUTD, 'code/', input_args.yaml), 'w') as fyaml:
            # Record where this experiment lives so it can be re-loaded later.
            args_dict['fd_exp'] = join(parent_lv, tag)
            yaml.dump(args_dict, fyaml)
        str_cmd = ('time python ' + ' '.join(sys.argv))
        str_cmd = wrap_command_line(str_cmd)
        with open(join(OUTD, 'code/cmd.sh'), 'w') as frun:
            frun.write('#!/usr/bin/env bash \n')
            frun.write(str_cmd)
        copy_code(join(OUTD, 'code/'), compress=True, verbose=False)
    return (OUTD, OUTD_TLB, SELFLEARNEDD)
class ConvLayer(nn.Module):
    """Conv2d -> (Instance|Batch)Norm -> ReLU building block."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, IN=False):
        super(ConvLayer, self).__init__()
        # Bias is omitted: the following normalization layer supplies the shift.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, groups=groups)
        # IN=True selects per-sample instance norm instead of batch norm.
        self.bn = nn.InstanceNorm2d(out_channels, affine=True) if IN else nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model.

    Compares all graph initializers pairwise, records which duplicates should
    be rewired onto a single surviving tensor, applies the rewiring, and saves
    the result as 'optimized_<name>' next to the input file.

    Args:
        onnx_file_path: path to the ONNX model to optimize.

    Returns:
        Path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(onnx_file_path)
    inits = list(model.graph.initializer)
    # Bytes per element for the TensorProto dtypes we account for:
    # 1=FLOAT, 6=INT32, 7=INT64, 11=DOUBLE.
    bytes_per_elem = {1: 4, 6: 4, 7: 8, 11: 8}
    dup_set = set()          # initializer indices already matched
    dup_map = {}             # surviving name -> list of duplicate names
    ind_to_replace = []      # (duplicate_index, surviving_index) pairs
    total_reduced_size = 0   # bytes saved by deduplication
    for i in range(len(inits)):
        if (i in dup_set):
            continue
        for j in range((i + 1), len(inits)):
            if (j in dup_set):
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if (dtype in bytes_per_elem):
                    mem_size *= bytes_per_elem[dtype]
                else:
                    # Unknown dtype: element count is reported unscaled.
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                dup_map.setdefault(inits[i].name, []).append(inits[j].name)
                ind_to_replace.append((j, i))
    print('total reduced size: ', (((total_reduced_size / 1024) / 1024) / 1024), 'GB')
    _remove_dup_initializers_from_model(model, model, sorted(ind_to_replace))
    optimized_model_file_name = ('optimized_' + model_file_name)
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
def parse_cmd_options(argv):
    """Parse this module's CLI flags, ignoring any unrecognized arguments.

    Args:
        argv: list of command-line tokens (e.g. sys.argv[1:]).

    Returns:
        Namespace with `no_reslink` (bool) and `dropout_rate` (float or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--no_reslink', action='store_true')
    parser.add_argument('--dropout_rate', type=float, default=None)
    # parse_known_args tolerates flags meant for other modules.
    module_opt, _unknown = parser.parse_known_args(argv)
    return module_opt
class CorNet(nn.Module):
    """Stack of CorNetBlocks applied sequentially to refine label logits."""

    def __init__(self, output_size, cornet_dim=1000, n_cornet_blocks=2, **kwargs):
        super(CorNet, self).__init__()
        blocks = [CorNetBlock(cornet_dim, output_size, **kwargs) for _ in range(n_cornet_blocks)]
        self.intlv_layers = nn.ModuleList(blocks)
        # Xavier init for both projection matrices of every block.
        for block in self.intlv_layers:
            nn.init.xavier_uniform_(block.dstbn2cntxt.weight)
            nn.init.xavier_uniform_(block.cntxt2dstbn.weight)

    def forward(self, logits):
        out = logits
        for block in self.intlv_layers:
            out = block(out)
        return out
class TrainerCVRP(TrainerBase):
    """CVRP specialization of TrainerBase (tour-length minimization).

    NOTE(review): several methods below were defined without `self`, which
    made them uncallable on instances; the mixed self/no-self signatures
    indicate `@staticmethod` decorators were stripped during extraction, so
    they are restored here.
    """

    @staticmethod
    def get_reward_name() -> str:
        return 'tour_length'

    @staticmethod
    def is_reward_positive() -> bool:
        # Tour length is a cost, so the reward is not positive.
        return False

    @staticmethod
    def get_observation_type() -> Type[Observation]:
        return Observation

    def init_encoder(self, num_layers, name) -> EncoderBase:
        return CVRPEncoder(num_layers, name)

    def init_decoder(self, name) -> DecoderBase:
        return CVRPDecoder(name)

    @staticmethod
    def generate_problem(key: PRNGKey, problem_size: jnp.int32) -> Array:
        # Delegates to the module-level generate_problem helper.
        return generate_problem(key, problem_size)

    @staticmethod
    def use_augmentations() -> bool:
        return True

    @staticmethod
    def get_augmentations(problem: Array) -> Array:
        # Delegates to the module-level get_augmentations helper.
        return get_augmentations(problem)

    def has_symmetric_starting_points(self) -> bool:
        return False
def one_d_convert(indices, cutoffs, shape_list):
    """Map flat indices to per-layer local offsets using `cutoffs` boundaries.

    NOTE(review): `np.where(item > cutoffs)[0]` yields an *array* of all cutoff
    positions below `item` (not a single layer number), so `str(layer_num)` is
    the string form of that array, and each later item sharing the same key
    overwrites the previous entry. This looks like it was meant to select one
    layer per index -- confirm the intended behavior against the caller.
    `shape_list` is accepted but unused.
    """
    one_d_indices = {}
    for item in indices:
        # All cutoff indices that `item` exceeds (an array -- see note above).
        layer_num = np.where((item > cutoffs))[0]
        # Offset(s) of `item` relative to the selected cutoff(s).
        curr_layer_index = (item - cutoffs[layer_num])
        one_d_indices[str(layer_num)] = curr_layer_index
    return one_d_indices
class edic(dict):
    """dict subclass with set-algebra operators and tensor-aware helpers.

    `&`/`|`/`-` operate on the key sets (see each method for which side's
    values win); the `sub*` family builds filtered copies; `mean`,
    `expand_front` and `broadcast` apply torch operations value-wise.
    """

    def __and__(self, other):
        # Key intersection; values are taken from `other`.
        common = set(self) & set(other)
        return edic({k: other[k] for k in common})

    def __rand__(self, other):
        # Key intersection; values are taken from `self`.
        common = set(other) & set(self)
        return edic({k: self[k] for k in common})

    def __or__(self, other):
        # Merge; on key clashes, entries of `other` win.
        merged = dict(self)
        merged.update(other)
        return edic(merged)

    def __ror__(self, other):
        # Merge; on key clashes, entries of `self` win.
        merged = dict(other)
        merged.update(self)
        return edic(merged)

    def __sub__(self, other):
        # Entries of `self` whose keys are absent from `other`.
        return edic({k: self[k] for k in self if k not in other})

    def __rsub__(self, other):
        # Entries of `other` whose keys are absent from `self`.
        return edic({k: other[k] for k in other if k not in self})

    def isdisjoint(self, other) -> bool:
        """True when the two key sets share no key."""
        return set(self).isdisjoint(set(other))

    def sub(self, it, fn=None):
        """Copy with only the keys in `it`, optionally mapping values with `fn`."""
        if fn is None:
            return edic({k: self[k] for k in it})
        return edic({k: fn(self[k]) for k in it})

    def sub_expand_front(self, it, shape: tc.Size):
        """`sub` that expands each value's shape at the front by `shape`."""
        return self.sub(it, lambda v: expand_front(v, shape=shape))

    def subedic(self, it, fn=None, use_default=False, default=None):
        """Like `sub`, but tolerates missing keys (skipped, or `default`-filled)."""
        out = edic()
        for k in it:
            if k in self:
                v = self[k]
            elif use_default:
                v = default
            else:
                continue
            out[k] = fn(v) if fn is not None else v
        return out

    def sublist(self, it, fn=None, use_default=False, default=None):
        """List of values for the keys in `it` (missing: skipped or `default`)."""
        out = []
        for k in it:
            if k in self:
                v = self[k]
            elif use_default:
                v = default
            else:
                continue
            out.append(fn(v) if fn is not None else v)
        return out

    def key0(self):
        """First key in insertion order."""
        return next(iter(self))

    def value0(self):
        """First value in insertion order."""
        return next(iter(self.values()))

    def item0(self):
        """First (key, value) pair in insertion order."""
        return next(iter(self.items()))

    def mean(self, dim, keepdim: bool=False):
        # Value-wise tensor mean along `dim`.
        out = edic()
        for k, v in self.items():
            out[k] = v.mean(dim, keepdim)
        return out

    def expand_front(self, shape: tc.Size):
        # Value-wise expand: prepend `shape` to each tensor's shape.
        out = edic()
        for k, v in self.items():
            out[k] = v.expand(shape + v.shape)
        return out

    def broadcast(self):
        # Broadcast all tensor values against each other, keeping keys.
        broadcasted = tc.broadcast_tensors(*self.values())
        return edic(zip(self.keys(), broadcasted))
class AvgMeter(object):
    """Running (weighted) average tracker; NaN updates are clamped to 1e6."""
    name = 'No name'

    def __init__(self, name='No name'):
        self.name = name
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.sum = 0
        self.mean = 0
        self.num = 0
        self.now = 0

    def update(self, mean_var, count=1):
        """Fold in a new observation weighted by `count`."""
        value = mean_var
        if math.isnan(value):
            # Guard against NaN poisoning the running mean.
            value = 1000000.0
            print('Avgmeter getting Nan!')
        self.now = value
        self.sum += value * count
        self.num += count
        self.mean = float(self.sum) / self.num
# NOTE(review): '@' restored -- extraction left this decorator as a bare call,
# so the class was never registered with the algorithm registry.
@_registry(algorithm_type='weight_correction', location='post_quantization')
class WeightCorrection(Algorithm):
    """Post-training weight correction for quantized conv layers.

    For every conv op, rescales and shifts so the corrected tensor's
    per-output-channel std/mean match the fp32 weights.
    """

    def __init__(self, eps=1e-05, channel_axis=1):
        # eps guards the std ratio against division by zero.
        self.eps = eps
        # Axis of the weight tensor treated as the channel dimension.
        self.channel_axis = channel_axis

    def __call__(self, origin_model, q_model, adaptor, dataloader, iterations):
        graph_info = origin_model.graph_info
        # Only conv-type ops are corrected.
        op_list = [op_name for (op_name, op_type) in graph_info.items() if ('conv' in op_type.lower())]
        cap = adaptor.query_fw_capability(origin_model)
        quantize_cfg = {'op': cap['opwise']}
        fp32_data = adaptor.inspect_tensor(origin_model, dataloader, op_list=op_list, iteration_list=list(range(1, (iterations + 1))), inspect_type='weight', quantization_cfg=quantize_cfg)
        q_data = adaptor.inspect_tensor(q_model, dataloader, op_list=op_list, iteration_list=list(range(1, (iterations + 1))), inspect_type='weight', quantization_cfg=quantize_cfg)
        fp32_weights = fp32_data['weight']
        q_weights = q_data['weight']
        tensor_dict = {}
        for fp32_op in op_list:
            q_op = fp32_op
            if ((fp32_op not in fp32_weights) or (not (len(fp32_weights[fp32_op]) >= 1))):
                continue
            # Separate weight (>1-D) from bias (1-D) tensors for both models.
            (fp32_weight, fp32_weight_name) = (None, '')
            (fp32_bias, fp32_bias_name) = (None, '')
            for (name, value) in fp32_weights[fp32_op].items():
                if (len(value.shape) > 1):
                    fp32_weight = value
                    fp32_weight_name = name
                if (len(value.shape) == 1):
                    fp32_bias = value
                    fp32_bias_name = name
            (q_weight, q_weight_name) = (None, '')
            (q_bias, q_bias_name) = (None, '')
            for (name, value) in q_weights[q_op].items():
                if (len(value.shape) > 1):
                    q_weight = value
                    q_weight_name = name
                if (len(value.shape) == 1):
                    q_bias = value
                    q_bias_name = name
            # Move channel_axis to the front and flatten: one row per channel.
            channel_shape = list(range(len(fp32_weight.shape)))
            transpose_shape = [channel_shape.pop(self.channel_axis)]
            transpose_shape.extend(channel_shape)
            t_fp32_weight = np.transpose(fp32_weight, transpose_shape)
            t_fp32_weight = t_fp32_weight.reshape(t_fp32_weight.shape[0], (- 1))
            t_q_weight = np.transpose(q_weight, transpose_shape)
            t_q_weight = t_q_weight.reshape(t_q_weight.shape[0], (- 1))
            # Per-channel std ratio fp32/quantized (eps avoids divide-by-zero).
            channel_variance = (np.std(t_fp32_weight, axis=1) / (np.std(t_q_weight, axis=1) + self.eps))
            # Reshape to broadcast along channel_axis of the full weight tensor.
            broad_shape = np.ones(len(fp32_weight.shape), dtype=np.int32)
            broad_shape[self.channel_axis] = len(channel_variance)
            channel_variance = channel_variance.reshape(broad_shape)
            variance_q_weight = (q_weight * channel_variance)
            variance_q_weight = np.transpose(variance_q_weight, transpose_shape)
            variance_q_weight = variance_q_weight.reshape(variance_q_weight.shape[0], (- 1))
            # NOTE(review): axis=self.channel_axis on the flattened 2-D arrays
            # matches per-channel only for the default channel_axis=1 -- verify
            # for other axes.
            channel_mean = (np.mean(t_fp32_weight, axis=self.channel_axis) - np.mean(variance_q_weight, axis=self.channel_axis))
            channel_mean = channel_mean.reshape(broad_shape)
            # NOTE(review): the correction is built from fp32_weight (not
            # q_weight), and the collected biases are unused -- preserved as-is.
            tensor_dict[q_weight_name] = ((channel_variance * fp32_weight) + channel_mean)
        if (len(tensor_dict) > 0):
            adaptor.set_tensor(q_model, tensor_dict)
        return q_model
class FC(tf.keras.Sequential):
    """Fully-connected block: Dense with optional BatchNorm/activation, in
    post-activation (default) or pre-activation order.

    NOTE: `in_size` and `training` are kept for interface compatibility; Keras
    Dense infers the input size at build time, so neither is used here.
    NOTE(review): the default `activation=tf.keras.layers.ReLU()` is a single
    shared layer instance across all FC objects (mutable default argument) --
    pass a fresh activation per instance to be safe.
    """

    def __init__(self, in_size: int, out_size: int, *, activation=tf.keras.layers.ReLU(), bn: bool=False, init=None, preact: bool=False, training=True):
        super().__init__()
        # tf.keras has no `Linear` layer -- the dense layer is `Dense`
        # (the original raised AttributeError at construction time).
        fc = tf.keras.layers.Dense(
            out_size,
            use_bias=(not bn),  # BN supplies the shift, so drop the bias
            kernel_initializer=(init if init is not None else 'glorot_uniform'),
            bias_initializer=tf.keras.initializers.Constant(0.0),
        )
        if preact:
            # Pre-activation: norm/activation run before the dense layer.
            if bn:
                self.add(tf.keras.layers.BatchNormalization(axis=1, momentum=0.9, epsilon=1e-05))
            if (activation is not None):
                self.add(activation)
        self.add(fc)
        if (not preact):
            if bn:
                self.add(tf.keras.layers.BatchNormalization(axis=1, momentum=0.9, epsilon=1e-05))
            if (activation is not None):
                self.add(activation)