code
stringlengths
101
5.91M
def add_change_in_new_deaths(df, target_days):
    """Add per-day "new deaths" columns to ``df`` in place.

    Negative/zero ``target_days`` index observed '#Deaths_<m-d-Y>' snapshot
    columns; positive days use the 'Predicted Deaths N-day' columns. A
    running 'choropleth_scale_max' column tracks the row-wise maximum over
    all generated columns.
    """
    death_columns = df.filter(regex='#Deaths_').columns
    newest_stamp = death_columns[-1].replace('#Deaths_', '')
    newest_date = datetime.strptime(newest_stamp, '%m-%d-%Y').date()
    # Kept for parity with the original: ISO dates for each offset day.
    plot_dates = [
        (newest_date + timedelta(days=int(offset))).isoformat()
        for offset in target_days
    ]
    for position, day in enumerate(target_days):
        if day < 1:
            # Historical: difference of two observed snapshot columns.
            reference = df[death_columns[day - 2]]
            current = df[death_columns[day - 1]]
            new_col = f'Observed New Deaths {day}-day'
        else:
            # Forecast: difference against the previous prediction (or the
            # latest observed total for the 1-day horizon).
            if day == 1:
                reference = df['tot_deaths']
            else:
                reference = df[f'Predicted Deaths {day - 1}-day']
            current = df[f'Predicted Deaths {day}-day']
            new_col = f'Predicted New Deaths {day}-day'
        df[new_col] = current - reference
        if position:
            df['choropleth_scale_max'] = df[['choropleth_scale_max', new_col]].max(axis=1)
        else:
            df['choropleth_scale_max'] = df[new_col]
class CloseGripperOption(AbstractOption):
    """Option that commands the gripper to close.

    Both policy factories return the same pair: a CloseGripperPolicy plus a
    TimeCondition that terminates 0.8 seconds after the current world time.
    """

    def __init__(self, position=None, **kwargs):
        super(CloseGripperOption, self).__init__(name='close_gripper')
        self.position = position

    def _policy_pair(self, world):
        # One shared construction point for both factory methods.
        return (CloseGripperPolicy(pos=self.position),
                TimeCondition(world.time() + 0.8))

    def makePolicy(self, world):
        return self._policy_pair(world)

    def samplePolicy(self, world):
        return self._policy_pair(world)
def get_metrics(p_seq1, p_seq2, o_seq1, o_seq2):
    """Return (pitch similarity, onset similarity, pitch cross-correlation,
    onset cross-correlation) for two pitch/onset sequence pairs."""
    pitch_sim = pitch_similarity(p_seq1, p_seq2)
    onset_sim = onset_similarity(o_seq1, o_seq2)
    pitch_corr = cross_correlation(p_seq1, p_seq2)
    onset_corr = cross_correlation(o_seq1, o_seq2)
    return pitch_sim, onset_sim, pitch_corr, onset_corr
class BaselineDataset(Dataset):
    """Dataset wrapper that pairs each sample with an index-aligned baseline."""

    def __init__(self, dataset=None, baseline=None):
        super(BaselineDataset, self).__init__()
        self.dataset = dataset
        self.baseline = baseline
        # The two collections must be the same length so indices line up.
        assert len(self.dataset) == len(self.baseline)

    def __getitem__(self, item):
        sample = self.dataset[item]
        reference = self.baseline[item]
        return {'data': sample, 'baseline': reference}

    def __len__(self):
        return len(self.dataset)
def gumbel_softmax_sample(logits, tau=1, eps=1e-10, dim=(-1)):
    """Draw a soft sample from the Gumbel-softmax relaxation of `logits`.

    `tau` is the softmax temperature; `eps` stabilises the Gumbel noise.
    """
    noise = sample_gumbel(logits.size(), eps=eps)
    if logits.is_cuda:
        # Keep the noise on the same device as the logits.
        noise = noise.cuda()
    perturbed = logits + Variable(noise)
    return F.softmax(perturbed / tau, dim=dim)
class DFA(nn.Module):
    """Dual-feature attention: fuses two same-shaped (N, C, L) feature maps
    with an element-wise sigmoid gate produced by a 1x1-conv bottleneck."""

    def __init__(self, features, M=2, r=1, L=32):
        """features: channel count of each input; r: bottleneck reduction
        ratio; L: lower bound on the bottleneck width. M is kept for
        interface compatibility (only two branches are fused)."""
        super(DFA, self).__init__()
        self.M = M
        self.features = features
        # Bottleneck width: features / r, but never below L.
        d = max(int(self.features / r), L)
        self.fc = nn.Sequential(
            nn.Conv1d(self.features, d, kernel_size=1),
            nn.BatchNorm1d(d),
        )
        self.fc_out = nn.Sequential(
            nn.Conv1d(d, self.features, kernel_size=1),
            nn.BatchNorm1d(self.features),
        )

    def forward(self, x):
        """x: pair/list of two tensors with identical (N, C, L) shape."""
        shape = x[0].shape
        if len(shape) > 3:
            # Bug fix: the original evaluated `assert NotImplemented('...')`,
            # which raises TypeError ('NotImplemented' is not callable)
            # instead of signalling the unsupported rank explicitly.
            raise NotImplementedError('Do not support len(shape) > 3.')
        fea_U = x[0] + x[1]
        fea_z = self.fc(fea_U)
        fea_cat = self.fc_out(fea_z)
        attention_vectors = torch.sigmoid(fea_cat)
        # Convex combination of the two branches, gated per element.
        fea_v = attention_vectors * x[0] + (1 - attention_vectors) * x[1]
        return fea_v
def tarfile_to_samples_nothrow(src, handler=wds.warn_and_continue):
    """Expand tar shard URLs into grouped samples, warning (not raising)
    on malformed shards via the given handler."""
    opened_streams = url_opener(src, handler=handler)
    tar_members = tar_file_expander(opened_streams, handler=handler)
    return group_by_keys_nothrow(tar_members, handler=handler)
class FALoss(torch.nn.Module):
    """Feature-affinity loss: L1 distance between the self-affinity (Gram)
    matrices of two average-pooled feature maps, normalised by (H*W)^2."""

    def __init__(self, subscale=0.0625):
        super(FALoss, self).__init__()
        # Pooling window size so the spatial dims shrink by `subscale`.
        self.subscale = int(1 / subscale)

    def _affinity(self, feature):
        # Flatten spatial dims and build the (HW x HW) Gram matrix.
        batch, _, height, width = feature.size()
        flat = feature.view(batch, -1, width * height)
        gram = torch.bmm(flat.permute(0, 2, 1), flat)
        return gram, height, width

    def forward(self, feature1, feature2):
        pool = torch.nn.AvgPool2d(self.subscale)
        gram1, _, _ = self._affinity(pool(feature1))
        # Normalisation uses feature2's pooled dims, matching the original.
        gram2, height, width = self._affinity(pool(feature2))
        l1 = torch.norm(gram2 - gram1, 1)
        return l1 / ((height * width) ** 2)
def get_random_digit_image():
    """Return the image part of a uniformly chosen entry of the module-level
    `images` collection."""
    chosen = randint(0, len(images) - 1)
    return images[chosen][0]
def main(opt=None, inputs=None, isEvaluate=False):
    """Decode sequences with a fine-tuned BertForSeq2SeqDecoder.

    Args either come from CLI flags (when `opt` is None) or from a
    pre-built namespace. If `inputs` is given, decodes those strings and
    returns the decoded lines; with `isEvaluate` it scores against a gold
    file; otherwise it writes decoded lines (and optional score traces) to
    disk.
    """
    if (not opt):
        # ---- CLI argument definitions ----
        parser = argparse.ArgumentParser()
        parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
        parser.add_argument('--model_recover_path', default=None, type=str, help='The file of fine-tuned pretraining model.')
        parser.add_argument('--cache_path', default=None, type=str, help='Yifan added, bert vocab path')
        parser.add_argument('--max_seq_length', default=512, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
        parser.add_argument('--ffn_type', default=0, type=int, help='0: default mlp; 1: W((Wx+b) elem_prod x);')
        parser.add_argument('--num_qkv', default=0, type=int, help='Number of different <Q,K,V>.')
        parser.add_argument('--seg_emb', action='store_true', help='Using segment embedding for self-attention.')
        parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
        parser.add_argument('--amp', action='store_true', help='Whether to use amp for fp16')
        parser.add_argument('--input_file', type=str, help='Input file')
        parser.add_argument('--subset', type=int, default=0, help='Decode a subset of the input dataset.')
        parser.add_argument('--output_file', type=str, help='output file')
        parser.add_argument('--split', type=str, default='', help='Data split (train/val/test).')
        parser.add_argument('--tokenized_input', action='store_true', help='Whether the input is tokenized.')
        parser.add_argument('--seed', type=int, default=123, help='random seed for initialization')
        parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
        parser.add_argument('--new_segment_ids', action='store_true', help='Use new segment ids for bi-uni-directional LM.')
        parser.add_argument('--new_pos_ids', action='store_true', help='Use new position ids for LMs.')
        parser.add_argument('--batch_size', type=int, default=4, help='Batch size for decoding.')
        parser.add_argument('--beam_size', type=int, default=1, help='Beam size for searching')
        parser.add_argument('--length_penalty', type=float, default=0, help='Length penalty for beam search')
        parser.add_argument('--forbid_duplicate_ngrams', action='store_true')
        parser.add_argument('--forbid_ignore_word', type=str, default=None, help='Ignore the word during forbid_duplicate_ngrams')
        parser.add_argument('--min_len', default=None, type=int)
        parser.add_argument('--need_score_traces', action='store_true')
        parser.add_argument('--ngram_size', type=int, default=3)
        parser.add_argument('--mode', default='s2s', choices=['s2s', 'l2r', 'both'])
        parser.add_argument('--max_tgt_length', type=int, default=128, help='maximum length of target sequence')
        parser.add_argument('--s2s_special_token', action='store_true', help='New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.')
        parser.add_argument('--s2s_add_segment', action='store_true', help='Additional segmental for the encoder of S2S.')
        parser.add_argument('--s2s_share_segment', action='store_true', help='Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).')
        parser.add_argument('--pos_shift', action='store_true', help='Using position shift for fine-tuning.')
        parser.add_argument('--not_predict_token', type=str, default=None, help='Do not predict the tokens during decoding.')
        args = parser.parse_args()
    else:
        args = opt
    # ---- sanity checks on decoding options ----
    if (args.need_score_traces and (args.beam_size <= 1)):
        raise ValueError('Score trace is only available for beam search with beam size > 1.')
    if (args.max_tgt_length >= (args.max_seq_length - 2)):
        raise ValueError('Maximum tgt length exceeds max seq length - 2.')
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    n_gpu = torch.cuda.device_count()
    # Seed every RNG for reproducible decoding.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    # ---- tokenizer and preprocessing pipeline ----
    bert_vocab_path = os.path.join(args.cache_path, '{}-vocab.txt'.format(args.bert_model))
    tokenizer = BertTokenizer.from_pretrained(bert_vocab_path, cache_dir=args.cache_path, do_lower_case=args.do_lower_case)
    tokenizer.max_len = args.max_seq_length
    pair_num_relation = 0
    bi_uni_pipeline = []
    bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, max_tgt_length=args.max_tgt_length, new_segment_ids=args.new_segment_ids, mode='s2s', num_qkv=args.num_qkv, s2s_special_token=args.s2s_special_token, s2s_add_segment=args.s2s_add_segment, s2s_share_segment=args.s2s_share_segment, pos_shift=args.pos_shift))
    amp_handle = None
    if (args.fp16 and args.amp):
        from apex import amp
        amp_handle = amp.init(enable_caching=True)
        logger.info('enable fp16 with amp')
    cls_num_labels = 2
    # Segment-id vocabulary grows when new segment ids are enabled.
    type_vocab_size = ((6 + (1 if args.s2s_add_segment else 0)) if args.new_segment_ids else 2)
    (mask_word_id, eos_word_ids, sos_word_id) = tokenizer.convert_tokens_to_ids(['[MASK]', '[SEP]', '[S2S_SOS]'])

    def _get_token_id_set(s):
        # Turn a '|'-separated word list into a set of token ids;
        # bracketed entries are special tokens and get upper-cased.
        r = None
        if s:
            w_list = []
            for w in s.split('|'):
                if (w.startswith('[') and w.endswith(']')):
                    w_list.append(w.upper())
                else:
                    w_list.append(w)
            r = set(tokenizer.convert_tokens_to_ids(w_list))
        return r

    forbid_ignore_set = _get_token_id_set(args.forbid_ignore_word)
    not_predict_set = _get_token_id_set(args.not_predict_token)
    # ---- decode with every checkpoint matching the recover-path glob ----
    for model_recover_path in glob.glob(args.model_recover_path.strip()):
        logger.info('***** Recover model: %s *****', model_recover_path)
        model_recover = torch.load(model_recover_path, map_location=(lambda storage, loc: storage))
        model = BertForSeq2SeqDecoder.from_pretrained(args.cache_path, state_dict=model_recover, num_labels=cls_num_labels, num_rel=pair_num_relation, type_vocab_size=type_vocab_size, task_idx=3, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, not_predict_set=not_predict_set, ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode, max_position_embeddings=args.max_seq_length, ffn_type=args.ffn_type, num_qkv=args.num_qkv, seg_emb=args.seg_emb, pos_shift=args.pos_shift)
        del model_recover
        if args.fp16:
            model.half()
        model.to(device)
        if (n_gpu > 1):
            model = torch.nn.DataParallel(model)
        torch.cuda.empty_cache()
        model.eval()
        next_i = 0
        # Room left for the source after reserving target length + 2 specials.
        max_src_length = ((args.max_seq_length - 2) - args.max_tgt_length)
        if (not inputs):
            with open(args.input_file, encoding='utf-8') as fin:
                input_lines = [x.strip() for x in fin.readlines()]
                if (args.subset > 0):
                    logger.info('Decoding subset: %d', args.subset)
                    input_lines = input_lines[:args.subset]
        else:
            input_lines = inputs
        data_tokenizer = (WhitespaceTokenizer() if args.tokenized_input else tokenizer)
        # Sort by descending length (keeping original indices) for batching.
        input_lines = [data_tokenizer.tokenize(x)[:max_src_length] for x in input_lines]
        input_lines = sorted(list(enumerate(input_lines)), key=(lambda x: (- len(x[1]))))
        output_lines = ([''] * len(input_lines))
        score_trace_list = ([None] * len(input_lines))
        total_batch = math.ceil((len(input_lines) / args.batch_size))
        with tqdm(total=total_batch) as pbar:
            while (next_i < len(input_lines)):
                _chunk = input_lines[next_i:(next_i + args.batch_size)]
                buf_id = [x[0] for x in _chunk]
                buf = [x[1] for x in _chunk]
                next_i += args.batch_size
                max_a_len = max([len(x) for x in buf])
                instances = []
                for instance in [(x, max_a_len) for x in buf]:
                    for proc in bi_uni_pipeline:
                        instances.append(proc(instance))
                with torch.no_grad():
                    batch = seq2seq_loader.batch_list_to_batch_tensors(instances)
                    batch = [(t.to(device) if (t is not None) else None) for t in batch]
                    (input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx) = batch
                    traces = model(input_ids, token_type_ids, position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv)
                    if (args.beam_size > 1):
                        traces = {k: v.tolist() for (k, v) in traces.items()}
                        output_ids = traces['pred_seq']
                    else:
                        output_ids = traces.tolist()
                    for i in range(len(buf)):
                        w_ids = output_ids[i]
                        output_buf = tokenizer.convert_ids_to_tokens(w_ids)
                        output_tokens = []
                        for t in output_buf:
                            # Stop at end-of-sequence or padding.
                            if (t in ('[SEP]', '[PAD]')):
                                break
                            output_tokens.append(t)
                        output_sequence = ' '.join(detokenize(output_tokens))
                        # buf_id maps back to the pre-sort line order.
                        output_lines[buf_id[i]] = output_sequence
                        if args.need_score_traces:
                            score_trace_list[buf_id[i]] = {'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}
                pbar.update(1)
        if inputs:
            # Programmatic use: hand the decoded lines back to the caller.
            return output_lines
        elif isEvaluate:
            # Score predictions against the gold target file.
            with open(args.input_file.replace('src', 'tgt'), encoding='utf-8') as fin:
                gold_lines = [x.strip() for x in fin.readlines()]
            gold_prepro = [prepro(sent) for sent in gold_lines]
            pred_prepro = [prepro(sent) for sent in output_lines]
            evaluator = MoreEvaluator()
            results = evaluator.evaluate(gold_prepro, pred_prepro)
            pprint(results)
            with open((((args.output_file + '.') + args.split) + '.json'), 'wt') as f:
                json.dump(results, f, indent=2)
        else:
            # Plain decoding run: write lines (and optional traces) to disk.
            if args.output_file:
                fn_out = args.output_file
            else:
                fn_out = ((model_recover_path + '.') + args.split)
            with open(fn_out, 'w', encoding='utf-8') as fout:
                for l in output_lines:
                    fout.write(l)
                    fout.write('\n')
            if args.need_score_traces:
                with open((fn_out + '.trace.pickle'), 'wb') as fout_trace:
                    pickle.dump({'version': 0.0, 'num_samples': len(input_lines)}, fout_trace)
                    for x in score_trace_list:
                        pickle.dump(x, fout_trace)
def dict_gather(comm, d, op='mean', assert_all_have_data=True):
    """Gather dicts from all MPI workers and reduce values key-wise.

    With `comm is None` the input dict is returned untouched. `op` selects
    'mean' or 'sum' over the gathered values for each key; any other value
    trips an assertion.
    """
    if comm is None:
        return d
    gathered = comm.allgather(d)
    n_workers = comm.size
    per_key = defaultdict(list)
    for worker_dict in gathered:
        for key, value in worker_dict.items():
            per_key[key].append(value)
    reduced = {}
    for key, values in per_key.items():
        if assert_all_have_data:
            # Every worker is expected to report every key.
            assert len(values) == n_workers, (
                "only %i out of %i MPI workers have sent '%s'"
                % (len(values), n_workers, key))
        if op == 'mean':
            reduced[key] = np.mean(values, axis=0)
        elif op == 'sum':
            reduced[key] = np.sum(values, axis=0)
        else:
            assert 0, op
    return reduced
class FrameLevel(nn.Module):
    """Frame-level prediction head: an optional Linear+activation stack
    followed by a final linear projection to `output_dim`."""

    def __init__(self, input_dim, output_dim, hiddens=None, activation='ReLU', **kwargs):
        """hiddens: list of hidden widths (None for a bare linear head);
        activation: name of any torch.nn activation class."""
        super().__init__()
        latest_dim = input_dim
        layers = []
        if hiddens is not None:
            for dim in hiddens:
                layers += [nn.Linear(latest_dim, dim), getattr(nn, activation)()]
                latest_dim = dim
        self.hiddens = nn.Sequential(*layers)
        self.linear = nn.Linear(latest_dim, output_dim)

    def forward(self, hidden_state, features_len=None):
        """Return (logits, features_len); features_len passes through."""
        hidden_states = self.hiddens(hidden_state)
        # Bug fix: the original applied self.linear to the raw input
        # (`hidden_state`), silently bypassing the hidden stack it had
        # just computed. Project the transformed features instead.
        logit = self.linear(hidden_states)
        return (logit, features_len)
class CachedAttribute(object):
    """Read-only descriptor that computes a value once per instance and
    caches it in the instance ``__dict__``.

    Assignment raises AttributeError; deletion clears the cache so the
    value is recomputed on the next access.
    """

    def __init__(self, method, name=None):
        self.method = method
        self.name = name or method.__name__

    def __get__(self, obj, objtype):
        if obj is None:
            # Class-level access exposes the descriptor itself.
            return self
        cache = obj.__dict__
        if self.name not in cache:
            cache[self.name] = self.method(obj)
        return cache[self.name]

    def __set__(self, obj, value):
        raise AttributeError('Cannot assign to ' + self.name + '.')

    def __delete__(self, obj):
        # Dropping an uncached value is a no-op.
        try:
            del obj.__dict__[self.name]
        except KeyError:
            pass
class Statistics(object):
    """Accumulator for loss / word-count / accuracy statistics during
    training, with distributed gathering, console logging and TensorBoard
    reporting helpers."""

    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.n_src_words = 0
        self.start_time = time.time()

    def all_gather_stats(stat, max_size=4096):
        """Gather a single Statistics object across distributed workers."""
        gathered = Statistics.all_gather_stats_list([stat], max_size=max_size)
        return gathered[0]

    def all_gather_stats_list(stat_list, max_size=4096):
        """Gather lists of Statistics from all ranks and merge remote
        entries into this rank's list."""
        from torch.distributed import get_rank
        from onmt.utils.distributed import all_gather_list
        all_stats = all_gather_list(stat_list, max_size=max_size)
        our_rank = get_rank()
        our_stats = all_stats[our_rank]
        for other_rank, stats in enumerate(all_stats):
            if other_rank == our_rank:
                continue
            for i, stat in enumerate(stats):
                our_stats[i].update(stat, update_n_src_words=True)
        return our_stats

    def update(self, stat, update_n_src_words=False):
        """Fold another Statistics object into this one."""
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct
        if update_n_src_words:
            self.n_src_words += stat.n_src_words

    def accuracy(self):
        """Token accuracy, in percent."""
        return (self.n_correct / self.n_words) * 100

    def xent(self):
        """Per-word cross entropy."""
        return self.loss / self.n_words

    def ppl(self):
        """Perplexity, with the exponent clamped at 100 to avoid overflow."""
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        return time.time() - self.start_time

    def output(self, step, num_steps, learning_rate, start):
        """Log a one-line progress summary via the module logger."""
        t = self.elapsed_time()
        step_fmt = '%2d' % step
        if num_steps > 0:
            step_fmt = '%s/%5d' % (step_fmt, num_steps)
        logger.info(
            ('Step %s; acc: %6.2f; ppl: %5.2f; xent: %4.2f; '
             + 'lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec')
            % (step_fmt, self.accuracy(), self.ppl(), self.xent(),
               learning_rate, self.n_src_words / (t + 1e-05),
               self.n_words / (t + 1e-05), time.time() - start))
        sys.stdout.flush()

    def log_tensorboard(self, prefix, writer, learning_rate, step):
        """Push the current statistics to a TensorBoard writer."""
        t = self.elapsed_time()
        writer.add_scalar(prefix + '/xent', self.xent(), step)
        writer.add_scalar(prefix + '/ppl', self.ppl(), step)
        writer.add_scalar(prefix + '/accuracy', self.accuracy(), step)
        writer.add_scalar(prefix + '/tgtper', self.n_words / t, step)
        writer.add_scalar(prefix + '/lr', learning_rate, step)
def test_add_batch_all_new(data):
    """Batch-adding four solutions (two sharing a cell) stores three elites."""
    solutions = [[1, 2, 3]] * 4
    add_info = data.archive.add(
        solution=solutions,
        objective=[0, 0, 0, 1],
        measures=[[0, 0], [0.25, 0.25], [0.5, 0.5], [0.5, 0.5]],
    )
    # Every add lands in a previously empty cell (status 2).
    assert (add_info['status'] == 2).all()
    assert np.isclose(add_info['value'], [0, 0, 0, 1]).all()
    # The duplicate cell keeps only the higher-objective entry.
    assert_archive_elites(
        archive=data.archive,
        batch_size=3,
        solution_batch=[[1, 2, 3]] * 3,
        objective_batch=[0, 0, 1],
        measures_batch=[[0, 0], [0.25, 0.25], [0.5, 0.5]],
        grid_indices_batch=[[5, 10], [6, 11], [7, 12]],
    )
class BoundTransform(Transformation):
    """Transformation built from one BoundedScaler per axis.

    NOTE(review): BoundedScaler/Transformation are defined elsewhere in the
    project — presumably each scaler maps its axis into the given bound;
    verify against their definitions.
    """

    def __init__(self, x_bound, y_bound):
        # One bounded scaler for x, one for y; the base class combines them.
        super().__init__(BoundedScaler(x_bound), BoundedScaler(y_bound))
def get_labels(path):
    """Read one label per line from `path`, prepending the 'O' (outside)
    tag when it is missing."""
    with open(path, 'r') as handle:
        labels = handle.read().splitlines()
    if 'O' not in labels:
        labels = ['O'] + labels
    return labels
class DenseNet(nn.Module):
    """DenseNet backbone that can copy block weights from torchvision's
    densenet121 and taps an intermediate "deep features" activation."""

    def __init__(self, growth_rate=8, block_config=(6, 12, 24, 16), num_init_features=16, bn_size=4, drop_rate=0, pretrained=False):
        super(DenseNet, self).__init__()
        # Stem: 7x7 conv -> BN -> ReLU -> 3x3 max-pool.
        self.start_features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))]))
        num_features = num_init_features
        # NOTE(review): densenet121 weights are downloaded unconditionally
        # here, even when `pretrained` is False — they are only *used*
        # below when `pretrained` is True.
        init_weights = list(densenet121(pretrained=True).features.children())
        start = 0
        # Advance `start` past the stem so it indexes the first dense block
        # of the pretrained feature list.
        for (i, c) in enumerate(self.start_features.children()):
            start += 1
        self.blocks = nn.ModuleList()
        for (i, num_layers) in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, index=i)
            if pretrained:
                # Copy the matching pretrained dense block's weights.
                block.load_state_dict(init_weights[start].state_dict())
            start += 1
            self.blocks.append(block)
            setattr(self, ('denseblock%d' % (i + 1)), block)
            num_features = (num_features + (num_layers * growth_rate))
            if (i != (len(block_config) - 1)):
                # Transition halves the channels; only the first one
                # spatially downsamples.
                downsample = (i < 1)
                trans = _Transition(num_input_features=num_features, num_output_features=(num_features // 2), downsample=downsample)
                if pretrained:
                    trans.load_state_dict(init_weights[start].state_dict())
                start += 1
                self.blocks.append(trans)
                setattr(self, ('transition%d' % (i + 1)), trans)
                num_features = (num_features // 2)

    def forward(self, x):
        """Return (final feature map, activations after the 6th module)."""
        out = self.start_features(x)
        deep_features = None
        for (i, block) in enumerate(self.blocks):
            out = block(out)
            if (i == 5):
                # Tap "deep" features part-way through the block stack.
                deep_features = out
        return (out, deep_features)
def vis(model, loader, save_dir, rank=None, world_size=1):
    """Run `model` over `loader` and save per-image attention maps.

    Saves attention probabilities under save_dir/attention_probs; the
    hidden-state and cosine-similarity arrays are computed but (as in the
    original) only the attention maps are written out.
    """
    attention_dir = os.path.join(save_dir, 'attention_probs')
    hidden_dir = os.path.join(save_dir, 'hidden_states')
    cos_dir = os.path.join(save_dir, 'cos_similarity')
    if not os.path.exists(attention_dir):
        makedirsExist(attention_dir)
    model.eval()
    for i, data in zip(trange(len(loader)), loader):
        data = to_cuda(data)
        output = model(*data)
        for _i, (attention_probs, hidden_states) in enumerate(
                zip(output['attention_probs'], output['hidden_states'])):
            index = int(data[2][_i][-1])
            # Resolve a stable image id either from the dataset's id list
            # or from the image path in its database entries.
            if hasattr(loader.dataset, 'ids'):
                image_id = loader.dataset.ids[index]
            else:
                image_id = loader.dataset.database[index]['image'].split('/')[1].split('.')[0]
            attention_probs_arr = attention_probs.detach().cpu().numpy()
            hidden_states_arr = hidden_states.detach().cpu().numpy()
            # Bug fix: the source read `(hidden_states hidden_states.transpose(1, 2))`
            # — a syntax error with the matrix-multiply operator missing.
            cos_similarity_arr = (hidden_states @ hidden_states.transpose(1, 2)).detach().cpu().numpy()
            np.save(os.path.join(attention_dir, '{}.npy'.format(image_id)), attention_probs_arr)
class Storage(_IOMixin, metaclass=ABCMeta):
    """Keyed store of HistoricalContainer metric histories with context
    manager support and a merged pandas summary."""

    def __init__(self) -> None:
        super().__init__()
        # Each meter name maps to its own lazily-created history container.
        self._storage = defaultdict(HistoricalContainer)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def put(self, name: str, value: Dict[str, float], epoch=None, prefix='', postfix=''):
        """Record `value` (optionally at `epoch`) under prefix+name+postfix."""
        self._storage[(prefix + name) + postfix].add(value, epoch)

    def get(self, name, epoch=None):
        """Return the full history for `name`, or one epoch's entry."""
        assert (name in self._storage), name
        if epoch is None:
            return self._storage[name]
        return self._storage[name][epoch]

    def summary(self) -> pd.DataFrame:
        """Merge every meter's summary into one index-aligned DataFrame."""
        list_of_summary = [rename_df_columns(v.summary(), k) for (k, v) in self._storage.items()]
        summary = functools.reduce(
            (lambda x, y: pd.merge(x, y, left_index=True, right_index=True)),
            list_of_summary)
        return pd.DataFrame(summary)

    def meter_names(self, sorted=False) -> List[str]:
        """Return meter names; alphabetically ordered when `sorted` is True.

        Bug fix: the parameter shadows the builtin `sorted`, so the original
        `return sorted(self._storage.keys())` called the boolean flag and
        raised TypeError whenever sorting was requested. Use list.sort() so
        no builtin lookup is needed.
        """
        names = list(self._storage.keys())
        if sorted:
            names.sort()
        return names

    def storage(self):
        return self._storage
class dAUTOMAP(nn.Module):
    """Decomposed AUTOMAP: two generalised-IFT domain transforms (each
    followed by tanh) and a convolutional refinement block."""

    def __init__(self, input_shape, output_shape, tfx_params, tfx_params2=None):
        """input_shape/output_shape: (channels, ...) shapes; tfx_params(2):
        kwargs for the two GeneralisedIFT2Layer instances."""
        super(dAUTOMAP, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        if tfx_params2 is None:
            # Reuse the first transform's configuration for the second.
            tfx_params2 = tfx_params
        self.domain_transform = GeneralisedIFT2Layer(**tfx_params)
        self.domain_transform2 = GeneralisedIFT2Layer(**tfx_params2)
        self.refinement_block = get_refinement_block('automap_scae', input_shape[0], output_shape[0])

    def forward(self, x):
        """Transform -> tanh -> transform -> tanh -> refinement."""
        # Local import keeps the block self-contained; torch.tanh replaces
        # the deprecated (and later removed) torch.nn.functional.tanh.
        import torch
        x_mapped = self.domain_transform(x)
        x_mapped = torch.tanh(x_mapped)
        x_mapped2 = self.domain_transform2(x_mapped)
        x_mapped2 = torch.tanh(x_mapped2)
        out = self.refinement_block(x_mapped2)
        return out
def run_retraining(args: Any, test_only: bool=False, distributed: bool=False, batch_size: int=32, lr: float=0.01, train_epochs: int=20) -> tuple[(Any, int, int, int)]:
    """Activate the next Halut module of a checkpointed model, run inference,
    and (unless `test_only`) build optimizer/scheduler state for retraining.

    Returns (args_checkpoint, number of activated modules, total layer
    count, last trained epoch).
    """
    (model_name, model, state_dict, data_loader_train, data_loader_val, args_checkpoint, halut_modules, checkpoint) = load_model(args.checkpoint, distributed=distributed, batch_size=batch_size)
    # Keep per-model subdirectories for learned LUTs and halut data.
    learned_path = args.learned
    if (model_name not in learned_path.lower()):
        learned_path += ('/' + model_name)
    Path(learned_path).mkdir(parents=True, exist_ok=True)
    halut_data_path = args.halutdata
    if (model_name not in halut_data_path.lower()):
        halut_data_path += ('/' + model_name)
    Path(halut_data_path).mkdir(parents=True, exist_ok=True)
    # Untouched copy used later to sanity-check the saved state dict.
    model_copy = deepcopy(model)
    model.to(args.gpu)
    torch.cuda.set_device(args.gpu)
    sys_info()
    device = torch.device((('cuda:' + str(args.gpu)) if torch.cuda.is_available() else 'cpu'))
    if (not hasattr(args, 'distributed')):
        args.distributed = False
    halut_model = HalutHelper(model, state_dict, dataset=data_loader_val, dataset_train=data_loader_train, batch_size_inference=32, batch_size_store=args_checkpoint.batch_size, data_path=halut_data_path, device=device, learned_path=learned_path, report_error=False, distributed=args.distributed, device_id=args.gpu)
    halut_model.print_available_module()
    layers = get_layers(model_name)
    # Resume from however many modules the checkpoint already activated.
    if (halut_modules is None):
        next_layer_idx = 0
        halut_modules = {}
    else:
        next_layer_idx = len(halut_modules.keys())
    K = 16  # prototypes per codebook
    use_prototype = False
    # NOTE(review): `max` shadows the builtin and the first assignment is
    # immediately overwritten — exactly one new layer is handled per call.
    max = len(layers)
    max = (next_layer_idx + 1)
    for i in range(next_layer_idx, max):
        if ((not test_only) and (i < len(layers))):
            next_layer = layers[i]
            c_base = 16
            loop_order = 'im2col'
            c_ = c_base
            module_ref = get_module_by_name(halut_model.model, next_layer)
            if isinstance(module_ref, HalutConv2d):
                # Derive C (number of codebooks) from the layer's inner dim.
                inner_dim_im2col = ((module_ref.in_channels * module_ref.kernel_size[0]) * module_ref.kernel_size[1])
                inner_dim_kn2col = module_ref.in_channels
                if (loop_order == 'im2col'):
                    c_ = (inner_dim_im2col // 9)
                    if ((module_ref.kernel_size[0] * module_ref.kernel_size[1]) == 1):
                        c_ = (inner_dim_im2col // 4)
                else:
                    c_ = (inner_dim_kn2col // 8)
                # Residual 1x1 convs get a smaller divisor.
                if (('downsample' in next_layer) or ('shortcut' in next_layer)):
                    loop_order = 'im2col'
                    c_ = (inner_dim_im2col // 4)
            print('module_ref', module_ref)
            if isinstance(module_ref, HalutLinear):
                c_ = (256 // 4)
            # Newly configured layer plus everything activated before.
            modules = ({next_layer: [c_, K, loop_order, use_prototype]} | halut_modules)
        else:
            modules = halut_modules
        for (k, v) in modules.items():
            # Longer configs carry loop order / prototype flags as well.
            if (len(v) > 3):
                halut_model.activate_halut_module(k, C=v[HalutModuleConfig.C], K=v[HalutModuleConfig.K], loop_order=v[HalutModuleConfig.LOOP_ORDER], use_prototypes=v[HalutModuleConfig.USE_PROTOTYPES])
            else:
                halut_model.activate_halut_module(k, C=v[HalutModuleConfig.C], K=v[HalutModuleConfig.K])
    if args.distributed:
        dist.barrier()
    halut_model.run_inference()
    if args.distributed:
        dist.barrier()
    print(halut_model.get_stats())
    if (not test_only):
        print('modules', halut_model.halut_modules)
        checkpoint['halut_modules'] = halut_model.halut_modules
        checkpoint['model'] = halut_model.model.state_dict()
        # Verify the saved state dict loads into a pristine copy.
        model_copy.load_state_dict(checkpoint['model'])
        # Split parameters into groups so LUTs/thresholds get custom LRs.
        params = {'other': [], 'thresholds': [], 'temperature': [], 'lut': []}

        def _add_params(module, prefix=''):
            # Recursively bucket trainable parameters by role.
            for (name, p) in module.named_parameters(recurse=False):
                if (not p.requires_grad):
                    continue
                if isinstance(module, (HalutConv2d, HalutLinear)):
                    if (name == 'thresholds'):
                        params['thresholds'].append(p)
                        continue
                    if (name == 'temperature'):
                        # Temperature is intentionally excluded from training.
                        continue
                    if (name == 'lut'):
                        params['lut'].append(p)
                        continue
                params['other'].append(p)
            for (child_name, child_module) in module.named_children():
                child_prefix = (f'{prefix}.{child_name}' if (prefix != '') else child_name)
                _add_params(child_module, prefix=child_prefix)

        _add_params(model)
        # The most recently activated layer keeps the plain group name;
        # earlier layers move to the 'old_*' groups.
        params['old_lut'] = params['lut'][:(- 1)]
        params['lut'] = params['lut'][(- 1):]
        params['old_thresholds'] = params['thresholds'][:(- 1)]
        params['thresholds'] = params['thresholds'][(- 1):]
        custom_lrs = {'temperature': (0.1 * 0.0), 'thresholds': (lr / 2), 'old_thresholds': (lr / 2), 'old_lut': lr, 'lut': lr, 'other': lr}
        args_checkpoint.lr = lr
        param_groups = []
        for key in params:
            if (len(params[key]) > 0):
                if (key in custom_lrs.keys()):
                    param_groups.append({'params': params[key], 'lr': custom_lrs[key]})
                else:
                    param_groups.append({'params': params[key]})
        print('param_groups', len(param_groups))
        weight_decay = 0.0
        opt_name = 'adam'
        lr_scheduler_name = 'cosineannealinglr'
        args_checkpoint.lr_scheduler = lr_scheduler_name
        args_checkpoint.opt = opt_name
        if (opt_name == 'sgd'):
            optimizer = torch.optim.SGD(param_groups, lr=lr, momentum=args_checkpoint.momentum, weight_decay=weight_decay, nesterov=('nesterov' in opt_name))
            opt_state_dict = optimizer.state_dict()
        elif (opt_name == 'adam'):
            optimizer = torch.optim.Adam(param_groups, lr=lr, weight_decay=weight_decay)
            opt_state_dict = optimizer.state_dict()
        else:
            raise ValueError('Unknown optimizer {}'.format(opt_name))
        if (args_checkpoint.lr_scheduler == 'cosineannealinglr'):
            lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=train_epochs, eta_min=0.0002)
        elif (args_checkpoint.lr_scheduler == 'plateau'):
            lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=6, verbose=True, min_lr=0.0002)
            args_checkpoint.min_lr_to_break = 0.0002
        else:
            raise Exception('Unknown lr scheduler {}'.format(args_checkpoint.lr_scheduler))
        checkpoint['lr_scheduler'] = lr_scheduler.state_dict()
        checkpoint['optimizer'] = opt_state_dict
        args_checkpoint.output_dir = args.output_dir
        # Only rank 0 writes checkpoints in the distributed case.
        if ((not args.distributed) or (args.rank == 0)):
            save_on_master(checkpoint, os.path.join(args_checkpoint.output_dir, f'model_halut_{len(halut_model.halut_modules.keys())}.pth'))
            save_on_master(checkpoint, os.path.join(args_checkpoint.output_dir, f'retrained_checkpoint_{len(halut_model.halut_modules.keys())}.pth'))
    result_base_path = args.resultpath
    if (model_name not in result_base_path.lower()):
        result_base_path += (('/' + model_name) + '/')
    Path(result_base_path).mkdir(parents=True, exist_ok=True)
    if ((not args.distributed) or (args.rank == 0)):
        # Dump inference statistics as JSON for later analysis.
        with open(f"{args.resultpath}/retrained_{len(halut_model.halut_modules.keys())}{('_trained' if test_only else '')}.json", 'w') as fp:
            json.dump(halut_model.get_stats(), fp, sort_keys=True, indent=4)
    idx = len(halut_model.halut_modules.keys())
    # Free GPU memory before returning to the caller.
    del model
    torch.cuda.empty_cache()
    return (args_checkpoint, idx, len(layers), checkpoint['epoch'])
def _xgboost_min_child_weight(name):
    """Hyperopt search space for XGBoost's min_child_weight: log-uniform
    over [1, 100], cast to int."""
    low = np.log(1)
    high = np.log(100)
    return scope.int(hp.loguniform(name, low, high))
class TemperatureLogitsWarper():
    """Import-time placeholder for the real PyTorch implementation."""

    def __init__(self, *args, **kwargs):
        # `requires_pytorch` raises a helpful error explaining that this
        # class needs torch installed to be used.
        requires_pytorch(self)
class GridAnchorGeneratorTest(tf.test.TestCase):
    """Unit tests for grid_anchor_generator.GridAnchorGenerator."""

    def test_construct_single_anchor(self):
        """A 1x1 feature map yields one anchor per (scale, aspect ratio) pair."""
        scales = [0.5, 1.0, 2.0]
        aspect_ratios = [0.25, 1.0, 4.0]
        anchor_offset = [7, (- 3)]
        # Expected corner coordinates (4 values per anchor), 3x3 anchors.
        exp_anchor_corners = [[(- 121), (- 35), 135, 29], [(- 249), (- 67), 263, 61], [(- 505), (- 131), 519, 125], [(- 57), (- 67), 71, 61], [(- 121), (- 131), 135, 125], [(- 249), (- 259), 263, 253], [(- 25), (- 131), 39, 125], [(- 57), (- 259), 71, 253], [(- 121), (- 515), 135, 509]]
        anchor_generator = grid_anchor_generator.GridAnchorGenerator(scales, aspect_ratios, anchor_offset=anchor_offset)
        anchors = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
        anchor_corners = anchors.get()
        with self.test_session():
            # Evaluate the anchor tensor and compare against the table.
            anchor_corners_out = anchor_corners.eval()
            self.assertAllClose(anchor_corners_out, exp_anchor_corners)

    def test_construct_anchor_grid(self):
        """A 2x2 feature map tiles anchors across the grid by anchor_stride."""
        base_anchor_size = [10, 10]
        anchor_stride = [19, 19]
        anchor_offset = [0, 0]
        scales = [0.5, 1.0, 2.0]
        aspect_ratios = [1.0]
        # 3 scales x 4 grid cells = 12 expected anchors.
        exp_anchor_corners = [[(- 2.5), (- 2.5), 2.5, 2.5], [(- 5.0), (- 5.0), 5.0, 5.0], [(- 10.0), (- 10.0), 10.0, 10.0], [(- 2.5), 16.5, 2.5, 21.5], [(- 5.0), 14.0, 5, 24], [(- 10.0), 9.0, 10, 29], [16.5, (- 2.5), 21.5, 2.5], [14.0, (- 5.0), 24, 5], [9.0, (- 10.0), 29, 10], [16.5, 16.5, 21.5, 21.5], [14.0, 14.0, 24, 24], [9.0, 9.0, 29, 29]]
        anchor_generator = grid_anchor_generator.GridAnchorGenerator(scales, aspect_ratios, base_anchor_size=base_anchor_size, anchor_stride=anchor_stride, anchor_offset=anchor_offset)
        anchors = anchor_generator.generate(feature_map_shape_list=[(2, 2)])
        anchor_corners = anchors.get()
        with self.test_session():
            anchor_corners_out = anchor_corners.eval()
            self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def main():
    """Load analysis results and extract model features for SNLI neuron
    analysis, then parse each result row's feature formula."""
    result_file = os.path.join(settings.RESULT, 'result.csv')
    if not os.path.exists(result_file):
        raise ValueError(f"Need a result file for analysis, couldn't find {result_file}")
    result = pd.read_csv(result_file)
    result.sort_values('neuron', inplace=True)
    (model, dataset) = data.snli.load_for_analysis(
        settings.MODEL, settings.DATA,
        model_type=settings.MODEL_TYPE, cuda=settings.CUDA)
    (toks, states, feats, idxs) = analyze.extract_features(
        model, dataset, cache_file=None, pack_sequences=False)
    (token_masks, tok_feats_vocab) = analyze.to_sentence(toks, feats, dataset)

    def reverse_namer(i):
        # Map a feature index back to its token string.
        return tok_feats_vocab['itos'][i]

    quantiles = analyze.get_quantiles(states, 0.01)
    # Bug fix: removed a stray `breakpoint()` left over from debugging —
    # it dropped every non-interactive run into pdb at this point.
    masks = [FM.parse(x, reverse_namer) for x in result['feature']]
class OpenOutputDirOperator(Operator):
    """Blender operator that opens the scene's zpy output directory in the
    system file browser."""
    bl_idname = 'scene.zpy_open_output_dir'
    bl_label = 'Open Output Dir'
    bl_description = 'Open file browser at output dir.'
    bl_category = 'ZPY'
    bl_options = {'REGISTER'}

    def execute(self, context):
        # make=True creates the directory first when it does not exist yet.
        zpy.files.open_folder_in_explorer(context.scene.zpy_output_path, make=True)
        return {'FINISHED'}
def make_rl_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args, training_args):
    """Build the train/eval QueryResponseDatasets and collator for RL
    fine-tuning from the configured Alpaca instruction splits."""
    prompt_dict = utils.jload(data_args.prompt_dict_path)
    alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)

    def _frame_for(splits):
        # Concatenate the requested dataset splits into one DataFrame.
        return pd.concat([pd.DataFrame(alpaca_instructions[split]) for split in splits])

    def _dataset_for(frame):
        return QueryResponseDataset(
            df=frame,
            prompt_dict=prompt_dict,
            tokenizer=tokenizer,
            query_len=training_args.query_len,
        )

    return dict(
        train_dataset=_dataset_for(_frame_for(data_args.train_splits)),
        eval_dataset=_dataset_for(_frame_for(data_args.eval_splits)),
        data_collator=DataCollatorForQueryResponseDataset(),
    )
def convbnrelu_bloc(input_dim, output_dim):
    """3x3 conv (stride 1, no padding, no bias) -> BatchNorm -> in-place ReLU.

    Note: padding=0 means each application shrinks spatial dims by 2.
    """
    conv = nn.Conv2d(
        in_channels=input_dim,
        out_channels=output_dim,
        kernel_size=3,
        stride=1,
        padding=0,
        bias=False,
    )
    return nn.Sequential(conv, nn.BatchNorm2d(output_dim), nn.ReLU(inplace=True))
class CacheNow(Cache):
    """Cache for the aggregate "now" numbers (cases/deaths/recovered).

    Fetches current totals from Zeit Online and Berliner Morgenpost, optionally
    raises the case count to the Risklayer/Tagesspiegel crowd-sourced value,
    and serializes the preferred source to a UTF-8 JSON document.
    """

    def fetch_func(self):
        def _to_json_doc(data, current_case_count_risklayer):
            """Serialize one source's numbers, applying the Risklayer override when larger."""
            log.info('%s: serialize data to JSON', self)
            # Render the "consulted" timestamp in German local time.
            t_consulted_ger_tz_iso8601 = datetime.fromtimestamp(
                int(data['t_obtained_from_source']),
                tz=pytz.timezone('Europe/Amsterdam')).isoformat()
            output_dict = {
                'current_totals': {
                    'cases': data['cases'],
                    'deaths': data['deaths'],
                    'recovered': data['recovered'],
                    'tested': 'unknown',
                },
                'meta': {
                    'source': data['source'],
                    # NOTE(review): the 'info' URL and the contact e-mail were
                    # truncated/corrupted in this copy of the file — restore the
                    # real values before deploying.
                    'info': '',
                    'contact': 'Dr. Jan-Philip Gehrcke, ',
                    'time_source_last_updated_iso8601': data['time_source_last_updated_iso8601'],
                    'time_source_last_consulted_iso8601': t_consulted_ger_tz_iso8601,
                },
            }
            source_addendum = dedent("\n . Note: in this JSON response, the 'cases' value is based on\n a Tagesspiegel-verified, Risklayer-initated crowd-sourcing\n effort based on official Landkreis data. See 'info' URL.")
            # Collapse all internal whitespace into single spaces.
            source_addendum = ' '.join(source_addendum.strip().split())
            if current_case_count_risklayer is not None:
                # Only override when the crowd-sourced count is ahead of the source.
                if current_case_count_risklayer > output_dict['current_totals']['cases']:
                    output_dict['current_totals']['cases'] = current_case_count_risklayer
                    output_dict['meta']['source'] = output_dict['meta']['source'] + source_addendum
            return json.dumps(output_dict, indent=2, ensure_ascii=False).encode('utf-8')

        data_zo = None
        data_mopo = None
        current_case_count_rl = None
        # Each upstream fetch is best-effort: log and continue on failure.
        try:
            current_case_count_rl = get_fresh_case_data_from_ts_rl()
        except Exception as err:
            log.exception('err during TS/Rl/CS case count fetch: %s', err)
        try:
            data_zo = get_fresh_now_data_from_zeit()
        except Exception as err:
            log.exception('err during ZO /now fetch: %s', err)
        try:
            data_mopo = get_fresh_now_data_from_be_mopo()
        except Exception as err:
            log.exception('err during BM /now fetch: %s', err)
        if data_zo is None and data_mopo is None:
            # Fail loudly instead of serializing None (would crash deeper anyway).
            raise Exception('both ZO and BM /now fetches failed')
        if data_zo is None:
            # BUG FIX: the original returned the *failed* (None) source here,
            # which crashed in _to_json_doc; fall back to the other source.
            return _to_json_doc(data_mopo, current_case_count_rl)
        if data_mopo is None:
            return _to_json_doc(data_zo, current_case_count_rl)
        if data_zo['time_source_last_updated'] > data_mopo['time_source_last_updated']:
            log.info('zeit online data appears to be more recent')
        else:
            log.info('bemopo data appears to be more recent')
        # Case counts only grow, so prefer whichever source reports more cases.
        if data_zo['cases'] > data_mopo['cases']:
            log.info('zeit online data reports more cases')
            return _to_json_doc(data_zo, current_case_count_rl)
        else:
            log.info('bemopo data reports more cases')
            return _to_json_doc(data_mopo, current_case_count_rl)
def nnp_freq(importtext):
    """Return the fraction of tokens in *importtext* tagged as proper nouns (NNP).

    Args:
        importtext: raw text to tokenize and POS-tag.

    Returns:
        float in [0, 1]; 0.0 for empty/whitespace-only input (the original
        raised ZeroDivisionError in that case).
    """
    text = word_tokenize(importtext)
    if not text:
        # Robustness fix: no tokens means no proper nouns, not a crash.
        return 0.0
    tokens = nltk.pos_tag(text)
    tag_counts = Counter(tag for (_word, tag) in tokens)
    return tag_counts['NNP'] / len(text)
def make_bert_ner_model_fn(optimizer):
    """Build a TF1 Estimator model_fn for BERT-based named-entity recognition.

    Args:
        optimizer: a TF optimizer instance, wrapped in ZooOptimizer for
            distributed training on BigDL/Orca.

    Returns:
        A model_fn(features, labels, mode, params) supporting TRAIN and
        PREDICT modes only; params must contain 'num_entities'.
    """
    import tensorflow as tf
    from bigdl.orca.tfpark import ZooOptimizer

    def _bert_ner_model_fn(features, labels, mode, params):
        # Per-token BERT representations: (batch, seq_len, hidden).
        output_layer = bert_model(features, labels, mode, params).get_sequence_output()
        if (mode == tf.estimator.ModeKeys.TRAIN):
            # Dropout only during training.
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        # Per-token entity logits.
        logits = tf.layers.dense(output_layer, params['num_entities'])
        if (mode == tf.estimator.ModeKeys.TRAIN):
            # Flatten (batch, seq) into one axis for the token-level loss.
            logits = tf.reshape(logits, [(- 1), params['num_entities']])
            labels = tf.reshape(labels, [(- 1)])
            # input_mask marks real tokens (vs padding).
            mask = tf.cast(features['input_mask'], dtype=tf.float32)
            one_hot_labels = tf.one_hot(labels, depth=params['num_entities'], dtype=tf.float32)
            loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=one_hot_labels)
            # NOTE(review): tf.losses.softmax_cross_entropy returns a scalar by
            # default, so multiplying by the flattened mask broadcasts it and the
            # sum/total_size division algebraically cancels the masking — verify
            # whether per-token (unreduced) losses were intended here.
            loss *= tf.reshape(mask, [(- 1)])
            loss = tf.reduce_sum(loss)
            total_size = tf.reduce_sum(mask)
            # Epsilon guards against division by zero on all-padding batches.
            total_size += 1e-12
            loss /= total_size
            train_op = ZooOptimizer(optimizer).minimize(loss)
            return tf.estimator.EstimatorSpec(mode=mode, train_op=train_op, loss=loss)
        elif (mode == tf.estimator.ModeKeys.PREDICT):
            # Predicted entity id per token.
            probabilities = tf.nn.softmax(logits, axis=(- 1))
            predict = tf.argmax(probabilities, axis=(- 1))
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predict)
        else:
            invalidInputError(False, 'Currently only TRAIN and PREDICT modes are supported for NER')
    return _bert_ner_model_fn
def fetch_test_set(test_set_url):
    """Download an OPUS test set and split it into parallel line lists.

    The file interleaves lines in groups of four: source, gold translation,
    marian model output, separator.

    Args:
        test_set_url: URL of the interleaved test-set text file.

    Returns:
        (src, mar_model, gold) — lists of stripped strings of equal length.

    Raises:
        ValueError: if the three extracted lists differ in length.
    """
    import wget
    fname = wget.download(test_set_url, 'opus_test.txt')
    # BUG FIX: use a context manager so the handle is closed before os.remove()
    # (the original leaked the file object, which breaks deletion on Windows).
    with Path(fname).open() as f:
        lns = f.readlines()
    src = lmap(str.strip, lns[::4])
    gold = lmap(str.strip, lns[1::4])
    mar_model = lmap(str.strip, lns[2::4])
    if not (len(gold) == len(mar_model) == len(src)):
        raise ValueError(f'Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched')
    os.remove(fname)
    return (src, mar_model, gold)
class AdjustLayer(nn.Module):
    """Channel-adjustment block: 1x1 conv (no bias) followed by batch norm."""

    def __init__(self, in_channels, out_channels):
        super(AdjustLayer, self).__init__()
        projection = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.downsample = nn.Sequential(projection, nn.BatchNorm2d(out_channels))

    def forward(self, x):
        """Project x from in_channels to out_channels; spatial dims unchanged."""
        return self.downsample(x)
def ignore_background_loss(y_true, y_pred):
    """Mean absolute error restricted to non-background targets.

    Targets equal to zero get weight 0 via min(1, y_true); the result is the
    weighted absolute-error sum normalized by the total weight.
    """
    care_weights = K.minimum(1.0, y_true)
    weighted_abs_error = K.abs(y_pred - y_true) * care_weights
    return K.sum(weighted_abs_error) / K.sum(care_weights)
def main(args):
    """DreamBooth fine-tuning entry point on ColossalAI (Gemini ZeRO).

    Optionally pre-generates class images for prior preservation, loads the
    Stable Diffusion components (tokenizer, text encoder, VAE, UNet), trains
    only the UNet, and periodically saves/uploads pipeline checkpoints.
    """
    # Launch the distributed backend; only seed when one was requested.
    if (args.seed is None):
        colossalai.launch_from_torch(config={})
    else:
        colossalai.launch_from_torch(config={}, seed=args.seed)
    local_rank = gpc.get_local_rank(ParallelMode.DATA)
    world_size = gpc.get_world_size(ParallelMode.DATA)
    if args.with_prior_preservation:
        # Prior preservation: make sure enough class images exist, generating
        # the missing ones with the base pipeline.
        class_images_dir = Path(args.class_data_dir)
        if (not class_images_dir.exists()):
            class_images_dir.mkdir(parents=True)
        cur_class_images = len(list(class_images_dir.iterdir()))
        if (cur_class_images < args.num_class_images):
            torch_dtype = (torch.float16 if (get_current_device() == 'cuda') else torch.float32)
            pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision)
            pipeline.set_progress_bar_config(disable=True)
            num_new_images = (args.num_class_images - cur_class_images)
            logger.info(f'Number of class images to sample: {num_new_images}.')
            sample_dataset = PromptDataset(args.class_prompt, num_new_images)
            sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
            pipeline.to(get_current_device())
            for example in tqdm(sample_dataloader, desc='Generating class images', disable=(not (local_rank == 0))):
                images = pipeline(example['prompt']).images
                for (i, image) in enumerate(images):
                    # Content hash in the filename avoids collisions across runs.
                    hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
                    image_filename = (class_images_dir / f"{(example['index'][i] + cur_class_images)}-{hash_image}.jpg")
                    image.save(image_filename)
            # Free pipeline memory before loading the training models.
            del pipeline
    if (local_rank == 0):
        # Rank 0 owns output-dir creation and hub repo setup.
        if (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
        if args.push_to_hub:
            repo_id = create_repo(repo_id=(args.hub_model_id or Path(args.output_dir).name), exist_ok=True, token=args.hub_token).repo_id
    if args.tokenizer_name:
        logger.info(f'Loading tokenizer from {args.tokenizer_name}', ranks=[0])
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
    elif args.pretrained_model_name_or_path:
        logger.info('Loading tokenizer from pretrained model', ranks=[0])
        tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
    text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path)
    logger.info(f'Loading text_encoder from {args.pretrained_model_name_or_path}', ranks=[0])
    text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision)
    logger.info(f'Loading AutoencoderKL from {args.pretrained_model_name_or_path}', ranks=[0])
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae', revision=args.revision)
    logger.info(f'Loading UNet2DConditionModel from {args.pretrained_model_name_or_path}', ranks=[0])
    # ColoInitContext places UNet parameters for Gemini sharding at load time.
    with ColoInitContext(device=get_current_device()):
        unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision, low_cpu_mem_usage=False)
    # Only the UNet is trained; VAE and text encoder stay frozen.
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
    if args.scale_lr:
        # Linear LR scaling with global batch size.
        args.learning_rate = ((args.learning_rate * args.train_batch_size) * world_size)
    unet = gemini_zero_dpp(unet, args.placement)
    optimizer = GeminiAdamOptimizer(unet, lr=args.learning_rate, initial_scale=(2 ** 5), clipping_norm=args.max_grad_norm)
    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
    logger.info(f'Prepare dataset from {args.instance_data_dir}', ranks=[0])
    train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=(args.class_data_dir if args.with_prior_preservation else None), class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop)

    def collate_fn(examples):
        # Batch instance samples; with prior preservation, class samples are
        # appended so one forward pass covers both halves (split later by chunk).
        input_ids = [example['instance_prompt_ids'] for example in examples]
        pixel_values = [example['instance_images'] for example in examples]
        if args.with_prior_preservation:
            input_ids += [example['class_prompt_ids'] for example in examples]
            pixel_values += [example['class_images'] for example in examples]
        pixel_values = torch.stack(pixel_values)
        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
        input_ids = tokenizer.pad({'input_ids': input_ids}, padding='max_length', max_length=tokenizer.model_max_length, return_tensors='pt').input_ids
        batch = {'input_ids': input_ids, 'pixel_values': pixel_values}
        return batch

    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, num_workers=1)
    # Reconcile max_train_steps with num_train_epochs (either may be derived).
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps)
    # Frozen models run in the (possibly reduced) mixed-precision dtype.
    weight_dtype = torch.float32
    if (args.mixed_precision == 'fp16'):
        weight_dtype = torch.float16
    elif (args.mixed_precision == 'bf16'):
        weight_dtype = torch.bfloat16
    vae.to(get_current_device(), dtype=weight_dtype)
    text_encoder.to(get_current_device(), dtype=weight_dtype)
    num_update_steps_per_epoch = math.ceil(len(train_dataloader))
    if overrode_max_train_steps:
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    total_batch_size = (args.train_batch_size * world_size)
    logger.info('***** Running training *****', ranks=[0])
    logger.info(f' Num examples = {len(train_dataset)}', ranks=[0])
    logger.info(f' Num batches each epoch = {len(train_dataloader)}', ranks=[0])
    logger.info(f' Num Epochs = {args.num_train_epochs}', ranks=[0])
    logger.info(f' Instantaneous batch size per device = {args.train_batch_size}', ranks=[0])
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}', ranks=[0])
    logger.info(f' Total optimization steps = {args.max_train_steps}', ranks=[0])
    progress_bar = tqdm(range(args.max_train_steps), disable=(not (local_rank == 0)))
    progress_bar.set_description('Steps')
    global_step = 0
    torch.cuda.synchronize()
    for epoch in range(args.num_train_epochs):
        unet.train()
        for (step, batch) in enumerate(train_dataloader):
            torch.cuda.reset_peak_memory_stats()
            for (key, value) in batch.items():
                batch[key] = value.to(get_current_device(), non_blocking=True)
            optimizer.zero_grad()
            # Encode images into the VAE latent space (0.18215 is the SD
            # latent scaling factor).
            latents = vae.encode(batch['pixel_values'].to(dtype=weight_dtype)).latent_dist.sample()
            latents = (latents * 0.18215)
            # Forward diffusion: add noise at a random timestep per sample.
            noise = torch.randn_like(latents)
            bsz = latents.shape[0]
            timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
            timesteps = timesteps.long()
            noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
            encoder_hidden_states = text_encoder(batch['input_ids'])[0]
            model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
            # Target depends on the scheduler's parameterization.
            if (noise_scheduler.config.prediction_type == 'epsilon'):
                target = noise
            elif (noise_scheduler.config.prediction_type == 'v_prediction'):
                target = noise_scheduler.get_velocity(latents, noise, timesteps)
            else:
                raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
            if args.with_prior_preservation:
                # First half of the batch = instance, second half = class images
                # (see collate_fn); combine their losses with the prior weight.
                (model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
                (target, target_prior) = torch.chunk(target, 2, dim=0)
                loss = F.mse_loss(model_pred.float(), target.float(), reduction='none').mean([1, 2, 3]).mean()
                prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
                loss = (loss + (args.prior_loss_weight * prior_loss))
            else:
                loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
            # Gemini optimizer owns the backward pass (handles loss scaling).
            optimizer.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            logger.info(f'max GPU_mem cost is {(torch.cuda.max_memory_allocated() / (2 ** 20))} MB', ranks=[0])
            progress_bar.update(1)
            global_step += 1
            logs = {'loss': loss.detach().item(), 'lr': optimizer.param_groups[0]['lr']}
            progress_bar.set_postfix(**logs)
            if ((global_step % args.save_steps) == 0):
                # Periodic checkpoint: gather the sharded UNet into a plain
                # torch model, then save from rank 0 only.
                torch.cuda.synchronize()
                torch_unet = get_static_torch_model(unet)
                if (local_rank == 0):
                    pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, unet=torch_unet, revision=args.revision)
                    save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
                    pipeline.save_pretrained(save_path)
                    logger.info(f'Saving model checkpoint to {save_path}', ranks=[0])
            if (global_step >= args.max_train_steps):
                break
    # Final save (and optional hub upload) from rank 0.
    torch.cuda.synchronize()
    unet = get_static_torch_model(unet)
    if (local_rank == 0):
        pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, unet=unet, revision=args.revision)
        pipeline.save_pretrained(args.output_dir)
        logger.info(f'Saving model checkpoint to {args.output_dir}', ranks=[0])
        if args.push_to_hub:
            upload_folder(repo_id=repo_id, folder_path=args.output_dir, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'])
class PSPnet(nn.Module):
    """ASPP-style context head: a global-average-pooled branch plus one 1x1 and
    three dilated 3x3 branches, concatenated along channels.

    Output has 5 * out_channels channels at the input's spatial size.
    """

    def __init__(self, out_channels=256):
        super(PSPnet, self).__init__()

        def _branch(kernel_size, padding, dilation):
            # conv -> ReLU -> Dropout2d(0.5); spatial size is preserved.
            return nn.Sequential(
                nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                          stride=1, padding=padding, dilation=dilation, bias=True),
                nn.ReLU(),
                nn.Dropout2d(p=0.5),
            )

        self.layer6_0 = _branch(1, 0, 1)   # applied to the globally pooled feature
        self.layer6_1 = _branch(1, 0, 1)
        self.layer6_2 = _branch(3, 6, 6)
        self.layer6_3 = _branch(3, 12, 12)
        self.layer6_4 = _branch(3, 18, 18)

    def forward(self, x):
        height, width = x.shape[-2:]
        pooled = F.avg_pool2d(x, kernel_size=(height, width))
        # Broadcast the 1x1 global feature back to the input's spatial size.
        global_feature = self.layer6_0(pooled).expand(-1, -1, height, width)
        branches = [global_feature, self.layer6_1(x), self.layer6_2(x),
                    self.layer6_3(x), self.layer6_4(x)]
        return torch.cat(branches, dim=1)
def plot_causal_matrix_in_training(time_coef, name, log, log_step, threshold=0.5, plot_each_time=False):
    """Log the discovered causal graph (probabilities, thresholded graph, raw
    arrays) for an (n, m, t) coefficient tensor at training step *log_step*.

    Coefficients are min-max normalized unless they are nearly constant.
    No-op when time_coef is None.
    """
    if time_coef is None:
        return
    lo = np.min(time_coef)
    hi = np.max(time_coef)
    if (hi - lo) > 0.01:
        # Min-max normalize into [0, 1]; skip for (almost) constant tensors.
        time_coef = (time_coef - lo) / (hi - lo)
    (n, m, t) = time_coef.shape
    time_graph = time_coef
    # Edge probability = max coefficient over the time axis.
    edge_prob = np.max(time_graph, axis=2)
    prob_fig = plot_causal_matrix(edge_prob, figsize=[1.5 * n, 1 * n], vmin=0, vmax=1)
    log.log_figures(prob_fig, name='Discovered Prob/' + name, iters=log_step)
    time_thres = edge_prob > threshold
    graph_fig = plot_causal_matrix(time_thres, figsize=[1.5 * n, 1 * n])
    log.log_figures(graph_fig, name='Discovered Graph/' + name, iters=log_step)
    log.log_npz(
        {'Discovered Graph Coef': time_coef,
         'Discovered Prob': time_graph,
         'Discovered Graph': time_thres},
        name='Graph.npz', iters=log_step)
class Vgg19(torch.nn.Module):
    """VGG-19 feature extractor split into five stages.

    forward() returns the activations after each stage (relu1_1 through
    relu5_1 boundaries of torchvision's vgg19 feature stack).
    """

    # (start, stop) indices into vgg19.features for slice1..slice5.
    _SLICE_RANGES = ((0, 2), (2, 7), (7, 12), (12, 21), (21, 30))

    def __init__(self, model_path: str = None, requires_grad: bool = False):
        super(Vgg19, self).__init__()
        if model_path is None:
            vgg_pretrained_features = models.vgg19(pretrained=True).features
        else:
            model = vgg19(pretrained=False)
            checkpoint = torch.load(model_path)
            # The final classifier layer is unused and may have a different
            # class count — drop it and load the rest non-strictly.
            del checkpoint['state_dict']['classifier.6.weight']
            del checkpoint['state_dict']['classifier.6.bias']
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            vgg_pretrained_features = model.features
        for slice_num, (start, stop) in enumerate(self._SLICE_RANGES, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(start, stop):
                stage.add_module(str(layer_idx), vgg_pretrained_features[layer_idx])
            setattr(self, 'slice{}'.format(slice_num), stage)
        if not requires_grad:
            # Freeze all weights when used purely as a feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, x):
        outputs = []
        h = x
        for slice_num in range(1, 6):
            h = getattr(self, 'slice{}'.format(slice_num))(h)
            outputs.append(h)
        return outputs
class JobLauncher(object):
    """Dispatch a YAML job config to the matching job class.

    The job key is the prefix of the config's ``task_type`` (text before the
    first underscore), defaulting to 'local' when no task type is set.
    """

    JOB_CONFIG = {'local': LocalJob}

    def __init__(self, yaml_file):
        self.yaml_file = yaml_file
        if not yaml_file.endswith('.yaml'):
            raise ValueError('unknown extension of job file:', yaml_file)
        config = recursive_config(yaml_file)
        if config.task_type is not None:
            self.job_key = config.task_type.split('_')[0]
        else:
            self.job_key = 'local'

    def __call__(self, job_type=None, dryrun=False):
        """Instantiate the selected job class and submit it.

        An explicit job_type overrides the key derived from the config.
        """
        if job_type is not None:
            self.job_key = job_type.split('_')[0]
        print('[JobLauncher] job_key', self.job_key)
        job_cls = JobLauncher.JOB_CONFIG[self.job_key]
        job = job_cls(self.yaml_file, job_type=job_type, dryrun=dryrun)
        return job.submit()
def JSD(P, Q):
    """Jensen-Shannon divergence between two (possibly unnormalized) distributions.

    Inputs are L1-normalized first; scipy's entropy uses natural log, so the
    result lies in [0, ln 2].
    """
    p = P / np.linalg.norm(P, ord=1)
    q = Q / np.linalg.norm(Q, ord=1)
    mixture = 0.5 * (p + q)
    return 0.5 * entropy(p, mixture) + 0.5 * entropy(q, mixture)
class TestSetAllParamValues():
    """Tests for lasagne.layers.set_all_param_values."""

    def test_set_all_param_values(self):
        """Values propagate to every layer; bad counts/shapes raise ValueError."""
        from lasagne.layers import InputLayer, DenseLayer, set_all_param_values
        from lasagne.utils import floatX
        input_layer = InputLayer((10, 20))
        hidden_layer = DenseLayer(input_layer, 30)
        output_layer = DenseLayer(hidden_layer, 40)

        def random_values(*shape):
            return floatX(numpy.random.normal(0, 1, shape))

        w_hidden = random_values(20, 30)
        b_hidden = random_values(30)
        w_output = random_values(30, 40)
        b_output = random_values(40)
        set_all_param_values(output_layer, [w_hidden, b_hidden, w_output, b_output])
        assert numpy.allclose(output_layer.W.get_value(), w_output)
        assert numpy.allclose(output_layer.b.get_value(), b_output)
        assert numpy.allclose(hidden_layer.W.get_value(), w_hidden)
        assert numpy.allclose(hidden_layer.b.get_value(), b_hidden)
        # Too few values for the parameter list.
        with pytest.raises(ValueError):
            set_all_param_values(output_layer, [w_output, b_output, w_hidden])
        # Shape mismatch on one of the weight matrices.
        with pytest.raises(ValueError):
            bad_w_output = random_values(25, 40)
            set_all_param_values(output_layer, [w_hidden, b_hidden, bad_w_output, b_output])
def test_openimages_dataset():
    """End-to-end test of OpenImagesDataset: annotation parsing, test pipeline,
    hierarchy matrix expansion, and mAP evaluation with/without image-level labels.
    """
    # Build OID-style fixture files in a temporary directory.
    tmp_dir = tempfile.TemporaryDirectory()
    label_file = osp.join(tmp_dir.name, 'label_file.csv')
    ann_file = osp.join(tmp_dir.name, 'ann_file.csv')
    label_level_file = osp.join(tmp_dir.name, 'label_level_file.csv')
    _create_oid_style_ann(label_file, ann_file, label_level_file)
    hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json')
    _create_hierarchy_json(hierarchy_json)
    # Missing hierarchy_file must be rejected.
    with pytest.raises(AssertionError):
        OpenImagesDataset(ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, pipeline=[])
    dataset = OpenImagesDataset(ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, pipeline=[])
    ann = dataset.get_ann_info(0)
    # Boxes, labels and group-of flags must stay aligned (2 instances in fixture).
    assert (ann['bboxes'].shape[0] == ann['labels'].shape[0] == ann['gt_is_group_ofs'].shape[0] == 2)
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(128, 128), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])]
    dataset = OpenImagesDataset(ann_file=ann_file, img_prefix='tests/data', label_file=label_file, image_level_ann_file=label_level_file, load_from_file=False, hierarchy_file=hierarchy_json, pipeline=test_pipeline)
    # Running the test pipeline should record exactly one image meta.
    dataset.prepare_test_img(0)
    assert (len(dataset.test_img_metas) == 1)
    result = _create_dummy_results()
    dataset.evaluate(result)
    # NOTE(review): hierarchy_json is recreated here although the path and
    # contents are identical to the one above — looks redundant.
    hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json')
    _create_hierarchy_json(hierarchy_json)
    # A non-json hierarchy path must be rejected.
    with pytest.raises(AssertionError):
        fake_path = osp.join(tmp_dir.name, 'hierarchy.csv')
        OpenImagesDataset(ann_file=ann_file, img_prefix='tests/data', label_file=label_file, image_level_ann_file=label_level_file, load_from_file=False, hierarchy_file=fake_path, pipeline=test_pipeline)
    # The expanded class-relation matrix must match the fixture hierarchy.
    hierarchy = dataset.get_relation_matrix(hierarchy_json)
    hierarchy_gt = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 1]])
    assert np.equal(hierarchy, hierarchy_gt).all()
    meta_file = osp.join(tmp_dir.name, 'meta.pkl')
    _create_metas(meta_file)
    # Evaluation with image-level labels enabled.
    dataset = OpenImagesDataset(ann_file=ann_file, label_file=label_file, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, meta_file=meta_file, pipeline=[])
    result = _create_dummy_results()
    parsed_results = dataset.evaluate(result)
    assert np.isclose(parsed_results['mAP'], 0.8333, 0.0001)
    # Same evaluation with image-level labels disabled — mAP should not change
    # for this fixture.
    dataset = OpenImagesDataset(ann_file=ann_file, label_file=label_file, load_image_level_labels=False, image_level_ann_file=label_level_file, hierarchy_file=hierarchy_json, meta_file=meta_file, pipeline=[])
    result = _create_dummy_results()
    parsed_results = dataset.evaluate(result)
    assert np.isclose(parsed_results['mAP'], 0.8333, 0.0001)
    tmp_dir.cleanup()
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalar and array types.

    np.integer covers all NumPy integer scalar types (the original had a
    separate, unreachable branch for np.uint8 — it is a np.integer subclass).
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls through to the base class (raises TypeError).
        return super(MyEncoder, self).default(obj)
def get_callbacks(args):
    """Build checkpoint and early-stopping callbacks for args.logger.monitor.

    Monitors whose name contains 'loss' are minimized; all others maximized.
    Returns [ModelCheckpoint, EarlyStopping].
    """
    mode = 'min' if 'loss' in args.logger.monitor else 'max'
    filename = '{epoch}-{step}-{valid_loss:.4f}'
    if args.logger.monitor != 'valid_loss':
        # Embed the monitored metric in the checkpoint filename as well.
        filename += '-{' + args.logger.monitor + ':.4f}'
    checkpoint = ModelCheckpoint(
        filename=filename, monitor=args.logger.monitor, mode=mode, save_top_k=1)
    early_stop = EarlyStopping(
        args.logger.monitor, patience=args.scheduler.earlystop_patience,
        mode=mode, verbose=True)
    return [checkpoint, early_stop]
class GAT(nn.Module):
    """Multi-layer Graph Attention Network built from dgl GATConv layers.

    Hidden layers concatenate attention heads (flatten); the output layer
    averages its heads to produce per-node logits.
    """

    def __init__(self, num_layers, in_dim, num_hidden, num_classes, heads,
                 activation, feat_drop=0, attn_drop=0, negative_slope=0.2,
                 residual=False):
        super(GAT, self).__init__()
        self.num_layers = num_layers
        self.activation = activation
        self.gat_layers = nn.ModuleList()
        # Input projection — no residual connection on the first layer.
        self.gat_layers.append(GATConv(
            in_dim, num_hidden, heads[0],
            feat_drop, attn_drop, negative_slope, False, self.activation))
        # Hidden layers consume the concatenated heads of the previous layer.
        for layer_idx in range(1, num_layers):
            self.gat_layers.append(GATConv(
                num_hidden * heads[layer_idx - 1], num_hidden, heads[layer_idx],
                feat_drop, attn_drop, negative_slope, residual, self.activation))
        # Output layer: no activation; heads are averaged in forward().
        self.gat_layers.append(GATConv(
            num_hidden * heads[-2], num_classes, heads[-1],
            feat_drop, attn_drop, negative_slope, residual, None))

    def forward(self, inputs, g):
        h = inputs
        for hidden_layer in self.gat_layers[:-1]:
            # Concatenate heads: (N, heads, F) -> (N, heads * F).
            h = hidden_layer(g, h).flatten(1)
        # Average the output heads into logits.
        return self.gat_layers[-1](g, h).mean(1)
def tst_keras():
    """Smoke-test building every supported 3D backbone without weights.

    For each architecture: instantiate with its custom stride/repetition
    configuration, print the summary and estimated memory usage, then reset
    the TF1 graph so the next model starts from a clean slate.

    Refactored from eleven copy-pasted ``if 1:`` blocks (which also shadowed
    the builtin ``type``) into a single data-driven loop; output order and
    content are unchanged.
    """
    from tensorflow import __version__
    from tensorflow.compat.v1 import reset_default_graph
    from classification_models_3D.tfkeras import Classifiers
    print('Tensorflow version: {}'.format(__version__))
    # (model name, constructor kwargs) — one entry per former if-block.
    model_configs = [
        ('densenet121', dict(input_shape=(128, 128, 128, 2), stride_size=(1, 1, 2, 2, 2, 2, 2), kernel_size=3, repetitions=(6, 12, 24, 16, 8, 4))),
        ('efficientnetb0', dict(input_shape=(128, 128, 128, 2), stride_size=(2, 2, 2, 4, 2))),
        ('efficientnetv2-b0', dict(input_shape=(128, 128, 128, 2), stride_size=(2, 2, 2, 4, 2))),
        ('inceptionresnetv2', dict(input_shape=(299, 299, 299, 3), stride_size=(2, 2, 2, 4, 2))),
        ('inceptionv3', dict(input_shape=(299, 299, 299, 3), stride_size=(2, 4, 2, 2, 2))),
        ('mobilenet', dict(input_shape=(224, 224, 224, 3), stride_size=(2, 4, 2, 2, 2))),
        ('mobilenetv2', dict(input_shape=(224, 224, 224, 3), stride_size=(2, 4, 2, 2, 2))),
        ('resnet18', dict(input_shape=(256, 256, 256, 3), stride_size=(2, 2, 2, 4, 2, 2, 2, 2), repetitions=(2, 2, 2, 2, 2, 2, 2), init_filters=16)),
        ('resnext50', dict(input_shape=(256, 256, 256, 3), stride_size=(2, 2, 2, 4, 2, 2, 2), repetitions=(2, 2, 2, 2, 2, 2), init_filters=64)),
        ('seresnet101', dict(input_shape=(224, 224, 224, 3), stride_size=(2, 2, 4, 2, 2, 2), repetitions=(2, 2, 2, 2, 2), init_filters=32)),
        ('vgg16', dict(input_shape=(256, 256, 256, 3), stride_size=(2, 2, 4, 2, 2), repetitions=(2, 2, 3, 2, 2), init_filters=64, max_filters=1024)),
    ]
    for model_name, build_kwargs in model_configs:
        print('Go for {}'.format(model_name))
        (model_builder, preprocess_input) = Classifiers.get(model_name)
        model = model_builder(include_top=False, weights=None, **build_kwargs)
        print(model.summary())
        print(get_model_memory_usage(1, model), 'GB')
        # Release the previous model's graph/memory before the next build.
        reset_default_graph()
def log_board_file(fpath, args, metrics: dict):
    """Append one tab-separated result row (run config + selected metrics) to fpath.

    Skipped entirely in debug mode (args.dbg). The file is created with a
    leading blank line if it does not exist yet.
    """
    if args.dbg:
        return
    row = ['Time: {}'.format(datetime.datetime.now())]
    # Run-configuration label/value pairs, in fixed column order.
    config_pairs = (
        ('Data Name', args.data_name),
        ('Compression', args.compression),
        ('aggressive_compression', args.aggressive_compression),
        ('alpha', args.alpha),
        ('fix_edu_num', args.fix_edu_num),
        ('schedule', args.schedule),
        ('compress_leadn', args.compress_leadn),
    )
    for label, value in config_pairs:
        row.append(label)
        row.append(value)
    # Only validation/best-test metrics plus the core training stats are logged.
    for key in metrics:
        if (key.startswith('validation_') or key.startswith('best_test_')
                or key in ('training_epochs', 'training_loss', 'validation_loss')):
            row.append(key)
            row.append(metrics[key])
    row.append('FileName')
    row.append(args.fname)
    line = '\t'.join(str(item) for item in row)
    if not os.path.isfile(fpath):
        with open(fpath, 'w') as fd:
            fd.write('\n')
    with open(fpath, 'a') as file:
        file.write('\n' + line)
def gen_required_sigs(project_name: str, class_name: str, methods_list: list):
    """Assemble a class skeleton: class signature, fields, getter/setter and
    constructor signatures, plus the signatures of the requested methods,
    terminated by a closing brace.

    Raises:
        RuntimeError: if the class is not found in the database.
    """
    ident = {'project_name': project_name, 'class_name': class_name}
    class_rows = db.select(table_name='class', conditions=ident,
                           result_cols=['signature', 'fields', 'has_constructor'])
    if not class_rows:
        raise RuntimeError('Error happened in function gen_required_sigs')
    (c_sig, fields, has_constructor) = class_rows[0]
    parts = [c_sig, fields]

    def _method_sigs(extra_conditions):
        # Query method signatures for this class matching extra conditions.
        return db.select(table_name='method', conditions={**ident, **extra_conditions},
                         result_cols=['signature'])

    for row in _method_sigs({'is_get_set': True}):
        parts.append(row[0])
    for row in _method_sigs({'is_constructor': True}):
        parts.append(row[0])
    for parameters in methods_list:
        for row in _method_sigs({'parameters': parameters}):
            parts.append(row[0])
    # Matches the original concatenation: newline-separated, then '\n\n}'.
    return '\n'.join(parts) + '\n\n}'
# BUG FIX: the decorator was written as a bare call (`_config(...)`) before the
# class, so it never registered the config; restored the `@` application.
@_config(framework_name=FRAMEWORK_NAME, algo_name=STATIC_QUANT)
class StaticQuantConfig(BaseConfig):
    """Static quantization configuration for Keras layers.

    Holds weight/activation dtype, symmetry, and granularity settings; each
    field may also hold a list of candidates when used as a tuning space.
    """

    supported_configs: List[OperatorConfig] = []
    params_list = ['weight_dtype', 'weight_sym', 'weight_granularity',
                   'act_dtype', 'act_sym', 'act_granularity']
    name = STATIC_QUANT

    def __init__(self,
                 weight_dtype: str = 'int8',
                 weight_sym: bool = True,
                 weight_granularity: str = 'per_tensor',
                 act_dtype: str = 'int8',
                 act_sym: bool = True,
                 act_granularity: str = 'per_tensor',
                 white_list: Optional[List[OP_NAME_OR_MODULE_TYPE]] = DEFAULT_WHITE_LIST):
        """Init static quantization config.

        Args:
            weight_dtype: weight data type, default 'int8'.
            weight_sym: whether weight quantization is symmetric.
            weight_granularity: 'per_tensor' or 'per_channel'.
            act_dtype: activation data type, default 'int8'.
            act_sym: whether activation quantization is symmetric.
            act_granularity: 'per_tensor' or 'per_channel'.
            white_list: operators this config applies to.
        """
        super().__init__(white_list=white_list)
        self.weight_dtype = weight_dtype
        self.weight_sym = weight_sym
        self.weight_granularity = weight_granularity
        self.act_dtype = act_dtype
        self.act_sym = act_sym
        self.act_granularity = act_granularity
        self._post_init()

    def to_dict(self):
        return super().to_dict(params_list=self.params_list, operator2str=operator2str)

    # BUG FIX: takes `cls` and calls super(..., cls) — requires @classmethod,
    # which was missing in the original.
    @classmethod
    def from_dict(cls, config_dict):
        return super(StaticQuantConfig, cls).from_dict(config_dict=config_dict, str2operator=str2operator)

    # BUG FIX: mutates cls.supported_configs — requires @classmethod.
    # NOTE(review): the return annotation promises List[OperatorConfig] but the
    # original returned None (it only sets cls.supported_configs); behavior kept.
    @classmethod
    def register_supported_configs(cls) -> List[OperatorConfig]:
        supported_configs = []
        # The tuning space: every field lists its candidate values.
        static_quant_config = StaticQuantConfig(
            weight_dtype=['int8', 'fp32'],
            weight_sym=[True, False],
            weight_granularity=['per_tensor', 'per_channel'],
            act_dtype=['int8', 'fp32'],
            act_sym=[True, False],
            act_granularity=['per_tensor', 'per_channel'])
        operators = [tf.keras.layers.Dense, tf.keras.layers.Conv2D,
                     tf.keras.layers.DepthwiseConv2D, tf.keras.layers.SeparableConv2D,
                     tf.keras.layers.AvgPool2D, tf.keras.layers.MaxPool2D,
                     tf.keras.layers.AveragePooling2D, tf.keras.layers.MaxPooling2D]
        supported_configs.append(OperatorConfig(config=static_quant_config,
                                                operators=operators,
                                                backend=Backend.DEFAULT))
        cls.supported_configs = supported_configs
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1,
                   style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=None,
                   dcn=None, gcb=None, sac=None, rfp=None, gen_attention=None,
                   gen_attention_blocks=None):
    """Stack ``blocks`` residual blocks into an nn.Sequential stage.

    The first block may downsample (stride / channel change via a 1x1 conv +
    norm shortcut); subsequent blocks keep stride 1. ``gen_attention`` is only
    passed to block indices listed in ``gen_attention_blocks``.

    BUG FIX: the mutable default arguments ``gen_attention_blocks=[]`` and
    ``norm_cfg=dict(type='BN')`` were replaced with None sentinels (same
    effective defaults, no shared-state hazard).
    """
    if norm_cfg is None:
        norm_cfg = dict(type='BN')
    if gen_attention_blocks is None:
        gen_attention_blocks = []
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # Shortcut projection to match spatial size / channel count.
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg, inplanes, planes * block.expansion,
                             kernel_size=1, stride=stride, bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1])
    layers = []
    layers.append(block(
        inplanes=inplanes, planes=planes, stride=stride, dilation=dilation,
        downsample=downsample, style=style, with_cp=with_cp, conv_cfg=conv_cfg,
        norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, sac=sac, rfp=rfp,
        gen_attention=(gen_attention if 0 in gen_attention_blocks else None)))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        # NOTE(review): later blocks omit `rfp` as in the original — presumably
        # only the first block of a stage takes the RFP feature; confirm
        # against the block implementation.
        layers.append(block(
            inplanes=inplanes, planes=planes, stride=1, dilation=dilation,
            style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
            dcn=dcn, gcb=gcb, sac=sac,
            gen_attention=(gen_attention if i in gen_attention_blocks else None)))
    return nn.Sequential(*layers)
def main():
    """Fine-tune a multiple-choice model (SWAG-style) with 🤗 Accelerate.

    Parses CLI args, loads data/config/tokenizer/model, preprocesses examples
    into 4-way multiple-choice features, trains with optional checkpointing,
    tracking and Hub pushes, evaluates per epoch, and saves final artifacts.

    Fixed: when ``--config_name`` was supplied, the config was still loaded
    from ``args.model_name_or_path`` instead of ``args.config_name``.
    """
    args = parse_args()
    send_example_telemetry('run_swag_no_trainer', args)

    # --- Accelerator / logging setup -------------------------------------
    accelerator_log_kwargs = {}
    if args.with_tracking:
        accelerator_log_kwargs['log_with'] = args.report_to
        accelerator_log_kwargs['logging_dir'] = args.output_dir
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # Only the local main process logs at info level to avoid duplicate spam.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if args.seed is not None:
        set_seed(args.seed)

    # --- Output dir / Hub repository -------------------------------------
    if accelerator.is_main_process:
        if args.push_to_hub:
            if args.hub_model_id is None:
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            create_repo(repo_name, exist_ok=True, token=args.hub_token)
            repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
            # Keep intermediate checkpoints out of the pushed repo.
            with open(os.path.join(args.output_dir, '.gitignore'), 'w+') as gitignore:
                if 'step_*' not in gitignore:
                    gitignore.write('step_*\n')
                if 'epoch_*' not in gitignore:
                    gitignore.write('epoch_*\n')
        elif args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # --- Data loading ------------------------------------------------------
    if args.dataset_name is not None:
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    else:
        data_files = {}
        if args.train_file is not None:
            data_files['train'] = args.train_file
        if args.validation_file is not None:
            data_files['validation'] = args.validation_file
        extension = args.train_file.split('.')[-1]
        raw_datasets = load_dataset(extension, data_files=data_files)
    if args.debug:
        # Shrink every split to 100 examples for quick debugging runs.
        for split in raw_datasets.keys():
            raw_datasets[split] = raw_datasets[split].select(range(100))
    if raw_datasets['train'] is not None:
        column_names = raw_datasets['train'].column_names
    else:
        column_names = raw_datasets['validation'].column_names
    # SWAG column layout: one context, one question header, four endings.
    ending_names = [f'ending{i}' for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'
    label_column_name = 'label' if 'label' in column_names else 'labels'

    # --- Config / tokenizer / model ---------------------------------------
    if args.config_name:
        # FIX: previously loaded from args.model_name_or_path by mistake.
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=(not args.use_slow_tokenizer))
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if args.model_name_or_path:
        model = AutoModelForMultipleChoice.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMultipleChoice.from_config(config)
    # Grow the embedding matrix only when the tokenizer has extra tokens.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))

    # --- Preprocessing ------------------------------------------------------
    padding = 'max_length' if args.pad_to_max_length else False

    def preprocess_function(examples):
        # Repeat each context 4x (once per ending) and pair it with
        # "<question header> <ending_i>"; then regroup flat encodings into
        # lists of 4 per example.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers)]
        labels = examples[label_column_name]
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        tokenized_examples = tokenizer(first_sentences, second_sentences, max_length=args.max_seq_length, padding=padding, truncation=True)
        tokenized_inputs = {k: [v[i:i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
        tokenized_inputs['labels'] = labels
        return tokenized_inputs

    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names)
    train_dataset = processed_datasets['train']
    eval_dataset = processed_datasets['validation']
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')

    # --- Dataloaders / optimizer / scheduler --------------------------------
    if args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        # Pad to multiples of 8 for tensor-core efficiency under fp16.
        data_collator = DataCollatorForMultipleChoice(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # No weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    device = accelerator.device
    model.to(device)
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=(args.num_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps))
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Dataloader length may have changed after accelerator.prepare (sharding).
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log enum values; store the raw string.
        experiment_config['lr_scheduler_type'] = experiment_config['lr_scheduler_type'].value
        accelerator.init_trackers('swag_no_trainer', experiment_config)
    metric = evaluate.load('accuracy')

    # --- Training loop -------------------------------------------------------
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info('***** Running training *****')
    logger.info(f' Num examples = {len(train_dataset)}')
    logger.info(f' Num Epochs = {args.num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {args.max_train_steps}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    starting_epoch = 0
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != '':
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Fall back to the most recently created checkpoint directory.
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]
        # Checkpoint dirs are named either 'epoch_{n}' or 'step_{n}'.
        training_difference = os.path.splitext(path)[0]
        if 'epoch' in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # Skip batches already consumed before a mid-epoch resume.
            if args.resume_from_checkpoint and epoch == starting_epoch:
                if resume_step is not None and step < resume_step:
                    completed_steps += 1
                    continue
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # One optimizer update has completed when gradients are synced.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1
                if isinstance(checkpointing_steps, int):
                    if completed_steps % checkpointing_steps == 0:
                        output_dir = f'step_{completed_steps}'
                        if args.output_dir is not None:
                            output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
            if completed_steps >= args.max_train_steps:
                break

        # --- Per-epoch evaluation ----------------------------------------
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        accelerator.print(f'epoch {epoch}: {eval_metric}')
        if args.with_tracking:
            accelerator.log({'accuracy': eval_metric, 'train_loss': (total_loss.item() / len(train_dataloader)), 'epoch': epoch, 'step': completed_steps}, step=completed_steps)
        # Push intermediate checkpoints for all but the final epoch.
        if args.push_to_hub and epoch < args.num_train_epochs - 1:
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
        if args.checkpointing_steps == 'epoch':
            output_dir = f'epoch_{epoch}'
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    # --- Final save ---------------------------------------------------------
    if args.with_tracking:
        accelerator.end_training()
    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
        all_results = {f'eval_{k}': v for k, v in eval_metric.items()}
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(all_results, f)
def load_parameters():
    """Assemble every fixed experiment parameter into one dictionary.

    Corpus, embedding and output paths are derived from the parsed CLI
    ``args``; returns the ``(FIXED_PARAMETERS, args)`` pair callers expect.
    """
    datapath = args.datapath
    # Identification of the model being run.
    model_id = {'model_type': args.model_type, 'model_name': args.model_name}
    # Corpus files and the pretrained embedding table.
    data_paths = {
        'training_mnli': '{}/multinli_0.9/multinli_0.9_train.jsonl'.format(datapath),
        'dev_matched': '{}/multinli_0.9/multinli_0.9_dev_matched.jsonl'.format(datapath),
        'dev_mismatched': '{}/multinli_0.9/multinli_0.9_dev_mismatched.jsonl'.format(datapath),
        'test_matched': '{}/multinli_0.9/multinli_0.9_test_matched_unlabeled.jsonl'.format(datapath),
        'test_mismatched': '{}/multinli_0.9/multinli_0.9_test_mismatched_unlabeled.jsonl'.format(datapath),
        'training_snli': '{}/snli_1.0/snli_1.0_train.jsonl'.format(datapath),
        'dev_snli': '{}/snli_1.0/snli_1.0_dev.jsonl'.format(datapath),
        'test_snli': '{}/snli_1.0/snli_1.0_test.jsonl'.format(datapath),
        'embedding_data_path': '{}/glove.840B.300d.txt'.format(datapath),
    }
    # Where logs and checkpoints for this run are written.
    output_paths = {
        'log_path': '{}/{}'.format(args.logpath, args.model_name),
        'ckpt_path': '{}/{}'.format(args.ckptpath, args.model_name),
    }
    # Model/training hyper-parameters.
    hyper_params = {
        'embeddings_to_load': args.emb_to_load,
        'word_embedding_dim': 300,
        'hidden_embedding_dim': 300,
        'seq_length': args.seq_length,
        'keep_rate': args.keep_rate,
        'batch_size': args.batch_size,
        'learning_rate': args.learning_rate,
        'emb_train': args.emb_train,
        'alpha': args.alpha,
        'genre': args.genre,
    }
    # Merge in the original key order (ids, paths, outputs, hyper-params).
    FIXED_PARAMETERS = {**model_id, **data_paths, **output_paths, **hyper_params}
    return (FIXED_PARAMETERS, args)
def test_same_as_metrics_mse_implementation():
    """The metrics-module MSE and the score-computation MSE must agree."""
    shape = (5, 5, 8)
    # Draw the three random inputs in the same order as before so the RNG
    # stream is consumed identically.
    actual = np.random.random(size=shape) * 255
    expected = np.random.random(size=shape) * 255
    mask = np.random.randint(0, 2, size=shape)
    # Sanity-check that each synthetic input stays in its nominal range.
    for array, upper in ((actual, 255), (expected, 255), (mask, 1)):
        assert np.min(array) >= 0
        assert np.max(array) <= upper
    mse_metrics = mse(actual=actual, expected=expected, mask=mask, use_np=True, mask_norm=True)
    _mse = compute_mse(actual=actual, expected=expected, full_mask=mask, config=OVERALLONLY_CONFIG_NUMPY)
    mse_scorecomp = _mse['']['mse_masked']
    assert np.isclose(mse_scorecomp, mse_metrics)
class RoIAwarePool3d(nn.Module):
    """RoI-aware pooling of 3D point features, delegating to
    ``RoIAwarePool3dFunction``."""

    def __init__(self, out_size, max_pts_each_voxel=128):
        """out_size: pooled voxel grid size; max_pts_each_voxel: per-voxel point cap."""
        super().__init__()
        self.out_size = out_size
        self.max_pts_each_voxel = max_pts_each_voxel

    def forward(self, rois, pts, pts_feature, pool_method='max'):
        """Pool ``pts_feature`` into each RoI's voxel grid ('max' or 'avg')."""
        assert pool_method in ('max', 'avg')
        return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method)
class RandomHorizontallyFlip(object):
    """Jointly mirror an image/mask pair left-to-right with probability 0.5."""

    def __call__(self, img, mask):
        # Flip both or neither so the pair stays spatially aligned.
        if random.random() >= 0.5:
            return (img, mask)
        return (img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT))
class W2lDecoder(object):
    """Base wav2letter-style CTC decoder.

    Resolves the blank/silence symbol indices from the target dictionary,
    extracts per-frame emissions from a model, and collapses CTC label
    sequences into token tensors.
    """

    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        self.nbest = args.nbest
        self.criterion_type = CriterionType.CTC
        # Blank symbol: prefer an explicit '<ctc_blank>' entry, else reuse BOS.
        if '<ctc_blank>' in tgt_dict.indices:
            self.blank = tgt_dict.index('<ctc_blank>')
        else:
            self.blank = tgt_dict.bos()
        # Silence symbol: '<sep>' first, then '|', falling back to EOS.
        for candidate in ('<sep>', '|'):
            if candidate in tgt_dict.indices:
                self.silence = tgt_dict.index(candidate)
                break
        else:
            self.silence = tgt_dict.eos()
        self.asg_transitions = None

    def generate(self, models, sample, **unused):
        """Decode one batch: drop decoder-side inputs, then decode emissions."""
        encoder_input = {key: value for key, value in sample['net_input'].items() if key != 'prev_output_tokens'}
        return self.decode(self.get_emissions(models, encoder_input))

    def get_emissions(self, models, encoder_input):
        """Run the first model and return transposed float32 emissions on CPU."""
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, 'get_logits'):
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs):
        """Standard CTC post-processing: merge repeats, then drop blanks."""
        deduped = (group[0] for group in it.groupby(idxs))
        kept = [index for index in deduped if index != self.blank]
        return torch.LongTensor(kept)
def prepare_corpus_vocabs(args):
    """Build (or skip, if already cached) a BPE vocabulary for one
    (language, dataset, data-type) triple."""
    language, data_set, data_type = args
    print(f'Building vocabulary for {language} {data_type}')
    # Nothing to do when a vocabulary for this pair already exists on disk.
    if utils.check_vocab(language, data_type):
        return
    documents = utils.load_doc(language, data_set)
    token_stream = utils.flatten(doc[data_type] for doc in documents)
    token_stream = utils.flatten(preprocess_tokens(token_stream, data_type))
    vocab = BpeVocabulary(vocab_size=shared.VOCAB_SIZE, pct_bpe=shared.VOCAB_PCT_BPE)
    vocab.fit(Counter(token_stream))
    utils.dump_vocab(vocab, language, data_type)
    print(f'Done building vocabulary for {language} {data_type}')
def compute_pixel_coverage(instance_seg, object_id):
    """Return the fraction of pixels in ``instance_seg`` labeled ``object_id``."""
    matches = instance_seg == object_id
    # float64 cast before dividing avoids integer division of the sum.
    return matches.sum().astype(np.float64) / matches.size
def demo_to_midi(data, names, bpm=90.0, shift_second=None, shift_beat=None):
    """Convert per-track note data into a ``pretty_midi.PrettyMIDI`` object.

    The time offset may be given directly (``shift_second``) or in beats
    (``shift_beat``), converted through the tempo factor.
    """
    alpha = bpm_to_alpha(bpm)
    if shift_second is None:
        # NOTE(review): assumes shift_beat is provided whenever shift_second
        # is not -- confirm with callers.
        shift_second = alpha * shift_beat
    midi = pretty_midi.PrettyMIDI(initial_tempo=bpm)
    for track, name in zip(data, names):
        instrument = pretty_midi.Instrument(0, name=name)
        instrument.notes = add_notes(track, shift_second, alpha)
        midi.instruments.append(instrument)
    return midi
def logging_csv(file, header):
    """Open a fresh CSV log and return an append-one-row callable.

    If ``file`` already exists, an integer suffix is appended until an unused
    name is found.  The header row is written immediately.
    """
    fname = file
    suffix = 1
    # Probe `file`, then `file1`, `file2`, ... until the name is free.
    while os.path.isfile(fname):
        fname = file + str(suffix)
        suffix += 1
    with open(fname, 'w', newline='') as f:
        csv.writer(f).writerow(header)

    def write_csv(row):
        # Re-open in append mode per call so each row is flushed immediately.
        with open(fname, 'a', newline='') as f:
            csv.writer(f).writerow(row)
    return write_csv
def convert_PubLayNet_blob_to_target_blob(PubLayNet_blob, lookup_table):
    """Remap a PubLayNet-class-indexed weight blob onto the target class set.

    Target rows whose lookup entry is negative are filled with Gaussian noise
    matched to the source blob's mean/std; all other rows copy the mapped
    PubLayNet row.
    """
    src_shape = PubLayNet_blob.shape
    leading_factor = int(src_shape[0] / NUM_PUBLAYNET_CLS)
    tail_shape = list(src_shape[1:])
    # Only plain (x1) and box-regression style (x4) leading layouts supported.
    assert leading_factor in (1, 4)
    PubLayNet_blob = PubLayNet_blob.reshape([NUM_PUBLAYNET_CLS, -1] + tail_shape)
    std = PubLayNet_blob.std()
    mean = PubLayNet_blob.mean()
    target_shape = [args.NUM_TARGET_CLS] + list(PubLayNet_blob.shape[1:])
    # Start from noise with matching statistics, then overwrite mapped rows.
    target_blob = (np.random.randn(*target_shape) * std + mean).astype(np.float32)
    for target_cls in range(args.NUM_TARGET_CLS):
        source_cls = lookup_table[target_cls]
        if source_cls >= 0:
            target_blob[target_cls] = PubLayNet_blob[source_cls]
    return target_blob.reshape([args.NUM_TARGET_CLS * leading_factor] + tail_shape)
def image_transform(image_size: Union[int, List[int]], augmentation: dict, mean: List[float]=[0.485, 0.456, 0.406], std: List[float]=[0.229, 0.224, 0.225]) -> Callable:
    """Build a torchvision transform pipeline from an augmentation spec.

    ``augmentation`` may contain 'horizontal_flip' / 'vertical_flip'
    (probability floats), 'random_crop' (kwargs dict for RandomResizedCrop)
    and 'center_crop' (int or list).  Any other key raises
    ``NotImplementedError``.

    Fixed: the options were previously ``pop``-ed directly off the caller's
    dict, silently emptying it as a side effect; a copy is consumed instead.
    """
    if isinstance(image_size, int):
        image_size = (image_size, image_size)
    else:
        image_size = tuple(image_size)
    # Consume a copy so the caller's dict is left untouched.
    augmentation = dict(augmentation)
    horizontal_flip = augmentation.pop('horizontal_flip', None)
    if horizontal_flip is not None:
        assert isinstance(horizontal_flip, float) and 0 <= horizontal_flip <= 1
    vertical_flip = augmentation.pop('vertical_flip', None)
    if vertical_flip is not None:
        assert isinstance(vertical_flip, float) and 0 <= vertical_flip <= 1
    random_crop = augmentation.pop('random_crop', None)
    if random_crop is not None:
        assert isinstance(random_crop, dict)
    center_crop = augmentation.pop('center_crop', None)
    if center_crop is not None:
        assert isinstance(center_crop, (int, list))
    # Anything left over is an option we do not understand.
    if len(augmentation) > 0:
        raise NotImplementedError('Invalid augmentation options: %s.' % ', '.join(augmentation.keys()))
    t = [
        transforms.Resize(image_size) if random_crop is None else transforms.RandomResizedCrop(image_size[0], **random_crop),
        transforms.CenterCrop(center_crop) if center_crop is not None else None,
        transforms.RandomHorizontalFlip(horizontal_flip) if horizontal_flip is not None else None,
        transforms.RandomVerticalFlip(vertical_flip) if vertical_flip is not None else None,
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    return transforms.Compose([v for v in t if v is not None])
def get_spatial_graph(num_node, self_link, inward, outward):
    """Stack self-link, normalized inward and normalized outward adjacency
    matrices into a single array along a new leading axis."""
    identity_adj = edge2mat(self_link, num_node)
    inward_adj = normalize_digraph(edge2mat(inward, num_node))
    outward_adj = normalize_digraph(edge2mat(outward, num_node))
    return np.stack((identity_adj, inward_adj, outward_adj))
def get_vgg2l_odim(idim, in_channel=3, out_channel=128):
    """Output feature dimension after the VGG2L front-end.

    The per-channel frequency axis is halved (ceiling) twice — once per
    stride-2 pooling — then multiplied by the output channel count.
    """
    freq = idim / in_channel
    for _ in range(2):  # two stride-2 pooling stages
        freq = np.ceil(np.array(freq, dtype=np.float32) / 2)
    return int(freq) * out_channel
def get_default_cfg():
    """Return a cloned default yacs config node for the shape-tracking run.

    Fixed: ``EXP.MEAN_CODE_PATH`` previously carried a stray leading space
    (' /root/...'), which produced a nonexistent filesystem path.
    """
    _C = CN()
    _C.VERSION = 2
    # Dataset / IO paths.
    _C.DATA_DIR = './data/waymo/processed/validation/'
    _C.DATASET = 'waymo'
    _C.OUTPUT_DIR = './output/debug'
    _C.OBJECT_LIST_PATH = './data/waymo/splits/easy_list.json'
    _C.CKPT_PATH = '/root/code/shape-reconstruction/experiments/point_sdf/partial_point_weighted/ckpts/latest.pth'
    # Experiment toggles.
    _C.EXP = CN()
    _C.EXP.USE_MEAN_CODE = False
    _C.EXP.UPDATE_NUM = 500
    _C.EXP.WO_ENCODER = False
    _C.EXP.RAW_PC = False
    _C.EXP.USE_DETECTION = False
    # FIX: removed the stray leading space from the original literal.
    _C.EXP.MEAN_CODE_PATH = '/root/code/shape-reconstruction/experiments/point_sdf/partial_point_weighted/code/mean.npz'
    _C.DEBUG = CN()
    _C.DEBUG.IS_DEBUG = False
    # Shape decoder network dimensions.
    _C.SHAPE_MODEL = CN()
    _C.SHAPE_MODEL.CODE_DIM = 512
    _C.SHAPE_MODEL.HIDDEN_DIM = 512
    _C.SHAPE_MODEL.POINT_FEAT_DIMS = [3, 64, 256, 512]
    _C.SHAPE_MODEL.DECODER_DIMS = [1024, 512, 256, 256, 1]
    _C.SHAPE_MODEL.USE_RES_DECODER = False
    # Motion-model blending weights.
    _C.MOTION_MODEL = CN()
    _C.MOTION_MODEL.AVG_WEIGHT = 0.5
    _C.MOTION_MODEL.PREV_WEIGHT = 0.5
    # Latent-code search / finetuning schedule.
    _C.SEARCH_CODE = CN()
    _C.SEARCH_CODE.IF_FINETUNE = True
    _C.SEARCH_CODE.IF_ENCODE = False
    _C.SEARCH_CODE.FINETUNE_FIRST_FRAME = False
    _C.SEARCH_CODE.PREV_REG_WEIGHT = 5
    _C.SEARCH_CODE.INIT_REG_WEIGHT = 10
    _C.SEARCH_CODE.ZERO_REG_WEIGHT = 5
    _C.SEARCH_CODE.ITER_NUM = 100
    _C.SEARCH_CODE.LR = 0.001
    _C.SEARCH_CODE.POLICY = 'agg'
    # Loss composition.
    _C.LOSS = CN()
    _C.LOSS.SHAPE_LOSS_WEIGHT = 1.0
    _C.LOSS.SHAPE_LOSS_TYPE = 'l1'
    _C.LOSS.USE_CD = True
    _C.LOSS.USE_SHAPE = True
    _C.LOSS.USE_MC = False
    _C.LOSS.USE_MP = False
    _C.LOSS.CD_WEIGHT = 0.4
    _C.LOSS.CD_LOSS_TYPE = 'smooth_l1'
    _C.LOSS.CD_SMOOTH_L1_LOSS_BETA = 0.04
    # Chamfer-distance loss options.
    _C.CD_LOSS = CN()
    _C.CD_LOSS.USE_PREV_PC = True
    _C.CD_LOSS.USE_INIT_PC = True
    _C.CD_LOSS.USE_AGG_PC = True
    _C.CD_LOSS.PREV_PC_WEIGHT = 1.0
    _C.CD_LOSS.INIT_PC_WEIGHT = 1.0
    _C.CD_LOSS.AGG_PC_WEIGHT = 1.0
    _C.CD_LOSS.PREV_PC_DIR = 'double'
    _C.CD_LOSS.INIT_PC_DIR = 'double'
    _C.CD_LOSS.AGG_PC_DIR = 'x-y'
    _C.CD_LOSS.MERGE_INIT_TO_PREV = False
    _C.CD_LOSS.MERGE_AGG_TO_PREV = False
    _C.CD_LOSS.MaxPCNum = 2000
    # Optimizer schedule.
    _C.OPTIM = CN()
    _C.OPTIM.OPTIMIZER = 'SGD'
    _C.OPTIM.INIT_LR = 0.1
    _C.OPTIM.GAMMA = 0.5
    _C.OPTIM.MILESTONES = [100, 200, 300, 400]
    _C.OPTIM.ITER_NUM = 300
    return _C.clone()
class TestLanguageModeling(unittest.TestCase):
    """End-to-end smoke tests: train/eval/generate tiny fairseq language models.

    Fixed: the bare tuple ``((not has_hf_transformers), 'skip test if
    transformers is missing')`` before ``test_transformer_xl_bptt_lm`` was a
    no-op expression — it is restored to the intended
    ``@unittest.skipIf(...)`` decorator.
    """

    def setUp(self):
        # Silence fairseq's copious logging during the smoke runs.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'fconv_lm', ['--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]', '--decoder-embed-dim', '280', '--optimizer', 'nag', '--lr', '0.1'])
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_transformer_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                # Re-evaluate with a sliding context window.
                eval_lm_main(data_dir, extra_flags=['--context-window', '25'])
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_transformer_lm_with_adaptive_softmax(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_lm_with_adaptive_softmax') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'transformer_lm', ['--add-bos-token', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_lightconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_lstm_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_lstm_lm_residuals(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_lm_residuals') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'lstm_lm', ['--add-bos-token', '--residuals'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    # FIX: restored the decorator that the flattening reduced to a bare tuple.
    @unittest.skipIf((not has_hf_transformers), 'skip test if transformers is missing')
    def test_transformer_xl_bptt_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_xl_bptt_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                task_flags = ['--user-dir', 'examples/truncated_bptt', '--task', 'truncated_bptt_lm', '--batch-size', '2', '--tokens-per-sample', '50']
                train_language_model(data_dir=data_dir, arch='transformer_xl', extra_flags=(task_flags + ['--n-layer', '2']), task='truncated_bptt_lm', run_validation=True, extra_valid_flags=task_flags)
                eval_lm_main(data_dir, extra_flags=task_flags)
class AutoEncoder(nn.Module):
    """Variational autoencoder over mixed continuous/one-hot tabular features.

    Feature layout is taken from the dataset object `d`: indices belonging to
    one-hot categorical groups come from ``d.get_data_params()[2]``; every
    remaining index is treated as continuous.  The encoder emits a mean and a
    sigmoid-bounded "logvar" head.

    NOTE(review): despite the name, the `logvar` head's output is used
    directly as a variance (``sqrt(logvar) * eps`` in sampling and
    ``1/logvar`` in the likelihood), not as a log-variance -- confirm the
    intended parameterization against the training code.
    """

    def __init__(self, d, encoded_size):
        super(AutoEncoder, self).__init__()
        self.encoded_size = encoded_size
        self.data_size = len(d.encoded_feature_names)
        # Index groups of the one-hot encoded categorical features.
        self.encoded_categorical_feature_indexes = d.get_data_params()[2]
        # Continuous features = every index not inside a categorical group.
        self.encoded_continuous_feature_indexes = []
        for i in range(self.data_size):
            valid = 1
            for v in self.encoded_categorical_feature_indexes:
                if (i in v):
                    valid = 0
            if valid:
                self.encoded_continuous_feature_indexes.append(i)
        # First column index at which categorical features begin (continuous
        # features are assumed to come first).
        self.encoded_start_cat = len(self.encoded_continuous_feature_indexes)
        # Mean head of the encoder: data_size -> 20 -> 16 -> 14 -> 12 -> z.
        self.encoder_mean = nn.Sequential(nn.Linear(self.data_size, 20), nn.BatchNorm1d(20), nn.Dropout(0.1), nn.ReLU(), nn.Linear(20, 16), nn.BatchNorm1d(16), nn.Dropout(0.1), nn.ReLU(), nn.Linear(16, 14), nn.BatchNorm1d(14), nn.Dropout(0.1), nn.ReLU(), nn.Linear(14, 12), nn.BatchNorm1d(12), nn.Dropout(0.1), nn.ReLU(), nn.Linear(12, self.encoded_size))
        # Variance head: same trunk, sigmoid-bounded output in (0, 1).
        self.encoder_var = nn.Sequential(nn.Linear(self.data_size, 20), nn.BatchNorm1d(20), nn.Dropout(0.1), nn.ReLU(), nn.Linear(20, 16), nn.BatchNorm1d(16), nn.Dropout(0.1), nn.ReLU(), nn.Linear(16, 14), nn.BatchNorm1d(14), nn.Dropout(0.1), nn.ReLU(), nn.Linear(14, 12), nn.BatchNorm1d(12), nn.Dropout(0.1), nn.ReLU(), nn.Linear(12, self.encoded_size), nn.Sigmoid())
        # Decoder mirrors the encoder: z -> 12 -> 14 -> 16 -> 20 -> data_size,
        # with a sigmoid so reconstructions stay in [0, 1].
        self.decoder_mean = nn.Sequential(nn.Linear(self.encoded_size, 12), nn.BatchNorm1d(12), nn.Dropout(0.1), nn.ReLU(), nn.Linear(12, 14), nn.BatchNorm1d(14), nn.Dropout(0.1), nn.ReLU(), nn.Linear(14, 16), nn.BatchNorm1d(16), nn.Dropout(0.1), nn.ReLU(), nn.Linear(16, 20), nn.BatchNorm1d(20), nn.Dropout(0.1), nn.ReLU(), nn.Linear(20, self.data_size), nn.Sigmoid())

    def encoder(self, x):
        """Return (mean, logvar); logvar is shifted by 0.05 to stay positive."""
        mean = self.encoder_mean(x)
        logvar = (0.05 + self.encoder_var(x))
        return (mean, logvar)

    def decoder(self, z):
        """Decode a latent code into a reconstruction mean."""
        mean = self.decoder_mean(z)
        return mean

    def sample_latent_code(self, mean, logvar):
        """Reparameterization trick: z = mean + sqrt(var) * eps."""
        eps = torch.randn_like(logvar)
        return (mean + (torch.sqrt(logvar) * eps))

    def normal_likelihood(self, x, mean, logvar, raxis=1):
        """Per-sample Gaussian log-likelihood of x under N(mean, logvar),
        summed over feature axis 1 (`raxis` is unused)."""
        return torch.sum(((- 0.5) * ((((x - mean) * (1.0 / logvar)) * (x - mean)) + torch.log(logvar))), axis=1)

    def forward(self, x):
        """Encode x and draw 50 Monte-Carlo reconstructions.

        Returns a dict with the encoder outputs ('em', 'ev'), the sampled
        latent codes ('z'), their decoded reconstructions ('x_pred'), and the
        sample count ('mc_samples').
        """
        res = {}
        mc_samples = 50
        (em, ev) = self.encoder(x)
        res['em'] = em
        res['ev'] = ev
        res['z'] = []
        res['x_pred'] = []
        res['mc_samples'] = mc_samples
        for _ in range(mc_samples):
            z = self.sample_latent_code(em, ev)
            x_pred = self.decoder(z)
            res['z'].append(z)
            res['x_pred'].append(x_pred)
        return res
def get_detector(name, **kwargs):
    """Factory for detector heads; currently only 'convs' is recognized."""
    if name == 'convs':
        return Conv3x3ReLUBNs(
            kwargs['in_channels'],
            kwargs['inner_channels'],
            kwargs['out_channels'],
            kwargs['scale'],
            kwargs['num_convs'],
            kwargs.get('drop_rate', 0.0),
        )
    raise ValueError(f'{name} is not supported.')
def init_logger(log_folder, file_name='output.log'):
    """Create ``log_folder`` and return a logger writing to a file and stderr.

    If the folder already exists, interactively ask whether to wipe it; any
    answer other than 'y'/'Y' aborts the program with exit code 1.

    Fixed: the answer was previously compared with ``is`` (object identity on
    string literals), which is not guaranteed to hold for equal strings.
    """
    if os.path.exists(log_folder):
        print(('WARNING: The results directory (%s) already exists. Delete previous results directory [y/N]? ' % log_folder), end='')
        var = input()
        # FIX: value comparison instead of identity comparison.
        if var in ('y', 'Y'):
            print('removing directory ...')
            shutil.rmtree(log_folder, ignore_errors=True)
        else:
            print(('ERROR: The results directory already exists: %s' % log_folder))
            sys.exit(1)
    os.makedirs(log_folder)
    log_file_path = os.path.join(log_folder, file_name)
    logger = logging.getLogger('my_logger')
    logger.setLevel(logging.DEBUG)
    # Mirror every record to the log file and to the console.
    logger.addHandler(logging.FileHandler(log_file_path))
    logger.addHandler(logging.StreamHandler())
    return logger
class BaseImageDataset(BaseDataset):
    """Base class for image datasets; knows how to summarize its splits."""

    def print_dataset_statistics(self, train, query, gallery):
        """Print a small table of id/image/camera counts for each split."""
        summaries = []
        for split in (train, query, gallery):
            num_pids, num_imgs, num_cams, _ = self.get_imagedata_info(split)
            summaries.append((num_pids, num_imgs, num_cams))
        print('Dataset statistics:')
        print(' ')
        print(' subset | # ids | # images | # cameras')
        print(' ')
        row_templates = (' train | {:5d} | {:8d} | {:9d}', ' query | {:5d} | {:8d} | {:9d}', ' gallery | {:5d} | {:8d} | {:9d}')
        for template, counts in zip(row_templates, summaries):
            print(template.format(*counts))
        print(' ')
# NOTE(review): `_config` looks like the residue of a stripped decorator
# (e.g. a sacred/experiment-framework config decorator); confirm against the
# original file.
_config
def model_resnet_cifar():
    # Config body: under sacred-style config collection the local assignments
    # themselves are captured, so `cfg` being otherwise unused is intentional
    # -- presumably; verify with the experiment framework in use.
    # 'ResnetiCifar44' presumably names a project model class -- TODO confirm.
    cfg = {'learner': {'model': 'ResnetiCifar44'}, 'training': {'resume_from_checkpoint_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'resume_training': True}}
class FCBlock(nn.Module):
    """Three-layer fully-connected head with ReLU-activated hidden layers."""

    def __init__(self, input_dim, hidden_dims, output_dim=10):
        """hidden_dims: sequence giving the widths of the two hidden layers."""
        super(FCBlock, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.fc3 = nn.Linear(hidden_dims[1], output_dim)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # No activation on the output layer (raw logits).
        return self.fc3(hidden)
def build_backbone(args):
    """Assemble the backbone joined with its positional encoding."""
    position_embedding = build_position_encoding(args)
    # The backbone is trainable only when it has its own learning rate.
    train_backbone = args.lr_backbone > 0
    # Intermediate feature maps are needed for multi-scale feature levels.
    return_interm_layers = args.num_feature_levels > 1
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args)
    return Joiner(backbone, position_embedding)
def gen_filename(filename, dataset='esc50'):
    """Map an audio file path to its precomputed Whisper-feature .npz path."""
    # The FMA split lives under a differently named directory.
    if dataset == 'fma':
        dataset = 'fma_mp3'
    ext = filename.split('.')[(- 1)]
    # Basename with the extension (and its dot) stripped.
    stem = filename.split('/')[(- 1)][:(- (len(ext) + 1))]
    prefix = ('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/whisper_feat/' + str(dataset)) + '/whisper_large-v1/'
    return (prefix + stem) + '.npz'
class Binarizer():
    """Utilities to turn text files into streams of token-id tensors.

    NOTE(review): all three methods take neither `self` nor `cls` and are
    clearly meant to be called on the class; they look like ``@staticmethod``s
    whose decorators were lost during extraction -- confirm against the
    original file.
    """

    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=(- 1), already_numberized=False):
        # Encode lines from `filename` within the byte range [offset, end)
        # and feed each id tensor to `consumer`.  `dict` (shadowing the
        # builtin) is presumably a fairseq-style dictionary exposing
        # unk_index/unk_word/eos()/encode_line -- verify against callers.
        (nseq, ntok) = (0, 0)
        replaced = Counter()  # words that were mapped to <unk>

        def replaced_consumer(word, idx):
            # Count every out-of-vocabulary word replaced by the unk symbol.
            if ((idx == dict.unk_index) and (word != dict.unk_word)):
                replaced.update([word])
        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # safe_readline advances past a partial line after a mid-line seek.
            line = safe_readline(f)
            while line:
                # Stop once we have read past this worker's byte range.
                if ((end > 0) and (f.tell() > end)):
                    break
                if already_numberized:
                    # Lines are already space-separated integer ids.
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order)
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}

    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=(- 1)):
        # Same chunked walk as `binarize`, but each line is handled by
        # `alignment_parser` instead of a dictionary.
        nseq = 0
        with open(filename, 'r') as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if ((end > 0) and (f.tell() > end)):
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq}

    def find_offsets(filename, num_chunks):
        # Split the file into `num_chunks` byte ranges whose boundaries are
        # aligned to line starts (offsets[0] = 0; offsets[num_chunks] stays 0
        # as written -- callers appear to use pairwise (offset, end) slices).
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_chunks)
            offsets = [0 for _ in range((num_chunks + 1))]
            for i in range(1, num_chunks):
                f.seek((chunk_size * i))
                safe_readline(f)  # skip to the next full line boundary
                offsets[i] = f.tell()
            return offsets
class ImageDecoder(nn.Module):
    """DCGAN-style transposed-convolution decoder from a latent vector to an image.

    The first stage expands a 1x1 latent map to 4x4; each following stage
    doubles the spatial size and halves the channel width.
    """

    def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
        super(ImageDecoder, self).__init__()
        # Widest feature map first; width halves at every upsampling stage.
        width = ngf * 2 ** (n_layers - 2)
        blocks = [
            nn.ConvTranspose2d(input_size, width, 4, 1, 0, bias=False),
            nn.BatchNorm2d(width),
            nn.ReLU(True),
        ]
        for _ in range(n_layers - 2):
            blocks.extend([
                nn.ConvTranspose2d(width, width // 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(width // 2),
                nn.ReLU(True),
            ])
            width //= 2
        # Final upsample straight to the requested channel count.
        blocks.append(nn.ConvTranspose2d(width, n_channels, 4, 2, 1, bias=False))
        if activation == 'tanh':
            blocks.append(nn.Tanh())
        elif activation == 'sigmoid':
            blocks.append(nn.Sigmoid())
        else:
            raise NotImplementedError
        self.main = nn.Sequential(*blocks)

    def forward(self, x):
        # Promote a flat latent (B, C) to a (B, C, 1, 1) map for conv input.
        if x.dim() == 2:
            x = x.view(*x.size(), 1, 1)
        return self.main(x)
_torch _torchaudio _sentencepiece  # NOTE(review): these look like stripped @require_torch / @require_torchaudio / @require_sentencepiece decorators -- confirm against the upstream test module.
class Speech2TextProcessorTest(unittest.TestCase):
    """Round-trip tests for Speech2TextProcessor (tokenizer + feature extractor)."""

    def setUp(self):
        # Build a throwaway pretrained directory holding a tiny vocab, the
        # sample sentencepiece model, and a feature-extractor config.
        self.tmpdirname = tempfile.mkdtemp()
        vocab = ['<s>', '<pad>', '</s>', '<unk>', 'This', 'is', 'a', 't', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, (save_dir / VOCAB_FILES_NAMES['vocab_file']))
        if (not (save_dir / VOCAB_FILES_NAMES['spm_file']).exists()):
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['spm_file']))
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
        feature_extractor_map = {'feature_size': 24, 'num_mel_bins': 24, 'padding_value': 0.0, 'sampling_rate': 16000, 'return_attention_mask': False, 'do_normalize': True}
        save_json(feature_extractor_map, (save_dir / FEATURE_EXTRACTOR_NAME))

    def get_tokenizer(self, **kwargs):
        # Fresh tokenizer from the temp dir; kwargs override saved config.
        return Speech2TextTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Speech2TextFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # Saving then reloading the processor must preserve both components.
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = Speech2TextProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        # Extra kwargs passed to from_pretrained must override saved config
        # for both the tokenizer and the feature extractor.
        processor = Speech2TextProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = Speech2TextProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, Speech2TextTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Speech2TextFeatureExtractor)

    def test_feature_extractor(self):
        # Calling the processor on raw speech must match the bare extractor.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        # In target mode the processor must behave exactly like the tokenizer.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        with processor.as_target_processor():
            encoded_processor = processor(input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        # batch_decode must be forwarded to the tokenizer unchanged.
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = Speech2TextProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
def _validate_accelerator(accel_obj):
    """Assert *accel_obj* is an instance of one of the known accelerator bases.

    ``a1`` and ``a2`` come from the enclosing scope and may each be ``None``
    (unavailable backend); a ``None`` entry never matches.
    """
    ok = any(
        base is not None and isinstance(accel_obj, base)
        for base in (a1, a2)
    )
    if not ok:
        raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of BaseAccelerator')
class GroupCenterCrop(object):
    """Apply torchvision's CenterCrop to every frame of an image group."""

    def __init__(self, size):
        # A single shared CenterCrop transform, reused for each frame.
        self.worker = torchvision.transforms.CenterCrop(size)

    def __call__(self, img_group):
        # Crop each frame independently, preserving order.
        return list(map(self.worker, img_group))
def get_dataloader(split, config, return_dict=False):
    """Build a captioning DataLoader for *split*.

    Shuffling and drop-last are enabled only for the training split.
    """
    ds = AudioCaptionDataset(config.dataset, split, 'captioning', return_dict)
    is_train = split == 'train'
    return DataLoader(
        dataset=ds,
        batch_size=config.data.batch_size,
        shuffle=is_train,
        drop_last=is_train,
        num_workers=config.data.num_workers,
        collate_fn=collate_fn,
    )
def test_seg_evaluate():
    """Smoke-test S3DIS semantic-segmentation metric evaluation (GPU only)."""
    # Evaluation here requires CUDA; skip on CPU-only machines.
    if (not torch.cuda.is_available()):
        pytest.skip()
    root_path = './tests/data/s3dis'
    ann_file = './tests/data/s3dis/s3dis_infos.pkl'
    s3dis_dataset = S3DISSegDataset(data_root=root_path, ann_files=ann_file, test_mode=True)
    results = []
    # Hand-crafted per-point class predictions for the 100-point fixture.
    pred_sem_mask = dict(semantic_mask=torch.tensor([2, 3, 1, 2, 2, 6, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 3, 1, 2, 0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 3, 2, 2, 2, 2, 2, 3, 0, 0, 4, 6, 7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7, 2, 2, 0, 2, 6, 2, 12, 6, 3, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 11, 0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 3, 2]).long())
    results.append(pred_sem_mask)
    ret_dict = s3dis_dataset.evaluate(results)
    # Expected metrics precomputed for this fixture; loose 0.01 tolerance.
    assert (abs((ret_dict['miou'] - 0.7625)) < 0.01)
    assert (abs((ret_dict['acc'] - 0.9)) < 0.01)
    assert (abs((ret_dict['acc_cls'] - 0.9074)) < 0.01)
def mobilenetv3_large_100(pretrained=False, **kwargs):
    """MobileNetV3-Large at 1.0 width multiplier, optionally pretrained."""
    return _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
class Kernel(Module):
    """Kernel-density weighting module: k(x, y) = basis(||x - y||_p / scale).

    Parameters
    ----------
    scale : float
        Bandwidth of the kernel (registered as a Parameter).
    p : float
        Order of the pairwise Minkowski distance used by ``torch.cdist``.
    basis : str
        Name of a basis class in ``diffsnn.nonparametric.basis``.
    fix_params : bool
        When True (default), freeze all parameters (no gradient on scale).
    """

    def __init__(self, scale=1.0, p=2.0, basis='Epanechnikov', fix_params=True):
        super().__init__()
        self.init_params(scale, fix_params)
        self.pairwise_distance = partial(torch.cdist, p=p)
        # The basis function is looked up by name and JIT-scripted once.
        self.basis_func = torch.jit.script(getattr(diffsnn.nonparametric.basis, basis)())

    def init_params(self, scale: float, fix_params: bool):
        """Register ``scale`` as a parameter, optionally frozen."""
        self.scale = Parameter(torch.tensor(scale))
        if fix_params:
            self.fix_params()

    def fix_params(self):
        """Disable gradients for every parameter of this module."""
        for each_param in self.parameters():
            each_param.requires_grad = False

    def forward(self, x, y):
        """Return the kernel matrix between rows of ``x`` and rows of ``y``.

        Raises ValueError if the feature dimensions of x and y differ.
        """
        self._check_consistency(x, y)
        return self.basis_func(self.pairwise_distance(x, y) / self.scale)

    @staticmethod
    def _check_consistency(x, y):
        """Raise ValueError unless x and y share the same feature dimension.

        Fix: this was a plain method without ``self``, so the call
        ``self._check_consistency(x, y)`` in forward passed three arguments
        to a two-parameter function and raised TypeError on every forward
        pass. Declaring it a staticmethod restores the intended behavior.
        (A large block of commented-out code stored as a dead string literal
        was also removed.)
        """
        if x.shape[1] != y.shape[1]:
            raise ValueError('x and y have inconsistent shapes')
def make_student(run_seed: int, config) -> BaseStudent:
    """Build an ICIL student (networks, buffer, energy model) from *config*.

    Trains the energy model as part of construction; everything else is
    returned untrained inside the ICILStudent.
    """
    trajs_path = config['TRAIN_TRAJ_PATH']
    model_path = get_model_path(config['ENV'], ('student_' + config['ALG']), run_seed=run_seed)
    state_dim = config['STATE_DIM']
    action_dim = config['ACTION_DIM']
    num_training_envs = config['NUM_TRAINING_ENVS']
    batch_size = config['BATCH_SIZE']
    buffer_size_in_trajs = config['NUM_TRAJS_GIVEN']
    adam_alpha = config['ADAM_ALPHA']
    # NOTE(review): env and teacher are hardcoded to CartPole-v1/dqn even
    # though model_path uses config['ENV'] -- confirm this is intentional.
    env = gym.make('CartPole-v1')
    teacher = make_agent('CartPole-v1', 'dqn', config['NUM_TRAINING_ENVS'])
    teacher.load_pretrained()
    # Demonstration buffer filled from pre-recorded trajectories only.
    buffer = fill_buffer(trajs_path=config['TRAIN_TRAJ_PATH'], batch_size=batch_size, run_seed=run_seed, traj_shift=None, buffer_size_in_trajs=buffer_size_in_trajs, sampling_rate=None, strictly_batch_data=True)
    # Energy model is pre-trained here via SGLD sampling before the student
    # ever sees it.
    energy_model = EnergyModel(in_dim=state_dim, width=config['MLP_WIDTHS'], batch_size=batch_size, adam_alpha=adam_alpha, buffer=buffer, sgld_buffer_size=config['SGLD_BUFFER_SIZE'], sgld_learn_rate=config['SGLD_LEARN_RATE'], sgld_noise_coef=config['SGLD_NOISE_COEF'], sgld_num_steps=config['SGLD_NUM_STEPS'], sgld_reinit_freq=config['SGLD_REINIT_FREQ'])
    energy_model.train(num_updates=config['NUM_STEPS_TRAIN_ENERGY_MODEL'])
    # Shared causal representation: encoder/decoder pair plus a decoder back
    # to observation space.
    causal_features_encoder = FeaturesEncoder(input_size=state_dim, representation_size=config['REP_SIZE'], width=config['MLP_WIDTHS'])
    causal_features_decoder = FeaturesDecoder(action_size=action_dim, representation_size=config['REP_SIZE'], width=config['MLP_WIDTHS'])
    observations_decoder = ObservationsDecoder(representation_size=config['REP_SIZE'], out_size=state_dim, width=config['MLP_WIDTHS'])
    policy_network = StudentNetwork(in_dim=config['REP_SIZE'], out_dim=action_dim, width=config['MLP_WIDTHS'])
    # Discriminator tries to identify the source env from the representation.
    env_discriminator = EnvDiscriminator(representation_size=config['REP_SIZE'], num_envs=config['NUM_TRAINING_ENVS'], width=config['MLP_WIDTHS'])
    # One noise (env-specific) encoder/decoder pair per training environment.
    noise_features_encoders = [FeaturesEncoder(input_size=state_dim, representation_size=config['REP_SIZE'], width=config['MLP_WIDTHS']) for i in range(num_training_envs)]
    noise_features_decoders = [FeaturesDecoder(action_size=action_dim, representation_size=config['REP_SIZE'], width=config['MLP_WIDTHS']) for i in range(num_training_envs)]
    # MINE network estimates mutual information between causal and noise reps.
    mine_network = MineNetwork(x_dim=config['REP_SIZE'], z_dim=config['REP_SIZE'], width=config['MLP_WIDTHS'])
    return ICILStudent(env=env, trajs_paths=trajs_path, model_path=model_path, num_training_envs=num_training_envs, teacher=teacher, causal_features_encoder=causal_features_encoder, noise_features_encoders=noise_features_encoders, causal_features_decoder=causal_features_decoder, noise_features_decoders=noise_features_decoders, observations_decoder=observations_decoder, env_discriminator=env_discriminator, policy_network=policy_network, energy_model=energy_model, mine_network=mine_network, buffer=buffer, adam_alpha=adam_alpha)
def pad_seq(seq, max_length, PAD_token=0):
    """Right-pad *seq* in place with *PAD_token* up to *max_length*.

    Returns the same list object. Sequences already at or beyond
    *max_length* are returned unchanged (no truncation).
    """
    # A negative multiplier yields an empty list, so over-long sequences
    # are left untouched.
    seq.extend([PAD_token] * (max_length - len(seq)))
    return seq
class TFXLMRobertaModel():
    """Dummy placeholder for the TensorFlow XLM-RoBERTa model.

    Any attempt to construct or load it calls ``requires_tf``, which
    presumably raises an informative "TensorFlow not installed" error.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    # NOTE(review): upstream transformers defines from_pretrained as a
    # classmethod on dummy objects; here it is an instance method -- confirm.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def initalizeEnvironment(environment, logger):
    """Set up datacenter, workload, scheduler and framework/simulator.

    A non-empty *environment* selects the real (framework + DB) path; an
    empty string selects the pure-simulation path. Performs the initial
    container deployment and records stats. Returns
    (datacenter, workload, scheduler, env, stats).
    """
    # Real deployments log metrics to an external database.
    if (environment != ''):
        db = Database(DB_NAME, DB_HOST, DB_PORT)
    ' Can be SimpleFog, BitbrainFog, AzureFog // Datacenter '
    if (environment != ''):
        datacenter = Datacenter(HOSTS_IP, environment, 'Virtual')
    else:
        datacenter = AzureFog(HOSTS)
    ' Can be SWSD, BWGD2, Azure2017Workload, Azure2019Workload // DFW, AIoTW '
    if (environment != ''):
        workload = DFW(NEW_CONTAINERS, 1.5, db)
    else:
        workload = BWGD2(NEW_CONTAINERS, 1.5)
    " Can be LRMMTR, RF, RL, RM, Random, RLRMMTR, TMCR, TMMR, TMMTR, GA, GOBI (arg = 'energy_latency_'+str(HOSTS)) "
    scheduler = GOBIScheduler(('energy_latency_' + str(HOSTS)))
    hostlist = datacenter.generateHosts()
    if (environment != ''):
        env = Framework(scheduler, CONTAINERS, INTERVAL_TIME, hostlist, db, environment, logger)
    else:
        env = Simulator(TOTAL_POWER, ROUTER_BW, scheduler, CONTAINERS, INTERVAL_TIME, hostlist)
    stats = Stats(env, workload, datacenter, scheduler)
    # Initial interval: generate containers, place them, and time the
    # scheduling decision.
    newcontainerinfos = workload.generateNewContainers(env.interval)
    deployed = env.addContainersInit(newcontainerinfos)
    start = time()
    decision = scheduler.placement(deployed)
    schedulingTime = (time() - start)
    migrations = env.allocateInit(decision)
    workload.updateDeployedContainers(env.getCreationIDs(migrations, deployed))
    print("Deployed containers' creation IDs:", env.getCreationIDs(migrations, deployed))
    print('Containers in host:', env.getContainersInHosts())
    print('Schedule:', env.getActiveContainerList())
    printDecisionAndMigrations(decision, migrations)
    stats.saveStats(deployed, migrations, [], deployed, decision, schedulingTime)
    return (datacenter, workload, scheduler, env, stats)
class Probe(nn.Module):
    """Attention pooling: produce ``n_probes`` attention-weighted context vectors.

    Each probe scores every timestep of the BiRNN outputs, masks padded
    positions, softmaxes over time, and returns the weighted sum.
    """

    def __init__(self, dim, n_probes):
        super(Probe, self).__init__()
        # One linear scorer per probe; Xavier-initialised, no bias.
        self.self_attn = nn.Linear(dim, n_probes, bias=False)
        nn.init.xavier_uniform_(self.self_attn.weight)

    def forward(self, birnn_outputs, masks):
        # (B, T, P) scores -> (B, P, T); padded positions become -inf so
        # they get zero weight after the softmax.
        scores = self.self_attn(birnn_outputs)
        scores = scores.transpose(1, 2)
        scores = scores.masked_fill(~masks, -np.inf)
        weights = F.softmax(scores, -1)
        # Weighted sum over time: (B, P, T) @ (B, T, D) -> (B, P, D).
        return torch.bmm(weights, birnn_outputs)
def gen_filename(filename):
    """Return the basename of *filename* with its final extension removed."""
    base = filename.split('/')[-1]
    ext_len = len(filename.split('.')[-1])
    # Drop ".<ext>" from the end of the basename.
    return base[:-(ext_len + 1)]
def TrainPrepare():
    """Build the training MSet by deriving gradients and atomization energies.

    Loads 'nicotine_aimd_rand', converts forces to gradients, subtracts
    per-element reference energies from the total energy, and saves the
    result as 'nicotine_aimd_rand_train'.
    """
    if 1:
        # Per-element WB97X-D reference energies keyed by atomic number.
        # NOTE(review): these values ((-0.), (-37.), ...) look truncated
        # compared to typical Hartree reference energies -- confirm against
        # the original data-prep script.
        WB97XDAtom = {}
        WB97XDAtom[1] = (- 0.)
        WB97XDAtom[6] = (- 37.)
        WB97XDAtom[7] = (- 54.)
        WB97XDAtom[8] = (- 75.)
        a = MSet('nicotine_aimd_rand')
        a.Load()
        b = MSet('nicotine_aimd_rand_train')
        for (mol_index, mol) in enumerate(a.mols):
            print('mol_index:', mol_index)
            # Gradients are the negated forces.
            mol.properties['gradients'] = (- mol.properties['forces'])
            # Atomization energy = total energy minus per-atom references.
            mol.properties['atomization'] = mol.properties['energy']
            for i in range(0, mol.NAtoms()):
                mol.properties['atomization'] -= WB97XDAtom[mol.atoms[i]]
            b.mols.append(mol)
        b.Save()
class Model(nn.Module):
    """Minimal scalar-weight model used for runner/hook tests.

    ``train_step`` and ``val_step`` both return the sum of the scaled
    inputs as the loss.
    """

    def __init__(self):
        super().__init__()
        # Single learnable scalar initialised to 1.0.
        self.param = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, **kwargs):
        return self.param * x

    def train_step(self, data_batch, optimizer, **kwargs):
        loss = torch.sum(self(data_batch['x']))
        return {'loss': loss}

    def val_step(self, data_batch, optimizer, **kwargs):
        loss = torch.sum(self(data_batch['x']))
        return {'loss': loss}
class NystromformerForSequenceClassification(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when torch is unavailable."""
    # Backends required before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
# NOTE(review): the leading ".parametrize" calls below appear to be
# "@pytest.mark.parametrize" decorators with the "@pytest.mark" prefix
# stripped during extraction -- confirm against the original test module.
.parametrize('loader_parameters', [{'path_data': [str(Path(__data_testing_dir__, 'microscopy_png'))], 'target_suffix': ['_seg-myelin-manual', '_seg-axon-manual'], 'extensions': ['.png'], 'roi_params': {'suffix': None, 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': [], 'balance': {}}, 'slice_axis': 'axial', 'slice_filter_params': {'filter_empty_mask': False, 'filter_empty_input': True}, 'patch_filter_params': {'filter_empty_mask': False, 'filter_empty_input': False}, 'multichannel': False}])
.parametrize('model_parameters', [{'name': 'Unet', 'dropout_rate': 0.3, 'bn_momentum': 0.1, 'depth': 2}])
.parametrize('transform_parameters', [{'NumpyToTensor': {}}])
def test_get_target_filename_list(loader_parameters, model_parameters, transform_parameters):
    """The dataset must expose one target filename per configured target suffix."""
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    test_ds = imed_loader.load_dataset(bids_df, **{**loader_parameters, **{'data_list': data_lst, 'transforms_params': transform_parameters, 'dataset_type': 'training'}})
    # filename_pairs entries are (input, target, ...) tuples; index 1 holds
    # the list of target filenames.
    target_filename = test_ds.filename_pairs[0][1]
    assert (len(target_filename) == len(loader_parameters[LoaderParamsKW.TARGET_SUFFIX]))
def draw_success_precision(success_ret, name, videos, attr, precision_ret=None, norm_precision_ret=None, bold_name=None, axis=[0, 1]):
    """Draw OPE success, precision and normalized-precision plots.

    Trackers are ranked by their summary score and drawn in that order;
    *bold_name* is highlighted in the legend. Precision plots are only
    drawn when the corresponding result dicts are provided.

    NOTE(review): the mutable default ``axis=[0, 1]`` is shared across
    calls; safe only because it is never mutated here.
    """
    # --- success plot (IoU threshold vs success rate) ---
    (fig, ax) = plt.subplots()
    ax.grid(b=True)
    ax.set_aspect(1)
    plt.xlabel('Overlap threshold')
    plt.ylabel('Success rate')
    if (attr == 'ALL'):
        plt.title(('\\textbf{Success plots of OPE on %s}' % name))
    else:
        plt.title(('\\textbf{Success plots of OPE - %s}' % attr))
    plt.axis(([0, 1] + axis))
    success = {}
    thresholds = np.arange(0, 1.05, 0.05)
    # Summary score per tracker: mean success over the selected videos.
    for tracker_name in success_ret.keys():
        value = [v for (k, v) in success_ret[tracker_name].items() if (k in videos)]
        success[tracker_name] = np.mean(value)
    # Draw best-first so legend order matches ranking.
    for (idx, (tracker_name, auc)) in enumerate(sorted(success.items(), key=(lambda x: x[1]), reverse=True)):
        if (tracker_name == bold_name):
            label = ('\\textbf{[%.3f] %s}' % (auc, tracker_name))
        else:
            label = (('[%.3f] ' % auc) + tracker_name)
        value = [v for (k, v) in success_ret[tracker_name].items() if (k in videos)]
        plt.plot(thresholds, np.mean(value, axis=0), color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
    ax.legend(loc='lower left', labelspacing=0.2)
    ax.autoscale(enable=True, axis='both', tight=True)
    (xmin, xmax, ymin, ymax) = plt.axis()
    ax.autoscale(enable=False)
    # Small headroom above the top curve.
    ymax += 0.03
    plt.axis([xmin, xmax, ymin, ymax])
    plt.xticks(np.arange(xmin, (xmax + 0.01), 0.1))
    plt.yticks(np.arange(ymin, ymax, 0.1))
    ax.set_aspect(((xmax - xmin) / (ymax - ymin)))
    plt.show()
    # --- precision plot (center-error threshold in pixels) ---
    if precision_ret:
        (fig, ax) = plt.subplots()
        ax.grid(b=True)
        ax.set_aspect(50)
        plt.xlabel('Location error threshold')
        plt.ylabel('Precision')
        if (attr == 'ALL'):
            plt.title(('\\textbf{Precision plots of OPE on %s}' % name))
        else:
            plt.title(('\\textbf{Precision plots of OPE - %s}' % attr))
        plt.axis(([0, 50] + axis))
        precision = {}
        thresholds = np.arange(0, 51, 1)
        # Ranking score: precision at the conventional 20-pixel threshold.
        for tracker_name in precision_ret.keys():
            value = [v for (k, v) in precision_ret[tracker_name].items() if (k in videos)]
            precision[tracker_name] = np.mean(value, axis=0)[20]
        for (idx, (tracker_name, pre)) in enumerate(sorted(precision.items(), key=(lambda x: x[1]), reverse=True)):
            if (tracker_name == bold_name):
                label = ('\\textbf{[%.3f] %s}' % (pre, tracker_name))
            else:
                label = (('[%.3f] ' % pre) + tracker_name)
            value = [v for (k, v) in precision_ret[tracker_name].items() if (k in videos)]
            plt.plot(thresholds, np.mean(value, axis=0), color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
        ax.legend(loc='lower right', labelspacing=0.2)
        ax.autoscale(enable=True, axis='both', tight=True)
        (xmin, xmax, ymin, ymax) = plt.axis()
        ax.autoscale(enable=False)
        ymax += 0.03
        plt.axis([xmin, xmax, ymin, ymax])
        plt.xticks(np.arange(xmin, (xmax + 0.01), 5))
        plt.yticks(np.arange(ymin, ymax, 0.1))
        ax.set_aspect(((xmax - xmin) / (ymax - ymin)))
        plt.show()
    # --- normalized precision plot (threshold as fraction of target size) ---
    if norm_precision_ret:
        (fig, ax) = plt.subplots()
        ax.grid(b=True)
        plt.xlabel('Location error threshold')
        plt.ylabel('Precision')
        if (attr == 'ALL'):
            plt.title(('\\textbf{Normalized Precision plots of OPE on %s}' % name))
        else:
            plt.title(('\\textbf{Normalized Precision plots of OPE - %s}' % attr))
        norm_precision = {}
        thresholds = (np.arange(0, 51, 1) / 100)
        # NOTE(review): iterates precision_ret.keys() while indexing
        # norm_precision_ret -- assumes both dicts share the same keys;
        # confirm upstream.
        for tracker_name in precision_ret.keys():
            value = [v for (k, v) in norm_precision_ret[tracker_name].items() if (k in videos)]
            norm_precision[tracker_name] = np.mean(value, axis=0)[20]
        for (idx, (tracker_name, pre)) in enumerate(sorted(norm_precision.items(), key=(lambda x: x[1]), reverse=True)):
            if (tracker_name == bold_name):
                label = ('\\textbf{[%.3f] %s}' % (pre, tracker_name))
            else:
                label = (('[%.3f] ' % pre) + tracker_name)
            value = [v for (k, v) in norm_precision_ret[tracker_name].items() if (k in videos)]
            plt.plot(thresholds, np.mean(value, axis=0), color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
        ax.legend(loc='lower right', labelspacing=0.2)
        ax.autoscale(enable=True, axis='both', tight=True)
        (xmin, xmax, ymin, ymax) = plt.axis()
        ax.autoscale(enable=False)
        ymax += 0.03
        plt.axis([xmin, xmax, ymin, ymax])
        plt.xticks(np.arange(xmin, (xmax + 0.01), 0.05))
        plt.yticks(np.arange(ymin, ymax, 0.1))
        ax.set_aspect(((xmax - xmin) / (ymax - ymin)))
        plt.show()
def framework_operator_impl(framework_realizable_ops: List[Type[AbsOpBase]], all_framework_ops: List[Type[AbsOpBase]], op_type: AbsOpBase, *args, **kwargs):
    """Register the realizable subclasses of *op_type* for a framework, then
    forward to the generic ``dispatch`` decorator.

    Appends every realizable subclass of *op_type* to *all_framework_ops*
    (Constant is exempt from the registration check).
    """
    SanityCheck.true(issubclass(op_type, AbsOpBase), f'Decorator operator_impl takes AbsOpBase subclass, but got {op_type}')
    if (op_type is not Constant):
        # All concrete (mark_realize'd) types implementing this operator.
        dispatchables = [rtype for rtype in framework_realizable_ops if issubclass(rtype, op_type)]
        for rtype in dispatchables:
            all_framework_ops.append(rtype)
        SanityCheck.true((len(dispatchables) != 0), f'Decorator operator_impl only take types decorated by `mark_realize`, but got {op_type}')
    return dispatch(op_type, *args, **kwargs)
def _check_h3d_bbox_head(bbox_cfg, bbox_head):
    """Sanity-check that a built H3D bbox head matches its config."""
    assert (bbox_cfg['type'] == bbox_head.__class__.__name__)
    # 6 surface centers sampled per proposal.
    assert ((bbox_cfg.num_proposal * 6) == bbox_head.surface_center_matcher.num_point[0])
    # 12 line centers sampled per proposal.
    assert ((bbox_cfg.num_proposal * 12) == bbox_head.line_center_matcher.num_point[0])
    # Predictor input width scales by 18 -- presumably 6 surface + 12 line
    # matched features concatenated; confirm against the head implementation.
    assert ((bbox_cfg.suface_matching_cfg.mlp_channels[(- 1)] * 18) == bbox_head.bbox_pred[0].in_channels)
class Cursor(Structure):
    """ctypes wrapper around libclang's CXCursor: a node in the clang AST.

    Most accessors delegate to ``conf.lib.clang_*`` and memoize their result
    on ``self``. The translation unit is pinned via ``_tu`` so the underlying
    C objects stay alive as long as a cursor references them.

    NOTE(review): several members (``from_location``, ``kind``, ``spelling``,
    ``location``, ``type``, the ``from_*`` result converters, ...) are used
    elsewhere as properties/staticmethods (e.g. ``self.kind.is_declaration()``
    in ``underlying_typedef_type``), so their ``@property`` / ``@staticmethod``
    decorators were presumably stripped during extraction -- confirm against
    upstream ``clang.cindex``.
    """
    # Mirrors CXCursor: kind id, extra data word, three opaque pointers.
    _fields_ = [('_kind_id', c_int), ('xdata', c_int), ('data', (c_void_p * 3))]

    def from_location(tu, location):
        # Factory: cursor at a source location; pin the TU on the result.
        cursor = conf.lib.clang_getCursor(tu, location)
        cursor._tu = tu
        return cursor

    def __eq__(self, other):
        return conf.lib.clang_equalCursors(self, other)

    def __ne__(self, other):
        return (not self.__eq__(other))

    # --- boolean predicates, direct libclang delegations ---
    def is_definition(self):
        return conf.lib.clang_isCursorDefinition(self)

    def is_const_method(self):
        return conf.lib.clang_CXXMethod_isConst(self)

    def is_converting_constructor(self):
        return conf.lib.clang_CXXConstructor_isConvertingConstructor(self)

    def is_copy_constructor(self):
        return conf.lib.clang_CXXConstructor_isCopyConstructor(self)

    def is_default_constructor(self):
        return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)

    def is_move_constructor(self):
        return conf.lib.clang_CXXConstructor_isMoveConstructor(self)

    def is_default_method(self):
        return conf.lib.clang_CXXMethod_isDefaulted(self)

    def is_mutable_field(self):
        return conf.lib.clang_CXXField_isMutable(self)

    def is_pure_virtual_method(self):
        return conf.lib.clang_CXXMethod_isPureVirtual(self)

    def is_static_method(self):
        return conf.lib.clang_CXXMethod_isStatic(self)

    def is_virtual_method(self):
        return conf.lib.clang_CXXMethod_isVirtual(self)

    def get_definition(self):
        # Cursor for the entity's definition (may differ from a declaration).
        return conf.lib.clang_getCursorDefinition(self)

    def get_usr(self):
        # Unified Symbol Resolution string identifying the entity.
        return conf.lib.clang_getCursorUSR(self)

    def kind(self):
        return CursorKind.from_id(self._kind_id)

    # --- memoized accessors: computed once, cached on the instance ---
    def spelling(self):
        if (not hasattr(self, '_spelling')):
            self._spelling = conf.lib.clang_getCursorSpelling(self)
        return self._spelling

    def displayname(self):
        if (not hasattr(self, '_displayname')):
            self._displayname = conf.lib.clang_getCursorDisplayName(self)
        return self._displayname

    def mangled_name(self):
        if (not hasattr(self, '_mangled_name')):
            self._mangled_name = conf.lib.clang_Cursor_getMangling(self)
        return self._mangled_name

    def location(self):
        if (not hasattr(self, '_loc')):
            self._loc = conf.lib.clang_getCursorLocation(self)
        return self._loc

    def extent(self):
        if (not hasattr(self, '_extent')):
            self._extent = conf.lib.clang_getCursorExtent(self)
        return self._extent

    def storage_class(self):
        if (not hasattr(self, '_storage_class')):
            self._storage_class = conf.lib.clang_Cursor_getStorageClass(self)
        return StorageClass.from_id(self._storage_class)

    def access_specifier(self):
        if (not hasattr(self, '_access_specifier')):
            self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)
        return AccessSpecifier.from_id(self._access_specifier)

    def type(self):
        if (not hasattr(self, '_type')):
            self._type = conf.lib.clang_getCursorType(self)
        return self._type

    def canonical(self):
        # Canonical cursor: one designated cursor among re-declarations.
        if (not hasattr(self, '_canonical')):
            self._canonical = conf.lib.clang_getCanonicalCursor(self)
        return self._canonical

    def result_type(self):
        if (not hasattr(self, '_result_type')):
            self._result_type = conf.lib.clang_getResultType(self.type)
        return self._result_type

    def underlying_typedef_type(self):
        # Only meaningful for typedef declarations.
        if (not hasattr(self, '_underlying_type')):
            assert self.kind.is_declaration()
            self._underlying_type = conf.lib.clang_getTypedefDeclUnderlyingType(self)
        return self._underlying_type

    def enum_type(self):
        # Integer type underlying an enum declaration.
        if (not hasattr(self, '_enum_type')):
            assert (self.kind == CursorKind.ENUM_DECL)
            self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)
        return self._enum_type

    def enum_value(self):
        if (not hasattr(self, '_enum_value')):
            assert (self.kind == CursorKind.ENUM_CONSTANT_DECL)
            underlying_type = self.type
            if (underlying_type.kind == TypeKind.ENUM):
                underlying_type = underlying_type.get_declaration().enum_type
            # Pick the signed/unsigned libclang getter based on the
            # underlying integer type.
            if (underlying_type.kind in (TypeKind.CHAR_U, TypeKind.UCHAR, TypeKind.CHAR16, TypeKind.CHAR32, TypeKind.USHORT, TypeKind.UINT, TypeKind.ULONG, TypeKind.ULONGLONG, TypeKind.UINT128)):
                self._enum_value = conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
            else:
                self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
        return self._enum_value

    def objc_type_encoding(self):
        if (not hasattr(self, '_objc_type_encoding')):
            self._objc_type_encoding = conf.lib.clang_getDeclObjCTypeEncoding(self)
        return self._objc_type_encoding

    def hash(self):
        if (not hasattr(self, '_hash')):
            self._hash = conf.lib.clang_hashCursor(self)
        return self._hash

    def semantic_parent(self):
        if (not hasattr(self, '_semantic_parent')):
            self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)
        return self._semantic_parent

    def lexical_parent(self):
        if (not hasattr(self, '_lexical_parent')):
            self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
        return self._lexical_parent

    def translation_unit(self):
        # _tu is set by every factory/result converter before a cursor
        # escapes to the caller.
        return self._tu

    def referenced(self):
        if (not hasattr(self, '_referenced')):
            self._referenced = conf.lib.clang_getCursorReferenced(self)
        return self._referenced

    def brief_comment(self):
        return conf.lib.clang_Cursor_getBriefCommentText(self)

    def raw_comment(self):
        return conf.lib.clang_Cursor_getRawCommentText(self)

    def get_arguments(self):
        # Generator over the cursor's argument cursors (calls/functions).
        num_args = conf.lib.clang_Cursor_getNumArguments(self)
        for i in range(0, num_args):
            (yield conf.lib.clang_Cursor_getArgument(self, i))

    def get_num_template_arguments(self):
        return conf.lib.clang_Cursor_getNumTemplateArguments(self)

    def get_template_argument_kind(self, num):
        return conf.lib.clang_Cursor_getTemplateArgumentKind(self, num)

    def get_template_argument_type(self, num):
        return conf.lib.clang_Cursor_getTemplateArgumentType(self, num)

    def get_template_argument_value(self, num):
        return conf.lib.clang_Cursor_getTemplateArgumentValue(self, num)

    def get_template_argument_unsigned_value(self, num):
        return conf.lib.clang_Cursor_getTemplateArgumentUnsignedValue(self, num)

    def get_children(self):
        # Collect direct children eagerly via clang_visitChildren, pinning
        # the TU on each child, then return an iterator.
        def visitor(child, parent, children):
            assert (child != conf.lib.clang_getNullCursor())
            child._tu = self._tu
            children.append(child)
            return 1
        children = []
        conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor), children)
        return iter(children)

    def walk_preorder(self):
        # Depth-first pre-order traversal of the subtree rooted here.
        (yield self)
        for child in self.get_children():
            for descendant in child.walk_preorder():
                (yield descendant)

    def get_tokens(self):
        return TokenGroup.get_tokens(self._tu, self.extent)

    def get_field_offsetof(self):
        return conf.lib.clang_Cursor_getOffsetOfField(self)

    def is_anonymous(self):
        # Field declarations defer to their record type's anonymity.
        if (self.kind == CursorKind.FIELD_DECL):
            return self.type.get_declaration().is_anonymous()
        return conf.lib.clang_Cursor_isAnonymous(self)

    def is_bitfield(self):
        return conf.lib.clang_Cursor_isBitField(self)

    def get_bitfield_width(self):
        return conf.lib.clang_getFieldDeclBitWidth(self)

    # --- ctypes errcheck-style result converters ---
    def from_result(res, fn, args):
        # Convert a returned CXCursor: null -> None, otherwise locate a
        # translation unit among the call arguments and pin it.
        assert isinstance(res, Cursor)
        if (res == conf.lib.clang_getNullCursor()):
            return None
        tu = None
        for arg in args:
            if isinstance(arg, TranslationUnit):
                tu = arg
                break
            if hasattr(arg, 'translation_unit'):
                tu = arg.translation_unit
                break
        assert (tu is not None)
        res._tu = tu
        return res

    def from_cursor_result(res, fn, args):
        # Variant where the first argument is itself a cursor whose TU is
        # propagated to the result.
        assert isinstance(res, Cursor)
        if (res == conf.lib.clang_getNullCursor()):
            return None
        res._tu = args[0]._tu
        return res
class Alphabet(object):
    """Bidirectional mapping between instances (e.g. tokens) and integer ids.

    Supports an optional reserved default/unknown index 0 (``defualt_value``,
    sic), an optional singleton set for singleton-word handling, and
    freezing (``close``) so unseen instances stop growing the vocabulary.
    JSON (de)serialization is provided via ``save``/``load``.
    """

    def __init__(self, name, defualt_value=False, keep_growing=True, singleton=False):
        # Private (name-mangled) alphabet name, used in error messages and
        # as the default save/load file name.
        self.__name = name
        self.instance2index = {}
        self.instances = []
        self.default_value = defualt_value
        # With a default value, index 0 is reserved for <_UNK>; real
        # instances start at `offset`.
        self.offset = (1 if self.default_value else 0)
        self.keep_growing = keep_growing
        self.singletons = (set() if singleton else None)
        self.default_index = (0 if self.default_value else None)
        self.next_index = self.offset
        self.logger = get_logger('Alphabet')

    def add(self, instance):
        # Idempotent: re-adding an existing instance is a no-op.
        if (instance not in self.instance2index):
            self.instances.append(instance)
            self.instance2index[instance] = self.next_index
            self.next_index += 1

    def add_singleton(self, id):
        if (self.singletons is None):
            raise RuntimeError(('Alphabet %s does not have singleton.' % self.__name))
        else:
            self.singletons.add(id)

    def add_singletons(self, ids):
        if (self.singletons is None):
            raise RuntimeError(('Alphabet %s does not have singleton.' % self.__name))
        else:
            self.singletons.update(ids)

    def is_singleton(self, id):
        if (self.singletons is None):
            raise RuntimeError(('Alphabet %s does not have singleton.' % self.__name))
        else:
            return (id in self.singletons)

    def get_index(self, instance):
        """Return the id of *instance*, growing the alphabet or falling back
        to the default index depending on configuration."""
        try:
            return self.instance2index[instance]
        except KeyError:
            if self.keep_growing:
                index = self.next_index
                self.add(instance)
                return index
            elif self.default_value:
                return self.default_index
            else:
                raise KeyError(('instance not found: %s' % instance))

    def get_instance(self, index):
        """Inverse lookup; the reserved default index maps to '<_UNK>'."""
        if (self.default_value and (index == self.default_index)):
            return '<_UNK>'
        else:
            try:
                return self.instances[(index - self.offset)]
            except IndexError:
                raise IndexError(('unknown index: %d' % index))

    def size(self):
        # Includes the reserved default slot when present.
        return (len(self.instances) + self.offset)

    def singleton_size(self):
        return len(self.singletons)

    def items(self):
        return self.instance2index.items()

    def enumerate_items(self, start):
        # Enumerate (index, instance) pairs beginning at `start`.
        if ((start < self.offset) or (start >= self.size())):
            raise IndexError(('Enumerate is allowed between [%d : size of the alphabet)' % self.offset))
        return zip(range(start, (len(self.instances) + self.offset)), self.instances[(start - self.offset):])

    def close(self):
        # Freeze: unseen instances no longer grow the alphabet.
        self.keep_growing = False

    def open(self):
        self.keep_growing = True

    def get_content(self):
        # JSON-serializable snapshot (note upstream key spelling
        # 'singletions' -- kept for file compatibility).
        if (self.singletons is None):
            return {'instance2index': self.instance2index, 'instances': self.instances}
        else:
            return {'instance2index': self.instance2index, 'instances': self.instances, 'singletions': list(self.singletons)}

    def __from_json(self, data):
        self.instances = data['instances']
        self.instance2index = data['instance2index']
        if ('singletions' in data):
            self.singletons = set(data['singletions'])
        else:
            self.singletons = None

    def save(self, output_directory, name=None):
        """Write the alphabet content as <name>.json; failures are logged,
        not raised."""
        saving_name = (name if name else self.__name)
        try:
            if (not os.path.exists(output_directory)):
                os.makedirs(output_directory)
            json.dump(self.get_content(), open(os.path.join(output_directory, (saving_name + '.json')), 'w'), indent=4)
        except Exception as e:
            self.logger.warn(('Alphabet is not saved: %s' % repr(e)))

    def load(self, input_directory, name=None):
        """Load <name>.json and freeze the alphabet."""
        loading_name = (name if name else self.__name)
        self.__from_json(json.load(open(os.path.join(input_directory, (loading_name + '.json')))))
        self.next_index = (len(self.instances) + self.offset)
        self.keep_growing = False
def annotation_to_instances(ann: Annotation, docs: Dict[(str, List[List[int]])], class_interner: Dict[(str, int)]):
    """Convert an annotation into (class id, flattened docs, evidence masks).

    For every document touched by the annotation's evidence, the document's
    sentences are flattened into a single token list and a parallel 0/1
    vector marks the evidence token positions. Unknown classes map to -1.
    """
    # Group evidence objects by the document they point into.
    per_doc_evidence = defaultdict(set)
    for ev in ann.all_evidences():
        per_doc_evidence[ev.docid].add(ev)
    flattened = dict()
    span_masks = dict()
    for docid, evs in per_doc_evidence.items():
        sentences = docs[docid]
        flattened[docid] = [tok for sent in sentences for tok in sent]
        mask = [0] * sum(len(sent) for sent in sentences)
        for ev in evs:
            for t in range(ev.start_token, ev.end_token):
                mask[t] = 1
        span_masks[docid] = mask
    label = class_interner.get(ann.classification, -1)
    return (label, flattened, span_masks)