code
stringlengths
101
5.91M
class rm_vlc_upload(StreamUpload):
    """Stream-upload configuration for the HoloLens 2 RM VLC camera feed."""
    # Stream mode 1; H.264 main profile at 3 Mbit/s.
    mode = hl2ss.StreamMode.MODE_1
    profile = hl2ss.VideoProfile.H264_MAIN
    bitrate = ((3 * 1024) * 1024)
    # GOP length derived from the fixed RM VLC frame rate for this profile.
    gop_size = hl2ss.get_gop_size(profile, hl2ss.Parameters_RM_VLC.FPS)

    def create_client(self):
        """Return a configured hl2ss RM VLC receiver for this stream.

        NOTE(review): self.host / self.port are presumably provided by the
        StreamUpload base class — confirm.
        """
        return hl2ss.rx_rm_vlc(self.host, self.port, hl2ss.ChunkSize.RM_VLC, self.mode, self.profile, self.bitrate)
class BasicConv3d(nn.Module):
    """Conv3d -> BatchNorm3d -> ReLU as a single reusable building block."""

    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
        super(BasicConv3d, self).__init__()
        # Bias is omitted: the following BatchNorm supplies its own shift.
        self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm3d(out_planes, eps=0.001, momentum=0.001, affine=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply convolution, normalisation and activation in turn."""
        return self.relu(self.bn(self.conv(x)))
# NOTE(review): the bare "_model" below is almost certainly the tail of a
# mangled decorator (e.g. timm's "@register_model") lost during extraction —
# restore the full decorator before using this snippet.
_model
def seresnet34(pretrained=False, **kwargs):
    """SE-ResNet-34 constructor: BasicBlock 3-4-6-3 with an 'se' attention layer."""
    model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet34', pretrained, **model_args)
def move_file_to_dir_url(url_file, path_read, file_to_write):
    """Read a URL list, map URLs to hashed file names, read each file in a
    process pool, and concatenate the non-None contents into one output file.

    url_file: UTF-8 text file with one URL per line.
    path_read: directory containing the per-URL files.
    file_to_write: destination path for the concatenated output.
    """
    with open(url_file, 'r', encoding='utf-8') as fd:
        lines = fd.read().splitlines()
    url_names = get_url_hashes(lines)
    print('len of urls {}'.format(len(url_names)))
    url_names = [os.path.join(path_read, url) for url in url_names]
    cnt = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cnt)
    try:
        rt_bag = pool.map(read_one_file, url_names)
    finally:
        # Guarantee worker cleanup even if map() raises (the original leaked
        # the pool on error).
        pool.close()
        pool.join()
    rt_bag = [x for x in rt_bag if (x is not None)]
    wt_string = '\n'.join(rt_bag)
    # Write UTF-8 explicitly: the input is read as UTF-8, and the platform
    # default encoding can fail on non-ASCII content (e.g. on Windows).
    with open(file_to_write, 'w', encoding='utf-8') as fd:
        fd.write(wt_string)
def get_parser(allow_policy_list=False):
    """Build the command-line parser for experiment launching.

    allow_policy_list: when True, --policy accepts one or more values.
    Ray init/tune arguments are appended by the shared helpers.
    """
    def str_to_bool(value):
        # argparse "type" hook converting yes/no-style strings to bool.
        return bool(strtobool(value))

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str)
    parser.add_argument('--log-dir', type=str, default=None)
    parser.add_argument('--checkpoint-replay-pool', type=str_to_bool, default=None, help="Whether a checkpoint should also saved the replay pool. If set, takes precedence over variant['run_params']['checkpoint_replay_pool']. Note that the replay pool is saved (and constructed) piece by piece so that each experience is saved only once.")
    if allow_policy_list:
        parser.add_argument('--policy', type=str, nargs='+', choices=('gaussian',), default='gaussian')
    else:
        parser.add_argument('--policy', type=str, choices=('gaussian',), default='gaussian')
    parser.add_argument('--mode', type=str, default='local')
    parser.add_argument('--confirm-remote', type=str_to_bool, nargs='?', const=True, default=True, help='Whether or not to query yes/no on remote run.')
    parser.add_argument('--video-save-frequency', type=int, default=None, help='Save frequency for videos.')
    parser = add_ray_init_args(parser)
    parser = add_ray_tune_args(parser)
    return parser
def main():
    """WikiExtractor entry point: parse CLI options, configure the global
    `options` object, and run the dump extraction."""
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]), formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
    parser.add_argument('input', help='XML wiki dump file')
    # --- output options ---
    groupO = parser.add_argument_group('Output')
    groupO.add_argument('-o', '--output', default='text', help="directory for extracted files (or '-' for dumping to stdout)")
    groupO.add_argument('-b', '--bytes', default='1M', help='maximum bytes per output file (default %(default)s)', metavar='n[KMG]')
    groupO.add_argument('-c', '--compress', action='store_true', help='compress output files using bzip')
    groupO.add_argument('--json', action='store_true', help='write output in json format instead of the default one')
    # --- processing options ---
    groupP = parser.add_argument_group('Processing')
    groupP.add_argument('--html', action='store_true', help='produce HTML output, subsumes --links')
    groupP.add_argument('-l', '--links', action='store_true', help='preserve links')
    groupP.add_argument('-s', '--sections', action='store_true', help='preserve sections')
    groupP.add_argument('--lists', action='store_true', help='preserve lists')
    groupP.add_argument('-ns', '--namespaces', default='', metavar='ns1,ns2', help='accepted namespaces in links')
    groupP.add_argument('--templates', help='use or create file containing templates')
    groupP.add_argument('--no_templates', action='store_false', help='Do not expand templates')
    groupP.add_argument('-r', '--revision', action='store_true', default=options.print_revision, help='Include the document revision id (default=%(default)s)')
    groupP.add_argument('--min_text_length', type=int, default=options.min_text_length, help='Minimum expanded text length required to write document (default=%(default)s)')
    groupP.add_argument('--filter_disambig_pages', action='store_true', default=options.filter_disambig_pages, help='Remove pages from output that contain disabmiguation markup (default=%(default)s)')
    groupP.add_argument('-it', '--ignored_tags', default='', metavar='abbr,b,big', help='comma separated list of tags that will be dropped, keeping their content')
    groupP.add_argument('-de', '--discard_elements', default='', metavar='gallery,timeline,noinclude', help='comma separated list of elements that will be removed from the article text')
    groupP.add_argument('--keep_tables', action='store_true', default=options.keep_tables, help='Preserve tables in the output article text (default=%(default)s)')
    default_process_count = max(1, (cpu_count() - 1))
    parser.add_argument('--processes', type=int, default=default_process_count, help='Number of processes to use (default %(default)s)')
    # --- special / debug options ---
    groupS = parser.add_argument_group('Special')
    groupS.add_argument('-q', '--quiet', action='store_true', help='suppress reporting progress info')
    groupS.add_argument('--debug', action='store_true', help='print debug info')
    groupS.add_argument('-a', '--article', action='store_true', help='analyze a file containing a single article (debug option)')
    groupS.add_argument('--log_file', help='path to save the log info')
    groupS.add_argument('-v', '--version', action='version', version=('%(prog)s ' + version), help='print program version')
    # NOTE(review): this help string was split mid-literal in the extracted
    # source (a raw newline appeared inside the quotes); it is reconstructed
    # here with implicit concatenation — verify against the original file.
    groupP.add_argument('--filter_category', help="specify the file that listing the Categories you want to include or exclude. One line for one category. "
                        "starting with: 1) '#' comment, ignored; 2) '^' exclude; Note: excluding has higher priority than including")
    args = parser.parse_args()
    # Mirror parsed flags onto the global options object.
    options.keepLinks = args.links
    options.keepSections = args.sections
    options.keepLists = args.lists
    options.toHTML = args.html
    options.write_json = args.json
    options.print_revision = args.revision
    options.min_text_length = args.min_text_length
    if args.html:
        options.keepLinks = True
    options.expand_templates = args.no_templates
    options.filter_disambig_pages = args.filter_disambig_pages
    options.keep_tables = args.keep_tables
    # Parse the "n[KMG]" size suffix into a byte count.
    try:
        power = ('kmg'.find(args.bytes[(- 1)].lower()) + 1)
        file_size = (int(args.bytes[:(- 1)]) * (1024 ** power))
        if (file_size < minFileSize):
            raise ValueError()
    except ValueError:
        logging.error('Insufficient or invalid size: %s', args.bytes)
        return
    if args.namespaces:
        options.acceptedNamespaces = set(args.namespaces.split(','))
    if args.ignored_tags:
        ignoredTags = set(args.ignored_tags.split(','))
    else:
        # Default set of inline tags whose content is kept but tags dropped.
        ignoredTags = ['abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'em', 'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd', 'p', 'plaintext', 's', 'span', 'strike', 'strong', 'tt', 'u', 'var']
    for tag in ignoredTags:
        ignoreTag(tag)
    if args.discard_elements:
        options.discardElements = set(args.discard_elements.split(','))
    FORMAT = '%(levelname)s: %(message)s'
    logging.basicConfig(format=FORMAT)
    options.quiet = args.quiet
    options.debug = args.debug
    options.log_file = args.log_file
    createLogger(options.quiet, options.debug, options.log_file)
    input_file = args.input
    if (not options.keepLinks):
        ignoreTag('a')
    # Debug path: extract a single article to stdout and exit.
    if args.article:
        if args.templates:
            if os.path.exists(args.templates):
                with open(args.templates) as file:
                    load_templates(file)
        file = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
        for page_data in pages_from(file):
            (id, revid, title, ns, catSet, page) = page_data
            Extractor(id, revid, title, page).extract(sys.stdout)
        file.close()
        return
    output_path = args.output
    if ((output_path != '-') and (not os.path.isdir(output_path))):
        try:
            os.makedirs(output_path)
        except:  # NOTE(review): bare except — consider narrowing to OSError.
            logging.error('Could not create: %s', output_path)
            return
    # Optional category include/exclude list ('#' comment, '^' exclude).
    filter_category = args.filter_category
    if ((filter_category != None) and (len(filter_category) > 0)):
        with open(filter_category) as f:
            error_cnt = 0
            for line in f.readlines():
                try:
                    line = str(line.strip())
                    if (line.startswith('#') or (len(line) == 0)):
                        continue
                    elif line.startswith('^'):
                        options.filter_category_exclude.add(line.lstrip('^'))
                    else:
                        options.filter_category_include.add(line)
                except Exception as e:
                    error_cnt += 1
                    print(('Category not in utf8, ignored. error cnt %d:\t%s' % (error_cnt, e)))
                    print(line)
        logging.info('Excluding categories:')
        logging.info(str(options.filter_category_exclude))
        logging.info('Including categories:')
        logging.info(str(len(options.filter_category_include)))
    process_dump(input_file, args.templates, output_path, file_size, args.compress, args.processes)
def load_json_string(cont):
    """Parse a JSON document that may contain comments and trailing commas."""
    # Strip // and /* */ comments first, then remove trailing commas that
    # the strict json parser would reject.
    stripped = jsmin.jsmin(cont)
    stripped = re.sub(',[ \t\r\n]*}', '}', stripped)
    stripped = re.sub(',[ \t\r\n]*\\]', ']', stripped)
    return json.loads(stripped)
class RSICD(torch.utils.data.Dataset):
    """RSICD remote-sensing image-captioning dataset.

    Yields dicts with the transformed image tensor under 'x' and the list of
    raw caption strings under 'captions'.
    """
    splits = ['train', 'val', 'test']

    def __init__(self, root: str='.data/rsicd', split: str='train', transform: T.Compose=T.Compose([T.ToTensor()])):
        assert (split in self.splits)
        self.root = root
        self.transform = transform
        self.captions = self.load_captions(os.path.join(root, 'dataset_rsicd.json'), split)
        self.image_root = 'RSICD_images'

    @staticmethod
    def load_captions(path: str, split: str) -> List[Dict]:
        """Load caption records for one split from the dataset JSON.

        BUG FIX: the original declared this as an instance method without a
        ``self`` parameter, so ``self.load_captions(path, split)`` passed
        three arguments to a two-parameter function and raised TypeError.
        ``@staticmethod`` preserves the existing two-argument call site.
        """
        with open(path) as f:
            captions = json.load(f)['images']
        return [c for c in captions if (c['split'] == split)]

    def __len__(self) -> int:
        return len(self.captions)

    def __getitem__(self, idx: int) -> Dict:
        captions = self.captions[idx]
        path = os.path.join(self.root, self.image_root, captions['filename'])
        x = Image.open(path).convert('RGB')
        x = self.transform(x)
        sentences = [sentence['raw'] for sentence in captions['sentences']]
        return dict(x=x, captions=sentences)
class RandomFourierFeatureKernel(AbstractSpectralKernel):
    """Random-Fourier-feature kernel on a manifold's Laplace–Beltrami
    eigenspaces, with optional normalisation to unit variance at identity."""

    def __init__(self, measure, manifold):
        super().__init__(measure, manifold)
        manifold.generate_lb_eigenspaces(measure)
        # Reuse the shared helper — the original inlined an identical copy
        # of compute_normalizer()'s body here.
        self.compute_normalizer()

    def compute_normalizer(self):
        """Cache k(id, id) so forward() can normalise the features."""
        point = self.manifold.id
        self.normalizer = self.forward(point, point, normalize=False)[(0, 0)]

    def forward(self, x, y=None, normalize=True):
        """Return the (lazy) kernel matrix between x and y (y defaults to x).

        In training mode the random eigenspace samples are regenerated each
        call, so the normalizer is recomputed as well.
        """
        if self.training:
            self.manifold.generate_lb_eigenspaces(self.measure)
            if normalize:
                self.compute_normalizer()
        if (y is None):
            y = x
        (x_, y_) = (self.manifold.to_group(x), self.manifold.to_group(y))
        x_embed = self.manifold.lb_eigenspaces(x_)
        y_embed = self.manifold.lb_eigenspaces(y_)
        if normalize:
            # Scale so that k(id, id) == variance.
            x_embed = ((torch.sqrt(torch.abs(self.measure.variance[0])) * x_embed) / torch.sqrt(self.normalizer))
            y_embed = ((torch.sqrt(torch.abs(self.measure.variance[0])) * y_embed) / torch.sqrt(self.normalizer))
        # Split complex embeddings into real/imag parts so the product of the
        # concatenated real features equals the real part of the inner product.
        (x_embed_real, x_embed_imag) = (x_embed.real.clone(), x_embed.imag.clone())
        (y_embed_real, y_embed_imag) = (y_embed.real.clone(), y_embed.imag.clone())
        x_embed_lazy = NonLazyTensor(torch.cat((x_embed_real, x_embed_imag), dim=(- 1)))
        y_embed_lazy = NonLazyTensor(torch.cat((y_embed_real, y_embed_imag), dim=(- 1)))
        return MatmulLazyTensor(x_embed_lazy, y_embed_lazy.t().clone())
class GraphConvolution(Layer):
    """Basic graph convolution layer (dense inputs, sparse adjacency)."""

    def __init__(self, input_dim, output_dim, adj, dropout=0.0, act=tf.nn.relu, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        # Glorot-initialised weight matrix scoped under this layer's name.
        with tf.variable_scope((self.name + '_vars')):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name='weights')
        self.dropout = dropout
        self.adj = adj
        self.act = act

    def _call(self, inputs):
        """Dropout -> weight multiply -> adjacency propagation -> activation."""
        hidden = tf.nn.dropout(inputs, (1 - self.dropout))
        hidden = tf.matmul(hidden, self.vars['weights'])
        propagated = tf.sparse_tensor_dense_matmul(self.adj, hidden)
        return self.act(propagated)
def get_bond_between_indx_atoms(mol, idx_start, idx_end) -> float:
    """Return the bond order between two atom indices, or 0.0 if unbonded."""
    bond = mol.GetBondBetweenAtoms(idx_start, idx_end)
    if bond is None:
        return 0.0
    return bond.GetBondTypeAsDouble()
def get_index(span):
    """Return the 0-based index of the sentence equal to *span* in its doc,
    or None when no sentence matches."""
    for sentence_idx, sentence in enumerate(span.doc.sents):
        if span == sentence:
            return sentence_idx
    # Falls through to an implicit None when no sentence compares equal.
class TextTransformer(Preprocessing):
    """Python-side wrapper for JVM text-feature transformers."""

    def __init__(self, bigdl_type='float', *args):
        super(TextTransformer, self).__init__(bigdl_type, *args)

    def transform(self, text_feature):
        """Apply this transformer to a TextFeature via the JVM bridge and
        wrap the returned Java value back into a Python TextFeature."""
        res = callZooFunc(self.bigdl_type, 'transformTextFeature', self.value, text_feature.value)
        return TextFeature(jvalue=res)
class TileLabelInterleaver(StyleGAN2Interleaver):
    """Interleaver whose per-tile labels come from a pandas DataFrame with a
    'label' column of array-like labels."""

    def __init__(self, tile_labels: str, resolution: Any=None, xflip: Any=None, labels: Any=None, **kwargs: Any) -> None:
        super().__init__(labels=tile_labels, **kwargs)
        # BUG FIX: validate BEFORE processing. The original called
        # _process_labels_df() first, so a non-DataFrame tripped that
        # helper's bare assert and this friendlier ValueError was
        # unreachable.
        if (not isinstance(self.labels, pd.DataFrame)):
            raise ValueError('Labels must be a pandas DataFrame.')
        self._process_labels_df()

    def _process_labels_df(self) -> None:
        """Record the label shape and warn on a tile/label count mismatch."""
        assert isinstance(self.labels, pd.DataFrame)
        first_row = next(self.labels.itertuples())
        self._label_shape = first_row.label.shape
        if ((self.rank == 0) and (self.num_tiles != len(self.labels))):
            log.warning(f'Number of tiles ({self.num_tiles}) does not equal the number of labels ({len(self.labels)}). ')

    def get_label(self, idx: Any) -> Any:
        # NOTE: intentionally ignores *idx* and samples a random label row —
        # presumably labels are interchangeable across tiles; confirm.
        idx = np.random.randint(len(self.labels))
        return self.labels.iloc[idx].label
class TID2013Folder(data.Dataset):
    """TID2013 image-quality dataset: distorted images with MOS labels.

    *index* selects reference images; every distorted image of a selected
    reference is included *patch_num* times (for patch-based augmentation).
    """

    def __init__(self, root, index, transform, patch_num):
        refpath = os.path.join(root, 'reference_images')
        # getTIDFileName presumably lists reference stems for either
        # extension case — confirm against the helper's implementation.
        refname = getTIDFileName(refpath, '.bmp.BMP')
        txtpath = os.path.join(root, 'mos_with_names.txt')
        # NOTE(review): fh is never closed — consider a with-block.
        fh = open(txtpath, 'r')
        imgnames = []
        target = []
        refnames_all = []
        for line in fh:
            line = line.split('\n')
            # Each row: "<mos_score> <distorted_image_name>".
            words = line[0].split()
            imgnames.append(words[1])
            target.append(words[0])
            # Reference id = leading "iNN" token with the 'i' stripped.
            ref_temp = words[1].split('_')
            refnames_all.append(ref_temp[0][1:])
        labels = np.array(target).astype(np.float32)
        refnames_all = np.array(refnames_all)
        refname.sort()
        sample = []
        # NOTE(review): the outer loop variable `item` is shadowed by the
        # inner loop; only the inner `item` (a row index) is ever used.
        for (i, item) in enumerate(index):
            train_sel = (refname[index[i]] == refnames_all)
            train_sel = np.where((train_sel == True))
            train_sel = train_sel[0].tolist()
            for (j, item) in enumerate(train_sel):
                for aug in range(patch_num):
                    sample.append((os.path.join(root, 'distorted_images', imgnames[item]), labels[item]))
        self.samples = sample
        self.transform = transform

    def __getitem__(self, index):
        """Return (transformed image, MOS score) for one sample."""
        (path, target) = self.samples[index]
        sample = pil_loader(path)
        sample = self.transform(sample)
        return (sample, target)

    def __len__(self):
        length = len(self.samples)
        return length
def make_schema_copying_data_provider(data_sources_source, data_sources_target, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
    """Build a SchemaCopyingDataProvider over source/target/schema files.

    data_sources_target may be None (inference), in which case only the
    source and schema datasets are constructed.

    BUG FIX: the original hard-coded ``reader=tf.TextLineReader`` and
    ``source_delimiter=' '`` in the base call, silently ignoring the
    caller-supplied ``reader`` and ``source_delimiter`` arguments.
    """
    (dataset_source, dataset_schemas) = _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=reader, num_samples=num_samples, source_delimiter=source_delimiter, **kwargs)
    dataset_target = None
    if (data_sources_target is not None):
        decoder_target = copying_decoder.SchemaCopyingDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
        dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
    return SchemaCopyingDataProvider(dataset1=dataset_source, dataset2=dataset_target, schemas=dataset_schemas, **kwargs)
def ReadLexicon(lexicon_file_handle):
    """Parse a pronunciation lexicon into a set of (word, phones) pairs.

    Blank lines are skipped; a line with a word but no phones raises.
    A falsy handle (e.g. None) yields an empty set.
    """
    lexicon = set()
    if not lexicon_file_handle:
        return lexicon
    for line in lexicon_file_handle.readlines():
        splits = line.strip().split()
        if (len(splits) == 0):
            continue
        if (len(splits) < 2):
            raise Exception((('Invalid format of line ' + line) + ' in lexicon file.'))
        lexicon.add((splits[0], ' '.join(splits[1:])))
    return lexicon
class DistStereoVisHook(DistVisHook):
    """Distributed visualization hook for stereo results."""

    def visualize(self, runner, results):
        """Copy every entry of each non-None result dict into the runner's
        log buffer, log completion, and mark the buffer ready."""
        for result in results:
            if result is None:
                continue
            for key, value in result.items():
                runner.log_buffer.output[key] = value
        runner.logger.info('Epoch [{}] Visualization Finished!'.format((runner.epoch + 1)))
        runner.log_buffer.ready = True
def training_2nd_item_task_fbne(model, sess):
    """Second-stage training loop for the FBNE item task.

    Trains for setting.second_item_epoch epochs; every `verbose` epochs it
    reports train loss and validation cosine/pearson, optionally saves a
    checkpoint, then re-samples the training instances for the next epoch.
    """
    # NOTE(review): best_loss starts at 0 but the checkpoint condition below
    # is `cosine < best_loss`; for a cosine-similarity metric a `>` (track
    # best score) looks intended — as written the branch can only fire for
    # negative cosine. Confirm against the original repo before changing.
    best_loss = 0
    saver = tf.train.Saver()
    data_train = fbne_data.Dataset(setting.oracle_training_file_item_task)
    train_batches = data_train.get_positive_instances_item_task(0, 'train')
    num_batch_train = ((data_train.oracle_num_items // setting.batch_size_item) + 1)
    train_batch_index = range(num_batch_train)
    data_valid = fbne_data.Dataset(setting.oracle_valid_file_item_task)
    valid_batches = data_valid.get_positive_instances_item_task(0, 'valid')
    num_batch_valid = ((data_valid.oracle_num_items // setting.batch_size_item) + 1)
    valid_batch_index = range(num_batch_valid)
    for epoch_count in range(setting.second_item_epoch):
        train_begin = time()
        training_batch_2nd_item_task_fbne(data_train, train_batch_index, model, sess, train_batches, True)
        train_time = (time() - train_begin)
        # Periodic evaluation / checkpointing.
        if ((epoch_count % setting.verbose) == 0):
            loss_begin = time()
            train_loss = training_loss_2nd_item_task_fbne(data_train, train_batch_index, model, sess, train_batches, True)
            loss_time = (time() - loss_begin)
            eval_begin = time()
            (cosine, pearson) = evaluate_2nd_item_task_fbne(data_valid, valid_batch_index, model, sess, valid_batches, False)
            eval_time = (time() - eval_begin)
            print(('epoch %d, train time is %.4f, loss time is %.4f, eval_time is %.4f, train_loss is %.4f, test cosine value is %.4f, test pearson value is %.4f' % (epoch_count, train_time, loss_time, eval_time, train_loss, cosine, pearson)))
            if (cosine < best_loss):
                best_loss = cosine
                saver.save(sess, setting.checkpoint_path_item_task, global_step=epoch_count)
        # Re-sample the training instances for the next epoch.
        data_train = fbne_data.Dataset(setting.oracle_training_file_item_task)
        train_batches = data_train.get_positive_instances_item_task((epoch_count + 1), 'train')
        num_batch_train = ((data_train.oracle_num_items // setting.batch_size_item) + 1)
        train_batch_index = range(num_batch_train)
class TFElectraForMultipleChoice():
    """Import-time placeholder used when TensorFlow is not installed: every
    entry point raises via requires_tf instead of failing with ImportError."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    # NOTE(review): upstream dummy objects usually declare from_pretrained as
    # a classmethod; here it is an instance method — confirm intent.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class FullGrad():
    """FullGrad saliency: decomposes the network output into an
    input-gradient term plus per-layer bias*gradient terms, and aggregates
    them into a saliency map. Verifies the completeness identity on init."""

    def __init__(self, model, im_size=(3, 224, 224)):
        self.model = model
        self.im_size = ((1,) + im_size)  # prepend a batch dimension
        self.model_ext = FullGradExtractor(model, im_size)
        self.biases = self.model_ext.getBiases()
        self.checkCompleteness()

    def checkCompleteness(self):
        """Assert raw max logit == input-grad dot input + sum of bias-grads
        on a random input (the FullGrad completeness property)."""
        cuda = next(self.model.parameters()).is_cuda
        device = torch.device(('cuda' if cuda else 'cpu'))
        input = torch.randn(self.im_size).to(device)
        self.model.eval()
        raw_output = self.model(input)
        (input_grad, bias_grad) = self.fullGradientDecompose(input, target_class=None)
        fullgradient_sum = (input_grad * input).sum()
        for i in range(len(bias_grad)):
            fullgradient_sum += bias_grad[i].sum()
        err_message = '\nThis is due to incorrect computation of bias-gradients.'
        err_string = ((('Completeness test failed! Raw output = ' + str(raw_output.max().item())) + ' Full-gradient sum = ') + str(fullgradient_sum.item()))
        assert isclose(raw_output.max().item(), fullgradient_sum.item(), rel_tol=0.0001), (err_string + err_message)
        print('Completeness test passed for FullGrad.')

    def fullGradientDecompose(self, image, target_class=None):
        """Return (input gradient, list of bias*gradient terms) w.r.t. the
        target class (defaults to the argmax prediction)."""
        self.model.eval()
        image = image.requires_grad_()
        out = self.model(image)
        if (target_class is None):
            target_class = out.data.max(1, keepdim=True)[1]
        # -NLL over raw outputs selects the target logits (summed over batch).
        output_scalar = ((- 1.0) * F.nll_loss(out, target_class.flatten(), reduction='sum'))
        (input_gradient, feature_gradients) = self.model_ext.getFeatureGrads(image, output_scalar)
        bias_times_gradients = []
        L = len(self.biases)
        for i in range(L):
            # Feature gradients arrive in reverse layer order.
            g = feature_gradients[((L - 1) - i)]
            # Broadcast the bias over all non-channel dimensions.
            bias_size = ([1] * len(g.size()))
            bias_size[1] = self.biases[i].size(0)
            b = self.biases[i].view(tuple(bias_size))
            bias_times_gradients.append((g * b.expand_as(g)))
        return (input_gradient, bias_times_gradients)

    def _postProcess(self, input, eps=1e-06):
        """Min-max normalise each sample's absolute map to roughly [0, 1]."""
        input = abs(input)
        flatin = input.view((input.size(0), (- 1)))
        (temp, _) = flatin.min(1, keepdim=True)
        # NOTE(review): the double unsqueeze assumes 4-D (N, C, H, W) maps —
        # confirm callers never pass other ranks.
        input = (input - temp.unsqueeze(1).unsqueeze(1))
        flatin = input.view((input.size(0), (- 1)))
        (temp, _) = flatin.max(1, keepdim=True)
        input = (input / (temp.unsqueeze(1).unsqueeze(1) + eps))
        return input

    def saliency(self, image, target_class=None):
        """Return the aggregated FullGrad saliency map for *image*."""
        self.model.eval()
        (input_grad, bias_grad) = self.fullGradientDecompose(image, target_class=target_class)
        grd = (input_grad * image)
        gradient = self._postProcess(grd).sum(1, keepdim=True)
        cam = gradient
        im_size = image.size()
        for i in range(len(bias_grad)):
            # Only spatial (image-rank) bias-gradient maps contribute.
            if (len(bias_grad[i].size()) == len(im_size)):
                temp = self._postProcess(bias_grad[i])
                gradient = F.interpolate(temp, size=(im_size[2], im_size[3]), mode='bilinear', align_corners=True)
                cam += gradient.sum(1, keepdim=True)
        return cam
def resnet152(pretrained: bool=False, **kwargs: Any) -> ResNet:
    """ResNet-152: Bottleneck blocks in a 3-8-36-3 layout.

    ``pretrained`` is forwarded to the shared ``_resnet`` builder
    (presumably toggling pretrained-weight loading — confirm).
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, **kwargs)
# NOTE(review): the bare "_auth" below is likely the tail of a mangled
# decorator (e.g. "@require_auth") lost during extraction — restore before use.
_auth
def fetch_accounts(filters, url, auth_headers):
    """GET {url}/api/v1/accounts/ with the given query filters and headers.

    Returns the 'results' list from the JSON body; raises requests.HTTPError
    (via raise_for_status) for non-200 responses.
    """
    endpoint = f'{url}/api/v1/accounts/'
    r = requests.get(endpoint, headers=auth_headers, params=filters)
    if (r.status_code != 200):
        r.raise_for_status()
    return json.loads(r.text)['results']
def conv(x, channels, kernel=4, stride=2, pad=0, use_bias=True, scope='conv_0'):
    """2-D convolution with explicit symmetric zero padding (TF1 style).

    NOTE(review): the input is padded manually AND the conv layer uses
    padding='same' — comparable codebases pair manual padding with 'VALID',
    so the double padding may be unintended. Confirm against the source repo.
    """
    with tf.variable_scope(scope):
        # Pad only the spatial dimensions (NHWC layout).
        x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
        x = tf.layers.conv2d(inputs=x, filters=channels, padding='same', kernel_size=kernel, kernel_initializer=tf.contrib.layers.xavier_initializer(), strides=stride, use_bias=use_bias)
        return x
def to_bytes(string):
    """Encode *string* as UTF-8 bytes on Python 3; pass through on Python 2.

    BUG FIX: the extracted source read ``if (sys.hexversion > ):`` — the
    version constant was lost, leaving a syntax error. Restored as the
    conventional Python-3 boundary check (0x3000000).
    """
    if sys.hexversion >= 0x3000000:
        return bytes(string, 'utf-8')
    return string
def preprocess_buys(path=DATA_PATH, file=DATA_FILE, path_proc=DATA_PATH_PROCESSED, version=VERSION):
    """Load the raw buys data and persist only the buy events under the
    processed-data directory (same file name, different folder)."""
    (data, buys) = load_data((path + file), version)
    store_buys(buys, (path_proc + file))
def vgg11(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> VGG:
    """VGG-11 (configuration 'A', no batch norm).

    ``pretrained`` and ``progress`` are forwarded to the shared ``_vgg``
    builder (presumably controlling weight download and its progress bar —
    confirm).
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
class SmoothCrossEntropyLoss(_Loss):
    """Cross entropy with label smoothing over a fixed vocabulary.

    Positions whose target equals ``ignore_index`` contribute nothing to the
    loss and are excluded from the 'mean' normalisation.
    """
    __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']

    def __init__(self, label_smoothing, vocab_size, ignore_index=(- 100), reduction='mean', is_logits=True):
        assert (0.0 <= label_smoothing <= 1.0)
        super().__init__(reduction=reduction)
        self.label_smoothing = label_smoothing
        self.vocab_size = vocab_size
        self.ignore_index = ignore_index
        self.input_is_logits = is_logits

    def forward(self, input, target):
        # Smoothed target distribution: (1 - eps) * one_hot + eps / V,
        # zeroed wherever the target is the ignore index.
        ignore_mask = (target == self.ignore_index).unsqueeze(-1)
        one_hot = F.one_hot(target.long(), self.vocab_size).type(torch.float32)
        uniform = 1.0 / self.vocab_size
        smoothed = (1.0 - self.label_smoothing) * one_hot + self.label_smoothing * uniform
        smoothed = smoothed.masked_fill(ignore_mask, 0)
        ce = self.cross_entropy_with_logits(smoothed, input)
        if self.reduction == 'mean':
            # Normalise by the number of non-ignored positions.
            valid = torch.sum(target != self.ignore_index)
            return ce.sum() / valid
        if self.reduction == 'sum':
            return ce.sum()
        raise NotImplementedError

    def cross_entropy_with_logits(self, p, q):
        """Per-position H(p, softmax(q)) = -sum_v p_v * log softmax(q)_v."""
        return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)
def judge_is_nan(list_of_np_or_tensor):
    """Raise ValueError if any entry (ndarray or framework tensor) has NaNs.

    Entries exposing a .numpy() method (e.g. torch / eager-TF tensors) are
    converted first; everything else is checked directly with numpy. The
    offending list is printed before raising to aid debugging.
    """
    for entry in list_of_np_or_tensor:
        values = entry.numpy() if hasattr(entry, 'numpy') else entry
        if np.any(np.isnan(values)):
            print(list_of_np_or_tensor)
            raise ValueError
def main():
    """Fine-tune ChatGLM-6B with LoRA on top of 8-bit base weights.

    Parses FinetuneArguments/TrainingArguments from the command line, builds
    the PEFT-wrapped model, trains, and saves the adapter weights.
    """
    writer = SummaryWriter()
    (finetune_args, training_args) = HfArgumentParser((FinetuneArguments, TrainingArguments)).parse_args_into_dataclasses()
    # Base model quantised to 8-bit and sharded across available devices.
    model = AutoModel.from_pretrained('THUDM/chatglm-6b', load_in_8bit=True, trust_remote_code=True, device_map='auto', torch_dtype=torch.float16)
    model.gradient_checkpointing_enable()
    model.enable_input_require_grads()
    model.is_parallelizable = False
    model.model_parallel = False
    # Wrap the LM head so its outputs are cast to float for stable logits.
    model.lm_head = CastOutputToFloat(model.lm_head)
    model.config.use_cache = False  # KV cache is incompatible with checkpointing
    peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=finetune_args.lora_rank, lora_alpha=16, lora_dropout=0.05, bias='none')
    model = get_peft_model(model, peft_config)
    dataset = datasets.load_from_disk(finetune_args.dataset_path)
    print(f''' len(dataset)={len(dataset)!r} ''')
    print('training_args: ', training_args)
    # NOTE(review): these hard-coded overrides clobber any CLI-supplied
    # values parsed above — confirm that is intentional.
    training_args.gradient_accumulation_steps = 32
    training_args.per_device_train_batch_size = 1
    training_args.warmup_steps = 100
    training_args.logging_steps = 10
    training_args.optim = 'adamw_torch'
    training_args.fp16 = True
    training_args.num_train_epochs = 3
    training_args.group_by_length = False
    trainer = ModifiedTrainer(model=model, train_dataset=dataset, args=training_args, callbacks=[TensorBoardCallback(writer)], data_collator=data_collator)
    trainer.train()
    writer.close()
    # Saves only the LoRA adapter weights (PEFT behaviour).
    model.save_pretrained(training_args.output_dir)
def transform_finished_strategies_to_hebo(space, opt_lib_group, finished_strategies, included_opts=[]):
    """Convert finished tuning strategies into a HEBO design matrix (X, y).

    Each strategy becomes one row of X (one categorical column per opt
    group, 'NotChosen' where a group was unused); y is the throughput,
    negated on return because HEBO minimises.

    NOTE(review): `included_opts` is never used and has a mutable default —
    confirm whether it can be removed or should default to None.
    """
    # Map every individual opt name to its group (column) name; duplicates
    # across groups are a configuration error.
    opt_to_group_hash = {}
    for (group_name, opt_candidates) in opt_lib_group.items():
        for opt_name in opt_candidates:
            assert (not (opt_name in opt_to_group_hash.keys())), 'We should not have duplicate opts in different groups!!'
            opt_to_group_hash[opt_name] = group_name
    X = pd.DataFrame(columns=space.para_names, dtype=object)
    y = []
    for strategy_info in finished_strategies.values():
        X_dict = {}
        for opt in strategy_info.strategy:
            if (opt[0] == 'parallel_mode'):
                # Serialise the parallel-mode config (data/tensor degrees)
                # as a canonical JSON string so it acts as one category.
                config = {item[0]: item[1] for item in pickle.loads(opt[1])[0]}
                if ('tensor' not in config.keys()):
                    config['tensor'] = 1
                config = {key: config[key] for key in ['data', 'tensor']}
                X_dict[opt_to_group_hash[opt[0]]] = json.dumps(config)
            elif (opt[0] in ['zero1', 'zero2', 'fsdp']):
                # ZeRO-style opts are expanded into finer-grained categories.
                if (opt[0] == 'zero1'):
                    X_dict[opt_to_group_hash[opt[0]]] = opt[0]
                elif (opt[0] == 'zero2'):
                    zero_config = pickle.loads(opt[1])
                    if zero_config['not_use_fsdp']:
                        X_dict[opt_to_group_hash[opt[0]]] = 'zero2_fairscale'
                    else:
                        X_dict[opt_to_group_hash[opt[0]]] = 'zero2_fsdp'
                else:
                    assert (opt[0] == 'fsdp')
                    X_dict[opt_to_group_hash[opt[0]]] = 'fsdp'
            else:
                X_dict[opt_to_group_hash[opt[0]]] = opt[0]
        # Groups not present in the strategy get the explicit 'NotChosen'.
        for group_name in space.para_names:
            if (not (group_name in X_dict.keys())):
                X_dict[group_name] = 'NotChosen'
        X = pd.concat([X, pd.DataFrame(X_dict, index=[0], dtype=object)], ignore_index=True, axis=0)
        # Strategies without a dryrun result score 0 throughput.
        dryrun_result = strategy_info.dryrun_result
        if dryrun_result:
            y.append(strategy_info.dryrun_result['throughput'])
        else:
            y.append(0.0)
    return (X, ((- 1) * np.array(y).reshape((- 1), 1)))
class ConLL2003Standardiser(SpanAnnotator):
    """Normalise span labels from assorted tagsets to CoNLL-2003
    (PER / ORG / LOC / MISC). Spans containing a newline and spans with any
    unmapped label are dropped."""

    # Labels rewritten to a CoNLL-2003 tag (a fresh Span is created).
    _RELABEL = {'PERSON': 'PER', 'ORGANIZATION': 'ORG', 'ORGANISATION': 'ORG', 'COMPANY': 'ORG', 'GPE': 'LOC', 'EVENT': 'MISC', 'FAC': 'MISC', 'LANGUAGE': 'MISC', 'LAW': 'MISC', 'NORP': 'MISC', 'PRODUCT': 'MISC', 'WORK_OF_ART': 'MISC'}
    # Labels already in the target tagset (the span is kept as-is).
    _KEEP = {'PER', 'ORG', 'LOC', 'MISC'}

    def __init__(self):
        super(ConLL2003Standardiser, self).__init__('')

    def __call__(self, doc):
        for source in doc.spans:
            kept = []
            for span in doc.spans[source]:
                if ('\n' in span.text):
                    continue
                if span.label_ in self._KEEP:
                    kept.append(span)
                elif span.label_ in self._RELABEL:
                    kept.append(Span(doc, span.start, span.end, label=self._RELABEL[span.label_]))
                # any other label is silently dropped
            doc.spans[source] = kept
        return doc
def make_env(with_ns: bool, PATHS: dict, PARAMS: dict, log: bool=False, max_steps: int=1000):
    """Factory returning a zero-argument env initializer (vec-env style).

    with_ns: run the env under the 'eval_sim' ROS namespace.
    log: additionally wrap the env in a Monitor writing to PATHS['log'].
    """
    def _init():
        ns = (f'eval_sim' if with_ns else '')
        env = GazeboEnv(ns, PARAMS['reward_fnc'], PARAMS['discrete_action_space'], goal_radius=0.05, max_steps_per_episode=max_steps, train_mode=False, task_mode='scenario', PATHS=PATHS, curr_stage=4, extended_eval=True)
        if log:
            env = Monitor(env, PATHS['log'], False, info_keywords=('collisions', 'distance_travelled', 'time_safe_dist', 'time', 'done_reason', 'is_success'))
        return env
    return _init
def DeeplabMulti(pretrained=True, num_classes=21):
    """Build a ResNet-101-based multi-head DeepLab model, optionally seeding
    it from the RESTORE_FROM checkpoint."""
    model = ResNetMulti(Bottleneck, [3, 4, 23, 3], num_classes)
    if pretrained:
        saved_state_dict = model_zoo.load_url(RESTORE_FROM)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Checkpoint keys carry an extra leading prefix; strip it when
            # mapping into this model's state dict.
            i_parts = i.split('.')
            # Skip the layer5 classifier only for 19-class targets (the
            # checkpoint head would not match that class count).
            if ((not (num_classes == 19)) or (not (i_parts[1] == 'layer5'))):
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
        model.load_state_dict(new_params)
    return model
def saliency_map_gradient(numpy_image, model, attr_func):
    """Gradient of attr_func(model(x)) with respect to the input image.

    Returns a tuple of (gradient as a numpy array shaped like the input,
    the raw model output tensor).
    """
    image_tensor = torch.from_numpy(numpy_image)
    image_tensor.requires_grad_(True)
    output = model(_add_batch_one(image_tensor))
    attr_func(output).backward()
    return (image_tensor.grad.numpy(), output)
class LeNet5Base(nn.Module):
    """Classic LeNet-5-style CNN for 1x28x28 inputs.

    ``num_classes`` may be a plain int or a 0-d tensor-like: the original
    called ``num_classes.item()`` and crashed with AttributeError on
    ordinary ints; ``int(...)`` accepts both (bug fix, backward compatible).
    """

    def __init__(self, num_classes):
        super(LeNet5Base, self).__init__()
        self.conv_part = nn.Sequential(nn.Conv2d(1, 20, kernel_size=5), nn.ReLU(True), nn.MaxPool2d(kernel_size=2), nn.Conv2d(20, 50, kernel_size=5), nn.ReLU(True), nn.MaxPool2d(kernel_size=2))
        # 800 = 50 channels * 4 * 4 spatial positions after two conv+pool stages.
        self.fc_part = nn.Sequential(nn.Linear(800, 500), nn.ReLU(True), nn.Linear(500, int(num_classes)))
        # He-style initialisation for the convolution layers.
        for m in self.conv_part.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                m.bias.data.zero_()

    def forward(self, x):
        x = self.conv_part(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc_part(x)
        return x
def download_url(url, dst):
    """Download *url* to *dst*, printing a single-line progress report."""
    from six.moves import urllib
    print('* url="{}"'.format(url))
    print('* destination="{}"'.format(dst))

    def _reporthook(count, block_size, total_size):
        # urlretrieve callback; start_time is kept in a global because the
        # hook is re-entered across calls (preserved from the original).
        global start_time
        if (count == 0):
            start_time = time.time()
            return
        # BUG FIX: clamp the duration — a zero elapsed time on a very fast
        # first callback made the original divide by zero below.
        duration = max(time.time() - start_time, 1e-06)
        progress_size = int((count * block_size))
        speed = int((progress_size / (1024 * duration)))
        percent = int((((count * block_size) * 100) / total_size))
        sys.stdout.write(('\r...%d%%, %d MB, %d KB/s, %d seconds passed' % (percent, (progress_size / (1024 * 1024)), speed, duration)))
        sys.stdout.flush()
    urllib.request.urlretrieve(url, dst, _reporthook)
    sys.stdout.write('\n')
class DomainNetDataset(Dataset):
    """DomainNet images for one domain ('site').

    Two modes: full_set scans the domain directory for all classes;
    otherwise a fixed 10-class subset is loaded from pre-extracted
    path/label pickle lists.
    """
    all_domains = ['clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch']
    # Pre-defined domain orderings keyed by rotation index.
    resorted_domains = {0: ['real', 'clipart', 'infograph', 'painting', 'quickdraw', 'sketch'], 1: ['clipart', 'infograph', 'painting', 'quickdraw', 'sketch', 'real'], 2: ['infograph', 'painting', 'quickdraw', 'sketch', 'real', 'clipart'], 3: ['painting', 'quickdraw', 'sketch', 'real', 'clipart', 'infograph'], 4: ['quickdraw', 'sketch', 'real', 'clipart', 'infograph', 'painting'], 5: ['sketch', 'real', 'clipart', 'infograph', 'painting', 'quickdraw']}
    # Default class count for the 10-class subset (overwritten in __init__).
    num_classes = 10

    def __init__(self, site, train=True, transform=None, full_set=False):
        self.full_set = full_set
        self.base_path = DATA_PATHS['DomainNet']
        if full_set:
            # Discover classes directly from the domain directory tree.
            (classes, class_to_idx) = find_classes(f'{self.base_path}/{site}')
            self.text_labels = classes
            (self.paths, self.labels) = make_dataset_from_dir(f'{self.base_path}/{site}', class_to_idx, IMG_EXTENSIONS)
            self.num_classes = len(class_to_idx)
        else:
            # Load the fixed 10-class split from the pre-extracted pickle.
            (self.paths, self.text_labels) = np.load('{}/DomainNet/{}_{}.pkl'.format(DATA_PATHS['DomainNetPathList'], site, ('train' if train else 'test')), allow_pickle=True)
            class_to_idx = {'bird': 0, 'feather': 1, 'headphones': 2, 'ice_cream': 3, 'teapot': 4, 'tiger': 5, 'whale': 6, 'windmill': 7, 'wine_glass': 8, 'zebra': 9}
            self.labels = [class_to_idx[text] for text in self.text_labels]
            self.num_classes = len(class_to_idx)
        self.transform = transform
        self.classes = np.unique(self.labels)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Re-root the stored path onto the local base path using the last
        # three components: <site>/<class>/<filename>.
        (site, cls, fname) = self.paths[idx].split('/')[(- 3):]
        img_path = os.path.join(self.base_path, site, cls, fname)
        label = self.labels[idx]
        image = Image.open(img_path)
        # Promote grayscale/alpha images to 3-channel RGB-like input.
        if (len(image.split()) != 3):
            image = transforms.Grayscale(num_output_channels=3)(image)
        if (self.transform is not None):
            image = self.transform(image)
        return (image, label)
# NOTE(review): the bare "_model" below is almost certainly the tail of a
# mangled "@register_model" decorator (timm convention) lost during
# extraction — restore before use.
_model
def nfnet_f0(pretrained=False, **kwargs):
    """NFNet-F0 constructor, delegating to the shared normalizer-free builder."""
    return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs)
def _resample(img, class_info, magnitude):
    """Additive Gaussian-noise augmentation scaled by a per-class std map.

    Returns (augmented image, empty op list). Requires CUDA — both the
    noise and class_info['sd'] are moved to the GPU.
    """
    x = img
    # float_parameter presumably maps the discrete magnitude level onto a
    # continuous scale in [0, 1] — confirm against the helper.
    m = float_parameter(magnitude, 1)
    noise = torch.randn(img.size()).cuda()
    x_hat = (x + ((noise * class_info['sd'].cuda()) * m))
    return (x_hat, [])
def ApplyFont(ax):
    """Style ticks, axis labels and title of *ax* with Times New Roman 14pt.

    BUG FIX: the original called ``ax.set_ylabe(txt)`` (missing 'l'),
    raising AttributeError before the y label and title were styled.
    """
    text_size = 14.0

    def _style(text_obj):
        # Shared styling for a single matplotlib Text object.
        text_obj.set_fontname('Times New Roman')
        text_obj.set_fontsize(text_size)

    for tick in (ax.get_xticklabels() + ax.get_yticklabels()):
        _style(tick)
    # Re-setting each label/title returns its Text object for styling.
    _style(ax.set_xlabel(ax.get_xlabel()))
    _style(ax.set_ylabel(ax.get_ylabel()))
    _style(ax.set_title(ax.get_title()))
def parse_log(log_path):
    """Extract metric values (and parameter count) from a training log.

    The set of expected metrics is inferred from the log's path name:
    keypoints runs report inter-ocular-distance errors, everything else
    reports per-identity mean pixel errors. The last occurrence of each
    metric tag wins; the expected number of occurrences is asserted.
    """
    with open(log_path, 'r') as f:
        lines = f.read().splitlines()

    path_str = str(log_path)
    if ('limit-annos' in path_str) and ('keypoints' in path_str):
        metrics, expected_occurences = {'mean_iod'}, 1
    elif 'keypoints' in path_str:
        metrics, expected_occurences = {'iod'}, 300
    else:
        metrics, expected_occurences = {'same-identity', 'different-identity'}, 1

    results = {}
    for metric in metrics:
        if metric == 'iod':
            tag = 'val_inter_ocular_error'
        elif metric == 'mean_iod':
            tag = 'val_inter_ocular_error -> mean'
        else:
            tag = f'Mean Pixel Error ({metric})'
        results[metric] = OrderedDict()
        hits = [idx for idx, row in enumerate(lines) if tag in row]
        msg = f'expected {expected_occurences} occurences of {metric} tag in {log_path}'
        assert len(hits) == expected_occurences, msg
        # Parse the last matching row; values are whitespace-separated.
        tokens = lines[hits[-1]].split(' ')
        if metric == 'mean_iod':
            # Row ends "... <mean>, <std>" -- strip the trailing comma.
            val = (float(tokens[-3][:-1]), float(tokens[-1]))
        else:
            val = float(tokens[-1])
        results[metric] = val
        print(f'{log_path.parent.parent.stem}: {metric} {val}')

    for row in lines:
        if 'Trainable parameters' in row:
            results['params'] = int(row.split(' ')[-1])
    return results
class RobertaConfig(PretrainedConfig):
    """Configuration holding the hyper-parameters of a RoBERTa model."""

    model_type = 'roberta'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12,
                 num_attention_heads=12, intermediate_size=3072, hidden_act='gelu',
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, position_embedding_type='absolute',
                 use_cache=True, classifier_dropout=None, **kwargs):
        # Special-token ids (plus any extra kwargs) are consumed by the parent.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id, **kwargs)
        # Record the remaining hyper-parameters on the instance, in the same
        # order the original per-attribute assignments used.
        own = dict(vocab_size=vocab_size,
                   hidden_size=hidden_size,
                   num_hidden_layers=num_hidden_layers,
                   num_attention_heads=num_attention_heads,
                   hidden_act=hidden_act,
                   intermediate_size=intermediate_size,
                   hidden_dropout_prob=hidden_dropout_prob,
                   attention_probs_dropout_prob=attention_probs_dropout_prob,
                   max_position_embeddings=max_position_embeddings,
                   type_vocab_size=type_vocab_size,
                   initializer_range=initializer_range,
                   layer_norm_eps=layer_norm_eps,
                   position_embedding_type=position_embedding_type,
                   use_cache=use_cache,
                   classifier_dropout=classifier_dropout)
        for attr_name, attr_value in own.items():
            setattr(self, attr_name, attr_value)
def create_testing_dataset_files(name_to_prepend, dataset, reactants_to_reactant_id_dict):
    """Split reactions into reachable/unreachable sets and write them to disk.

    A reaction is "reachable" when every reactant appears in
    ``reactants_to_reactant_id_dict``; its reactants are then written as a
    comma-separated id bag. Four text files (react_bags, products,
    unreachable_reactants, unreachable_products) are written under the
    processed-data directory, prefixed with ``name_to_prepend``.
    """
    print(f'Going through dataset {name_to_prepend}')
    reactants_interested_in_set = set(reactants_to_reactant_id_dict.keys())

    reachable_bags, reachable_products = [], []
    unreachable_reactants, unreachable_products = [], []
    for (reaction_smi_frozen_set, product_smi_frozen_set) in tqdm.tqdm(
            dataset, desc=f'Going through {name_to_prepend}'):
        product_str = '.'.join(sorted(list(product_smi_frozen_set)))
        if reaction_smi_frozen_set.issubset(reactants_interested_in_set):
            bag = ','.join([str(reactants_to_reactant_id_dict[react])
                            for react in reaction_smi_frozen_set])
            reachable_bags.append(bag)
            reachable_products.append(product_str)
        else:
            unreachable_reactants.append('.'.join(sorted(list(reaction_smi_frozen_set))))
            unreachable_products.append(product_str)

    print(f'For dataset {name_to_prepend} have found {len(reachable_bags)} and {len(unreachable_reactants)}')

    out_dir = mchef_config.get_processed_data_dir()
    outputs = [
        ('react_bags', reachable_bags),
        ('products', reachable_products),
        ('unreachable_reactants', unreachable_reactants),
        ('unreachable_products', unreachable_products),
    ]
    for suffix, rows in outputs:
        with open(path.join(out_dir, f'{name_to_prepend}_{suffix}.txt'), 'w') as fo:
            fo.write('\n'.join(rows))
def get_tens_mem(tensor):
    """Return ``tensor``'s memory footprint in MiB, rounded to 4 decimals.

    Non-tensor inputs are treated as occupying zero bytes.
    """
    num_bytes = tensor.element_size() * tensor.nelement() if torch.is_tensor(tensor) else 0
    return round(num_bytes / 1048576, 4)
class EnvDiscrete(EnvFeature):
    """Discrete-action limit-order-book market-making environment.

    Loads one instrument/day of order-book, price, trade and message data and
    exposes a Tensorforce-style interface (states/actions/max_episode_timesteps).

    NOTE(review): ``actions()`` advertises 5 discrete values, yet
    ``action2order`` handles actions 0-7 -- confirm which range is intended.
    NOTE(review): the ``data_norm`` constructor argument is never used here;
    presumably consumed by the parent or dead -- verify.
    """

    def __init__(self, code='000001', day='', data_norm=True, latency=1, T=50,
                 wo_lob_state=False, wo_market_state=False, wo_agent_state=False,
                 wo_dampened_pnl=False, wo_matched_pnl=False, wo_inv_punish=False,
                 **kwargs):
        super().__init__(**kwargs)
        print('Environment: EnvDiscrete')
        self.code = code                      # instrument code
        self.day = day2date(day)              # trading day, converted to a date
        self.latency = latency                # action-to-order delay in steps
        self.T = T                            # LOB history window length
        # Ablation switches: drop parts of the observation.
        self.wo_lob_state = wo_lob_state
        self.wo_market_state = wo_market_state
        self.wo_agent_state = wo_agent_state
        # Reward-component gates (1 = enabled, 0 = ablated).
        self.r_da = (0 if wo_dampened_pnl else 1)
        self.r_ma = (0 if wo_matched_pnl else 1)
        self.r_ip = (0 if wo_inv_punish else 1)
        self.theta = 0.01                     # inventory-punishment coefficient
        self.eta = 0.5                        # asymmetric-dampening coefficient
        self.init_states()
        self.load_orderbook(code=code, day=day)
        self.load_price(code=code, day=day)
        self.load_trade(code=code, day=day)
        self.load_msg(code=code, day=day)

    def init_states(self):
        # Build the observation-space spec honouring the ablation switches.
        self.__states_space__ = dict()
        if (not self.wo_lob_state):
            # T snapshots of 10-level LOB (40 features), trailing channel dim.
            self.__states_space__['lob_state'] = dict(type='float', shape=(self.T, 40, 1))
        if (not self.wo_market_state):
            self.__states_space__['market_state'] = dict(type='float', shape=(24,))
        if (not self.wo_agent_state):
            self.__states_space__['agent_state'] = dict(type='float', shape=(24,))

    def states(self):
        return self.__states_space__

    def actions(self):
        # NOTE(review): see class docstring -- action2order covers 0-7.
        return dict(type='int', num_values=5)

    def max_episode_timesteps(self):
        # NOTE(review): __max_episode_timesteps__ is set elsewhere (parent or
        # episode loader) -- not visible in this block.
        return self.__max_episode_timesteps__

    def action2order(self, actions):
        """Translate a discrete action into a two-sided quote (or a flatten order).

        Actions 0-6 quote around the (latency-delayed) best bid/ask with
        increasing spreads; action 7 crosses the book to flatten inventory.
        """
        # Prices from `latency` steps ago -- the agent acts on stale data.
        (t_1_mid_price, t_1_a1_price, t_1_b1_price, t_1_spread) = self.get_price_info((self.i - self.latency))
        (ask_price, bid_price) = (0, 0)
        # Negative volume = sell side; one trade unit per side by default.
        (ask_volume, bid_volume) = ((- TRADE_UNIT), TRADE_UNIT)
        if (actions in range(7)):
            if (actions == 0):
                ask_price = t_1_a1_price
                bid_price = t_1_b1_price
            elif (actions == 1):
                ask_price = t_1_a1_price
                bid_price = (t_1_b1_price - 0.01)
            elif (actions == 2):
                ask_price = (t_1_a1_price + 0.01)
                bid_price = t_1_b1_price
            elif (actions == 3):
                ask_price = (t_1_a1_price + 0.01)
                bid_price = (t_1_b1_price - 0.01)
            elif (actions == 4):
                ask_price = t_1_a1_price
                bid_price = (t_1_b1_price - 0.02)
            elif (actions == 5):
                ask_price = (t_1_a1_price + 0.02)
                bid_price = t_1_b1_price
            elif (actions == 6):
                ask_price = (t_1_a1_price + 0.02)
                bid_price = (t_1_b1_price - 0.02)
        elif (actions == 7):
            # Flatten: buy back shorts at any price, or dump longs at 0.01.
            if (self.inventory < 0):
                (bid_price, bid_volume) = (np.inf, (- self.inventory))
            elif (self.inventory > 0):
                (ask_price, ask_volume) = (0.01, (- self.inventory))
        else:
            # NOTE(review): these locals are never used -- presumably a no-op
            # branch for out-of-range actions; confirm.
            (trade_price, trade_volume) = (0, 0)
        # Hard inventory limits: stop quoting the side that would grow beyond
        # +/- 10 trade units.
        if (self.inventory < ((- 10) * TRADE_UNIT)):
            ask_price = 0
            ask_volume = 0
        elif (self.inventory > (10 * TRADE_UNIT)):
            bid_price = 0
            bid_volume = 0
        orders = {'ask_price': ask_price, 'ask_vol': ask_volume, 'bid_price': bid_price, 'bid_vol': bid_volume}
        return orders

    def get_reward(self, trade_price, trade_volume):
        """Return the step reward and roll the value bookkeeping forward.

        NOTE(review): dampened_pnl, matched_pnl and inventory_punishment (and
        the r_da/r_ma/r_ip gates set in __init__) are computed but unused --
        the reward is the raw PnL. Confirm whether a combined reward such as
        r_da*dampened_pnl + r_ma*matched_pnl - r_ip*inventory_punishment was
        intended.
        """
        pnl = (self.value - self.value_)
        asymmetric_dampen = max(0, (self.eta * pnl))
        dampened_pnl = (pnl - asymmetric_dampen)
        matched_pnl = ((self.mid_price - trade_price) * trade_volume)
        delta_inventory = (abs(self.inventory) - abs(self.inventory_))
        inventory_punishment = (self.theta * (delta_inventory / TRADE_UNIT))
        reward = pnl
        self.value_ = self.value
        return reward

    def get_state_at_t(self, t):
        """Assemble the observation dict for step ``t`` (honouring ablations)."""
        self.__state__ = dict()
        if (not self.wo_lob_state):
            # Last T LOB snapshots, normalised around the mid price.
            lob = self.episode_state.iloc[(t - self.T):t]
            mid_price = ((lob.ask1_price + lob.bid1_price) / 2)
            lob_normed = lob_norm(lob, mid_price)
            self.__state__['lob_state'] = np.expand_dims(np.array(lob_normed), (- 1))
        if (not self.wo_market_state):
            self.__state__['market_state'] = (self._get_market_state(t) + self._get_order_strength_index(t))
        if (not self.wo_agent_state):
            # Normalised inventory and episode progress, each tiled 12x.
            self.__state__['agent_state'] = (([(self.inventory / (10 * TRADE_UNIT))] * 12) + ([(t / self.episode_length)] * 12))
        return self.__state__
def reshape_patch_back(patch_tensor, patch_size):
    """Invert patch packing: [B, S, H/p, W/p, p*p*C] -> [B, S, H, W, C].

    Args:
        patch_tensor: 5-D array whose last axis packs a (patch_size x
            patch_size) spatial block for every image channel.
        patch_size: side length p of the square patches.

    Returns:
        The unpacked image tensor of shape
        [batch, seq, patch_height * p, patch_width * p, channels // (p * p)].
    """
    assert (5 == patch_tensor.ndim)
    (batch_size, seq_length, patch_height, patch_width, channels) = np.shape(patch_tensor)
    # BUG FIX: the original used true division ('/'), producing a float that
    # np.reshape rejects as a dimension; use floor division and validate.
    assert channels % (patch_size * patch_size) == 0, \
        'channel dim must be divisible by patch_size**2'
    img_channels = channels // (patch_size * patch_size)
    a = np.reshape(patch_tensor, [batch_size, seq_length,
                                  patch_height, patch_width,
                                  patch_size, patch_size,
                                  img_channels])
    # Interleave per-patch rows with patch rows (and likewise for columns).
    b = np.transpose(a, [0, 1, 2, 4, 3, 5, 6])
    img_tensor = np.reshape(b, [batch_size, seq_length,
                                (patch_height * patch_size),
                                (patch_width * patch_size),
                                img_channels])
    return img_tensor
# NOTE(review): '.slow' appears to be a truncated decorator (most likely
# '@pytest.mark.slow') -- restore the full decorator when merging.
.slow
def test_correlated_samples():
    """Autocorrelation curve and integrated time on an AR(1)-style chain match theory."""
    (nsamples, nchains) = _get_sample_size()
    decay_factor = 0.9
    correlated_samples = _construct_correlated_samples(nsamples, nchains, decay_factor)
    (autocorr_curve, _) = statistics.multi_chain_autocorr_and_variance(correlated_samples)
    tau = statistics.tau(autocorr_curve)
    nautocorr_to_check = 100
    # For geometric correlation, autocorrelation at lag k is decay_factor**k.
    expected_autocorr = (decay_factor ** jnp.arange(nautocorr_to_check))
    # NOTE(review): the trailing comma wraps this call in a throwaway 1-tuple;
    # the assertion still executes, but the wrapper is spurious.
    (np.testing.assert_allclose(autocorr_curve[0:nautocorr_to_check], expected_autocorr, atol=0.01),)
    # tau = -1 + 2 * sum_{k>=0} rho^k, with sum = 1 / (1 - rho).
    infinite_exponential_sum = (1 / (1 - decay_factor))
    expected_tau = ((- 1) + (2 * infinite_exponential_sum))
    np.testing.assert_allclose(tau, expected_tau, 0.05)
def gender_cla(ref, pred):
    """Return True when ``ref`` and ``pred`` agree on whether the gender is female.

    Both strings are lower-cased; agreement means the substring 'female'
    appears in both or in neither.
    """
    ref_is_female = 'female' in ref.lower()
    pred_is_female = 'female' in pred.lower()
    return ref_is_female == pred_is_female
# NOTE(review): '_module()' looks like a truncated registration decorator
# (most likely '@MODELS.register_module()') -- restore it when merging.
_module()
class Mask2FormerHead(MaskFormerHead):
    """Mask2Former head: a pixel decoder yields multi-scale features and a
    masked-attention transformer decoder refines a fixed set of learned
    queries into per-query class scores and instance masks.

    NOTE(review): the '...' defaults on pixel_decoder/transformer_decoder are
    extraction placeholders; the real config defaults live upstream.
    """

    def __init__(self,
                 in_channels: List[int],
                 feat_channels: int,
                 out_channels: int,
                 num_things_classes: int=80,
                 num_stuff_classes: int=53,
                 num_queries: int=100,
                 num_transformer_feat_level: int=3,
                 pixel_decoder: ConfigType=...,
                 enforce_decoder_input_project: bool=False,
                 transformer_decoder: ConfigType=...,
                 positional_encoding: ConfigType=dict(num_feats=128, normalize=True),
                 loss_cls: ConfigType=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0, reduction='mean', class_weight=(([1.0] * 133) + [0.1])),
                 loss_mask: ConfigType=dict(type='CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=5.0),
                 loss_dice: ConfigType=dict(type='DiceLoss', use_sigmoid=True, activate=True, reduction='mean', naive_dice=True, eps=1.0, loss_weight=5.0),
                 train_cfg: OptConfigType=None,
                 test_cfg: OptConfigType=None,
                 init_cfg: OptMultiConfig=None,
                 **kwargs) -> None:
        # Deliberately skips MaskFormerHead.__init__: this head builds its own
        # decoder/embeddings below, so only the grandparent init runs.
        super(AnchorFreeHead, self).__init__(init_cfg=init_cfg)
        self.num_things_classes = num_things_classes
        self.num_stuff_classes = num_stuff_classes
        self.num_classes = (self.num_things_classes + self.num_stuff_classes)
        self.num_queries = num_queries
        self.num_transformer_feat_level = num_transformer_feat_level
        self.num_heads = transformer_decoder.layer_cfg.cross_attn_cfg.num_heads
        self.num_transformer_decoder_layers = transformer_decoder.num_layers
        # The pixel decoder's encoder must see exactly the feature levels the
        # transformer decoder consumes.
        assert (pixel_decoder.encoder.layer_cfg.self_attn_cfg.num_levels == num_transformer_feat_level)
        pixel_decoder_ = copy.deepcopy(pixel_decoder)
        pixel_decoder_.update(in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels)
        self.pixel_decoder = MODELS.build(pixel_decoder_)
        self.transformer_decoder = Mask2FormerTransformerDecoder(**transformer_decoder)
        self.decoder_embed_dims = self.transformer_decoder.embed_dims
        # 1x1 projections per feature level when widths differ (or forced).
        self.decoder_input_projs = ModuleList()
        for _ in range(num_transformer_feat_level):
            if ((self.decoder_embed_dims != feat_channels) or enforce_decoder_input_project):
                self.decoder_input_projs.append(Conv2d(feat_channels, self.decoder_embed_dims, kernel_size=1))
            else:
                self.decoder_input_projs.append(nn.Identity())
        self.decoder_positional_encoding = SinePositionalEncoding(**positional_encoding)
        # Learned query positional embeddings and initial query features.
        self.query_embed = nn.Embedding(self.num_queries, feat_channels)
        self.query_feat = nn.Embedding(self.num_queries, feat_channels)
        # One learned embedding per feature level.
        self.level_embed = nn.Embedding(self.num_transformer_feat_level, feat_channels)
        # +1 class for the "no object" background slot.
        self.cls_embed = nn.Linear(feat_channels, (self.num_classes + 1))
        self.mask_embed = nn.Sequential(nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels))
        self.test_cfg = test_cfg
        self.train_cfg = train_cfg
        if train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            self.sampler = TASK_UTILS.build(self.train_cfg['sampler'], default_args=dict(context=self))
            # Point-sampling hyper-parameters for the mask losses.
            self.num_points = self.train_cfg.get('num_points', 12544)
            self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0)
            self.importance_sample_ratio = self.train_cfg.get('importance_sample_ratio', 0.75)
        self.class_weight = loss_cls.class_weight
        self.loss_cls = MODELS.build(loss_cls)
        self.loss_mask = MODELS.build(loss_mask)
        self.loss_dice = MODELS.build(loss_dice)

    def init_weights(self) -> None:
        """Initialise input projections, pixel decoder and transformer decoder."""
        for m in self.decoder_input_projs:
            if isinstance(m, Conv2d):
                caffe2_xavier_init(m, bias=0)
        self.pixel_decoder.init_weights()
        for p in self.transformer_decoder.parameters():
            if (p.dim() > 1):
                nn.init.xavier_normal_(p)

    def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> Tuple[Tensor]:
        """Match queries to GT for one image via point-sampled mask costs.

        Returns labels, label weights, mask targets/weights, positive and
        negative indices, and the sampling result.
        """
        gt_labels = gt_instances.labels
        gt_masks = gt_instances.masks
        num_queries = cls_score.shape[0]
        num_gts = gt_labels.shape[0]
        # Assignment cost is computed on a shared set of random points, not
        # full-resolution masks, to save memory.
        point_coords = torch.rand((1, self.num_points, 2), device=cls_score.device)
        mask_points_pred = point_sample(mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1)
        gt_points_masks = point_sample(gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, 1)).squeeze(1)
        sampled_gt_instances = InstanceData(labels=gt_labels, masks=gt_points_masks)
        sampled_pred_instances = InstanceData(scores=cls_score, masks=mask_points_pred)
        assign_result = self.assigner.assign(pred_instances=sampled_pred_instances, gt_instances=sampled_gt_instances, img_meta=img_meta)
        pred_instances = InstanceData(scores=cls_score, masks=mask_pred)
        sampling_result = self.sampler.sample(assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        # Unmatched queries get the background label (== num_classes).
        labels = gt_labels.new_full((self.num_queries,), self.num_classes, dtype=torch.long)
        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        label_weights = gt_labels.new_ones((self.num_queries,))
        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]
        # Only positive queries contribute to the mask losses.
        mask_weights = mask_pred.new_zeros((self.num_queries,))
        mask_weights[pos_inds] = 1.0
        return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds, sampling_result)

    def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Tuple[Tensor]:
        """Compute classification, mask-BCE and dice losses for one decoder layer."""
        num_imgs = cls_scores.size(0)
        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
        (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) = self.get_targets(cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas)
        labels = torch.stack(labels_list, dim=0)
        label_weights = torch.stack(label_weights_list, dim=0)
        mask_targets = torch.cat(mask_targets_list, dim=0)
        mask_weights = torch.stack(mask_weights_list, dim=0)
        # Flatten (batch, query) for the classification loss.
        cls_scores = cls_scores.flatten(0, 1)
        labels = labels.flatten(0, 1)
        label_weights = label_weights.flatten(0, 1)
        class_weight = cls_scores.new_tensor(self.class_weight)
        loss_cls = self.loss_cls(cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum())
        # Average over the (distributed-mean) number of positive masks.
        num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor]))
        num_total_masks = max(num_total_masks, 1)
        mask_preds = mask_preds[(mask_weights > 0)]
        if (mask_targets.shape[0] == 0):
            # No positives: keep zero-valued losses attached to the graph.
            loss_dice = mask_preds.sum()
            loss_mask = mask_preds.sum()
            return (loss_cls, loss_mask, loss_dice)
        with torch.no_grad():
            # Sample uncertainty-weighted points for the mask losses.
            points_coords = get_uncertain_point_coords_with_randomness(mask_preds.unsqueeze(1), None, self.num_points, self.oversample_ratio, self.importance_sample_ratio)
            mask_point_targets = point_sample(mask_targets.unsqueeze(1).float(), points_coords).squeeze(1)
        mask_point_preds = point_sample(mask_preds.unsqueeze(1), points_coords).squeeze(1)
        loss_dice = self.loss_dice(mask_point_preds, mask_point_targets, avg_factor=num_total_masks)
        mask_point_preds = mask_point_preds.reshape((- 1))
        mask_point_targets = mask_point_targets.reshape((- 1))
        loss_mask = self.loss_mask(mask_point_preds, mask_point_targets, avg_factor=(num_total_masks * self.num_points))
        return (loss_cls, loss_mask, loss_dice)

    def _forward_head(self, decoder_out: Tensor, mask_feature: Tensor, attn_mask_target_size: Tuple[(int, int)]) -> Tuple[Tensor]:
        """Predict class scores, masks, and the next layer's attention mask."""
        decoder_out = self.transformer_decoder.post_norm(decoder_out)
        cls_pred = self.cls_embed(decoder_out)
        mask_embed = self.mask_embed(decoder_out)
        # Per-query masks as dot products with the per-pixel mask features.
        mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)
        attn_mask = F.interpolate(mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False)
        # Expand to one mask per attention head; True == position is masked out.
        attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat((1, self.num_heads, 1, 1)).flatten(0, 1)
        attn_mask = (attn_mask.sigmoid() < 0.5)
        attn_mask = attn_mask.detach()
        return (cls_pred, mask_pred, attn_mask)

    def forward(self, x: List[Tensor], batch_data_samples: SampleList) -> Tuple[List[Tensor]]:
        """Run pixel decoder + masked-attention decoder layers.

        Returns per-layer lists of class predictions and mask predictions
        (the initial head output plus one entry per decoder layer).
        """
        batch_img_metas = [data_sample.metainfo for data_sample in batch_data_samples]
        batch_size = len(batch_img_metas)
        (mask_features, multi_scale_memorys) = self.pixel_decoder(x)
        decoder_inputs = []
        decoder_positional_encodings = []
        for i in range(self.num_transformer_feat_level):
            decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])
            # (b, c, h, w) -> (b, h*w, c) token sequence, plus a level embedding.
            decoder_input = decoder_input.flatten(2).permute(0, 2, 1)
            level_embed = self.level_embed.weight[i].view(1, 1, (- 1))
            decoder_input = (decoder_input + level_embed)
            # All positions valid -> all-False padding mask for the encoding.
            mask = decoder_input.new_zeros(((batch_size,) + multi_scale_memorys[i].shape[(- 2):]), dtype=torch.bool)
            decoder_positional_encoding = self.decoder_positional_encoding(mask)
            decoder_positional_encoding = decoder_positional_encoding.flatten(2).permute(0, 2, 1)
            decoder_inputs.append(decoder_input)
            decoder_positional_encodings.append(decoder_positional_encoding)
        query_feat = self.query_feat.weight.unsqueeze(0).repeat((batch_size, 1, 1))
        query_embed = self.query_embed.weight.unsqueeze(0).repeat((batch_size, 1, 1))
        cls_pred_list = []
        mask_pred_list = []
        # Predictions from the raw learned queries (layer "0").
        (cls_pred, mask_pred, attn_mask) = self._forward_head(query_feat, mask_features, multi_scale_memorys[0].shape[(- 2):])
        cls_pred_list.append(cls_pred)
        mask_pred_list.append(mask_pred)
        for i in range(self.num_transformer_decoder_layers):
            # Decoder layers cycle through the feature levels.
            level_idx = (i % self.num_transformer_feat_level)
            # A fully-masked query would see nothing; unmask it entirely.
            attn_mask[torch.where((attn_mask.sum((- 1)) == attn_mask.shape[(- 1)]))] = False
            layer = self.transformer_decoder.layers[i]
            query_feat = layer(query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], cross_attn_mask=attn_mask, query_key_padding_mask=None, key_padding_mask=None)
            (cls_pred, mask_pred, attn_mask) = self._forward_head(query_feat, mask_features, multi_scale_memorys[((i + 1) % self.num_transformer_feat_level)].shape[(- 2):])
            cls_pred_list.append(cls_pred)
            mask_pred_list.append(mask_pred)
        return (cls_pred_list, mask_pred_list)
def rouge_score(preds, golds):
    """Compute corpus-mean ROUGE-1/2/L F-measures over paired token sequences.

    Each element of ``preds``/``golds`` is a sequence of tokens that is joined
    with spaces before scoring.
    """
    per_metric = {'rouge1': [], 'rouge2': [], 'rougeL': []}
    for pred_tokens, gold_tokens in zip(preds, golds):
        res = rougeScore(' '.join(pred_tokens), ' '.join(gold_tokens))
        per_metric['rouge1'].append(res['rouge1_fmeasure'])
        per_metric['rouge2'].append(res['rouge2_fmeasure'])
        per_metric['rougeL'].append(res['rougeL_fmeasure'])
    return {name: np.mean(vals) for name, vals in per_metric.items()}
def clustered_broadcast(Y, groups, counts, factors, X=None):
    """Broadcast per-cluster values ``Y`` back out to per-position tensor ``X``.

    Dispatches to a CPU or CUDA kernel depending on where ``Y`` lives.

    Args:
        Y: per-cluster values, shape (N, H, C, E).
        groups: cluster assignment per position (its shape defines X's leading dims).
        counts: members per cluster; used only on the GPU path for launch sizing.
        factors: per-element scaling handed to the kernels.
        X: optional preallocated output, shape (N, H, L, E); a zero-filled
           tensor matching groups.shape + (E,) is created when omitted.

    Returns:
        X with the broadcast values written in by the kernel.
    """
    device = Y.device
    if (X is None):
        X = torch.zeros((groups.shape + (Y.shape[(- 1)],)), device=device, dtype=Y.dtype)
    if (device.type == 'cpu'):
        broadcast_cpu(Y, groups, factors, X)
    else:
        (N, H, C, E) = Y.shape
        (_, _, L, _) = X.shape
        with torch.no_grad():
            threads = 256
            # NOTE(review): set_group presumably derives a grouping factor G
            # from (C, E) for the kernel launch -- confirm against the CUDA side.
            G = set_group(C, E)
            # Members per (batch, head, group), then blocks via ceil division.
            group_counts = counts.view(N, H, G, (- 1)).sum((- 1))
            block_counts = (((group_counts + threads) - 1) // threads)
            total_blocks = block_counts.sum().item()
            # Scratch index map filled in by the kernel (5 ints per block).
            indx_maps = torch.ones((total_blocks, 5), device=X.device, dtype=torch.int32)
            clustered_broadcast_gpu(Y, groups, factors, X, block_counts.int(), group_counts.int(), threads, G, total_blocks, indx_maps)
    return X
class NNCG():
    """Translate a sequential Keras CNN into standalone C code, optionally
    quantize/vectorize it (SSE3), compile it, and verify the generated binary
    against the Keras model layer by layer.
    """

    # Annotation-only class attributes; instances get their own values in
    # __init__. (BUG FIX: ``test_nodes`` was previously a shared mutable
    # class-level list.)
    root_node: Edge = None
    test_nodes: List[KerasLayerNode]

    def __init__(self):
        self.id = ''
        self.test_nodes = []
        self.testing = None
        self.model = None
        self.min_in = 0
        self.max_in = 0

    def keras_compile(self, imdb, model, code_path, identifier=None, image_mean=0,
                      arch='general', testing=(- 1), test_mode='error',
                      quatization=False, weights_method='direct'):
        """Build the code graph from ``model``, emit/compile C, and evaluate it.

        NOTE(review): the parameter name 'quatization' (sic) is kept for
        backward compatibility with existing callers.

        Args:
            imdb: iterable of input images used for quantization/testing.
            model: the Keras model to translate.
            code_path: output .cpp path, or a directory when identifier is set.
            identifier: optional name used to derive the output file name.
            image_mean: mean subtracted from inputs by the generated code.
            arch: 'general' or 'sse3'.
            testing: number of images to test; 0 = none, -1 = all of imdb.
            test_mode: 'error' (abort on mismatch), 'classification' or 'regression'.
            quatization: quantize to uint8 (sse3 only).
            weights_method: how weights are embedded in the generated code.

        Returns:
            Percentage of tested images whose classification matched (only
            meaningful for test_mode='classification').
        """
        self.testing = testing
        if identifier is not None:
            path = code_path + '/cnn_' + identifier + '.cpp'
        else:
            path = code_path
        exe_return_filename = 'result.txt'
        self.model = model
        input_shape = model.layers[0].input.shape[1:].as_list()
        STEPS = 7
        print_progress_bar(0, STEPS, prefix='Adding CNN nodes to graph')
        self.root_node = Edge('root', CHeaderNode(identifier, input_shape, weights_method), None, 'forward')
        cur_node = MeanNode(image_mean, self.root_node.target)
        cur_node = self.add_test_node(cur_node, None)
        # Translate each supported layer type into graph nodes.
        for i, layer in enumerate(model.layers):
            if (type(layer) == Convolution2D) or (type(layer) == kl.convolutional.Conv2D):
                cur_node = self.add_conv2d(layer, cur_node)
            elif (type(layer) == MaxPooling2D) or (type(layer) == kl.pooling.MaxPooling2D):
                cur_node = self.add_maxpool2d(layer, cur_node)
            elif type(layer) == LeakyReLU:
                pass  # handled as part of the preceding layer's activation
            elif (type(layer) == Dense) or (type(layer) == kl.core.Dense):
                cur_node = self.add_dense(layer, cur_node)
            elif (type(layer) == Flatten) or (type(layer) == kl.core.Flatten):
                cur_node = self.add_flatten(cur_node)
            elif (type(layer) == Dropout) or (type(layer) == kl.Dropout):
                pass  # inference-time no-op
            elif type(layer) == InputLayer:
                pass
            elif type(layer) == BatchNormalization:
                print('Warning: BatchNormalization not implemented')
            else:
                print('Unknown layer: ' + str(type(layer)))
                sys.exit(1)
        CFooterNode(exe_return_filename, weights_method, cur_node)
        print_progress_bar(1, STEPS, prefix='Quantization')
        if quatization:
            if arch == 'sse3':
                self.quantize(imdb, 'uint8')
        print_progress_bar(2, STEPS, prefix='Lowering')
        self.abstract_to_c()
        print_progress_bar(3, STEPS, prefix='Optimization')
        if arch == 'general':
            pass
        elif arch == 'sse3':
            if quatization:
                self.to_quantized_sse3()
                self.to_sse3()
            else:
                self.to_sse3()
        print_progress_bar(4, STEPS, prefix='Writing C code')
        self.write_c(path)
        print_progress_bar(5, STEPS, prefix='Compiling')
        if os.system(compiler_check) == 1:
            print('Compiler not found, not checking code file --> Finished.')
            sys.exit(0)
        if testing == 0:
            print_progress_bar(STEPS, STEPS, prefix='Finished')
            sys.exit(0)
        if testing == (- 1):
            testing = len(imdb)
        print_progress_bar(6, STEPS, prefix='Compiling')
        compile(path, optimize=False)
        print_progress_bar(STEPS, STEPS, prefix='Finished')
        tested = 0
        fail = 0
        # BUG FIX: initialize the returned accuracy so an empty imdb no longer
        # raises NameError at the final return.
        err = 100.0
        for im in np.random.permutation(imdb):
            if tested > testing:
                print('\nTest finished.')
                break
            # The generated binary reads its input from img.bin.
            im.astype('float32').tofile('img.bin')
            im = im.reshape(1, *im.shape)
            if os.name == 'nt':
                res = os.system(path[:path.rfind('.')])
            else:
                # POSIX: exit status lives in the high byte of os.system's return.
                res = os.system('./' + path[:path.rfind('.')]) >> 8
            assert res == 0
            res = 0
            c_res = 0
            res_list = []
            c_res_list = []
            # Compare every instrumented layer's Keras vs C output.
            for n in self.test_nodes:
                (res, c_res) = n.test(im, exit_on_err=(test_mode == 'error'))
                res_list.append(res)
                c_res_list.append(c_res)
            tested += 1
            if test_mode == 'classification':
                if np.argmax(res) != np.argmax(c_res):
                    fail += 1
            elif test_mode == 'regression':
                pass
            err = ((tested - fail) / tested) * 100
            percent_err = '{:.1f}'.format(err)
            err_overview = str(['{:.3f}'.format(np.max((a - b))) for (a, b) in zip(res_list, c_res_list)])
            print_progress_bar(tested, testing, suffix=(', ' + percent_err + '% ok, errors: ' + err_overview), prefix='Evaluating')
        Allocation.reset()
        CHeaderNode.instance().reset()
        return err

    def quantize(self, imdb, required_dtype):
        """Run the quantization pass over the whole graph."""
        action = QuantizeAction(imdb, required_dtype)
        self.root_node.traverse(action)

    @staticmethod
    def get_feature_value_range(imdb, model):
        """Predict ``imdb`` through ``model`` and collect per-layer min/max.

        BUG FIX: declared @staticmethod -- the original had no ``self``
        parameter, so instance calls misbound arguments.
        """
        print('Predicting images to find min/max for quantization and others...', end='')
        max_in = [np.max(imdb)]
        min_in = [np.min(imdb)]
        outputs = [layer.output for layer in model.layers if not (type(layer) is InputLayer)]
        func = K.function([model.input, K.learning_phase()], outputs)
        layer_outs = func([np.array(imdb), 0])
        for l in layer_outs:
            max_in.append(np.max(l))
            min_in.append(np.min(l))
        print(' finished')
        return (min_in, max_in)

    def to_sse3(self):
        """Vectorize float MAC loops with SSE3 intrinsics (unroll factor 4)."""
        desired_unroll = 4
        node_type = MACNode
        NNCG.join_loops(self.root_node, desired_unroll, node_type)
        action = SearchNodeByType(node_type)
        self.root_node.traverse(action)
        for r in action.result:
            loop_to_unroll = r[(- 2)]
            if loop_to_unroll.step == 1:
                loop_to_unroll.unroll(desired_unroll)
        action = SearchNodeByType(UnrolledOperation)
        self.root_node.traverse(action)
        CHeaderNode.instance().intel_intr_required = True
        for r in action.result:
            u: UnrolledOperation = r[(- 1)]
            node = u.get_node('content')
            if type(node) is LoopNode:
                node = node.get_node('content')
            if type(node) is MACNode:
                if MACNodeSSE3.applicable(node):
                    MACNodeSSE3.apply(node)

    def to_quantized_sse3(self):
        """Vectorize int8 MAC loops with SSE3 intrinsics (unroll factor 16)."""
        conv_search = SearchNodeByType(Conv2DNode)
        # Also traverse 'alternative' edges so quantized variants are visited.
        incl_alternatives = (lambda n: (n.name_equal('next') or n.name_equal('content') or (n.n_type == 'alternative')))
        conv_search.traverse_edges = incl_alternatives
        self.root_node.traverse(conv_search)
        for n in [r[(- 1)] for r in conv_search.result]:
            if MACNodeInt8SSE3.applicable(n):
                desired_unroll = 16
                node_type = MACNode
                mac_search = SearchNodeByType(node_type)
                n.traverse(mac_search)
                r = mac_search.result[0]
                # Reorder data layout and loop nesting for the int8 kernel.
                r[(- 1)].get_node('var1').transpose([0, 1, 3, 2])
                r[(- 4)].add_edge('content', r[(- 2)], replace=True)
                r[(- 2)].add_edge('content', r[(- 3)], replace=True)
                r[(- 3)].add_edge('content', r[(- 1)], replace=True)
                mac_search = SearchNodeByType(node_type)
                n.traverse(mac_search)
                for r in mac_search.result:
                    loop_to_unroll = r[(- 2)]
                    loop_to_unroll.unroll(desired_unroll)
        mac_search = SearchNodeByType(UnrolledOperation)
        mac_search.traverse_edges = incl_alternatives
        self.root_node.traverse(mac_search)
        CHeaderNode.instance().intel_intr_required = True
        for r in mac_search.result:
            u: UnrolledOperation = r[(- 1)]
            node = u.get_node('content').get_node('content')
            # BUG FIX: this was ``if type((node is MACNode)):`` -- i.e.
            # ``type(False)``/``type(True)``, which is always truthy, so the
            # int8 rewrite ran on every unrolled operation regardless of type.
            if type(node) is MACNode:
                if MACNodeInt8SSE3.applicable(node):
                    MACNodeInt8SSE3.apply(node)
                    # Select the quantized alternative branch in the graph.
                    last_qn = [i for i in range(len(r)) if (type(r[i]) is QuantizedNode)][(- 1)]
                    source_node = r[last_qn]
                    target_node = r[(last_qn + 1)]
                    source_node.select(target_node)

    @staticmethod
    def join_loops(start_node, desired_unroll, node_type):
        """Merge nested loops around each ``node_type`` until the combined trip
        count reaches ``desired_unroll``.

        BUG FIX: declared @staticmethod (no ``self`` in the original).
        """
        action = SearchNodeByType(node_type)
        start_node.traverse(action)
        mac_instances = []
        for r in action.result:
            r.pop()
            cur_list = []
            mac_instances.append(cur_list)
            # Collect the contiguous chain of enclosing loops (innermost first).
            for _node in reversed(r):
                if type(_node) is not LoopNode:
                    break
                cur_list.append(_node)
        for mi in mac_instances:
            depth = 0
            loops_to_join = []
            for _mac in mi:
                depth += _mac.stop
                loops_to_join.append(_mac)
                if depth >= desired_unroll:
                    break
            mac_instances[mac_instances.index(mi)] = loops_to_join
        for i in mac_instances:
            root_loop = i[(- 1)]
            while type(root_loop.get_node('content')) is LoopNode:
                root_loop = root_loop.deep_join()

    def add_conv2d(self, layer: Convolution2D, prev_node) -> Node:
        """Append a Conv2D node (plus activation and optional test node)."""
        w = K.eval(layer.weights[0])
        b = K.eval(layer.bias)
        strides = layer.strides
        padding = layer.padding
        activation = layer.activation
        cur_node = Conv2DNode(w, b, strides, padding, prev_node)
        cur_node = self.add_activation(activation, cur_node)
        if self.testing != 0:
            cur_node = self.add_test_node(cur_node, layer)
        return cur_node

    def write_c(self, path):
        """Collect variables and emit the C source file at ``path``."""
        action = CollectVars(self.root_node.target)
        self.root_node.traverse(action)
        a = WriteCAction(path)
        self.root_node.traverse(a)

    def add_activation(self, activation, prev_node) -> Node:
        """Append the node matching a Keras activation function (by __name__)."""
        if activation.__name__ == 'relu':
            # ReLU is LeakyReLU with alpha == 0.
            prev_node = self.add_leaky_relu(0, prev_node)
        elif activation.__name__ == 'softmax':
            prev_node = self.add_softmax(prev_node)
        elif activation.__name__ == 'sigmoid':
            prev_node = self.add_sigmoid(prev_node)
        return prev_node

    # BUG FIX: the four helpers below had no ``self`` parameter but were
    # invoked as ``self.add_*`` -- a TypeError at runtime. They are now
    # proper @staticmethods, which keeps both NNCG.add_*() and instance
    # call sites working.
    @staticmethod
    def add_softmax(prev_node) -> SoftmaxNode:
        return SoftmaxNode(prev_node)

    @staticmethod
    def add_sigmoid(prev_node) -> SigmoidNode:
        return SigmoidNode(prev_node)

    @staticmethod
    def add_flatten(prev_node) -> FlattenNode:
        return FlattenNode(prev_node)

    @staticmethod
    def add_leaky_relu(alpha, prev_node):
        return LeakyReLUNode(alpha, prev_node)

    def add_maxpool2d(self, layer: MaxPooling2D, prev_node) -> MaxPoolingNode:
        """Append a max-pooling node (plus optional test node)."""
        size = layer.pool_size
        stride = layer.strides
        cur_node = MaxPoolingNode(size, stride, prev_node)
        if self.testing != 0:
            cur_node = self.add_test_node(cur_node, layer)
        return cur_node

    def add_dense(self, layer: Dense, prev_node) -> Node:
        """Append a dense node (plus activation and optional test node)."""
        w = K.eval(layer.weights[0])
        b = K.eval(layer.bias)
        activation = layer.activation
        cur_node = DenseNode(w, b, prev_node)
        cur_node = self.add_activation(activation, cur_node)
        if self.testing != 0:
            cur_node = self.add_test_node(cur_node, layer)
        return cur_node

    def add_test_node(self, prev_node, layer):
        """Instrument the graph with a node comparing C output to Keras output."""
        if layer is None:
            func = None
            name = 'input'
        else:
            func = K.function([self.model.input], [layer.output])
            name = layer.name
        n = KerasLayerNode(prev_node, func, name)
        self.test_nodes.append(n)
        return n

    def abstract_to_c(self):
        """Lower the abstract graph nodes into C-level constructs."""
        action = LowerAction()
        self.root_node.traverse(action)
def get_scheduler(args):
    """Build the learning-rate scheduler selected by ``args.sched``.

    Args:
        args: namespace with at least a ``sched`` attribute.

    Raises:
        NotImplementedError: if ``args.sched`` names an unknown scheduler.
    """
    if args.sched in ('multistep', 'cosine', 'linear', 'exponential', 'uneven_multistep'):
        return LrScheduler(args)
    # BUG FIX: the message formatted ``args.scheduler`` (nonexistent
    # attribute), so the error path raised AttributeError instead of the
    # intended NotImplementedError; it also omitted 'uneven_multistep'.
    raise NotImplementedError(
        'The scheduler {} is not implemented! Please choose from '
        '[multistep, cosine, linear, exponential, uneven_multistep]'.format(args.sched))
class MyModelCannotComputeOutputShape(tf.keras.Model):
    """Test fixture: a minimal Keras model whose ``compute_output_shape``
    always fails, exercising callers' fallback paths."""

    def __init__(self):
        super().__init__()
        # A single dense layer; the model is otherwise unremarkable.
        self.dense = tf.keras.layers.Dense(4, activation=tf.nn.relu)

    def call(self, inputs):
        outputs = self.dense(inputs)
        return outputs

    def compute_output_shape(self, input_shape):
        # Deliberately unimplemented.
        raise NotImplementedError()
class VehiclePIDController():
    """Combined longitudinal (throttle/brake) and lateral (steering) PID
    control for a CARLA vehicle, with output clamping and a per-step steering
    rate limit.
    """

    def __init__(self, vehicle, args_lateral, args_longitudinal, offset=0,
                 max_throttle=0.75, max_brake=0.3, max_steering=0.8):
        """
        Args:
            vehicle: the CARLA actor to control.
            args_lateral: kwargs for the lateral PID controller.
            args_longitudinal: kwargs for the longitudinal PID controller.
            offset: lateral offset from the waypoint center line.
            max_throttle: throttle clamp (0..1).
            max_brake: brake clamp (0..1).
            max_steering: absolute steering clamp.
        """
        self.max_brake = max_brake
        self.max_throt = max_throttle
        self.max_steer = max_steering
        self._vehicle = vehicle
        self._world = self._vehicle.get_world()
        # Remember the current steering so we can rate-limit changes.
        self.past_steering = self._vehicle.get_control().steer
        self._lon_controller = PIDLongitudinalController(self._vehicle, **args_longitudinal)
        self._lat_controller = PIDLateralController(self._vehicle, offset, **args_lateral)

    def run_step(self, target_speed, waypoint):
        """Compute one control step toward ``target_speed`` and ``waypoint``.

        Returns:
            carla.VehicleControl with throttle/brake/steer set.
        """
        acceleration = self._lon_controller.run_step(target_speed)
        current_steering = self._lat_controller.run_step(waypoint)
        control = carla.VehicleControl()
        # Positive command -> throttle; negative -> brake.
        if acceleration >= 0.0:
            control.throttle = min(acceleration, self.max_throt)
            control.brake = 0.0
        else:
            control.throttle = 0.0
            control.brake = min(abs(acceleration), self.max_brake)
        # Limit steering change to 0.1 per step to avoid jerky motion.
        if current_steering > (self.past_steering + 0.1):
            current_steering = self.past_steering + 0.1
        elif current_steering < (self.past_steering - 0.1):
            current_steering = self.past_steering - 0.1
        # Clamp to the absolute steering limit on either side.
        if current_steering >= 0:
            steering = min(self.max_steer, current_steering)
        else:
            steering = max((- self.max_steer), current_steering)
        control.steer = steering
        control.hand_brake = False
        control.manual_gear_shift = False
        self.past_steering = steering
        return control

    def change_longitudinal_PID(self, args_longitudinal):
        """Update the longitudinal controller gains."""
        self._lon_controller.change_parameters(**args_longitudinal)

    def change_lateral_PID(self, args_lateral):
        """Update the lateral controller gains.

        BUG FIX: this previously forwarded the new gains to the
        *longitudinal* controller, silently leaving steering gains unchanged.
        """
        self._lat_controller.change_parameters(**args_lateral)
# NOTE(review): this class uses dataclasses-style ``field()`` defaults but no
# visible @dataclass decorator -- the decorator was almost certainly lost in
# extraction. Without it, these field() objects remain plain class
# attributes. Confirm and restore '@dataclass' upstream.
class ModelArguments():
    # Path or hub identifier of the base model to load (no default -> required
    # under @dataclass semantics).
    model_name_or_path: str = field()
    # Name for a newly created adapter; None means no new adapter.
    new_adapter_name: str = field(default=None)
def test_target_pipe(X_iris, y_iris) -> None:
    """Smoke test: a pipeline with a target confound-removal step fits cleanly."""
    X_types = {
        'continuous': ['sepal_length', 'sepal_width', 'petal_length'],
        'confounds': ['petal_width'],
    }
    # Target transformer: remove confound influence from y.
    target_pipeline = TargetPipelineCreator()
    target_pipeline = target_pipeline.add('confound_removal', confounds=['confounds', 'continuous'])
    # Main pipeline: target transform followed by an SVM with a small C grid.
    creator = PipelineCreator(problem_type='regression')
    creator = creator.add(target_pipeline, apply_to='target')
    creator = creator.add('svm', C=[1, 2])
    pipe = creator.to_pipeline(X_types, search_params={'kind': 'random'})
    pipe.fit(X_iris, y_iris)
class DeepONet(NN):
    """Deep operator network: a branch net encodes the sampled input function
    and a trunk net encodes evaluation locations; the output is their per-sample
    inner product plus a trainable scalar bias.

    ``layer_sizes_branch[1]`` may be a callable, in which case it is used
    directly as the branch network instead of building an FNN.
    """

    def __init__(self, layer_sizes_branch, layer_sizes_trunk, activation, kernel_initializer):
        super().__init__()
        # Activations may be given separately for branch and trunk via a dict.
        if isinstance(activation, dict):
            activation_branch = activations.get(activation['branch'])
            self.activation_trunk = activations.get(activation['trunk'])
        else:
            activation_branch = self.activation_trunk = activations.get(activation)
        if callable(layer_sizes_branch[1]):
            # User-supplied branch network.
            self.branch = layer_sizes_branch[1]
        else:
            self.branch = FNN(layer_sizes_branch, activation_branch, kernel_initializer)
        self.trunk = FNN(layer_sizes_trunk, self.activation_trunk, kernel_initializer)
        # Trainable scalar bias added to the final inner product.
        self.b = torch.nn.parameter.Parameter(torch.tensor(0.0))

    def forward(self, inputs):
        x_func = inputs[0]  # sampled input-function values (branch input)
        x_loc = inputs[1]   # evaluation coordinates (trunk input)
        x_func = self.branch(x_func)
        # Input transform (set on the NN base class) applies to trunk input only.
        if (self._input_transform is not None):
            x_loc = self._input_transform(x_loc)
        x_loc = self.activation_trunk(self.trunk(x_loc))
        if (x_func.shape[(- 1)] != x_loc.shape[(- 1)]):
            raise AssertionError('Output sizes of branch net and trunk net do not match.')
        # Per-sample inner product of branch and trunk feature vectors.
        x = torch.einsum('bi,bi->b', x_func, x_loc)
        x = torch.unsqueeze(x, 1)
        x += self.b
        if (self._output_transform is not None):
            x = self._output_transform(inputs, x)
        return x
class QueryDataset(Dataset):
    """Dataset of queries that have at least one sufficiently relevant document.

    Raw string qids/docids from the query and qrel files are mapped to integer
    offsets: doc offsets follow ``index_doc_ids`` order, query offsets follow
    line order in ``query_path``.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, qrel_path: str, query_path: str, max_query_len: int, index_doc_ids: np.ndarray, rel_threshold=1, verbose=True):
        super().__init__()
        self.tokenizer = tokenizer
        # docid (as string) -> position in the index.
        docid2offset = dict(((str(docid), idx) for (idx, docid) in enumerate(index_doc_ids)))
        (self.queries, qid2offset) = ([], dict())
        # Query file format: "<qid>\t<query text>" per line.
        for (idx, line) in enumerate(tqdm(open(query_path), disable=(not verbose), mininterval=10)):
            (qid, query) = line.split('\t')
            qid2offset[qid] = idx
            self.queries.append(query.strip())
        # qrel format (TREC-style): "<qid> <ignored> <docid> <relevance>".
        self.qrels = defaultdict(list)
        for line in tqdm(open(qrel_path), disable=(not verbose), mininterval=10):
            (qid, _, docid, rel) = line.split()
            # Keep only judgments at or above the relevance threshold.
            if (int(rel) >= rel_threshold):
                qoffset = qid2offset[qid]
                docoffset = docid2offset[docid]
                self.qrels[qoffset].append(docoffset)
        # Only queries with at least one qualifying judgment are kept.
        self.qids = sorted(self.qrels.keys())
        self.max_query_len = max_query_len
        self.qrels = dict(self.qrels)

    def get_qrels(self):
        """Return mapping of query offset -> list of relevant doc offsets."""
        return self.qrels

    def __len__(self):
        return len(self.qids)

    def __getitem__(self, index):
        qid = self.qids[index]
        query = self.queries[qid]
        data = {'query': query, 'qid': qid}
        return data
def pixelAccuracy(imPred, imLab):
    """Per-image pixel accuracy over labeled pixels.

    Pixels with a label < 0 are treated as unlabeled and excluded from both
    the numerator and the denominator.

    :return: (accuracy, correct-pixel count, labeled-pixel count)
    """
    valid = imLab >= 0
    labeled = np.sum(valid)
    # Count matches only where a ground-truth label exists.
    correct = np.sum((imPred == imLab) * valid)
    accuracy = 1.0 * correct / labeled
    return (accuracy, correct, labeled)
class DynamicGraphConvolution(nn.Module):
    """Static + dynamic graph convolution over node features.

    Tensors are laid out as (batch, features, nodes). A learned static
    adjacency is applied first; then an input-conditioned (dynamic) adjacency
    is built from a global descriptor concatenated with local features and
    applied in a second pass.
    """

    def __init__(self, in_features, out_features, num_nodes):
        super(DynamicGraphConvolution, self).__init__()
        # Static adjacency mixes across the node dimension; static_weight then
        # maps features to out_features node-wise.
        self.static_adj = nn.Sequential(nn.Conv1d(num_nodes, num_nodes, 1, bias=False), nn.LeakyReLU(0.2))
        self.static_weight = nn.Sequential(nn.Conv1d(in_features, out_features, 1), nn.LeakyReLU(0.2))
        self.gap = nn.AdaptiveAvgPool1d(1)
        self.conv_global = nn.Conv1d(in_features, in_features, 1)
        self.bn_global = nn.BatchNorm1d(in_features)
        self.relu = nn.LeakyReLU(0.2)
        # Produces one adjacency row per node from [global ; local] features.
        self.conv_create_co_mat = nn.Conv1d((in_features * 2), num_nodes, 1)
        self.dynamic_weight = nn.Conv1d(in_features, out_features, 1)

    def forward_static_gcn(self, x):
        # Transpose so Conv1d mixes across nodes, then back for the weights.
        x = self.static_adj(x.transpose(1, 2))
        x = self.static_weight(x.transpose(1, 2))
        return x

    def forward_construct_dynamic_graph(self, x):
        # Global descriptor, broadcast to every node position.
        x_glb = self.gap(x)
        x_glb = self.conv_global(x_glb)
        x_glb = self.bn_global(x_glb)
        x_glb = self.relu(x_glb)
        x_glb = x_glb.expand(x_glb.size(0), x_glb.size(1), x.size(2))
        x = torch.cat((x_glb, x), dim=1)
        dynamic_adj = self.conv_create_co_mat(x)
        # Sigmoid keeps adjacency entries in (0, 1).
        dynamic_adj = torch.sigmoid(dynamic_adj)
        return dynamic_adj

    def forward_dynamic_gcn(self, x, dynamic_adj):
        # Propagate features through the input-conditioned adjacency.
        x = torch.matmul(x, dynamic_adj)
        x = self.relu(x)
        x = self.dynamic_weight(x)
        x = self.relu(x)
        return x

    def forward(self, x):
        # Residual static pass, then the dynamic pass on its output.
        out_static = self.forward_static_gcn(x)
        x = (x + out_static)
        dynamic_adj = self.forward_construct_dynamic_graph(x)
        x = self.forward_dynamic_gcn(x, dynamic_adj)
        return x
def attack_success(cleancrop, x, initial_pic, target_class, searchspace, sticker, opstickercv, magnification, zstore, facemask, targeted_attack=False):
    """Check whether the perturbation parameters ``x`` yield a successful attack.

    Targeted: success when the predicted class equals ``target_class``.
    Untargeted: success when the predicted class differs from ``target_class``.
    In both cases the rendered sticker must be valid (``valid[0] == 1``).

    :return: True on success, False otherwise.
    """
    (attack_image, valid) = predict.perturb_image(x, initial_pic, sticker, opstickercv, magnification, zstore, searchspace, facemask)
    # Dispatch to the predictor for the configured threat model via getattr
    # instead of eval() — same behavior, no arbitrary-code-execution risk.
    predict_fn = getattr(predict, 'predict_type_{}'.format(threat_model))
    (rank, _) = predict_fn(attack_image, cleancrop)
    predicted_class = rank[0][0]
    if targeted_attack:
        success = (predicted_class == target_class)
    else:
        success = (predicted_class != target_class)
    if success and (valid[0] == 1):
        return True
    # Explicit falsy return instead of an implicit None.
    return False
def load_tf_weights_in_gpt_neo(*args, **kwargs):
    """Stub that raises a helpful error unless the torch backend is installed."""
    # Delegates the backend check to the shared helper; never returns normally
    # without torch available.
    requires_backends(load_tf_weights_in_gpt_neo, ['torch'])
class _append_return_to_pipe(object): def __init__(self, func): self.func = func def __call__(self, queue, *args, **kwargs): res = self.func(*args, **kwargs) queue.put(res)
def test_tuple():
    """Tuple annotations: exact element types up to length 3, then a
    homogenized 'Tuple[T, ...]' using the common numeric type."""
    cases = [
        ((), 'Tuple'),
        ((3, 4), 'Tuple[int, int]'),
        ((3, 4, 5.0), 'Tuple[int, int, float]'),
        ((3, 4, 5, 6), 'Tuple[int, ...]'),
        ((3, 4, 5, 6.0), 'Tuple[float, ...]'),
    ]
    for obj, expected in cases:
        assert _make_annotation_str_for_obj(obj) == expected
def test_stochastic_network_4(net):
    """A connection added with rate=0.0 must never materialize as an edge,
    neither initially nor after resampling connectivity."""
    net.add_connections_between(['A'], ['B'], rate=0.0)
    for resample in (False, True):
        if resample:
            net.resample_connectivity()
        assert not net.graph.has_edge('A', 'B')
def playback_dataset(args):
    """Play back demonstrations from an hdf5 dataset.

    Depending on ``args`` this either re-renders trajectories by stepping an
    environment through recorded states/actions, or replays stored image
    observations directly. Output goes to an on-screen renderer and/or a video
    file (mutually exclusive).
    """
    write_video = (args.video_path is not None)
    # On-screen rendering and video writing are mutually exclusive.
    assert (not (args.render and write_video))
    if (args.render_image_names is None):
        # Fall back to the default camera set for this environment type.
        env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset)
        env_type = EnvUtils.get_env_type(env_meta=env_meta)
        args.render_image_names = DEFAULT_CAMERAS[env_type]
    if args.render:
        # On-screen rendering supports a single camera only.
        assert (len(args.render_image_names) == 1)
    if args.use_obs:
        assert write_video, 'playback with observations can only write to video'
        assert (not args.use_actions), 'playback with observations is offline and does not support action playback'
    if (not args.use_obs):
        # Minimal observation spec: an env is only needed for state playback.
        dummy_spec = dict(obs=dict(low_dim=['robot0_eef_pos'], rgb=[]))
        ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec)
        env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset)
        env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render=args.render, render_offscreen=write_video)
        # Robosuite envs additionally need the per-episode model XML below.
        is_robosuite_env = EnvUtils.is_robosuite_env(env_meta)
    f = h5py.File(args.dataset, 'r')
    if (args.filter_key is not None):
        # Restrict playback to the demos listed under this mask.
        print('using filter key: {}'.format(args.filter_key))
        demos = [elem.decode('utf-8') for elem in np.array(f['mask/{}'.format(args.filter_key)])]
    else:
        demos = list(f['data'].keys())
    # Sort demos numerically by the integer after the "demo_" prefix.
    inds = np.argsort([int(elem[5:]) for elem in demos])
    demos = [demos[i] for i in inds]
    if (args.n is not None):
        demos = demos[:args.n]
    video_writer = None
    if write_video:
        video_writer = imageio.get_writer(args.video_path, fps=20)
    for ind in range(len(demos)):
        ep = demos[ind]
        print('Playing back episode: {}'.format(ep))
        if args.use_obs:
            # Observation playback writes stored frames directly to video.
            playback_trajectory_with_obs(traj_grp=f['data/{}'.format(ep)], video_writer=video_writer, video_skip=args.video_skip, image_names=args.render_image_names, first=args.first)
            continue
        states = f['data/{}/states'.format(ep)][()]
        initial_state = dict(states=states[0])
        if is_robosuite_env:
            # Robosuite needs the episode's model XML to reconstruct the scene.
            initial_state['model'] = f['data/{}'.format(ep)].attrs['model_file']
        actions = None
        if args.use_actions:
            actions = f['data/{}/actions'.format(ep)][()]
        playback_trajectory_with_env(env=env, initial_state=initial_state, states=states, actions=actions, render=args.render, video_writer=video_writer, video_skip=args.video_skip, camera_names=args.render_image_names, first=args.first)
    f.close()
    if write_video:
        video_writer.close()
def test_two_sided_pval_from_pval():
    """Two-sided p-values derived from one-sided ones, raw and corrected."""
    one_sided = np.asarray([1.0, 0.025, 0.5])
    one_minus = np.asarray([0.0, 0.975, 0.5])
    raw, corrected = two_sided_pval_from_pval(one_sided, one_minus)
    # Raw two-sided values double the smaller tail (capped at 1).
    assert_almost_equal(raw, np.asarray([0.0, 0.05, 1.0]), decimal=2)
    assert_almost_equal(corrected, np.asarray([0.0, 0.15, 1.0]), decimal=2)
class Attention_50(nn.Module):
    """Encoder producing one latent vector per host, feeding two decoder
    heads: a 2-way anomaly classifier and a prototype projection
    (50-host variant)."""

    def __init__(self):
        super(Attention_50, self).__init__()
        self.name = 'Attention_50'
        self.lr = 0.0008
        self.n_hosts = 50
        self.n_feats = (3 * self.n_hosts)
        self.n_window = 3
        self.n_latent = 10
        self.n_hidden = 16
        self.n = ((self.n_window * self.n_feats) + (self.n_hosts * self.n_hosts))
        # NOTE(review): nn.LeakyReLU(True) sets negative_slope=1.0 (True -> 1),
        # i.e. an identity-like activation; this was probably meant to be
        # inplace=True as in ReLU(True) — confirm before changing, trained
        # weights may depend on the current behavior.
        self.encoder = nn.Sequential(nn.Linear((self.n_window * self.n_feats), (self.n_hosts * self.n_latent)), nn.LeakyReLU(True))
        # NOTE(review): Softmax(dim=0) normalizes over the first axis of the
        # per-host 2-vector — confirm this is the intended axis.
        self.anomaly_decoder = nn.Sequential(nn.Linear(self.n_latent, 2), nn.Softmax(dim=0))
        self.prototype_decoder = nn.Sequential(nn.Linear(self.n_latent, PROTO_DIM), nn.Sigmoid())
        # Three fixed (non-trainable) random prototype vectors.
        self.prototype = [torch.rand(PROTO_DIM, requires_grad=False, dtype=torch.double) for _ in range(3)]

    def encode(self, t, s):
        # Flatten the window and reshape into one latent vector per host.
        # (s is accepted but unused here.)
        t = self.encoder(t.view((- 1))).view(self.n_hosts, self.n_latent)
        return t

    def anomaly_decode(self, t):
        # One (1, 2) score row per host latent.
        anomaly_scores = []
        for elem in t:
            anomaly_scores.append(self.anomaly_decoder(elem).view(1, (- 1)))
        return anomaly_scores

    def prototype_decode(self, t):
        # One PROTO_DIM-dimensional projection per host latent.
        prototypes = []
        for elem in t:
            prototypes.append(self.prototype_decoder(elem))
        return prototypes

    def forward(self, t, s):
        t = self.encode(t, s)
        anomaly_scores = self.anomaly_decode(t)
        prototypes = self.prototype_decode(t)
        return (anomaly_scores, prototypes)
def get_raytune_search_alg(raytune_cfg, seeds=False):
    """Build the Ray Tune search algorithm requested by ``raytune_cfg``.

    Returns None when the configured scheduler manages its own search
    (pbt/pb2) or when no search algorithm is configured. ``seeds=True``
    fixes the TuneBOHB seed for reproducibility.
    """
    if raytune_cfg['sched'] in ('pbt', 'pb2'):
        if raytune_cfg['search_alg'] is not None:
            # Population-based schedulers mutate configs themselves and cannot
            # be combined with a search algorithm.
            print("INFO: Using schedule '{}' is not compatible with Ray Tune search algorithms.".format(raytune_cfg['sched']))
            # Typo fix: 'Uing' -> 'Using'.
            print('INFO: Using the Ray Tune {} scheduler without search algorithm'.format(raytune_cfg['sched']))
        return None
    if raytune_cfg['sched'] in ('bohb', 'BOHB'):
        # Typo fix: 'shedule' -> 'schedule'.
        print('INFO: Using TuneBOHB search algorithm since it is required for BOHB schedule')
        seed = 1234 if seeds else None
        return TuneBOHB(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], seed=seed)
    search_alg = raytune_cfg['search_alg']
    if search_alg == 'bayes':
        print('INFO: Using BayesOptSearch')
        return BayesOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], random_search_steps=raytune_cfg['bayes']['n_random_steps'])
    if search_alg == 'hyperopt':
        print('INFO: Using HyperOptSearch')
        return HyperOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], n_initial_points=raytune_cfg['hyperopt']['n_random_steps'])
    if search_alg == 'scikit':
        print('INFO: Using bayesian optimization from scikit-learn')
        return SkOptSearch(metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'], convert_to_python=True)
    if search_alg == 'nevergrad':
        print('INFO: Using bayesian optimization from nevergrad')
        # Imported lazily so nevergrad is only required when selected.
        import nevergrad as ng
        return NevergradSearch(optimizer=ng.optimizers.BayesOptim(pca=False, init_budget=raytune_cfg['nevergrad']['n_random_steps']), metric=raytune_cfg['default_metric'], mode=raytune_cfg['default_mode'])
    print('INFO: Not using any Ray Tune search algorithm')
    return None
class QuantitativeClassifier():
    """First-match rule-list classifier over a quantitative dataframe.

    Each rule carries an ``antecedent`` (iterable of (column, condition)
    pairs, where a condition is either an exact string or an interval-like
    object with ``isin``) and a ``consequent`` (label-name, class) pair.
    Rows matching no rule receive ``default_class``.
    """

    def __init__(self, rules, default_class):
        self.rules = rules
        self.default_class = default_class

    def rule_model_accuracy(self, quantitative_dataframe, ground_truth):
        """Accuracy of ``predict`` against ``ground_truth`` labels."""
        predicted = self.predict(quantitative_dataframe)
        return accuracy_score(predicted, ground_truth)

    def predict(self, quantitative_dataframe):
        """Predict a class per row using the first matching rule."""
        predicted_classes = []
        for _, row in quantitative_dataframe.dataframe.iterrows():
            appended = False
            for rule in self.rules:
                antecedent_dict = dict(rule.antecedent)
                matches = True
                # Bug fix: Series.iteritems() was removed in pandas 2.0;
                # Series.items() is the long-standing equivalent.
                for name, value in row.items():
                    if name in antecedent_dict:
                        interval = antecedent_dict[name]
                        if isinstance(interval, str):
                            # Categorical condition: exact string match.
                            matches &= (interval == value)
                        else:
                            # Numeric condition: interval membership test.
                            matches &= interval.isin(value)
                if matches:
                    _, predicted_class = rule.consequent
                    predicted_classes.append(predicted_class)
                    appended = True
                    break
            if not appended:
                predicted_classes.append(self.default_class)
        return predicted_classes
def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float, sigma: float, zeroed_width: Union[(None, float)]=None) -> np.ndarray:
    """Gaussian-square pulse: a flat top of ``width`` centered at ``center``
    with gaussian rise and fall of standard deviation ``sigma``.

    :param times: sample times to evaluate the pulse at
    :param amp: complex amplitude of the flat top
    :param zeroed_width: total width over which the pulse is zeroed/rescaled;
        when given, the gaussian edges get the remaining (non-positive) budget
        ``zeroed_width - width`` and are amplitude-rescaled.
    :return: complex-valued samples of the pulse
    """
    square_start = center - (width / 2)
    square_stop = center + (width / 2)
    if zeroed_width:
        zeroed_width = min(width, zeroed_width)
        gauss_zeroed_width = zeroed_width - width
    else:
        gauss_zeroed_width = None
    # Rising edge, falling edge, and (default) flat top.
    funclist = [functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma, zeroed_width=gauss_zeroed_width, rescale_amp=True), functools.partial(constant, amp=amp)]
    condlist = [(times <= square_start), (times >= square_stop)]
    # Bug fix: np.complex_ was removed in NumPy 2.0; the builtin `complex`
    # maps to complex128 and is accepted by every NumPy version.
    return np.piecewise(times.astype(complex), condlist, funclist)
class PipeGradScaler(GradScaler):
    """GradScaler for pipeline parallelism: inf/NaN status is shared across
    all pipeline stages (via AutoAccelerateContext plus an all-reduce over the
    pipe process group) so every stage takes the same skip/step decision and
    the loss scale stays consistent across stages.
    """

    def __init__(self, init_scale=(2.0 ** 16), growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True, process_group='pipe', stage_id=None):
        super().__init__(init_scale=init_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, enabled=enabled)
        # Pipeline stage this scaler belongs to; keys the shared inf store.
        self.stage_id = stage_id
        # Either a resolved process group or its string name (resolved lazily).
        self.process_group = process_group
        if (torch.distributed.is_initialized() and torch.cuda.is_available()):
            self.device = f'cuda:{local_rank()}'
            self._lazy_init_scale_growth_tracker(self.device)
            self._maybe_init_process_group()

    def _maybe_init_process_group(self):
        # Resolve the group name to an actual process group once available.
        if (isinstance(self.process_group, str) and (parallel_group(self.process_group) is not None)):
            self.process_group = parallel_group(self.process_group)

    def _sync_inf_status(self, found_inf_per_device):
        """Publish this stage's inf count and combine it with all stages'."""
        if isinstance(self.process_group, str):
            self._maybe_init_process_group()
        found_inf = torch.tensor([sum((v.item() for v in found_inf_per_device.values()))], device=self.device)
        counter = AutoAccelerateContext.counter
        # Share this stage's status with co-located stages via the context.
        AutoAccelerateContext.grad_scaler_store[counter].update({self.stage_id: found_inf})
        condition = AutoAccelerateContext.grad_scaler_condition[counter]
        # NOTE(review): indentation reconstructed — assuming only the counter
        # increment and notify happen under the condition lock; confirm.
        with condition:
            AutoAccelerateContext.grad_scaler_counter[counter] += 1
            condition.notify()
        # Combine the locally gathered status, then reduce across the group.
        found_inf = local_found_inf()
        dist.all_reduce(found_inf, op=dist.ReduceOp.SUM, group=self.process_group)
        found_inf = found_inf[0]
        return found_inf

    def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
        # Step only when no stage anywhere observed inf/NaN gradients.
        retval = None
        if (not self._sync_inf_status(optimizer_state['found_inf_per_device']).item()):
            retval = optimizer.step(*args, **kwargs)
        return retval

    def step(self, optimizer, *args, **kwargs):
        """Scaled optimizer step with globally synchronized inf checks."""
        if (not self._enabled):
            return optimizer.step(*args, **kwargs)
        if ('closure' in kwargs):
            raise RuntimeError('Closure use is not currently supported if GradScaler is enabled.')
        self._check_scale_growth_tracker('step')
        optimizer_state = self._per_optimizer_states[id(optimizer)]
        if (optimizer_state['stage'] is OptState.STEPPED):
            raise RuntimeError('step() has already been called since the last update().')
        retval = None
        if (hasattr(optimizer, '_step_supports_amp_scaling') and optimizer._step_supports_amp_scaling):
            # Optimizer handles scaling itself (fused/AMP-aware optimizers).
            kwargs_ = kwargs
            has_grad_scaler_kwarg = ('grad_scaler' in inspect.signature(optimizer.step).parameters)
            if has_grad_scaler_kwarg:
                warnings.warn('GradScaler is going to stop passing itself as a keyword argument to the passed optimizer. In the near future GradScaler registers `grad_scale: Tensor` and `found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.', FutureWarning)
                kwargs_.update({'grad_scaler': self})
            else:
                # Hand the optimizer the scale and the synchronized inf status.
                scaler = self._get_scale_async()
                found_inf = self._sync_inf_status(self._check_inf_per_device(optimizer))
                optimizer.grad_scale = (None if (optimizer_state['stage'] == OptState.UNSCALED) else scaler)
                optimizer.found_inf = found_inf
            retval = optimizer.step(*args, **kwargs_)
            optimizer_state['stage'] = OptState.STEPPED
            if (not has_grad_scaler_kwarg):
                # Clean up the temporary attributes set above.
                del optimizer.grad_scale
                del optimizer.found_inf
            return retval
        if (optimizer_state['stage'] is OptState.READY):
            self.unscale_(optimizer)
        assert (len(optimizer_state['found_inf_per_device']) > 0), 'No inf checks were recorded for this optimizer.'
        retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)
        optimizer_state['stage'] = OptState.STEPPED
        return retval

    def update(self, new_scale=None):
        """Update the loss scale using the globally combined inf status."""
        if (not self._enabled):
            return
        (_scale, _growth_tracker) = self._check_scale_growth_tracker('update')
        if (new_scale is not None):
            # Caller-supplied scale overrides the automatic update.
            if isinstance(new_scale, float):
                self._scale.fill_(new_scale)
            else:
                reason = 'new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False.'
                assert isinstance(new_scale, torch.cuda.FloatTensor), reason
                assert (new_scale.numel() == 1), reason
                assert (new_scale.requires_grad is False), reason
                self._scale.copy_(new_scale)
        else:
            # Gather inf flags recorded by every optimizer this cycle.
            found_infs = [found_inf.to(device=_scale.device, non_blocking=True) for state in self._per_optimizer_states.values() for found_inf in state['found_inf_per_device'].values()]
            assert (len(found_infs) > 0), 'No inf checks were recorded prior to update.'
            found_inf_combined = found_infs[0]
            if (len(found_infs) > 1):
                for i in range(1, len(found_infs)):
                    found_inf_combined += found_infs[i]
            counter = AutoAccelerateContext.counter
            AutoAccelerateContext.grad_scaler_store[counter].update({self.stage_id: found_inf_combined})
            condition = AutoAccelerateContext.grad_scaler_condition[counter]
            # NOTE(review): indentation reconstructed — same condition-lock
            # scope assumption as in _sync_inf_status; confirm.
            with condition:
                AutoAccelerateContext.grad_scaler_counter[counter] += 1
                condition.notify()
            found_inf_combined = local_found_inf()
            dist.all_reduce(found_inf_combined, op=dist.ReduceOp.SUM, group=self.process_group)
            # Grow or back off the scale based on the combined status.
            torch._amp_update_scale_(_scale, _growth_tracker, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval)
        # Reset per-optimizer bookkeeping for the next iteration.
        self._per_optimizer_states = collections.defaultdict(_refresh_per_optimizer_state)
# NOTE(review): a decorator name was lost here during extraction — given the
# hydra.utils.instantiate call below this is presumably
# "@hydra.main(config_path='./confs', config_name='gala', version_base='1.1')";
# restore it before running.
(config_path='./confs', config_name='gala', version_base='1.1')
def main(opt):
    """Evaluate a NeRF checkpoint on the configured test dataset."""
    pl.seed_everything(0)  # deterministic evaluation
    model = NeRFModel.load_from_checkpoint('model.ckpt')
    datamodule = hydra.utils.instantiate(opt.dataset, train=False)
    trainer = pl.Trainer(accelerator='gpu', **opt.trainer_args)
    # trainer.test returns one metrics dict per dataloader; keep the first.
    result = trainer.test(model, datamodule=datamodule)[0]
def DenseNet201(include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """DenseNet-201: dense blocks of 6, 12, 48 and 32 layers."""
    block_config = [6, 12, 48, 32]
    return DenseNet(block_config, include_top, weights, input_tensor, input_shape, pooling, classes, **kwargs)
class _DenseBlock(nn.Sequential):
    """Sequential container of ``num_layers`` densely connected layers; each
    layer receives the block input plus all previously produced features."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for idx in range(num_layers):
            # Input channels grow by `growth_rate` with every preceding layer.
            in_channels = num_input_features + idx * growth_rate
            layer = _DenseLayer(in_channels, growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (idx + 1), layer)
def rotate_mesh_for_webview(meshes):
    """Return a copy of ``meshes`` with vertices rotated by the inverse of a
    fixed camera-tilt rotation (about the x axis), for web viewing."""
    # Fixed tilt matrix whose inverse is applied to every vertex.
    tilt = np.array([[1, 0, 0], [0, 0.9816272, (- 0.190809)], [0, 0.190809, 0.9816272]])
    rot = torch.FloatTensor(np.linalg.inv(tilt))
    rotated = torch.mm(rot, meshes.verts_packed().T).T
    # Split the packed vertices back into one tensor per mesh.
    per_mesh_verts = list(rotated.split(meshes.num_verts_per_mesh().tolist(), dim=0))
    return Meshes(verts=per_mesh_verts, faces=meshes.faces_list(), textures=meshes.textures)
def write_config_files(config_dir, all_layers):
    """Write per-basename ``.config`` files generated from xconfig layers.

    Collects the config lines each layer contributes, groups them by target
    basename, prepends the matching header, and writes one file per basename
    under ``config_dir``. A non-'init' config with no output-node line is an
    error.
    """
    config_basename_to_lines = defaultdict(list)
    config_basename_to_header = get_config_headers()
    for layer in all_layers:
        try:
            pairs = layer.get_full_config()
            for (config_basename, line) in pairs:
                config_basename_to_lines[config_basename].append(line)
        except Exception as e:
            print("{0}: error producing config lines from xconfig line '{1}': error was: {2}".format(sys.argv[0], str(layer), repr(e)), file=sys.stderr)
            raise
    # Remove any stale init.config left over from a previous run.
    try:
        os.remove(config_dir + '/init.config')
    except OSError:
        pass
    for (basename, lines) in config_basename_to_lines.items():
        num_output_node_lines = sum((1 if line.startswith('output-node') else 0) for line in lines)
        if num_output_node_lines == 0:
            if basename == 'init':
                # init.config legitimately has no output node.
                continue
            # Bug fix: the original executed a bare `raise` with no active
            # exception, which surfaced as an unrelated RuntimeError('No
            # active exception to re-raise'); raise explicitly instead.
            print('{0}: error in xconfig file {1}: may be lack of a output layer'.format(sys.argv[0], sys.argv[2]), file=sys.stderr)
            raise RuntimeError('{0}: error in xconfig file {1}: may be lack of a output layer'.format(sys.argv[0], sys.argv[2]))
        header = config_basename_to_header[basename]
        filename = '{0}/{1}.config'.format(config_dir, basename)
        try:
            # Bug fix: `with` guarantees the handle is closed even if a write
            # fails (the original leaked the handle on error).
            with open(filename, 'w') as f:
                print(header, file=f)
                for line in lines:
                    print(line, file=f)
        except Exception as e:
            print('{0}: error writing to config file {1}: error is {2}'.format(sys.argv[0], filename, repr(e)), file=sys.stderr)
            raise
def latitude_and_longitude_convert_to_decimal_system(*arg):
    """Convert EXIF-style (degrees, minutes, seconds-as-'num/den') strings
    into decimal degrees: deg + min/60 + sec/3600."""
    # Seconds arrive as a rational string like '1800/60'; with no slash the
    # value divides by itself, matching the original split()[0]/split()[-1].
    parts = arg[2].split('/')
    seconds = float(parts[0]) / float(parts[-1])
    minutes = float(arg[1]) + seconds / 60
    return float(arg[0]) + minutes / 60
def snapshotToMovie(snap, filename, *args, **kwargs):
    """Render every frame of ``snap`` to a temp directory and assemble them
    into a movie with ffmpeg (optionally also writing a jpg thumbnail).

    Recognized kwargs (consumed here): tmpdir, framerate, bitrate, thumbnail.
    ``thumbsize`` is read but intentionally left in kwargs, matching the
    original behavior of also passing it to ``snap[ii].plot``.
    Remaining args/kwargs are forwarded to ``snap[ii].plot``.
    """
    # Bug fix: dict.has_key() was removed in Python 3 — use pop with defaults.
    tmpdir = kwargs.pop('tmpdir', '/tmp')
    framerate = kwargs.pop('framerate', 25)
    bitrate = kwargs.pop('bitrate', 1000)
    thumbnail = bool(kwargs.pop('thumbnail', False))
    thumbsize = kwargs.get('thumbsize', 300)
    tempdir = tempfile.mkdtemp(dir=tmpdir)
    tmpfiles = []
    nsnap = len(snap)
    # Zero-pad frame numbers so ffmpeg's %0Nd pattern matches them all.
    file_length = int(m.ceil(m.log10(nsnap)))
    for ii in range(nsnap):
        tmpfiles.append(os.path.join(tempdir, str(ii).zfill(file_length)))
        galpy_plot.print()
        snap[ii].plot(*args, **kwargs)
        galpy_plot.end_print(tmpfiles[ii] + '.pdf')
        try:
            subprocess.check_call(['convert', tmpfiles[ii] + '.pdf', tmpfiles[ii] + '.jpg'])
        except subprocess.CalledProcessError:
            print("'convert' failed")
            # Bug fix: re-raise the caught exception; the original raised the
            # exception *class* without arguments, which is a TypeError.
            raise
    try:
        subprocess.check_call(['ffmpeg', '-r', str(framerate), '-b', str(bitrate), '-i', os.path.join(tempdir, '%' + ('0%id.jpg' % file_length)), '-y', filename])
        if thumbnail:
            # Thumbnail name: movie filename with its extension replaced by .jpg.
            thumbname = ''.join(re.split('\\.', filename)[:-1]) + '.jpg'
            subprocess.check_call(['ffmpeg', '-itsoffset', '-4', '-y', '-i', filename, '-vcodec', 'mjpeg', '-vframes', '1', '-an', '-f', 'rawvideo', '-s', ('%ix%i' % (thumbsize, thumbsize)), thumbname])
    except subprocess.CalledProcessError:
        print("'ffmpeg' failed")
        raise
    finally:
        # Always remove the frame directory, on success or failure.
        _cleanupMovieTempdir(tempdir)
def generate_urban_atlas_boundaries():
    """Collect every Urban Atlas city boundary shapefile under UA_DIR into a
    single GeoDataFrame with columns ['city', 'geometry'] (EPSG:3035)."""
    records = []
    for ua in os.listdir(UA_DIR):
        shapefile_dir = UA_DIR + ua + '/Shapefiles/'
        for fname in os.listdir(shapefile_dir):
            if fname.endswith('_UA2012_Boundary.shp'):
                # City name is the trailing token of the dataset folder name.
                geometry = gpd.read_file(shapefile_dir + fname).geometry.values[0]
                records.append((ua.split('_')[-1], geometry))
    boundaries = gpd.GeoDataFrame(records, columns=['city', 'geometry'])
    boundaries.crs = {'init': 'epsg:3035'}
    return boundaries
class RTFMAbstractEnv(RTFMEnv):
    """RTFM environment variant exposing a vectorized knowledge base (VKB)
    over entities: one 'physical' entity per grid cell followed by the
    abstract entities (modifiers, elements, monster names, groups).
    """

    def __init__(self, room_size=6):
        super(RTFMAbstractEnv, self).__init__(room_size)
        # Physical entities: one per grid cell; abstract entities follow.
        self.nb_physical = (self.world.height * self.world.width)
        self.nb_entities = (self.nb_physical + len(self.abstract_entities))
        # Abstract entity -> global entity index (after all cells).
        self.entity2idx = {entity: (self.nb_physical + i) for (i, entity) in enumerate(self.abstract_entities)}
        self.nb_unary = (2 + len(self.abstract_entities))
        self.nb_binary = 8
        self.initilize()
        self.with_vkb = True

    def position2index(self, position):
        """Row-major entity index of an (x, y) grid position."""
        return ((position[1] * self.room_size) + position[0])

    def get_zeros_vkb(self, arity):
        """Zero relation tensor of shape (nb_entities,) * arity."""
        return np.zeros([self.nb_entities for _ in range(arity)], dtype=np.float32)

    def get_abstract_kb(self):
        """Static relations derived from the task text: entity identities,
        the 'beats' relation, group membership, and the target group."""
        abstract_ids = [(entity, self.get_zeros_vkb(1)) for entity in self.abstract_entities]
        for (entity, abstract_id) in abstract_ids:
            # One-hot identity predicate per abstract entity.
            abstract_id[self.entity2idx[entity]] = 1.0
        beat = self.get_zeros_vkb(2)
        for (element, modifiers) in self.task.modifier_assignment:
            for modifier in modifiers:
                beat[(self.entity2idx[modifier], self.entity2idx[element.describe()])] = 1.0
        belong = self.get_zeros_vkb(2)
        for (group, monsters) in self.task.group_assignment:
            for monster in monsters:
                belong[(self.entity2idx[group], self.entity2idx[monster])] = 1.0
        target = self.get_zeros_vkb(1)
        target[self.entity2idx[self.task.target_group]] = 1.0
        # (nullary, unary, binary) relation lists.
        return ([], ([target] + [abstract_id for (entity, abstract_id) in abstract_ids]), [beat, belong])

    def get_assignment_kb(self):
        """Dynamic relations linking grid cells to the abstract entities
        currently placed there (monsters, elements, item modifiers)."""
        modifiers_assignment = self.get_zeros_vkb(2)
        elements_assignment = self.get_zeros_vkb(2)
        monster_assignment = self.get_zeros_vkb(2)
        has_modifier = self.get_zeros_vkb(2)
        has_element = self.get_zeros_vkb(2)
        has_monster_type = self.get_zeros_vkb(2)
        for (position, entity) in self.task.world.map.items():
            if (not entity):
                continue
            # Each occupied cell holds a set; take its single occupant.
            entity = list(entity)[0]
            if isinstance(entity, HostileMonster):
                # Both directions are recorded: entity->cell and cell->entity.
                monster_assignment[(self.entity2idx[entity.monster_name], self.position2index(position))] = 1.0
                has_monster_type[(self.position2index(position), self.entity2idx[entity.monster_name])] = 1.0
                elements_assignment[(self.entity2idx[entity.element.describe()], self.position2index(position))] = 1.0
                has_element[(self.position2index(position), self.entity2idx[entity.element.describe()])] = 1.0
            if isinstance(entity, BaseItem):
                # Item names start with their modifier word.
                modifiers_assignment[(self.entity2idx[entity.name.split()[0]], self.position2index(position))] = 1.0
                has_modifier[(self.position2index(position), self.entity2idx[entity.name.split()[0]])] = 1.0
        return ([], [], [modifiers_assignment, elements_assignment, monster_assignment, has_modifier, has_element, has_monster_type])

    def get_inventory_kb(self):
        """Unary relation marking the modifier of the equipped item, if any."""
        inv_modifier = self.get_zeros_vkb(1)
        if self.task.agent.inventory.equipped_items:
            modifier = self.task.agent.inventory.equipped_items[0].name.split()[0]
            inv_modifier[self.entity2idx[modifier]] = 1.0
        return ([], [inv_modifier], [])

    def get_vkb(self):
        """Combine static background, inventory and placement relations."""
        vkb = self.background[:]
        vkb = join_vkb_lists(vkb, self.get_inventory_kb())
        vkb = join_vkb_lists(vkb, self.get_assignment_kb())
        return stack_vkb(vkb)

    def initilize(self):
        # (sic) method name kept as-is for compatibility with callers.
        kb_abstract = self.get_abstract_kb()
        self.background = kb_abstract

    def get_obs(self):
        return dict(image=self.get_image(), VKB=self.get_vkb())
class eca_layer(nn.Module):
    """Efficient Channel Attention (ECA-Net): per-channel attention weights
    from a cheap 1-D convolution over globally average-pooled features."""

    def __init__(self, channel, k_size=3):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 'same' padding so the channel dimension is preserved by the conv.
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled = self.avg_pool(x)
        # Treat channels as a 1-D sequence: (B, C, 1, 1) -> (B, 1, C).
        attn = pooled.squeeze(-1).transpose(-1, -2)
        attn = self.conv(attn)
        attn = attn.transpose(-1, -2).unsqueeze(-1)
        weights = self.sigmoid(attn)
        # Rescale the input channel-wise.
        return x * weights.expand_as(x)
def _load_checkpoint(checkpoint_path, model, optimizer, scheduler, logger, distributed):
    """Restore model/optimizer/logger state from a checkpoint file.

    :return: the iteration index to resume from (saved iter_no + 1).
    """
    print('loading from a checkpoint at {}'.format(checkpoint_path))
    if distributed:
        # Map tensors onto CPU so each rank can relocate them as needed.
        state = torch.load(checkpoint_path, map_location=(lambda storage, loc: storage))
    else:
        state = torch.load(checkpoint_path)
    model.load_state_dict(state['model'])
    optimizer.load_state_dict(state['optimizer'])
    logger.load_state_dict(state['logger'])
    if 'scheduler_iter' in state:
        # Fast-forward the LR schedule to the checkpointed iteration.
        scheduler.step(state['scheduler_iter'])
    return state['iter_no'] + 1
# NOTE(review): the decorator names on this test were truncated by extraction —
# the first line is the argument list of a parametrization decorator (likely
# @hypothesis.given or a project-local variant) and the bare name below it is
# another decorator; restore both before running.
(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
_events_with_registered_handlers_to_subset
def test_for_loop_nested_in_while_loop(events):
    """Trace events emitted for a for-loop nested in a while-loop must match
    the expected sequence, restricted to the sampled `events` subset."""
    # No events should have been recorded before the cell runs.
    assert (_RECORDED_EVENTS == [])
    run_cell('\n i = 0\n while i < 10:\n for j in range(2):\n i += 1\n ')
    # Expected order: module init, the assignment statement, then one
    # while-iteration containing one for-iteration, then module teardown.
    throw_and_print_diff_if_recorded_not_equal_to(
        filter_events_to_subset(
            (([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_assign_rhs, TraceEvent.after_assign_rhs, TraceEvent.after_stmt, TraceEvent.after_module_stmt, TraceEvent.before_stmt]
              + ([TraceEvent.load_name, TraceEvent.before_while_loop_body, TraceEvent.before_stmt, TraceEvent.before_load_complex_symbol, TraceEvent.load_name, TraceEvent.before_call, TraceEvent.after_argument, TraceEvent.after_call, TraceEvent.after_load_complex_symbol, TraceEvent.before_for_loop_body, TraceEvent.before_stmt, TraceEvent.after_stmt, TraceEvent.after_for_loop_iter, TraceEvent.after_stmt, TraceEvent.after_while_loop_iter] * 1))
             + [TraceEvent.after_stmt, TraceEvent.after_module_stmt]),
            events))
def get_rouge(hypotheses, reference, sent_split=True, use_cf=False):
    """Compute ROUGE-1/2/L F-scores (scaled x100) with pythonrouge.

    ``hypotheses`` and ``reference`` are parallel lists of token lists.
    With ``sent_split`` each summary is split into sentences on '.' as the
    scorer expects; with ``use_cf`` 95% confidence intervals are appended to
    the returned tuple.
    """
    assert (len(hypotheses) == len(reference))
    assert (len(hypotheses) > 0)
    hyps = []
    refs = []
    for (hyp, ref) in zip(hypotheses, reference):
        # Re-join token lists into plain strings.
        hyp = ' '.join(hyp)
        ref = ' '.join(ref)
        if sent_split:
            # pythonrouge wants a list of non-empty sentences per summary.
            hs = [x.strip() for x in hyp.split('.') if (len(x.strip()) > 0)]
            rs = [x.strip() for x in ref.split('.') if (len(x.strip()) > 0)]
            hyps += [hs]
            refs += [[rs]]
        else:
            hyps += [[hyp]]
            refs += [[[ref]]]
    print('Calculating ROUGE...')
    rouge = Pythonrouge(summary_file_exist=False, summary=hyps, reference=refs, n_gram=2, ROUGE_SU4=False, ROUGE_L=True, recall_only=False, stemming=False, stopwords=False, word_level=True, length_limit=False, use_cf=use_cf, cf=95, scoring_formula='average', resampling=True, samples=1000, favor=True, p=0.5)
    score = rouge.calc_score()
    print('ROUGE done.')
    # Scale F-scores to percentages.
    r1 = (score['ROUGE-1-F'] * 100)
    r2 = (score['ROUGE-2-F'] * 100)
    rl = (score['ROUGE-L-F'] * 100)
    if (not use_cf):
        return (r1, r2, rl)
    else:
        # Confidence-interval bounds, scaled to percentages as well.
        r1_cf = [(x * 100) for x in score['ROUGE-1-F-cf95']]
        r2_cf = [(x * 100) for x in score['ROUGE-2-F-cf95']]
        rl_cf = [(x * 100) for x in score['ROUGE-L-F-cf95']]
        return (r1, r2, rl, r1_cf, r2_cf, rl_cf)
def FuseGTestH(gtest_root, output_dir):
    """Fuse the gtest headers into a single output header.

    Starting from GTEST_H_SEED, recursively inlines every ``#include`` of a
    gtest header (matched by INCLUDE_GTEST_FILE_REGEX) exactly once, writing
    the result to GTEST_H_OUTPUT under ``output_dir``.
    """
    processed_files = set()

    # Bug fix: use `with` so the output and every input handle are closed
    # even on error — the original never closed the input files at all.
    with open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') as output_file:

        def ProcessFile(gtest_header_path):
            # Inline each header at most once to avoid duplicate definitions.
            if gtest_header_path in processed_files:
                return
            processed_files.add(gtest_header_path)
            with open(os.path.join(gtest_root, gtest_header_path), 'r') as input_file:
                for line in input_file:
                    m = INCLUDE_GTEST_FILE_REGEX.match(line)
                    if m:
                        # Recurse into the included gtest header in place.
                        ProcessFile('include/' + m.group(1))
                    else:
                        output_file.write(line)

        ProcessFile(GTEST_H_SEED)
def generate_labels(img_info, detail_api, out_dir):
    """Convert one image's Detail-API mask into a label PNG saved under
    ``out_dir`` with the same basename."""

    def _class_to_index(mask, _mapping, _key):
        # Every raw mask value must be a known class id.
        values = np.unique(mask)
        for i in range(len(values)):
            assert (values[i] in _mapping)
        # Map raw ids to contiguous indices by their rank within _mapping.
        index = np.digitize(mask.ravel(), _mapping, right=True)
        return _key[index].reshape(mask.shape)

    # NOTE(review): _mapping and _key are free variables here (presumably
    # module-level lookup tables) — confirm they exist at call time.
    sem_seg = _class_to_index(detail_api.getMask(img_info), _mapping=_mapping, _key=_key)
    # NOTE(review): the -1 shift presumably moves labels to a 0-based /
    # ignore=-1 convention — confirm against the consuming dataset.
    sem_seg = (sem_seg - 1)
    filename = img_info['file_name']
    Image.fromarray(sem_seg).save((out_dir / filename.replace('jpg', 'png')))
class VecEnv(ABC):
    """Abstract base for vectorized environments that step several
    sub-environments in lockstep."""
    closed = False
    viewer = None
    metadata = {'render.modes': ['human', 'rgb_array']}

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    def reset(self):
        """Reset all environments and return initial observations."""
        pass

    def step_async(self, actions):
        """Begin stepping the environments with the given actions."""
        pass

    def step_wait(self):
        """Block until the async step completes and return its results."""
        pass

    def close_extras(self):
        """Clean up subclass-specific resources; called once from close()."""
        pass

    def close(self):
        """Idempotently close the viewer and subclass resources."""
        if self.closed:
            return
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()
        self.closed = True

    def step(self, actions):
        """Synchronous step: step_async followed by step_wait."""
        self.step_async(actions)
        return self.step_wait()

    @staticmethod
    def _tile_images(imgs):
        """Pack N images of shape (H, W, C) into one roughly square grid
        image, padding the grid with black frames as needed."""
        import numpy as np
        imgs = np.asarray(imgs)
        (n, h, w, c) = imgs.shape
        rows = int(np.ceil(np.sqrt(n)))
        cols = int(np.ceil(float(n) / rows))
        pad = np.zeros(((rows * cols) - n, h, w, c), dtype=imgs.dtype)
        grid = np.concatenate([imgs, pad], axis=0).reshape(rows, cols, h, w, c)
        # Interleave grid rows/cols with pixel rows/cols, then flatten.
        return grid.transpose(0, 2, 1, 3, 4).reshape(rows * h, cols * w, c)

    def render(self, mode='human'):
        """Render all environments as one tiled frame.

        Bug fix: ``bigimg`` was previously left as None (the collected
        ``imgs`` were never used), so 'human' mode crashed inside imshow and
        'rgb_array' mode returned nothing useful.
        """
        imgs = self.get_images()
        bigimg = self._tile_images(imgs)
        if mode == 'human':
            self.get_viewer().imshow(bigimg)
            return self.get_viewer().isopen
        elif mode == 'rgb_array':
            return bigimg
        else:
            raise NotImplementedError

    def get_images(self):
        """Return a list of RGB frames, one per environment."""
        raise NotImplementedError

    def unwrapped(self):
        """Innermost VecEnv when wrapped; self otherwise."""
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self

    def get_viewer(self):
        """Lazily construct the on-screen image viewer."""
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer
class GCRN(nn.Module):
    """Graph Convolutional Recurrent Network: one GCN per time step followed
    by a shared RNN (GRU or LSTM) over the per-node embedding sequences."""
    input_dim: int
    feature_dim: int
    hidden_dim: int
    output_dim: int
    feature_pre: bool
    layer_num: int
    dropout: float
    duration: int
    rnn_type: str
    bias: bool
    method_name: str

    def __init__(self, input_dim, feature_dim, hidden_dim, output_dim, feature_pre=True, layer_num=2, dropout=0.5, bias=True, duration=1, rnn_type='GRU'):
        super(GCRN, self).__init__()
        self.input_dim = input_dim
        self.feature_dim = feature_dim
        self.output_dim = output_dim
        self.feature_pre = feature_pre
        self.layer_num = layer_num
        self.dropout = dropout
        self.bias = bias
        # Number of time steps, i.e. number of per-step GCNs.
        self.duration = duration
        self.rnn_type = rnn_type
        self.method_name = 'GCRN'
        # One independent GCN per time step.
        self.gcn_list = nn.ModuleList()
        for i in range(self.duration):
            self.gcn_list.append(GCN(input_dim, hidden_dim, output_dim, dropout=dropout, bias=bias))
        assert (self.rnn_type in ['LSTM', 'GRU'])
        if (self.rnn_type == 'LSTM'):
            self.rnn = nn.LSTM(output_dim, output_dim, num_layers=1, bias=bias, batch_first=True)
        else:
            self.rnn = nn.GRU(output_dim, output_dim, num_layers=1, bias=bias, batch_first=True)
        self.norm = nn.LayerNorm(output_dim)

    def forward(self, x_list, edge_list):
        """Encode each snapshot with its GCN, then run the RNN over time.

        :param x_list: node-feature tensor per time step
        :param edge_list: edge structure per time step
        :return: (time, nodes, output_dim) normalized RNN outputs
        """
        time_num = len(x_list)
        hx_list = []
        for i in range(time_num):
            x = self.gcn_list[i](x_list[i], edge_list[i])
            # L2-normalize node embeddings per step.
            x = F.normalize(x, p=2)
            hx_list.append(x)
        # Stack to (time, nodes, dim) then swap to batch_first (nodes, time, dim).
        hx = torch.stack(hx_list, dim=0).transpose(0, 1)
        (out, _) = self.rnn(hx)
        out = self.norm(out)
        # Return in (time, nodes, dim) layout.
        return out.transpose(0, 1)
class FunnelBaseModel(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is not installed.

    Any attempt to instantiate it raises an informative error via
    `requires_backends` instead of failing with a bare ImportError.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fail fast with a message naming the missing backend.
        requires_backends(self, ['torch'])
_grad() def test(model, xs, y_true, evaluator): model.eval() y_preds = [] loader = DataLoader(range(y_true.size(0)), batch_size=400000) for perm in loader: y_pred = model([x[perm] for x in xs]).argmax(dim=(- 1), keepdim=True) y_preds.append(y_pred.cpu()) y_pred = torch.cat(y_preds, dim=0) return evaluator.eval({'y_true': y_true, 'y_pred': y_pred})['acc']
class SensorManager(Singleton):
    """Owns the lifecycle of the sensor drivers declared in `param_dict`.

    Keys look like '<type>:<name>'; the prefix selects the driver class.
    Drivers are created by init()/init_all(), looked up with [], and
    released by close_all().
    """

    def __init__(self, param_dict):
        # param_dict: sensor key -> configuration.
        # sensor_dict: live driver objects, populated by init().
        self.param_dict = param_dict
        self.sensor_dict = {}
        self.known_sensors = ['camera', 'lidar', 'imu', 'gps']

    def init(self, key):
        """Create (and, where needed, start) the driver for `key`.

        Always returns None; failures are reported through debug().
        """
        if (key in self.param_dict):
            sensor_type = self.get_type(key)
            if (sensor_type == 'camera'):
                # Camera has no explicit start() step.
                sensor = Camera()
                self.sensor_dict[key] = sensor
            elif (sensor_type == 'lidar'):
                sensor = LiDAR()
                sensor.start()
                self.sensor_dict[key] = sensor
            elif (sensor_type == 'imu'):
                sensor = XSensDriver(scan_usb('IMU'))
                sensor.start()
                self.sensor_dict[key] = sensor
            elif (sensor_type == 'gps'):
                sensor = GPS(scan_usb('GPS'))
                sensor.start()
                self.sensor_dict[key] = sensor
            else:
                debug(info=(str(key) + ' not initialized'), info_type='warning')
        else:
            debug(info=('Unknown sensor ' + str(key)), info_type='error')
        return None

    def init_all(self):
        """Best-effort init of every configured sensor; failures are logged."""
        for key in self.param_dict:
            try:
                self.init(key)
            except Exception:
                # BUG FIX: narrowed from a bare `except:` so that
                # KeyboardInterrupt/SystemExit still propagate.
                debug(info=(str(key) + ' initialize failed'), info_type='error')

    def close_all(self):
        """Close every sensor that supports close(); log the ones that don't."""
        for key in self.param_dict:
            try:
                self.sensor_dict[key].close()
                debug(info=(str(key) + ' closed'), info_type='success')
            except Exception:
                # BUG FIX: narrowed from a bare `except:`; also covers keys
                # that were never successfully initialized (KeyError).
                debug(info=(str(key) + " has no attribute called 'close'"), info_type='message')

    def __del__(self):
        # Best-effort cleanup when the manager is garbage-collected.
        self.close_all()

    def __getitem__(self, key):
        """Return the live driver for `key`, or None (with a logged error)."""
        if (key in self.sensor_dict):
            return self.sensor_dict[key]
        else:
            debug(info=('No sensor called ' + str(key)), info_type='error')
            return None

    def __setitem__(self, key, value):
        # NOTE(review): this updates the stored *parameters*, not the live
        # sensor object — presumably intentional; confirm against callers.
        if (key in self.param_dict):
            self.param_dict[key] = value
            return True
        else:
            debug(info=('No sensor called ' + str(key)), info_type='error')
            return None

    def get_type(self, key):
        """Return the '<type>' prefix of `key` if it is a known sensor type."""
        sensor_type = key.split(':')[0]
        if (sensor_type in self.known_sensors):
            return sensor_type
        else:
            debug(info=('Unknown sensor type ' + str(key)), info_type='error')
            return None
class BaseModel():
    """Common scaffolding for exportable Stable-Diffusion sub-models.

    Tracks batch/image/latent size limits (latents are the image size
    divided by 8) and provides shared hooks for ONNX export profiles and
    graph optimization; concrete models override the get_* hooks.
    """

    def __init__(self, model, fp16=False, device='cuda', max_batch_size=16, embedding_dim=768, text_maxlen=77):
        self.model = model
        self.name = 'SD Model'
        self.fp16 = fp16
        self.device = device
        self.min_batch = 1
        self.max_batch = max_batch_size
        self.min_image_shape = 256
        self.max_image_shape = 1024
        # Latent resolution is the image resolution downscaled 8x.
        self.min_latent_shape = (self.min_image_shape // 8)
        self.max_latent_shape = (self.max_image_shape // 8)
        self.embedding_dim = embedding_dim
        self.text_maxlen = text_maxlen

    def get_model(self):
        return self.model

    def get_input_names(self):
        # Overridden by concrete models.
        pass

    def get_output_names(self):
        # Overridden by concrete models.
        pass

    def get_dynamic_axes(self):
        # Overridden by concrete models.
        return None

    def get_sample_input(self, batch_size, image_height, image_width):
        # Overridden by concrete models.
        pass

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        # Overridden by concrete models.
        return None

    def get_shape_dict(self, batch_size, image_height, image_width):
        # Overridden by concrete models.
        return None

    def optimize(self, onnx_graph):
        """Run the standard cleanup/fold/shape-inference pass on an ONNX graph."""
        opt = Optimizer(onnx_graph)
        opt.cleanup()
        opt.fold_constants()
        opt.infer_shapes()
        onnx_opt_graph = opt.cleanup(return_onnx=True)
        return onnx_opt_graph

    def check_dims(self, batch_size, image_height, image_width):
        """Validate sizes; return the (latent_height, latent_width) pair."""
        assert ((batch_size >= self.min_batch) and (batch_size <= self.max_batch))
        # BUG FIX: was `or`, which allowed one dimension to be a
        # non-multiple of 8; both spatial dims must divide evenly by the
        # 8x latent scale.
        assert (((image_height % 8) == 0) and ((image_width % 8) == 0))
        latent_height = (image_height // 8)
        latent_width = (image_width // 8)
        assert ((latent_height >= self.min_latent_shape) and (latent_height <= self.max_latent_shape))
        assert ((latent_width >= self.min_latent_shape) and (latent_width <= self.max_latent_shape))
        return (latent_height, latent_width)

    def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
        """Min/max bounds for every dynamic dimension of the export profile.

        `static_batch`/`static_shape` pin the respective dims to the given
        values; otherwise the model-wide limits are used.
        """
        min_batch = (batch_size if static_batch else self.min_batch)
        max_batch = (batch_size if static_batch else self.max_batch)
        latent_height = (image_height // 8)
        latent_width = (image_width // 8)
        min_image_height = (image_height if static_shape else self.min_image_shape)
        max_image_height = (image_height if static_shape else self.max_image_shape)
        min_image_width = (image_width if static_shape else self.min_image_shape)
        max_image_width = (image_width if static_shape else self.max_image_shape)
        min_latent_height = (latent_height if static_shape else self.min_latent_shape)
        max_latent_height = (latent_height if static_shape else self.max_latent_shape)
        min_latent_width = (latent_width if static_shape else self.min_latent_shape)
        max_latent_width = (latent_width if static_shape else self.max_latent_shape)
        return (min_batch, max_batch, min_image_height, max_image_height, min_image_width, max_image_width, min_latent_height, max_latent_height, min_latent_width, max_latent_width)
def node_label_and_degree_worker(G, node_map):
    """Histogram of (degree, label) combinations over the nodes of G.

    `node_map` maps each (degree, label) pair to a bin index; the result
    is a numpy array with one count per bin.
    """
    counts = np.zeros(len(node_map))
    for node in G.nodes():
        bin_idx = node_map[(G.degree[node], G.nodes[node]['label'])]
        counts[bin_idx] += 1
    return counts
class ContinuousMLPBaseline(Baseline):
    """State-value baseline backed by a continuous MLP regressor.

    All fitting/prediction and parameter handling is delegated to an
    internal ContinuousMLPRegressor over flattened observations.
    """

    def __init__(self, env_spec, num_seq_inputs=1, regressor_args=None, name='ContinuousMLPBaseline'):
        super().__init__(env_spec)
        regressor_args = dict() if regressor_args is None else regressor_args
        flat_dim = env_spec.observation_space.flat_dim * num_seq_inputs
        self._regressor = ContinuousMLPRegressor(input_shape=(flat_dim,), output_dim=1, name=name, **regressor_args)
        self.name = name

    def fit(self, paths):
        """Fit the regressor on the observations/returns of all paths."""
        obs = np.concatenate([path['observations'] for path in paths])
        rets = np.concatenate([path['returns'] for path in paths])
        self._regressor.fit(obs, rets.reshape((-1, 1)))

    def predict(self, path):
        """Predicted value for each observation in `path`, as a 1-D array."""
        return self._regressor.predict(path['observations']).flatten()

    def get_param_values(self):
        return self._regressor.get_param_values()

    def set_param_values(self, flattened_params):
        self._regressor.set_param_values(flattened_params)

    def get_params_internal(self):
        return self._regressor.get_params_internal()
def get_linear_layer(input_dim: int, output_dim: int, weight_norm=False, initializer: Initializer=Initializer.Xavier_uniform, *args, **kwargs):
    """Build a torch Linear layer with the requested weight initialization.

    Weights are initialized via the scheme selected by `initializer`
    (extra **kwargs are forwarded to the initializer factory); biases
    start at zero.  With `weight_norm=True` the layer is wrapped in
    weight normalization.
    """
    linear = torch.nn.Linear(input_dim, output_dim)
    apply_init = InitializerFactory.get_initializer(initializer=initializer, **kwargs)
    apply_init(linear.weight)
    torch.nn.init.constant_(linear.bias, 0.0)
    # Optionally reparameterize weights as magnitude * direction.
    return torch.nn.utils.weight_norm(linear) if weight_norm else linear
def compute_contracted(ilegs, jlegs, appearances):
    """Merge two index-sorted leg lists of (index, count) pairs.

    Legs present in only one list are kept as-is.  A leg present in both
    gets its counts summed; if the sum equals `appearances[index]` the
    index is fully contracted and the leg is dropped from the result.
    """
    merged = []
    ia, ja = 0, 0
    ni, nj = len(ilegs), len(jlegs)
    while ia < ni and ja < nj:
        left_ix, left_cnt = ilegs[ia]
        right_ix, right_cnt = jlegs[ja]
        if left_ix < right_ix:
            merged.append((left_ix, left_cnt))
            ia += 1
        elif right_ix < left_ix:
            merged.append((right_ix, right_cnt))
            ja += 1
        else:
            total = left_cnt + right_cnt
            # Drop the leg entirely once every appearance is accounted for.
            if total != appearances[left_ix]:
                merged.append((left_ix, total))
            ia += 1
            ja += 1
    # One of these tails is empty; the other carries the leftover legs.
    merged.extend(ilegs[ia:])
    merged.extend(jlegs[ja:])
    return merged
class BartTokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the `tokenizers` backend is absent.

    Instantiation raises an informative error via `requires_backends`
    rather than a bare ImportError at import time.
    """
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        # Fail fast with a message naming the missing backend.
        requires_backends(self, ['tokenizers'])