code stringlengths 101 5.91M |
|---|
class TenCrop(object):
    """Transform producing ten crops of an image: the four corners, the
    center, and a flipped version of each of those five.

    Args:
        size: crop size; a single number yields a square (size, size) crop,
            otherwise a two-element (h, w) sequence is required.
        vertical_flip: if True, the second set of five crops uses vertical
            instead of horizontal flips.
    """

    def __init__(self, size, vertical_flip=False):
        # Normalise `size` to an (h, w) pair exactly once; the original had a
        # dead store (`self.size = size`) before the isinstance check.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            assert (len(size) == 2), 'Please provide only two dimensions (h, w) for size.'
            self.size = size
        self.vertical_flip = vertical_flip

    def __call__(self, img):
        """Return the tuple of ten crops computed by F.ten_crop."""
        return F.ten_crop(img, self.size, self.vertical_flip)

    def __repr__(self):
        return (self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip))
def run_window_all(conf):
    """Run the windowed evaluation for every non-skipped slice and write the
    per-key averaged results to CSV.

    `run_single_all` returns a dict mapping a key to a list of
    (label, value) pairs; values are summed across slices here, then divided
    by the number of processed slices before `write_results_csv`.
    """
    print('run test window')
    slices = list(range(conf['data']['slices']))
    if ('skip' in conf['data']):
        for skipped in conf['data']['skip']:
            slices.remove(skipped)
    # Seed the accumulator from the first *processed* slice. The original
    # tested `i == 0`, which raised NameError later whenever slice 0 was in
    # the skip list.
    avg_results = None
    for slice_idx in slices:
        print('start run for slice ', str(slice_idx))
        send_message(('start run for slice ' + str(slice_idx)))
        results = run_single_all(conf, slice=slice_idx)
        if avg_results is None:
            avg_results = results
        else:
            for (key, entries) in results.items():
                for j in range(len(entries)):
                    avg_results[key][j] = [avg_results[key][j][0], (avg_results[key][j][1] + results[key][j][1])]
    for (key, entries) in avg_results.items():
        for j in range(len(entries)):
            avg_results[key][j][1] /= len(slices)
    write_results_csv(avg_results, conf, extra='avg')
def _load_conf(conf='.spdrc.json', var_dict=SYS):
    """Merge JSON settings from *conf* into *var_dict*; no-op if the file is absent."""
    if not os.path.isfile(conf):
        return
    with open(conf) as handle:
        var_dict.add(json.load(handle))
class DNATokenizer(PreTrainedTokenizer):
    """BERT-style tokenizer for DNA k-mer vocabularies.

    Input is split by a BasicTokenizer and mapped through a plain-text vocab
    file. Single sequences of 510+ tokens are chunked into several
    `[CLS] ... [SEP]` pieces by the special-token helpers below.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
        """Load *vocab_file* and configure the underlying BasicTokenizer.

        Raises:
            ValueError: if *vocab_file* is not an existing file.
        """
        super().__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        # Budget for the added special tokens: [CLS]+[SEP] for a single
        # sequence, [CLS]+[SEP]+[SEP] for a pair.
        self.max_len_single_sentence = (self.max_len - 2)
        self.max_len_sentences_pair = (self.max_len - 3)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # The k-mer length is looked up from the vocabulary size — assumes
        # VOCAB_KMER has an entry for this exact size (TODO confirm).
        self.kmer = VOCAB_KMER[str(len(self.vocab))]
        # Inverse mapping id -> token, used for decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)

    def vocab_size(self):
        """Return the number of vocabulary entries.

        NOTE(review): the upstream tokenizer API usually declares this as a
        @property — confirm how callers invoke it.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        """Split *text* with the basic tokenizer, keeping special tokens intact."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
            split_tokens.append(token)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the UNK id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the UNK token."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens with spaces, merging WordPiece '##' continuations."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap the id list(s) with [CLS]/[SEP].

        A single sequence of 510+ ids is split into consecutive
        `[CLS] chunk [SEP]` pieces of up to 510 ids each; pairs are always
        rendered as `[CLS] A [SEP] B [SEP]` with no chunking.
        """
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            if (len(token_ids_0) < 510):
                return ((cls + token_ids_0) + sep)
            else:
                output = []
                num_pieces = (int((len(token_ids_0) // 510)) + 1)
                for i in range(num_pieces):
                    output.extend(((cls + token_ids_0[(510 * i):min(len(token_ids_0), (510 * (i + 1)))]) + sep))
                return output
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask (1 = special token) matching build_inputs_with_special_tokens.

        Mirrors the same 510-token chunking for long single sequences.
        """
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        if (len(token_ids_0) < 510):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        else:
            output = []
            num_pieces = (int((len(token_ids_0) // 510)) + 1)
            for i in range(num_pieces):
                output.extend((([1] + ([0] * (min(len(token_ids_0), (510 * (i + 1))) - (510 * i)))) + [1]))
            return output
        # NOTE(review): unreachable — both branches above already return.
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return segment ids: 0s for sequence A (including per-chunk specials), 1s for B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            if (len(token_ids_0) < 510):
                return (len(((cls + token_ids_0) + sep)) * [0])
            else:
                # Each additional chunk contributes one extra [CLS] and [SEP].
                num_pieces = (int((len(token_ids_0) // 510)) + 1)
                return ((len(((cls + token_ids_0) + sep)) + (2 * (num_pieces - 1))) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, vocab_path):
        """Write the vocabulary, one token per line in id order, to *vocab_path*.

        *vocab_path* may be a directory (the standard file name is appended)
        or a full file path. Returns a 1-tuple containing the written path.
        """
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = vocab_path
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    # A gap in the id sequence means line numbers no longer equal ids.
                    logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
class XLMModelTester():
    """Test helper that builds XLM configs/inputs and checks each model head's
    output shapes.

    *parent* is the unittest.TestCase whose ``assertEqual`` performs all
    shape checks.
    """

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type='last', use_proj=True, scope=None, bos_token_id=0):
        # Store every hyper-parameter verbatim for later config/input creation.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            # Lengths in [seq_length - 2, seq_length - 1] (ids_tensor draws in [0, 2)).
            input_lengths = ((ids_tensor([self.batch_size], vocab_size=2) + self.seq_length) - 2)
        token_type_ids = None
        if self.use_token_type_ids:
            # XLM uses language ids as its token-type ids.
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        # NOTE(review): choice_labels is only bound when use_labels is True —
        # with use_labels=False this return raises UnboundLocalError; confirm
        # the test suite never takes that path.
        return (config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask)

    def get_config(self):
        """Translate the stored hyper-parameters into an XLMConfig."""
        return XLMConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id)

    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the base model's last_hidden_state shape under three call forms."""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the LM head's scalar loss and logits shape."""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check start/end logits shapes of the simple QA head."""
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the beam-search QA head: loss with labels, top-k outputs without."""
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask)
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels)
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.end_top_log_probs.shape, (self.batch_size, (model.config.start_n_top * model.config.end_n_top)))
        self.parent.assertEqual(result.end_top_index.shape, (self.batch_size, (model.config.start_n_top * model.config.end_n_top)))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the sequence-classification head's loss and logits shapes."""
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """Check the multiple-choice head after tiling inputs over the choice dim."""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand((- 1), self.num_choices, (- 1)).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return (config, inputs_dict)
# NOTE(review): this bare call looks like a truncated decorator — presumably
# `@register_criterion('cross_entropy', dataclass=CrossEntropyCriterionConfig)`
# lost its `@` and prefix during extraction; confirm against the original file.
_criterion('cross_entropy', dataclass=CrossEntropyCriterionConfig)
class CrossEntropyCriterion(FairseqCriterion):
    """Token-level cross-entropy (negative log-likelihood) training criterion."""

    def __init__(self, task, sentence_avg):
        # sentence_avg: normalise the loss by sentences instead of tokens.
        super().__init__(task)
        self.sentence_avg = sentence_avg

    def forward(self, model, sample, reduce=True):
        """Compute the loss for *sample* and return (loss, sample_size, logging_output)."""
        net_output = model(**sample['net_input'])
        (loss, _) = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Normalisation denominator used by the trainer for gradient scaling.
        sample_size = (sample['target'].size(0) if self.sentence_avg else sample['ntokens'])
        logging_output = {'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return the NLL loss twice (summed over tokens unless reduce=False)."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1))
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        return (loss, loss)

    # NOTE(review): no `self` parameter — upstream declares this as a
    # @staticmethod; the decorator may have been lost in extraction.
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate per-worker logging outputs into scalar training metrics (base-2)."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Divide by log(2) to report the loss in bits.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            # Sentence-averaged training: report a separate per-token NLL for perplexity.
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
            metrics.log_derived('ppl', (lambda meters: utils.get_perplexity(meters['nll_loss'].avg)))
        else:
            metrics.log_derived('ppl', (lambda meters: utils.get_perplexity(meters['loss'].avg)))

    # NOTE(review): also looks like a lost @staticmethod decorator.
    def logging_outputs_can_be_summed() -> bool:
        """Summing across workers is valid for these logging outputs."""
        return True
# NOTE(review): bare call is likely a truncated decorator (e.g.
# `@retry_with_exponential_backoff(ERRORS)`) mangled during extraction — confirm.
_with_exponential_backoff(ERRORS)
def chat_completions_with_backoff(*args, **kwargs):
    """Forward all arguments to the OpenAI chat-completions endpoint.

    Requires the module-level OPENAI_CLIENT to have been initialised first.
    """
    assert (OPENAI_CLIENT is not None)
    return OPENAI_CLIENT.chat.completions.create(*args, **kwargs)
def adjacent_coordinates(x, y, s):
    """Return the 8 neighbours of (x, y) at step *s* as [x, y] pairs.

    Order is row by row: the row above (left to right), then the same row
    (left, right), then the row below (left to right).
    """
    offsets = ((-s, -s), (0, -s), (s, -s),
               (-s, 0), (s, 0),
               (-s, s), (0, s), (s, s))
    return [[x + dx, y + dy] for (dx, dy) in offsets]
class KandinskyCombinedPipeline(DiffusionPipeline):
    """End-to-end Kandinsky text-to-image pipeline: a prior pipeline maps the
    prompt to image embeddings, then a decoder pipeline renders the image."""
    _load_connected_pipes = True
    model_cpu_offload_seq = 'text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder'

    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[(DDIMScheduler, DDPMScheduler)], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor)
        # Sub-pipelines sharing the registered modules.
        self.prior_pipe = KandinskyPriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor)
        self.decoder_pipe = KandinskyPipeline(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None):
        """Enable xFormers attention on the decoder pipeline."""
        self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Enable sequential CPU offload on both sub-pipelines."""
        self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)

    def progress_bar(self, iterable=None, total=None):
        """Forward progress-bar configuration to both sub-pipelines."""
        self.prior_pipe.progress_bar(iterable=iterable, total=total)
        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
        # NOTE(review): enabling model CPU offload inside progress_bar looks
        # like a mis-merged line — upstream diffusers has this call inside a
        # separate enable_model_cpu_offload method; confirm against upstream.
        self.decoder_pipe.enable_model_cpu_offload()

    def set_progress_bar_config(self, **kwargs):
        self.prior_pipe.set_progress_bar_config(**kwargs)
        self.decoder_pipe.set_progress_bar_config(**kwargs)

    # NOTE(review): the two bare calls below look like truncated decorators —
    # presumably `@torch.no_grad()` and `@replace_example_docstring(...)`
    # lost their `@`/prefixes during extraction; confirm.
    _grad()
    _example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[(str, List[str])], negative_prompt: Optional[Union[(str, List[str])]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[(torch.Generator, List[torch.Generator])]]=None, latents: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[([int, int, torch.FloatTensor], None)]]=None, callback_steps: int=1, return_dict: bool=True):
        """Generate image(s) for *prompt*: run the prior for embeddings, then decode."""
        prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False)
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]
        prompt = ([prompt] if (not isinstance(prompt, (list, tuple))) else prompt)
        # Tile the prompt list so it lines up with the embedding batch.
        if ((len(prompt) < image_embeds.shape[0]) and ((image_embeds.shape[0] % len(prompt)) == 0)):
            prompt = ((image_embeds.shape[0] // len(prompt)) * prompt)
        outputs = self.decoder_pipe(prompt=prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict)
        self.maybe_free_model_hooks()
        return outputs
def test(args, test_loader, model, epoch):
    """Evaluate *model* on *test_loader* and return (avg_loss, avg_top1).

    Runs under torch.no_grad with the model in eval mode; the model is
    switched back to train mode before returning. Progress is shown with
    tqdm unless args.no_progress is set (and only on ranks -1/0).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    model.eval()
    if (not args.no_progress):
        # Only the main process (local_rank -1 or 0) renders the bar.
        test_loader = tqdm(test_loader, disable=(args.local_rank not in [(- 1), 0]))
    with torch.no_grad():
        for (batch_idx, (inputs, targets)) in enumerate(test_loader):
            data_time.update((time.time() - end))
            inputs = inputs.to(args.device)
            targets = targets.to(args.device)
            outputs = model(inputs)
            loss = F.cross_entropy(outputs, targets)
            (prec1, prec5) = accuracy(outputs, targets, topk=(1, 5))
            # Weight each running average by the batch size.
            losses.update(loss.item(), inputs.shape[0])
            top1.update(prec1.item(), inputs.shape[0])
            top5.update(prec5.item(), inputs.shape[0])
            batch_time.update((time.time() - end))
            end = time.time()
            if (not args.no_progress):
                test_loader.set_description('Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. '.format(batch=(batch_idx + 1), iter=len(test_loader), data=data_time.avg, bt=batch_time.avg, loss=losses.avg, top1=top1.avg, top5=top5.avg))
        if (not args.no_progress):
            test_loader.close()
    # Restore training mode for the caller's next training epoch.
    model.train()
    return (losses.avg, top1.avg)
def get_acc_diff(row, scores_df, task_list):
    """Add per-task absolute score differences between two seeds to *row*.

    *row* must carry integer positions 'seed1'/'seed2' into *scores_df*;
    for each task in *task_list* a new '<task>_diff' entry is written into
    *row*, which is returned.
    """
    first = scores_df.iloc[row['seed1']]
    second = scores_df.iloc[row['seed2']]
    for task in task_list:
        row[f'{task}_diff'] = abs(first[task] - second[task])
    return row
def main_worker(args):
    """ImageNet-style training entry: build model/data/optimizer, optionally
    resume, then prune (neural_compressor) and/or post-training quantize.

    NOTE(review): the source's original indentation was lost in extraction;
    the epoch loop is nested under `if args.prune:` here because it passes
    `compression_manager` to train() — confirm against the original file.
    """
    global best_acc1
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet normalisation constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    train_sampler = None
    val_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=(val_sampler is None), num_workers=args.workers, pin_memory=True, sampler=val_sampler)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    def eval_func(model):
        # Accuracy callback handed to the quantizer below.
        accu = validate(val_loader, model, criterion, args)
        return float(accu)

    if args.prune:
        # Pruning schedule expressed in optimizer steps.
        num_iterations = ((len(train_dataset) / args.batch_size) * args.end_epoch)
        num_warm = ((len(train_dataset) / args.batch_size) * args.start_epoch)
        from neural_compressor.training import WeightPruningConfig
        p_conf = WeightPruningConfig(pruning_type=args.pruning_type, target_sparsity=args.target_sparsity, pruning_frequency=1, op_names=['layer1.0.conv1', 'layer1.0.conv2'], start_step=num_warm, end_step=num_iterations)
        from neural_compressor.training import prepare_compression
        compression_manager = prepare_compression(model, p_conf)
        compression_manager.callbacks.on_train_begin()
        model = compression_manager.model
        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                # NOTE(review): train_sampler is None above — this would raise
                # AttributeError under --distributed; verify upstream.
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)
            train(train_loader, model, criterion, optimizer, epoch, args, compression_manager)
            acc1 = validate(val_loader, model, criterion, args)
            is_best = (acc1 > best_acc1)
            best_acc1 = max(acc1, best_acc1)
            print('=> save checkpoint')
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
        compression_manager.callbacks.on_train_end()
        compression_manager.save(args.output_model)
        (df, total_sparsity) = compression_manager.model.report_sparsity()
        print('Total sparsity of FP32 model is {}'.format(total_sparsity))
        print(df)
    if args.quantize:
        from neural_compressor import PostTrainingQuantConfig
        from neural_compressor import quantization
        q_conf = PostTrainingQuantConfig()
        # NOTE(review): `model.model` presumes the compression wrapper from the
        # prune branch — --quantize without --prune would fail; confirm.
        q_model = quantization.fit(model.model, q_conf, calib_dataloader=val_loader, eval_func=eval_func)
        (df, total_sparsity) = q_model.report_sparsity()
        print('Total sparsity of INT8 model is {}'.format(total_sparsity))
        print(df)
        q_model.save(args.output_model)
# NOTE(review): bare `_model` looks like a truncated `@register_model`
# decorator mangled during extraction; confirm against the original file.
_model
def ssl_resnext101_32x16d(pretrained=True, **kwargs):
    """Build a ResNeXt-101 (32x16d) — layers [3, 4, 23, 3], cardinality 32,
    base width 16 — optionally with pretrained weights."""
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args)
def get_data(bs, sz):
    """Build a random batch of (bs, 3, sz, sz) image patches with a random
    boolean (bs, sz, sz) attention mask, wrapped in a NestedTensor.

    NOTE(review): whether True marks padded or valid positions depends on
    NestedTensor's convention — confirm against its definition.
    """
    patches = torch.randn(bs, 3, sz, sz)
    mask = torch.rand(bs, sz, sz) > 0.5
    return NestedTensor(patches, mask)
def test_reference_wrapper():
    """Exercise std::reference_wrapper bindings in the pybind11 test module."""
    # reference_wrapper arguments work for builtins and user types alike.
    assert (m.refwrap_builtin(42) == 420)
    assert (m.refwrap_usertype(UserType(42)) == 42)
    # None cannot bind to a reference_wrapper parameter.
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_builtin(None)
    assert ('incompatible function arguments' in str(excinfo.value))
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_usertype(None)
    assert ('incompatible function arguments' in str(excinfo.value))
    # copy=True returns fresh objects each call...
    a1 = m.refwrap_list(copy=True)
    a2 = m.refwrap_list(copy=True)
    assert ([x.value for x in a1] == [2, 3])
    assert ([x.value for x in a2] == [2, 3])
    assert ((not (a1[0] is a2[0])) and (not (a1[1] is a2[1])))
    # ...while copy=False returns references to the same underlying objects.
    b1 = m.refwrap_list(copy=False)
    b2 = m.refwrap_list(copy=False)
    assert ([x.value for x in b1] == [1, 2])
    assert ([x.value for x in b2] == [1, 2])
    assert ((b1[0] is b2[0]) and (b1[1] is b2[1]))
    assert (m.refwrap_iiw(IncType(5)) == 5)
    assert (m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10])
class TFElectraForQuestionAnswering():
    """Import-guard placeholder: raises a helpful error (via requires_tf)
    when TensorFlow is not installed."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    # NOTE(review): upstream declares this as a @classmethod; the decorator
    # may have been lost in extraction — confirm.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def ContrastPredict(PixelPath, PatchPath, batch_size, temperature, projection_dim, input_dim):
    """Extract normalised features from the trained MoCo contrastive model.

    Loads './models/contrast.pth', runs every (pixel, patch) pair through the
    encoder on CUDA without gradients, and returns the features after
    feature_normalize2.
    """
    # Imported locally — presumably to avoid importing model code at module load.
    from DefinedModels import Contrast, MoCo
    from Preprocess import feature_normalize2
    model = MoCo(projection_dim=projection_dim, input_dim=input_dim, r=640, m=0.999, T=temperature)
    print(model)
    train_data = PairDataset(PixelPath, PatchPath)
    # shuffle=False / drop_last=False keep output order aligned with the dataset.
    test_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=False, drop_last=False)
    model.load_state_dict(torch.load('./models/contrast.pth'))
    model.eval()
    features = []
    for (step, (x_i, x_j, index)) in enumerate(tqdm(test_loader)):
        with torch.no_grad():
            x_i = x_i.cuda().float()
            # NOTE(review): x_j is moved to GPU but never passed to the model —
            # only the query branch is evaluated; confirm this is intended.
            x_j = x_j.cuda().float()
            (feat, _) = model(x_i, is_eval=True)
            for num in range(len(feat)):
                features.append(np.array(feat[num].cpu().detach().numpy()))
    features = feature_normalize2(features)
    return features
def test_list(capture, doc):
    """Exercise pybind11 list bindings: contents, mutation, printing, docstrings."""
    with capture:
        lst = m.get_list()
        assert (lst == ['inserted-0', 'overwritten', 'inserted-2'])
        # The bound list behaves like a regular Python list.
        lst.append('value2')
        m.print_list(lst)
    # `unordered` compares captured output ignoring line order.
    assert (capture.unordered == '\n Entry at position 0: value\n list item 0: inserted-0\n list item 1: overwritten\n list item 2: inserted-2\n list item 3: value2\n ')
    assert (doc(m.get_list) == 'get_list() -> list')
    assert (doc(m.print_list) == 'print_list(arg0: list) -> None')
def dws_conv3x3_block(in_channels, out_channels, activate):
    """Build a 3x3 depthwise-separable conv block with stride 1 and padding 1
    (spatial-shape preserving); *activate* toggles the block's activation."""
    return DwsConvBlock(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, activate=activate)
class DiagGaussian(nn.Module):
    """Diagonal-Gaussian action head: a linear layer produces the mean and a
    state-independent learned log-std (AddBias applied to zeros) sets the
    spread; forward returns a FixedNormal distribution."""

    def __init__(self, num_inputs, num_outputs):
        super(DiagGaussian, self).__init__()
        self.num_outputs = num_outputs
        # Project init helper: normc weight init, zero bias.
        init_ = (lambda m: init(m, init_normc_, (lambda x: nn.init.constant_(x, 0))))
        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
        # AddBias over a zero tensor acts as a free-standing learnable log-std.
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        """Return FixedNormal(mean(x), exp(logstd)) for the input batch *x*."""
        action_mean = self.fc_mean(x)
        # zeros_like matches the mean's device AND dtype in one step, replacing
        # the original CPU-zeros-then-.cuda() dance (which only handled CUDA
        # and the default dtype).
        zeros = torch.zeros_like(action_mean)
        action_logstd = self.logstd(zeros)
        return FixedNormal(action_mean, action_logstd.exp())
class Encoder(nn.Module):
    """Flax conv encoder producing a pyramid of temporally-coarsened features.

    Level 0 is the per-frame conv embedding; each higher level sums features
    over `tmp_abs_factor**level` consecutive timesteps.
    """
    c: Config  # hyper-parameters (filters, levels, dense sizes, tmp_abs_factor)

    def __call__(self, obs):
        # Fold (batch, time) into one axis so the convs see a flat image batch;
        # obs is assumed (batch, time, H, W, C) — TODO confirm.
        x = obs.reshape((((- 1),) + obs.shape[2:]))
        Conv = partial(nn.Conv, kernel_size=(4, 4), strides=(2, 2), padding='VALID')
        # Four stride-2 convs with doubling channel counts.
        x = leaky_relu(Conv(self.c.total_filters)(x))
        x = leaky_relu(Conv((self.c.total_filters * 2))(x))
        x = leaky_relu(Conv((self.c.total_filters * 4))(x))
        x = leaky_relu(Conv((self.c.total_filters * 8))(x))
        # Restore (batch, time, features).
        x = x.reshape((obs.shape[:2] + ((- 1),)))
        layers = [x]
        print(f'Input shape at level 0: {x.shape}')
        feat_size = x.shape[(- 1)]
        for level in range(1, self.c.levels):
            # Optional dense stack; the last dense maps back to feat_size.
            for _ in range((self.c.enc_dense_layers - 1)):
                x = nn.relu(nn.Dense(self.c.enc_dense_embed_size)(x))
            if (self.c.enc_dense_layers > 0):
                x = nn.Dense(feat_size)(x)
            layer = x
            # Merge tmp_abs_factor**level timesteps into one by summation,
            # zero-padding the time axis to a whole number of groups first.
            timesteps_to_merge = (self.c.tmp_abs_factor ** level)
            timesteps_to_pad = ((- layer.shape[1]) % timesteps_to_merge)
            layer = jnp.pad(layer, ((0, 0), (0, timesteps_to_pad), (0, 0)))
            layer = layer.reshape((layer.shape[0], (- 1), timesteps_to_merge, layer.shape[2]))
            layer = jnp.sum(layer, axis=2)
            layers.append(layer)
            print(f'Input shape at level {level}: {layer.shape}')
        return layers
def build_trainer(hp: 'ModelParams', outdir: str, labels: Dict[(str, Any)], **kwargs) -> Trainer:
    """Instantiate the trainer class matching ``hp.model_type()``.

    Raises:
        ValueError: if the model type is not one of
            'categorical', 'linear', or 'cph'.
    """
    trainer_classes = {
        'categorical': Trainer,
        'linear': LinearTrainer,
        'cph': CPHTrainer,
    }
    kind = hp.model_type()
    trainer_cls = trainer_classes.get(kind)
    if trainer_cls is None:
        raise ValueError(f'Unknown model type: {kind}')
    return trainer_cls(hp, outdir, labels, **kwargs)
def save_logger(logfile_path='../dataset/cogkge.log', rank=(- 1)):
    """Configure root logging (console + rotating file) and return a logger.

    On non-main ranks (rank not in {-1, 0}) the level is raised to WARN and
    the file handler is removed, so only main processes write the log file.
    """
    standard_format = '[%(asctime)s][%(threadName)s:%(thread)d][task_id:%(name)s][%(filename)s:%(lineno)d][%(levelname)s][%(message)s]'
    simple_format = '[%(asctime)s] - [%(message)s]'
    # dictConfig schema: console handler uses the simple format, file handler
    # rotates at 5 MB with 5 backups (level 20 == INFO).
    LOGGING_DIC = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'standard': {'format': standard_format}, 'simple': {'format': simple_format}}, 'filters': {}, 'handlers': {'stream': {'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'simple'}, 'file': {'level': 20, 'class': 'logging.handlers.RotatingFileHandler', 'formatter': 'standard', 'filename': None, 'maxBytes': ((1024 * 1024) * 5), 'backupCount': 5, 'encoding': 'utf-8'}}, 'loggers': {'': {'handlers': ['stream', 'file'], 'level': 'INFO', 'propagate': True}}}
    LOGGING_DIC['loggers']['']['level'] = ('INFO' if (rank in [(- 1), 0]) else 'WARN')
    LOGGING_DIC['handlers']['file']['filename'] = logfile_path
    if (rank not in [(- 1), 0]):
        # Workers log to the console only.
        LOGGING_DIC['loggers']['']['handlers'] = ['stream']
        del LOGGING_DIC['handlers']['file']
    logging.config.dictConfig(LOGGING_DIC)
    logger = logging.getLogger(__name__)
    return logger
def main():
    """Load a sample audio clip, plot its augmentations, and save the figure
    next to this script (same name with a .png extension)."""
    path = '163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav'
    # 1.2 s excerpt starting 0.1 s in; sr is librosa's (possibly resampled) rate.
    (y, sr) = librosa.load(path, offset=0.1, duration=1.2)
    fig = plot_augmentations(y, sr)
    out = __file__.replace('.py', '.png')
    fig.savefig(out, bbox_inches='tight')
def ema_update(wa_model, model, global_step, decay_rate=0.995, warmup_steps=0, dynamic_decay=True):
    """In-place EMA update: wa_model <- decay * wa_model + (1 - decay) * model.

    During warmup (global_step < warmup_steps) decay is forced to 0, so the
    averaged weights simply copy the current model. With dynamic_decay the
    rate ramps up as (1 + t) / (10 + t) until it reaches decay_rate.
    """
    if global_step < warmup_steps:
        decay = 0.0
    elif not dynamic_decay:
        decay = decay_rate
    else:
        delta = global_step - warmup_steps
        denom = 10.0 + delta
        decay = min(decay_rate, (1.0 + delta) / denom) if denom != 0 else decay_rate
    for avg_param, cur_param in zip(wa_model.parameters(), model.parameters()):
        avg_param.data.mul_(decay).add_(cur_param.data, alpha=(1 - decay))
def embedded_dropout(embed, words, dropout, scale=None):
    """Embedding lookup with optional word-level (whole-row) dropout.

    When *dropout* is non-zero, each embedding row is zeroed with probability
    *dropout* and survivors are rescaled by 1/(1 - dropout). A truthy *scale*
    tensor multiplies the weight matrix before the lookup.
    """
    if not dropout:
        weight = embed.weight
    else:
        # One Bernoulli keep-flag per vocabulary row, broadcast across the
        # embedding dimension.
        keep = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout)
        weight = (keep.expand_as(embed.weight) / (1 - dropout)) * embed.weight
    if scale:
        weight = scale.expand_as(weight) * weight
    pad_idx = embed.padding_idx
    if pad_idx is None:
        pad_idx = (- 1)
    return torch.nn.functional.embedding(
        words, weight, pad_idx, embed.max_norm, embed.norm_type,
        embed.scale_grad_by_freq, embed.sparse)
class ClusterNet5gMultiHead(ResNet):
    """Clustering network: a shared ClusterNet5gTrunk plus one named head
    (head_A, head_B, ...) per entry of `output_k_list`."""
    num_name_mapping = {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E', 6: 'F', 7: 'G'}
    name_num_mapping = {name: num for (num, name) in num_name_mapping.items()}

    def __init__(self, num_channel: int=3, output_k_list: List[int]=[70, 10], semisup: bool=False, num_sub_heads: int=5, batchnorm_track: bool=True, verbose=False):
        super(ClusterNet5gMultiHead, self).__init__()
        if isinstance(output_k_list, int):
            output_k_list = [output_k_list]
        assert isinstance(output_k_list, (list, tuple)), f'output_k_list should be a list or tuple, given {output_k_list}.'
        self.output_k_list: List[int] = output_k_list
        self.batchnorm_track = batchnorm_track
        self.trunk = ClusterNet5gTrunk(num_channel=num_channel, batchnorm_track=self.batchnorm_track)
        # One head attribute per requested output size, named head_A, head_B, ...
        for head_num, cluster_num in enumerate(self.output_k_list, start=1):
            head = ClusterNet5gMultiHeadHead(output_k=cluster_num, num_sub_heads=num_sub_heads, semisup=semisup, batchnorm_track=self.batchnorm_track)
            setattr(self, f'head_{self.num_name_mapping[head_num]}', head)
        self.verbose = verbose
        if self.verbose:
            print(('semisup: %s' % semisup))
        self._initialize_weights()

    def forward(self, x, head=None, kmeans_use_features=False, trunk_features=False, penultimate_features=False):
        """Run the trunk and the selected head; `head` defaults to the last one.

        With `trunk_features` True the trunk output is returned directly.
        """
        if head is None:
            warnings.warn(('head is None, using the last head: head_%s.' % self.num_name_mapping[len(self.output_k_list)]))
            head = self.num_name_mapping[len(self.output_k_list)]
        assert (isinstance(head, str) and (head in list(self.name_num_mapping.keys()))), f"head given {head} should be within {', '.join(list(self.name_num_mapping.keys())[:len(self.output_k_list)])}."
        features = self.trunk(x, penultimate_features=penultimate_features)
        if trunk_features:
            return features
        return getattr(self, f'head_{head}')(features, kmeans_use_features=kmeans_use_features)
class SimulatorProcessStateExchange(SimulatorProcessBase):
    """Simulator worker process that streams (identity, state, reward, done)
    tuples to a master over ZMQ and receives the next action back.

    Protocol: a PUSH socket (c2s) sends pickled experience tuples; a DEALER
    socket (s2c) receives the chosen action.  Both sockets are tagged with
    this worker's identity so the master can route replies.
    """

    def __init__(self, idx, pipe_c2s, pipe_s2c):
        super(SimulatorProcessStateExchange, self).__init__(idx)
        # Endpoint addresses only; the sockets themselves are created in
        # run() so they live in the child process.
        self.c2s = pipe_c2s
        self.s2c = pipe_s2c

    def run(self):
        player = self._build_player()
        context = zmq.Context()
        c2s_socket = context.socket(zmq.PUSH)
        c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        # Small high-water mark: block instead of buffering stale experience.
        c2s_socket.set_hwm(2)
        c2s_socket.connect(self.c2s)
        s2c_socket = context.socket(zmq.DEALER)
        s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
        s2c_socket.connect(self.s2c)
        state = player.current_state()
        (reward, isOver) = (0, False)
        # Endless experience loop: send the current observation, wait for the
        # master's action, step the player, repeat.
        while True:
            c2s_socket.send(dumps((self.identity, state, reward, isOver)), copy=False)
            action = loads(s2c_socket.recv(copy=False).bytes)
            (reward, isOver) = player.action(action)
            state = player.current_state()
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Run one evaluation pass over `loader`.

    Supports prefetching loaders, channels-last memory format, AMP autocast,
    test-time-augmentation folding (args.tta) and distributed metric
    reduction.  Returns OrderedDict with 'loss', 'top1' and 'top5' averages.
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.eval()
    end = time.time()
    last_idx = (len(loader) - 1)
    with torch.no_grad():
        for (batch_idx, (input, target)) in enumerate(loader):
            last_batch = (batch_idx == last_idx)
            # Prefetcher loaders already deliver CUDA tensors.
            if (not args.prefetcher):
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
            # Some models return (logits, aux_outputs); keep only the logits.
            if isinstance(output, (tuple, list)):
                output = output[0]
            # Test-time augmentation: average each group of `tta` replicas and
            # keep one target per group.
            reduce_factor = args.tta
            if (reduce_factor > 1):
                output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                target = target[0:target.size(0):reduce_factor]
            loss = loss_fn(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))
            batch_time_m.update((time.time() - end))
            end = time.time()
            # NOTE(review): dist.get_rank() is called even when
            # args.distributed is False — this raises if the process group was
            # never initialised; confirm callers always init torch.distributed.
            if ((dist.get_rank() == 0) and (last_batch or ((batch_idx % args.log_interval) == 0))):
                log_name = ('Test' + log_suffix)
                _logger.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) : {top1.val:>7.4f} ({top1.avg:>7.4f}) : {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=top1_m, top5=top5_m))
    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
    return metrics
def save_dataframe(df, fname, path):
    """Pickle `df` to `<path>/<fname>.pkl`."""
    target = os.path.join(path, fname + '.pkl')
    with open(target, 'wb') as handle:
        pickle.dump(df, handle)
_model
def efficientnet_b0(pretrained=False, **kwargs):
    """EfficientNet-B0: baseline width/depth multipliers of 1.0."""
    return _gen_efficientnet('efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
class CosineProximityCriterion(Criterion):
    """Thin wrapper around the backend criterion of the same name;
    construction is fully delegated to the Criterion base (`bigdl_type`
    selects the numeric type tag)."""

    def __init__(self, bigdl_type='float'):
        super().__init__(None, bigdl_type)
def unet_cct(in_channels, num_classes):
    """Build a UNet_CCT and apply Kaiming weight initialisation."""
    network = UNet_CCT(in_channels, num_classes)
    init_weights(network, 'kaiming')
    return network
class SimpleGray(nn.Module):
    """Convert an RGB batch (N, 3, H, W) to grayscale (N, 1, H, W) with a
    fixed 1x1 convolution.

    The luma weights (0.2989, 0.587, 0.114) are registered as a buffer so
    they follow the module across devices but are never trained.
    """

    def __init__(self):
        super(SimpleGray, self).__init__()
        gray_vector = torch.tensor([0.2989, 0.587, 0.114]).view(1, 3, 1, 1)
        self.register_buffer('buf', gray_vector)

    def forward(self, x):
        # torch.autograd.Variable is deprecated and was a no-op wrapper here;
        # the buffer can be passed to conv2d directly.
        return F.conv2d(x, self.buf, padding=0)
class SegNet():
    """Semantic-segmentation wrapper (150-class encoder/decoder) used to
    extract a binary mask for a single class id.

    Inference is multi-scale: the test dataset yields 5 resized copies per
    image and the class scores are averaged over them.
    """

    def __init__(self, encoderPth, decoderPth, segId=1, segFg=True):
        net_encoder = segModel.ModelBuilder.build_encoder(fc_dim=2048, weights=encoderPth)
        net_decoder = segModel.ModelBuilder.build_decoder(fc_dim=2048, num_class=150, weights=decoderPth)
        self.net = segModel.SegmentationModule(net_encoder, net_decoder)
        self.net.eval()
        self.net.cuda()
        # Five inference scales; getSky averages the predictions over them.
        self.dataset_test = segData.TestDataset(imgSizes=(300, 375, 450, 525, 600), imgMaxSize=500, padding_constant=8)
        self.segId = segId
        self.segFg = segFg

    def getSky(self, imgPath):
        """Return a float32 mask for class `segId` of the image at `imgPath`.

        With `segFg` True the mask is inverted (1 everywhere except the
        class); otherwise 1 exactly where the class was predicted.
        """
        I_Tensor = self.dataset_test.getImg(imgPath)
        with torch.no_grad():
            segSize = (I_Tensor['img_ori'].shape[0], I_Tensor['img_ori'].shape[1])
            scores = torch.zeros(1, 150, segSize[0], segSize[1]).cuda()
            for img in I_Tensor['img_data']:
                pred_tmp = self.net(img.cuda(), segSize=segSize)
                # /5 averages over the 5 scales produced by TestDataset.
                scores = (scores + (pred_tmp / 5))
            (_, pred) = torch.max(scores, dim=1)
            pred = pred.squeeze(0).cpu().numpy()
            if self.segFg:
                return (1 - (pred == self.segId).astype(np.float32))
            else:
                return (pred == self.segId).astype(np.float32)
_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
    """Sampler that performs no sampling: every assigned box is kept as-is."""

    def __init__(self, **kwargs):
        # Nothing to configure; the signature is kept for registry use.
        pass

    def _sample_pos(self, **kwargs):
        """Not supported — PseudoSampler never subsamples positives."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Not supported — PseudoSampler never subsamples negatives."""
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Wrap the assignment directly into a SamplingResult.

        Positives are boxes assigned to a gt (gt_inds > 0), negatives are
        boxes assigned to background (gt_inds == 0).
        """
        assigned = assign_result.gt_inds
        pos_inds = torch.nonzero(assigned > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(assigned == 0, as_tuple=False).squeeze(-1).unique()
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags)
class Calib_Dataloader(object):
    """Registers the name of the calibration dataloader for the detected
    code domain (read from the `globals` module)."""

    def __init__(self):
        pass

    def register_transformation(self):
        """Append the calibration-dataloader expression for the current domain.

        For the 'onnx' domain the user's source file is scanned for
        assignments whose line mentions 'loader'; the left-hand side (text up
        to the last '=') is registered.
        """
        if (globals.code_domain == 'transformers_trainer'):
            globals.list_calib_dataloader_name.append('trainer.get_eval_dataloader()')
        elif (globals.code_domain == 'transformers_no_trainer'):
            pass
        elif (globals.code_domain == 'torchvision'):
            globals.list_calib_dataloader_name.append('val_loader')
        elif (globals.code_domain == 'onnx'):
            # Use a context manager so the source-file handle is closed
            # (previously leaked via open(...).read()).
            with open(globals.list_code_path[0], 'r') as src:
                codes = src.read().split('\n')
            for line in codes:
                line = line.strip()
                if (('loader' in line) and ('=' in line)):
                    # Position of the LAST '=' on the line (the original scan
                    # kept overwriting `end`, i.e. last occurrence wins).
                    end = line.rindex('=')
                    lhs = line[:end]
                    # The original trimmed exactly one trailing space.
                    if lhs.endswith(' '):
                        lhs = lhs[:-1]
                    globals.list_calib_dataloader_name.append(lhs)
        else:
            pass
def get_extensions():
    """Collect DCN extension sources and return the extension-module list
    for setup().

    Requires both a visible CUDA runtime and CUDA_HOME; otherwise a
    NotImplementedError is raised (CPU-only builds are not supported here).
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'dcn/src')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    sources = (main_file + source_cpu)
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    if (torch.cuda.is_available() and (CUDA_HOME is not None)):
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        # Disable half-precision operator overloads that break older nvcc.
        extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
    else:
        # Typo fixed: 'availabel' -> 'available'.
        raise NotImplementedError('Cuda is not available')
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [extension('DCN', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args)]
    return ext_modules
def save_image(net, fixed_z, args, sample_dir, i):
    """Render sample grids for step `i` from the fixed latent batch and write
    them (plus one reconstruction) under `sample_dir`."""
    net.eval()
    with torch.no_grad():
        outputs, loss = net([fixed_z], truncation=args.sample_truncation, inference=True)
        sampled_src, sampled_dst, rec_dst, without_color_dst = outputs
        grid_rows = int(args.n_sample ** 0.5)
        step = i + 1
        save_images(sampled_dst, sample_dir, 'dst', grid_rows, step)
        save_images(without_color_dst, sample_dir, 'without_color', grid_rows, step)
        # Map the first reconstruction from [-1, 1] to [0, 1] and save as PNG.
        rec = ((rec_dst[0] + 1) / 2).cpu().detach().clamp(0, 1)
        toPIL(rec).save(f'{sample_dir}/rec_{step}.png')
class ReweightedWakeSleep(HelmholtzMachine):
    """Reweighted wake-sleep training over stacked generative (p) and
    recognition (q) layers.

    Theano/Blocks-era code: `tensor` is theano.tensor and `xrange` implies
    Python 2.  NOTE(review): the bare `(inputs=..., outputs=...)` tuples
    below appear to be `@application(...)` decorators whose prefix was lost
    during extraction — confirm against the original source.
    """

    def __init__(self, p_layers, q_layers, **kwargs):
        super(ReweightedWakeSleep, self).__init__(p_layers, q_layers, **kwargs)

    def log_prob_p(self, samples):
        """Layer-wise log-probability of `samples` under the generative model."""
        n_layers = len(self.p_layers)
        n_samples = samples[0].shape[0]
        log_p = ([None] * n_layers)
        for l in xrange((n_layers - 1)):
            log_p[l] = self.p_layers[l].log_prob(samples[l], samples[(l + 1)])
        # Top layer has no parent: unconditional log-prob.
        log_p[(n_layers - 1)] = self.p_layers[(n_layers - 1)].log_prob(samples[(n_layers - 1)])
        return log_p

    def log_prob_q(self, samples):
        """Layer-wise log-probability of `samples` under the recognition model.

        The visible layer (index 0) is given, so its log-prob is zero.
        """
        n_layers = len(self.p_layers)
        n_samples = samples[0].shape[0]
        log_q = ([None] * n_layers)
        log_q[0] = tensor.zeros([n_samples])
        for l in xrange((n_layers - 1)):
            log_q[(l + 1)] = self.q_layers[l].log_prob(samples[(l + 1)], samples[l])
        return log_q

    (inputs=['n_samples'], outputs=['samples', 'log_p', 'log_q'])
    def sample_p(self, n_samples):
        """Ancestral top-down sampling through p; the draws are also scored
        under q."""
        p_layers = self.p_layers
        q_layers = self.q_layers
        n_layers = len(p_layers)
        samples = ([None] * n_layers)
        log_p = ([None] * n_layers)
        (samples[(n_layers - 1)], log_p[(n_layers - 1)]) = p_layers[(n_layers - 1)].sample(n_samples)
        for l in reversed(xrange(1, n_layers)):
            (samples[(l - 1)], log_p[(l - 1)]) = p_layers[(l - 1)].sample(samples[l])
        log_q = self.log_prob_q(samples)
        return (samples, log_p, log_q)

    (inputs=['features'], outputs=['samples', 'log_p', 'log_q'])
    def sample_q(self, features):
        """Bottom-up sampling through q conditioned on `features`; the draws
        are also scored under p (top-down)."""
        p_layers = self.p_layers
        q_layers = self.q_layers
        n_layers = len(p_layers)
        batch_size = features.shape[0]
        samples = ([None] * n_layers)
        log_p = ([None] * n_layers)
        log_q = ([None] * n_layers)
        samples[0] = features
        log_q[0] = tensor.zeros([batch_size])
        for l in xrange((n_layers - 1)):
            (samples[(l + 1)], log_q[(l + 1)]) = q_layers[l].sample(samples[l])
        # Score the same samples under the generative model, top-down.
        log_p[(n_layers - 1)] = p_layers[(n_layers - 1)].log_prob(samples[(n_layers - 1)])
        for l in reversed(range(1, n_layers)):
            log_p[(l - 1)] = p_layers[(l - 1)].log_prob(samples[(l - 1)], samples[l])
        return (samples, log_p, log_q)

    (inputs=['n_samples'], outputs=['samples', 'log_p', 'log_q'])
    def sample(self, n_samples):
        """Alias for sample_p."""
        return self.sample_p(n_samples)

    (inputs=['log_p', 'log_q'], outputs=['w'])
    def importance_weights(self, log_p, log_q):
        """Self-normalised importance weights w = exp(log p - log q),
        normalised across axis 1 via logsumexp."""
        log_p_all = sum(log_p)
        log_q_all = sum(log_q)
        log_pq = (log_p_all - log_q_all)
        w_norm = logsumexp(log_pq, axis=1)
        log_w = (log_pq - tensor.shape_padright(w_norm))
        w = tensor.exp(log_w)
        return w

    (inputs=['features', 'n_samples'], outputs=['log_px', 'log_psx'])
    def log_likelihood(self, features, n_samples):
        """Importance-sampling estimate of log p(x) with `n_samples` draws.

        Both returned values are the same estimate.
        """
        p_layers = self.p_layers
        q_layers = self.q_layers
        n_layers = len(p_layers)
        batch_size = features.shape[0]
        x = replicate_batch(features, n_samples)
        (samples, log_p, log_q) = self.sample_q(x)
        samples = unflatten_values(samples, batch_size, n_samples)
        log_p = unflatten_values(log_p, batch_size, n_samples)
        log_q = unflatten_values(log_q, batch_size, n_samples)
        log_p_all = sum(log_p)
        log_q_all = sum(log_q)
        log_px = (logsumexp((log_p_all - log_q_all), axis=(- 1)) - tensor.log(n_samples))
        return (log_px, log_px)

    (inputs=['features', 'n_samples'], outputs=['log_px', 'log_psx', 'gradients'])
    def get_gradients(self, features, n_samples):
        """RWS gradient estimates.

        Wake phase: importance-weighted updates for p and q on recognition
        samples (q scaled by 0.5).  Sleep phase: unweighted q updates on
        dreams drawn from p (also scaled by 0.5).
        """
        p_layers = self.p_layers
        q_layers = self.q_layers
        n_layers = len(p_layers)
        batch_size = features.shape[0]
        x = replicate_batch(features, n_samples)
        (samples, log_p, log_q) = self.sample_q(x)
        samples = unflatten_values(samples, batch_size, n_samples)
        log_p = unflatten_values(log_p, batch_size, n_samples)
        log_q = unflatten_values(log_q, batch_size, n_samples)
        log_p_all = sum(log_p)
        log_q_all = sum(log_q)
        w = self.importance_weights(log_p, log_q)
        log_px = (logsumexp((log_p_all - log_q_all), axis=(- 1)) - tensor.log(n_samples))
        w = w.reshape(((batch_size * n_samples),))
        samples = flatten_values(samples, (batch_size * n_samples))
        gradients = OrderedDict()
        for l in xrange((n_layers - 1)):
            gradients = merge_gradients(gradients, p_layers[l].get_gradients(samples[l], samples[(l + 1)], weights=w))
            gradients = merge_gradients(gradients, q_layers[l].get_gradients(samples[(l + 1)], samples[l], weights=w), 0.5)
        gradients = merge_gradients(gradients, p_layers[(- 1)].get_gradients(samples[(- 1)], weights=w))
        # Sleep phase: dreams from p train q.
        (samples, log_p, log_q) = self.sample_p(batch_size)
        for l in xrange((n_layers - 1)):
            gradients = merge_gradients(gradients, q_layers[l].get_gradients(samples[(l + 1)], samples[l]), 0.5)
        return (log_px, log_px, gradients)
def display_batch(batch, size=10):
    """Show the first `size` images of one batch with their string labels."""
    images, targets = next(iter(batch))
    plt.figure(figsize=(size * 5, 5))
    for idx in range(size):
        # Multi-class targets are one-hot vectors; binary targets are scalars.
        if NUM_CLASSES > 2:
            label = string_label[tf.argmax(targets[idx]).numpy()]
        else:
            label = string_label[targets[idx].numpy()]
        plt.subplot(1, size, idx + 1)
        plt.title(label)
        plt.imshow(images[idx])
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    plt.show()
class HTTPRedirect(Exception):
    """Exception signalling that the handler should redirect to `url`.

    `code` defaults to 303 (See Other).
    """

    def __init__(self, url, code=303):
        self.url = url
        self.code = code

    def __repr__(self):
        return 'HTTPRedirect(url={!r})'.format(self.url)
class RandomCropFromBorders(DualTransform):
    """Crop a random margin from each side of a 3D volume (and its mask).

    Each crop_{axis}_{min,max} value is the maximum fraction of that axis
    that may be removed from the corresponding side.  Defaults are 0.1,
    overridable for all sides at once via `crop_value` or individually via
    the per-side arguments.
    """

    def __init__(self, crop_value=None, crop_0_min=None, crop_0_max=None, crop_1_min=None, crop_1_max=None, crop_2_min=None, crop_2_max=None, always_apply=False, p=1.0):
        super(RandomCropFromBorders, self).__init__(always_apply, p)
        overrides = {'crop_0_min': crop_0_min, 'crop_0_max': crop_0_max, 'crop_1_min': crop_1_min, 'crop_1_max': crop_1_max, 'crop_2_min': crop_2_min, 'crop_2_max': crop_2_max}
        for attr_name, per_side in overrides.items():
            if per_side is not None:
                value = per_side
            elif crop_value is not None:
                value = crop_value
            else:
                value = 0.1
            setattr(self, attr_name, value)

    def get_params(self, **data):
        """Draw random crop boundaries for each of the three axes.

        Boundaries are drawn in the same order as the original implementation
        (min then max per axis) so the RNG stream is unchanged.
        """
        img = data['image']
        params = {}
        for axis in range(3):
            extent = img.shape[axis]
            lo_frac = getattr(self, f'crop_{axis}_min')
            hi_frac = getattr(self, f'crop_{axis}_max')
            lo = random.randint(0, int(lo_frac * extent))
            hi = random.randint(max(lo + 1, int((1 - hi_frac) * extent)), extent)
            params[f'sh{axis}_min'] = lo
            params[f'sh{axis}_max'] = hi
        return params

    def apply(self, img, sh0_min=0, sh0_max=0, sh1_min=0, sh1_max=0, sh2_min=0, sh2_max=0, **params):
        return F.clamping_crop(img, sh0_min, sh1_min, sh2_min, sh0_max, sh1_max, sh2_max)

    def apply_to_mask(self, mask, sh0_min=0, sh0_max=0, sh1_min=0, sh1_max=0, sh2_min=0, sh2_max=0, **params):
        return F.clamping_crop(mask, sh0_min, sh1_min, sh2_min, sh0_max, sh1_max, sh2_max)
def negative_r2(y_true, y_predicted, sample_weight=None):
    """Negated R^2 score — usable directly as a loss to minimise."""
    return -r2_score(y_true, y_predicted, sample_weight=sample_weight)
def _deregister_tracers(tracers):
    """Clear each tracer and drop it from the shell's registered-tracer list."""
    shell().tracer_cleanup_pending = True
    for tracer_obj in tracers:
        tracer_obj.clear_instance()
        registered = shell().registered_tracers
        try:
            registered.remove(tracer_obj)
        except ValueError:
            # Already deregistered elsewhere; nothing to do.
            pass
class Conll03Processor(QueryNERProcessor):
    """Processor exposing the CoNLL-2003 NER label set."""

    def get_labels(self):
        """Entity labels plus the outside tag 'O'."""
        return 'ORG PER LOC MISC O'.split()
def test_probability_raises(model, X):
    """`model.probability` must reject malformed and out-of-range inputs."""
    prob = getattr(model, 'probability')
    # Wrong nesting level / wrong dimensionality.
    assert_raises(ValueError, prob, [X])
    assert_raises(ValueError, prob, X[0])
    assert_raises((ValueError, TypeError, RuntimeError), prob, X[0][0])
    # Values just outside the supported range.
    if MIN_VALUE is not None:
        too_small = [[[MIN_VALUE - 0.1 for _ in range(model.d)] for _ in range(4)]]
        assert_raises(ValueError, prob, too_small)
    if MAX_VALUE is not None:
        too_large = [[[MAX_VALUE + 0.1 for _ in range(model.d)] for _ in range(4)]]
        assert_raises(ValueError, prob, too_large)
_registry(operator_type='View')
class View(Operator):
    """Operator wrapper for a tensor view/reshape node; extracts the target shape."""

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Populate self._attr['shape'] from a torch JIT node.

        The target shape arrives either as one ListConstruct input or as a
        series of scalar inputs; unknown (None) entries become -1.
        """
        if framework != 'torch':
            return
        if node.inputsAt(1).type().kind() == 'ListType':
            dims = parseTorchListConstruct(node.inputsAt(1))
        else:
            dims = [node.inputsAt(i).toIValue() for i in range(1, node.inputsSize())]
        dims = [-1 if d is None else d for d in dims]
        self._attr['shape'] = list2str(dims)
def build_sampler(cfg, **default_args):
    """Deprecated shim around ``TASK_UTILS.build``."""
    deprecation_msg = '``build_sampler`` would be deprecated soon, please use ``mmdet.registry.TASK_UTILS.build()`` '
    warnings.warn(deprecation_msg)
    return TASK_UTILS.build(cfg, default_args=default_args)
def batch_iterator(batch_size=10):
    """Yield lists of `batch_size` text-column values drawn from `iter_dataset`,
    covering roughly `args.n_examples` examples in total."""
    for _ in tqdm(range(0, args.n_examples, batch_size)):
        batch = [next(iter_dataset)[args.text_column] for _ in range(batch_size)]
        yield batch
_model
def nest_tiny(pretrained=False, **kwargs):
    """NesT-Tiny: embed dims (96, 192, 384), heads (3, 6, 12), depths (2, 2, 8)."""
    config = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs)
    return _create_nest('nest_tiny', pretrained=pretrained, **config)
class NumericalImputation(AutotabularPreprocessingAlgorithm):
    """Impute missing numerical values with a scikit-learn SimpleImputer."""

    def __init__(self, strategy: str='mean', random_state: Optional[np.random.RandomState]=None):
        self.strategy = strategy
        self.random_state = random_state
        # Initialise so transform() hits its intended "not fitted" branch
        # instead of raising AttributeError.
        self.preprocessor = None

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE]=None) -> 'NumericalImputation':
        """Fit a SimpleImputer on X; y is ignored."""
        import sklearn.impute
        self.preprocessor = sklearn.impute.SimpleImputer(strategy=self.strategy, copy=False)
        self.preprocessor.fit(X)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Apply the fitted imputer; raises if fit() was never called."""
        if (self.preprocessor is None):
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    # These two take no self; made static so instance calls don't silently
    # pass self as dataset_properties (the decorators appear to have been
    # stripped during extraction).
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        return {'shortname': 'NumericalImputation', 'name': 'Numerical Imputation', 'handles_missing_values': True, 'handles_nominal_values': True, 'handles_numerical_features': True, 'prefers_data_scaled': False, 'prefers_data_normalized': False, 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'is_deterministic': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,), 'preferred_dtype': None}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        """Search space: only the imputation strategy is tunable."""
        strategy = CategoricalHyperparameter('strategy', ['mean', 'median', 'most_frequent'], default_value='mean')
        cs = ConfigurationSpace()
        cs.add_hyperparameter(strategy)
        return cs
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0, is_train=None, output_size=None):
    """Single highway layer: out = gate * relu(trans) + (1 - gate) * arg.

    TF1-style: variables are created under `scope` (default 'highway_layer').
    If `output_size` differs from arg's last dimension, the carry path is
    linearly resized so the skip connection shapes match.
    """
    with tf.variable_scope((scope or 'highway_layer')):
        if (output_size is not None):
            d = output_size
        else:
            d = arg.get_shape()[(- 1)]
        trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        trans = tf.nn.relu(trans)
        gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        gate = tf.nn.sigmoid(gate)
        # Resize the carry path only when the output width changed.
        if (d != arg.get_shape()[(- 1)]):
            arg = linear([arg], d, bias, bias_start=bias_start, scope='arg_resize', wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        out = ((gate * trans) + ((1 - gate) * arg))
        return out
def load_aliases(alias_path):
    """Read a comma-separated alias file and map every name on a line to the
    line's first (canonical) name.

    The canonical name also maps to itself.  Returns the alias -> canonical
    dict.
    """
    aliases = {}
    print(('Loading aliases from "%s"' % alias_path))
    with open(alias_path, 'r') as fh:
        for raw_line in fh:
            names = [part.strip() for part in raw_line.split(',')]
            canonical = names[0]
            for name in names:
                aliases[name] = canonical
    return aliases
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
    """Corpus-level BLEU.

    Returns (bleu, precisions, brevity_penalty, length_ratio,
    translation_length, reference_length).  Clipped n-gram matches up to
    `max_order`; the brevity penalty uses the shortest reference per segment.
    """
    matches = [0] * max_order
    possible = [0] * max_order
    ref_len = 0
    trans_len = 0
    for references, translation in zip(reference_corpus, translation_corpus):
        ref_len += min(len(r) for r in references)
        trans_len += len(translation)
        # Union of reference n-gram counts gives the clipping ceiling.
        merged = collections.Counter()
        for reference in references:
            merged |= _get_ngrams(reference, max_order)
        overlap = _get_ngrams(translation, max_order) & merged
        for ngram, count in overlap.items():
            matches[len(ngram) - 1] += count
        for order in range(1, max_order + 1):
            candidates = len(translation) - order + 1
            if candidates > 0:
                possible[order - 1] += candidates
    precisions = [0] * max_order
    for i in range(0, max_order):
        if smooth:
            precisions[i] = (matches[i] + 1.0) / (possible[i] + 1.0)
        elif possible[i] > 0:
            precisions[i] = float(matches[i]) / possible[i]
        else:
            precisions[i] = 0.0
    if min(precisions) > 0:
        p_log_sum = sum(((1.0 / max_order) * math.log(p)) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0
    ratio = float(trans_len) / ref_len
    if ratio > 1.0:
        bp = 1.0
    elif ratio < 1e-06:
        bp = 0
    else:
        bp = math.exp(1 - (1.0 / ratio))
    return (geo_mean * bp, precisions, bp, ratio, trans_len, ref_len)
class Compose(object):
    """Chain several transforms, applying each in order to the input."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        for transform in self.transforms:
            img = transform(img)
        return img

    def __repr__(self):
        parts = ['\n    {}'.format(transform) for transform in self.transforms]
        return self.__class__.__name__ + '(' + ''.join(parts) + '\n)'
(Kernel, AbstractSampler, TensorLike, TensorLike)
def _decoupled_fallback(kern: Kernel, prior: AbstractSampler, Z: TensorLike, u: TensorLike, *, mean_function: Callable=None, update_rule: Callable=exact_update, join_rule: Callable=sum, **kwargs):
    """Decoupled sampler: a prior draw at Z joined with a data-driven update term."""
    prior_values = prior(Z, sample_axis=None)
    correction = update_rule(kern, Z, u, prior_values, **kwargs)
    return CompositeSampler(samplers=[prior, correction], join_rule=join_rule, mean_function=mean_function)
def test_add_package_dependency_invalid_version_raises(ing):
    """A malformed version specifier ('foobar') must raise ValueError."""
    with pytest.raises(ValueError):
        ing.add_package_dependency('django', 'foobar')
def has_modal(span):
    """Return 1 if any token in `span` carries the modal tag 'MD', else 0."""
    return int(any(token.tag_ == 'MD' for token in span))
class A2CAlgo(BaseAlgo):
    """Advantage Actor-Critic (A2C) built on the shared BaseAlgo rollout
    machinery, optimised with RMSprop."""

    def __init__(self, envs, acmodel, device=None, num_frames_per_proc=None, discount=0.99, lr=0.01, gae_lambda=0.95, entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4, rmsprop_alpha=0.99, rmsprop_eps=1e-08, preprocess_obss=None, reshape_reward=None):
        # A2C default: short 8-frame rollouts per process.
        num_frames_per_proc = (num_frames_per_proc or 8)
        super().__init__(envs, acmodel, device, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef, value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward)
        self.optimizer = torch.optim.RMSprop(self.acmodel.parameters(), lr, alpha=rmsprop_alpha, eps=rmsprop_eps)

    def update_parameters(self, exps):
        """One A2C update over `exps`, averaged across `recurrence` steps.

        Returns a dict of scalar logs: entropy, value, policy/value losses
        and the (pre-clipping) gradient norm.
        """
        inds = self._get_starting_indexes()
        update_entropy = 0
        update_value = 0
        update_policy_loss = 0
        update_value_loss = 0
        # Accumulated as a tensor so backward() can run on the average.
        update_loss = 0
        if self.acmodel.recurrent:
            memory = exps.memory[inds]
        # Walk `recurrence` consecutive timesteps from each starting index,
        # carrying the recurrent memory forward (masked at episode ends).
        for i in range(self.recurrence):
            sb = exps[(inds + i)]
            if self.acmodel.recurrent:
                (dist, value, memory) = self.acmodel(sb.obs, (memory * sb.mask))
            else:
                (dist, value) = self.acmodel(sb.obs)
            entropy = dist.entropy().mean()
            policy_loss = (- (dist.log_prob(sb.action) * sb.advantage).mean())
            value_loss = (value - sb.returnn).pow(2).mean()
            loss = ((policy_loss - (self.entropy_coef * entropy)) + (self.value_loss_coef * value_loss))
            update_entropy += entropy.item()
            update_value += value.mean().item()
            update_policy_loss += policy_loss.item()
            update_value_loss += value_loss.item()
            update_loss += loss
        update_entropy /= self.recurrence
        update_value /= self.recurrence
        update_policy_loss /= self.recurrence
        update_value_loss /= self.recurrence
        update_loss /= self.recurrence
        self.optimizer.zero_grad()
        update_loss.backward()
        # Norm is measured BEFORE clipping, so the log shows the raw value.
        update_grad_norm = (sum(((p.grad.data.norm(2) ** 2) for p in self.acmodel.parameters())) ** 0.5)
        torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
        self.optimizer.step()
        logs = {'entropy': update_entropy, 'value': update_value, 'policy_loss': update_policy_loss, 'value_loss': update_value_loss, 'grad_norm': update_grad_norm}
        return logs

    def _get_starting_indexes(self):
        """Frame indices spaced `recurrence` apart — the rollout positions
        from which each truncated-BPTT segment starts."""
        starting_indexes = numpy.arange(0, self.num_frames, self.recurrence)
        return starting_indexes
class Conv3dGRUCell(ConvRNNCellBase):
    """Single GRU cell whose gates are 3D convolutions.

    All behavior lives in ConvRNNCellBase; this subclass only pins
    mode='GRU' and convndim=3.
    """

    def __init__(self, in_channels, out_channels, kernel_size, bias=True, stride=1, dilation=1, groups=1):
        super().__init__(mode='GRU', in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias, convndim=3, stride=stride, dilation=dilation, groups=groups)
class SubtokenizerTest(tf.test.TestCase):
    """Round-trip tests for tokenizer.Subtokenizer encode/decode."""

    def _init_subtokenizer(self, vocab_list):
        # Write the vocab to a temp file in the quoted one-token-per-line
        # format the Subtokenizer loader expects.
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        with tf.io.gfile.GFile(temp_file.name, 'w') as writer:
            for subtoken in vocab_list:
                writer.write("'%s'" % subtoken)
                writer.write('\n')
        return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])

    def test_encode(self):
        subtokenizer = self._init_subtokenizer(['123_', 'test', 'ing_'])
        self.assertEqual([1, 2, 0], subtokenizer.encode('testing 123'))

    def test_decode(self):
        subtokenizer = self._init_subtokenizer(['123_', 'test', 'ing_'])
        self.assertEqual('testing 123', subtokenizer.decode([1, 2, 0]))

    def test_subtoken_ids_to_tokens(self):
        subtokenizer = self._init_subtokenizer(['123_', 'test', 'ing_'])
        self.assertEqual([u'testing', u'123'], subtokenizer._subtoken_ids_to_tokens([1, 2, 0]))
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Build train/eval datasets from the JSON file at data_args.data_path.

    Records are shuffled and split 98/2 into train/eval.  Returns a dict with
    'train_dataset' and 'eval_dataset'.
    """
    dataset_cls = (LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset)
    rank0_print('Loading data...')
    # Context manager closes the JSON file handle (previously leaked via
    # json.load(open(...))).
    with open(data_args.data_path, 'r') as f:
        raw_data = json.load(f)
    perm = np.random.permutation(len(raw_data))
    split = int((len(perm) * 0.98))
    train_indices = perm[:split]
    eval_indices = perm[split:]
    train_raw_data = [raw_data[i] for i in train_indices]
    eval_raw_data = [raw_data[i] for i in eval_indices]
    rank0_print(f'#train {len(train_raw_data)}, #eval {len(eval_raw_data)}')
    train_dataset = dataset_cls(train_raw_data, tokenizer=tokenizer)
    eval_dataset = dataset_cls(eval_raw_data, tokenizer=tokenizer)
    return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def generate_binary_sequence(size) -> torch.Tensor:
    """Enumerate all binary sequences of length `size`.

    Returns a float tensor of shape (2**size, size, 1), built by recursive
    doubling from the length-1 base [[1], [0]].
    """
    def _extend(current: torch.Tensor, target_len: int) -> torch.Tensor:
        # Each step doubles the row count: duplicate the rows and append a
        # column of ones to the first half and zeros to the second.
        if current.shape[1] == target_len:
            return current
        doubled = current.repeat([2, 1])
        half = doubled.shape[0] // 2
        new_col = torch.cat([torch.ones(half, 1), torch.zeros(half, 1)], dim=0)
        return _extend(torch.cat([doubled, new_col], dim=1), target_len)

    return _extend(torch.Tensor([[1.0], [0.0]]), size).float().unsqueeze(-1)
def get_one_from_grid_search(config, index=0):
    """Return the `index`-th grid-search candidate, or a deep copy of
    `config` itself when it is not a grid-search spec."""
    config = dcopy(config)
    if not is_grid_search(config):
        return config
    return config['grid_search'][index]
class MyPaintWidgetRobot(Widget):
    """Kivy canvas widget that records robot start/end positions from touches.

    State is exchanged through small text files under output/internal/ and a
    handful of module globals (pos_scales, map_res, robot_radius_rviz,
    robot_radius).  NOTE(review): those globals are defined elsewhere in the
    module; their semantics are inferred from names only.
    """

    def file_len(self, fname):
        """Count the lines of `fname` (0 for an empty file)."""
        with open(fname) as f:
            for (i, l) in enumerate(f):
                pass
        # `i` only exists if the file had at least one line.
        if ('i' in locals()):
            return (i + 1)
        else:
            return 0

    def calculate_radius_robot(self):
        """Convert the RViz robot radius into widget units via the map scale."""
        x_scale = (pos_scales[0][0] * map_res)
        y_scale = (pos_scales[0][1] * map_res)
        x_scale_reverse = (1 / x_scale)
        y_scale_reverse = (1 / y_scale)  # NOTE(review): computed but unused
        global robot_radius, robot_radius_rviz
        robot_radius = (robot_radius_rviz * x_scale_reverse)

    def on_touch_down(self, touch):
        """On a touch inside the map bounds, record up to two robot positions
        (start, then end) in robot.txt and draw a circle for each."""
        lines = []
        # internal.txt holds the drawable bounds: y-max, y-min, x-max, x-min.
        with open('output/internal/internal.txt') as file:
            lines = file.readlines()
        if (not ((touch.y < float(lines[0])) and (touch.y > float(lines[1])) and (touch.x < float(lines[2])) and (touch.x > float(lines[3])))):
            return
        fob = open('output/internal/robot.txt', 'a')
        # Only two positions are ever recorded (start and end).
        if (self.file_len('output/internal/robot.txt') < 2):
            with self.canvas:
                Color(0, 1, 1)
                self.calculate_radius_robot()
                d = (2 * robot_radius)
                Ellipse(pos=((touch.x - (d / 2)), (touch.y - (d / 2))), size=(d, d))
            # First recorded touch = start position, second = end position.
            if (self.file_len('output/internal/robot.txt') == 0):
                fob.write((((('robot start position (x,y): ' + str(touch.x)) + ',') + str(touch.y)) + '\n'))
                label_start = Label(text='', pos=(touch.x, touch.y), size=(1, 1), color=(0, 0, 0), disabled_color=(0, 0, 0))
                self.add_widget(label_start)
            if (self.file_len('output/internal/robot.txt') == 1):
                fob.write((((('robot end position (x,y): ' + str(touch.x)) + ',') + str(touch.y)) + '\n'))
                label_end = Label(text='-', font_size='15sp', pos=(touch.x, touch.y), size=(1, 1), color=(0, 0, 0), disabled_color=(0, 0, 0))
                self.add_widget(label_end)
        fob.close()
def _read_tensor_from_buf(value, shm_tensor_buffer):
    """Materialise a TensorMeta as a tensor view over the shared-memory buffer.

    Non-TensorMeta values pass through unchanged; zero-element metas become
    empty tensors without touching the buffer.
    """
    if not isinstance(value, TensorMeta):
        return value
    if value.numel == 0:
        return torch.tensor([], dtype=value.dtype)
    flat = torch.frombuffer(buffer=shm_tensor_buffer.buf, dtype=value.dtype, offset=value.offset, count=value.numel)
    return flat.reshape(value.shape)
def preprocess_function(examples, tokenizer=tokenizer):
    """Tokenize one or two sentence columns and remap string labels to ids
    (keeping -1 sentinels untouched)."""
    if sentence2_key is None:
        tokenizer_args = (examples[sentence1_key],)
    else:
        tokenizer_args = (examples[sentence1_key], examples[sentence2_key])
    result = tokenizer(*tokenizer_args, padding=False, max_length=max_seq_length, truncation=True)
    if (label_to_id is not None) and ('label' in examples):
        result['label'] = [label_to_id[l] if l != -1 else -1 for l in examples['label']]
    return result
def plot_prediction(row, scale=True, log=False):
    """Plot one county's outcome series from a fixed offset onwards.

    NOTE(review): `start_point = 60` below unconditionally overrides the
    deaths-threshold scan above it — this looks like leftover debug code;
    confirm whether the scan should be re-enabled before relying on it.
    """
    gold_key = outcome_type
    start_point = len(row[gold_key])
    # Find the first index where deaths exceed 10.
    for (i, val) in enumerate(row['deaths']):
        if (val > 10):
            start_point = i
            break
    start_point = 60
    # Need at least a few points to make a meaningful line.
    if (len(row[gold_key][start_point:]) < 3):
        return
    data = row[gold_key][start_point:]
    if scale:
        # Per-capita, rescaled to the mean population.
        data = [((d / row['PopulationEstimate2018']) * mean_pop) for d in data]
    if log:
        data = [np.log((d + 1)) for d in data]
    sns.lineplot(list(range(len(row[gold_key][start_point:]))), data, label=row['CountyName'])
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
def plot_diffs(c1):
    """Line-plot the non-zero entries of `c1` with their keys as x labels."""
    nonzero = {key: val for key, val in c1.items() if val != 0}
    fig, ax = plt.subplots(figsize=(19, 6))
    positions = np.arange(len(nonzero))
    ax.set_xticks(positions)
    ax.set_xticklabels(nonzero.keys(), rotation=45)
    plt.plot(positions, nonzero.values(), '-')
    plt.show()
class BlenderbotConverter(Converter):
    """Convert a slow Blenderbot BPE tokenizer into a fast `tokenizers` one."""

    def converted(self) -> Tokenizer:
        slow = self.original_tokenizer
        bpe_model = BPE(vocab=slow.encoder, merges=list(slow.bpe_ranks.keys()), dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=False)
        fast = Tokenizer(bpe_model)
        fast.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=slow.add_prefix_space)
        fast.decoder = decoders.ByteLevel()
        # Single-sequence template: append EOS, everything on segment 0.
        fast.post_processor = processors.TemplateProcessing(single=f'$A:0 {slow.eos_token}:0', special_tokens=[(slow.eos_token, slow.eos_token_id)])
        return fast
def test_show():
    """Smoke-test LyftDataset.show(): render predictions to .obj files and
    verify the three expected output files exist."""
    import mmcv
    from os import path as osp
    from mmdet3d.core.bbox import LiDARInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    # Fixture data shipped with the repo.
    root_path = './tests/data/lyft'
    ann_file = './tests/data/lyft/lyft_infos.pkl'
    class_names = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', 'bicycle', 'pedestrian', 'animal')
    eval_pipeline = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5, file_client_args=dict(backend='disk')), dict(type='LoadPointsFromMultiSweeps', sweeps_num=10, file_client_args=dict(backend='disk')), dict(type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points'])]
    kitti_dataset = LyftDataset(ann_file, None, root_path)
    # Five hand-picked boxes: (x, y, z, w, l, h, yaw) per row.
    boxes_3d = LiDARInstance3DBoxes(torch.tensor([[46.1218, (- 4.6496), (- 0.9275), 0.5316, 1.4442, 1.745, 1.1749], [33.3189, 0.1981, 0.3136, 0.5656, 1.2301, 1.7985, 1.5723], [46.1366, (- 4.6404), (- 0.951), 0.5162, 1.6501, 1.754, 1.3778], [33.2646, 0.2297, 0.3446, 0.5746, 1.3365, 1.7947, 1.543], [58.9079, 16.6272, (- 1.5829), 1.5656, 3.9313, 1.4899, 1.5505]]))
    scores_3d = torch.tensor([0.1815, 0.1663, 0.5792, 0.2194, 0.278])
    labels_3d = torch.tensor([0, 0, 1, 1, 2])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [dict(pts_bbox=result)]
    # show=False: write .obj artifacts without opening a viewer.
    kitti_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    file_name = 'host-a017_lidar1_'
    pts_file_path = osp.join(temp_dir, file_name, f'{file_name}_points.obj')
    gt_file_path = osp.join(temp_dir, file_name, f'{file_name}_gt.obj')
    pred_file_path = osp.join(temp_dir, file_name, f'{file_name}_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
def test_center_region_assigner():
    """Check CenterRegionAssigner gt assignment and shadowed-label bookkeeping
    on a small hand-constructed set of boxes."""
    self = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
    bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [8, 8, 9, 9]])
    # gt 3 (index 4, 1-based) is on top of gt 1 and shadows it.
    gt_bboxes = torch.FloatTensor([[0, 0, 11, 11], [10, 10, 20, 20], [4.5, 4.5, 5.5, 5.5], [0, 0, 10, 10]])
    gt_labels = torch.LongTensor([2, 3, 4, 5])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert (len(assign_result.gt_inds) == 3)
    assert (len(assign_result.labels) == 3)
    # Expected 1-based gt indices per bbox; 0 means unassigned.
    expected_gt_inds = torch.LongTensor([4, 2, 0])
    assert torch.all((assign_result.gt_inds == expected_gt_inds))
    # Shadowed labels are (bbox_index, label) pairs suppressed by overlap.
    shadowed_labels = assign_result.get_extra_property('shadowed_labels')
    assert torch.any((shadowed_labels == torch.LongTensor([[2, 2]])))
    assert torch.any((shadowed_labels == torch.LongTensor([[2, 5]])))
    assert torch.any((shadowed_labels == torch.LongTensor([[0, 2]])))
def coarsify2abstract(shoppinglist: list[dict], abstract_scene_description: str) -> list[dict]:
    """Return a deep copy of `shoppinglist` with every item coarsened: each
    entry's 'class_name' is replaced by `abstract_scene_description` and its
    'attributes' blanked. The input list is left untouched."""
    ablated = []
    for entry in copy.deepcopy(shoppinglist):
        # Each entry must carry both keys we are about to overwrite.
        assert ('class_name' in entry) and ('attributes' in entry)
        entry['class_name'] = abstract_scene_description
        entry['attributes'] = ''
        ablated.append(entry)
    return ablated
def prepare_jit_inputs(inputs, model, tokenizer):
    """Build example inputs (ids, past_key_values, attention_mask) for JIT
    tracing a generation model with a one-step dummy KV cache."""
    num_batch = len(inputs)
    dummy_input = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True)
    (num_block_layers, num_attention_heads, num_embedding_size_per_head) = sparse_model_config(model.config)
    if (model.config.model_type == 'bloom'):
        # Bloom uses a fused (batch*heads, ...) KV layout with transposed keys.
        past_key_values = tuple(((torch.zeros(int((num_attention_heads * num_batch)), num_embedding_size_per_head, 1).to(model.config.torch_dtype).to(model.device), torch.zeros(int((num_attention_heads * num_batch)), 1, num_embedding_size_per_head).to(model.config.torch_dtype).to(model.device)) for _ in range(num_block_layers)))
    else:
        # Standard (batch, heads, seq=1, head_dim) KV layout.
        past_key_values = tuple(((torch.zeros(num_batch, num_attention_heads, 1, num_embedding_size_per_head).to(model.config.torch_dtype).to(model.device), torch.zeros(num_batch, num_attention_heads, 1, num_embedding_size_per_head).to(model.config.torch_dtype).to(model.device)) for _ in range(num_block_layers)))
    # Prepend a zero column so the mask covers the dummy cached step.
    dummy_input['attention_mask'] = torch.cat([torch.zeros(dummy_input['attention_mask'].shape[0], 1).to(dummy_input['attention_mask'].dtype), dummy_input['attention_mask']], (- 1))
    if model.config.use_cache:
        jit_inputs = (dummy_input['input_ids'].to(model.device), past_key_values, dummy_input['attention_mask'].to(model.device))
    else:
        jit_inputs = (dummy_input['input_ids'].to(model.device), dummy_input['attention_mask'].to(model.device))
    return jit_inputs
def find_weather_presets():
    """Return (WeatherParameters value, preset name) pairs for every
    capitalized preset attribute exposed by carla.WeatherParameters."""
    is_preset = re.compile('[A-Z].+').match
    preset_names = [name for name in dir(carla.WeatherParameters) if is_preset(name)]
    return [(getattr(carla.WeatherParameters, name), name) for name in preset_names]
def _get_depths(alpha: float) -> List[int]:
    """Scale the base stage widths by `alpha`, rounding each to a multiple of 8."""
    base_depths = [32, 16, 24, 40, 80, 96, 192, 320]
    return [_round_to_multiple_of(alpha * d, 8) for d in base_depths]
def make_layers(cfg, **kwargs):
layers = []
in_channels = 3
for v in cfg:
if (v == 'M1'):
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
elif (v == 'M2'):
layers += [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]
elif (v == 'M'):
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
return nn.Sequential(*layers) |
class PixelShuffle_ICNR(Module):
    """Upsample by `scale` from `ni` to `nf` channels using PixelShuffle with
    ICNR weight init, optionally followed by a pad+avg-pool blur that removes
    checkerboard artifacts.

    Args:
        ni: input channels.
        nf: output channels (defaults to `ni`).
        scale: upsampling factor.
        blur: apply the anti-checkerboard blur after shuffling.
        norm_type: normalization for the 1x1 conv.
        leaky: negative slope for leaky ReLU (None = plain ReLU).
    """

    def __init__(self, ni: int, nf: int=None, scale: int=2, blur: bool=False, norm_type=NormType.Weight, leaky: float=None):
        nf = ifnone(nf, ni)
        # 1x1 conv expands channels so PixelShuffle can fold them into space.
        self.conv = conv_layer(ni, (nf * (scale ** 2)), ks=1, norm_type=norm_type, use_activ=False)
        icnr(self.conv[0].weight)
        self.shuf = nn.PixelShuffle(scale)
        # "Blurring over (h*w)" trick: replication-pad then 2x2 avg-pool.
        self.pad = nn.ReplicationPad2d((1, 0, 1, 0))
        self.blur = nn.AvgPool2d(2, stride=1)
        # BUG FIX: previously the truthiness of `self.blur` (an nn.Module,
        # always truthy) gated the blur in forward(), so the `blur` argument
        # was silently ignored and blurring was ALWAYS applied. Keep the
        # flag separately so `blur=False` actually disables it.
        self.do_blur = blur
        self.relu = relu(True, leaky=leaky)

    def forward(self, x):
        x = self.shuf(self.relu(self.conv(x)))
        return (self.blur(self.pad(x)) if self.do_blur else x)
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """TF1-style 1D convolution layer with optional batch norm and activation.

    Creates variables under `scope`; returns the activated output tensor.
    `is_training` is required when `bn` is True (consumed by the BN helper).
    """
    with tf.variable_scope(scope) as sc:
        assert ((data_format == 'NHWC') or (data_format == 'NCHW'))
        # Channel axis position depends on the data format.
        if (data_format == 'NHWC'):
            num_in_channels = inputs.get_shape()[(- 1)].value
        elif (data_format == 'NCHW'):
            num_in_channels = inputs.get_shape()[1].value
        kernel_shape = [kernel_size, num_in_channels, num_output_channels]
        kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding, data_format=data_format)
        # Biases live on CPU (parameter-server style placement).
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
        if bn:
            outputs = batch_norm_for_conv1d(outputs, is_training, bn_decay=bn_decay, scope='bn', data_format=data_format)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
class InferenceOptimizer(BaseInferenceOptimizer):
    """Keras/TF inference optimizer.

    Benchmarks a model under several acceleration back-ends (OpenVINO,
    ONNXRuntime, INC int8, bf16, ...), and exposes `trace`, `quantize`,
    `save` and `load` helpers.

    NOTE(review): `trace`, `quantize`, `save` and `load` take no `self`/`cls`
    parameter yet are called as `InferenceOptimizer.trace(...)` below —
    presumably decorated `@staticmethod` in the original file; confirm.
    """

    # Registry of every candidate acceleration method, keyed by name.
    ALL_INFERENCE_ACCELERATION_METHOD: Dict = {'original': TFAccelerationOption(), 'static_int8': TFAccelerationOption(inc=True), 'bf16': TFAccelerationOption(bf16=True), 'openvino_fp32': TFAccelerationOption(openvino=True), 'openvino_bf16': TFAccelerationOption(openvino=True, bf16=True), 'openvino_fp16': TFAccelerationOption(openvino=True, fp16=True), 'openvino_int8': TFAccelerationOption(openvino=True, pot=True), 'onnxruntime_fp32': TFAccelerationOption(onnxruntime=True), 'onnxruntime_int8_qlinear': TFAccelerationOption(onnxruntime=True, inc=True, method='qlinear'), 'onnxruntime_int8_integer': TFAccelerationOption(onnxruntime=True, inc=True, method='integer')}

    def optimize(self, model: Model, x: Union[(tf.Tensor, np.ndarray, tf.data.Dataset)], y: Union[(tf.Tensor, np.ndarray)]=None, validation_data: Optional[Dataset]=None, input_spec=None, batch_size: int=1, metric: Optional[Metric]=None, direction: str='max', thread_num: Optional[int]=None, logging: bool=False, latency_sample_num: int=100, includes: Optional[List[str]]=None, excludes: Optional[List[str]]=None, output_filename: Optional[str]=None) -> None:
        """Try every available acceleration method on `model`, measure latency
        (and optionally accuracy against `validation_data`/`metric`), and
        store the results in `self.optimized_model_dict`."""
        invalidInputError(isinstance(model, Model), 'model should be a Keras Model.')
        invalidInputError((direction in ['min', 'max']), "Only support direction 'min', 'max'.")
        available_dict: Dict = available_acceleration_combination(excludes=excludes, includes=includes, full_methods=self.ALL_INFERENCE_ACCELERATION_METHOD)
        self._direction: str = direction
        # Accuracy is only computed when both a validation set and a metric exist.
        if ((validation_data is None) or (metric is None)):
            self._calculate_accuracy = False
        else:
            batched_validation_data = validation_data.batch(batch_size)
            self._calculate_accuracy = True
        # Respect an externally-set OMP thread budget unless overridden.
        if (os.getenv('OMP_NUM_THREADS') is not None):
            default_threads: int = int(os.getenv('OMP_NUM_THREADS'))
        else:
            default_threads = None
        thread_num = (default_threads if (thread_num is None) else int(thread_num))
        result_map: Dict[(str, Dict)] = {}
        # Derive a single input sample used for warm-up and latency runs.
        if isinstance(x, Dataset):
            batched_training_dataset = x.batch(batch_size)
            input_sample = next(iter(batched_training_dataset))
            if (isinstance(input_sample, (list, tuple)) and (len(input_sample) == 2)):
                # Drop the label element of an (inputs, label) pair.
                input_sample = input_sample[:(- 1)]
        else:
            input_sample = tf.convert_to_tensor(x[:batch_size])
            if (isinstance(input_sample, (list, tuple)) and (len(input_sample) == 1)):
                input_sample = input_sample[0]
        # Time one baseline forward pass; also validates that x fits the model.
        st = time.perf_counter()
        try:
            if isinstance(input_sample, tf.Tensor):
                model(input_sample)
            else:
                model(*input_sample)
        except Exception:
            invalidInputError(False, 'x is incompatible with your model input.')
        baseline_time = (time.perf_counter() - st)
        # Slow models get a smaller POT calibration sample to bound runtime.
        if (baseline_time > 0.1):
            sample_size_for_pot = 15
        else:
            sample_size_for_pot = 100
        print('Start Optimization')
        start_time = time.perf_counter()
        for (idx, (method, available)) in enumerate(available_dict.items()):
            result_map[method] = {}
            if (available is False):
                result_map[method]['status'] = 'lack dependency'
            else:
                print(f'Start test {method} model ({(idx + 1)}/{len(available_dict)})')
                option: AccelerationOption = self.ALL_INFERENCE_ACCELERATION_METHOD[method]
                precision: str = option.get_precision()
                try:
                    acce_model = option.optimize(model=model, x=x, y=y, input_spec=input_spec, thread_num=thread_num, logging=logging, sample_size_for_pot=sample_size_for_pot)
                except Exception:
                    traceback.print_exc()
                    result_map[method]['status'] = 'fail to convert'
                    print(f'Failed to convert to {method}')
                    continue
                result_map[method]['status'] = 'successful'

                def func_test(model, *args):
                    model(*args)
                try:
                    # For methods whose thread count can't be pinned in-process,
                    # benchmark in a subprocess with OMP_NUM_THREADS set.
                    if ((method in ('original', 'static_int8')) and (thread_num is not None)):
                        _flag = True
                        # NOTE(review): params carries the original `model`, not
                        # `acce_model` — the worker presumably reloads the saved
                        # accelerated model from the temp dir; confirm.
                        params = {'iterrun': latency_sample_num, 'func': func_test, 'model': model, 'input_sample': input_sample, 'method': method}
                        with tempfile.TemporaryDirectory() as temp_dir:
                            if (method != 'original'):
                                InferenceOptimizer.save(acce_model, temp_dir)
                            _filename = os.path.join(temp_dir, 'params')
                            cloudpickle.dump(params, open(_filename, 'wb'))
                            my_env = os.environ.copy()
                            my_env['OMP_NUM_THREADS'] = str(thread_num)
                            worker_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], '_worker.py')
                            try:
                                result = subprocess.run(['python', worker_file, _filename, str(thread_num)], capture_output=True, universal_newlines=True, env=my_env)
                                latency = float(result.stdout.strip())
                                result_map[method]['latency'] = latency
                            except Exception:
                                # Fall back to in-process measurement below.
                                _flag = False
                    # In-process latency measurement (also the fallback path).
                    if ((method != 'original') or (thread_num is None) or (_flag is False)):
                        if isinstance(input_sample, tf.Tensor):
                            (result_map[method]['latency'], status) = latency_calculate_helper(latency_sample_num, baseline_time, func_test, acce_model, input_sample)
                        else:
                            (result_map[method]['latency'], status) = latency_calculate_helper(latency_sample_num, baseline_time, func_test, acce_model, *input_sample)
                        if ((status is False) and (method != 'original')):
                            result_map[method]['status'] = 'early stopped'
                            continue
                except Exception:
                    traceback.print_exc()
                    result_map[method]['status'] = 'fail to forward'
                    print(f'{method} failed to forward')
                    continue
                if self._calculate_accuracy:
                    # fp32 variants are assumed accuracy-neutral; reuse baseline.
                    if ((precision == 'fp32') and (method != 'original')):
                        _accuracy = result_map['original']['accuracy']
                        _accuracy = sigfig.round(_accuracy, sigfigs=5)
                        result_map[method]['accuracy'] = (str(_accuracy) + '*')
                    elif (method == 'original'):
                        try:
                            result_map[method]['accuracy'] = _accuracy_calculate_helper(acce_model, metric, batched_validation_data)
                        except Exception:
                            traceback.print_exc()
                            # Baseline accuracy failed; disable accuracy entirely.
                            self._calculate_accuracy = False
                    else:
                        result_map[method]['accuracy'] = _accuracy_calculate_helper(acce_model, metric, batched_validation_data)
                else:
                    result_map[method]['accuracy'] = None
                result_map[method]['model'] = acce_model
                print(f'Finish test {method} model ({(idx + 1)}/{len(available_dict)})')
        self.optimized_model_dict: Dict = result_map
        print('\n\nOptimization Results')
        self._optimize_result = format_optimize_result(self.optimized_model_dict, self._calculate_accuracy)
        if self._calculate_accuracy:
            self._optimize_result += "* means we assume the metric value of the traced model does not change, so we don't recompute metric value to save time.\n"
        time_cost = (time.perf_counter() - start_time)
        time_cost_str = f'Optimization cost {time_cost:.1f}s in total.'
        self._optimize_result += time_cost_str
        if (output_filename is not None):
            with open(output_filename, 'w') as f:
                f.write(self._optimize_result)
        print(self._optimize_result)
        print('Stop Optimization')

    def trace(model: Model, accelerator: Optional[str]=None, input_spec=None, thread_num: Optional[int]=None, device: Optional[str]='CPU', onnxruntime_session_options=None, openvino_config=None, logging=True, **kwargs):
        """Trace `model` to a fp32 accelerated runtime ('openvino' or
        'onnxruntime') and return the wrapped model."""
        invalidInputError(((device == 'CPU') or ('GPU' in device)), 'Now we only support fp32 for CPU and GPU, not {}'.format(device))
        if ((device != 'CPU') and (accelerator != 'openvino')):
            invalidInputError(False, 'Now we only support {} device when accelerator is openvino.'.format(device))
        if (accelerator == 'openvino'):
            # Pin fp32 precision on CPU; GPUs pick their own default.
            final_openvino_option = ({'INFERENCE_PRECISION_HINT': 'f32'} if (device == 'CPU') else {})
            if (openvino_config is not None):
                final_openvino_option.update(openvino_config)
            result = KerasOpenVINOModel(model, input_spec=input_spec, precision='fp32', thread_num=thread_num, device=device, config=final_openvino_option, logging=logging, **kwargs)
        elif (accelerator == 'onnxruntime'):
            if (onnxruntime_session_options is None):
                import onnxruntime
                onnxruntime_session_options = onnxruntime.SessionOptions()
                if (thread_num is not None):
                    onnxruntime_session_options.intra_op_num_threads = thread_num
                    onnxruntime_session_options.inter_op_num_threads = thread_num
            result = KerasONNXRuntimeModel(model, input_spec, onnxruntime_session_options)
        else:
            invalidInputError(False, 'Accelerator {} is invalid.'.format(accelerator))
        return patch_compiled_and_attrs(result, model)

    def quantize(model: Model, x: Union[(tf.Tensor, np.ndarray, tf.data.Dataset)]=None, y: Union[(tf.Tensor, np.ndarray)]=None, precision: str='int8', accelerator: Optional[str]=None, input_spec=None, eval_func: Optional[Callable]=None, metric: Optional[Metric]=None, accuracy_criterion: Optional[dict]=None, approach: str='static', method: Optional[str]=None, conf: Optional[str]=None, tuning_strategy: Optional[str]=None, timeout: Optional[int]=None, max_trials: Optional[int]=None, batch: Optional[int]=None, thread_num: Optional[int]=None, device: Optional[str]='CPU', custom_objects=None, inputs: List[str]=None, outputs: List[str]=None, sample_size: int=100, onnxruntime_session_options=None, openvino_config=None, logging: bool=True, **kwargs):
        """Quantize `model` to int8/fp16/bf16 via INC, OpenVINO POT, or
        ONNXRuntime; returns a wrapped accelerated model."""
        invalidInputError((precision in ['int8', 'fp16', 'bf16']), "Only support 'int8', 'bf16', 'fp16' now, no support for {}.".format(precision))
        invalidInputError(((device == 'CPU') or ('GPU' in device) or (device == 'VPUX')), 'Now we only support CPU, GPU and VPUX, not {}'.format(device))
        if ((device != 'CPU') and (accelerator != 'openvino')):
            invalidInputError(False, 'Now we only support {} device when accelerator is openvino.'.format(device))
        # Unwrap a patched model so we can re-attach attrs to the original later.
        if isinstance(model, _ModuleWrapper):
            original_model = model.source_obj
            model = model.target_obj
        else:
            original_model = model
        if (precision == 'fp16'):
            invalidInputError((accelerator == 'openvino'), 'fp16 is not supported on {} accelerator.'.format(accelerator))
            if (device == 'VPUX'):
                invalidInputError(('mean_value' in kwargs), 'If you want to quantize with openvino float16 precision on VPUX device, you must specify mean_value for model optimizer function. For more details about model optimizer, you can see mo --help .')
            from bigdl.nano.deps.openvino.tf.model import KerasOpenVINOModel
            result = KerasOpenVINOModel(model, input_spec=input_spec, precision=precision, thread_num=thread_num, device=device, config=openvino_config, logging=logging, **kwargs)
            return patch_compiled_and_attrs(result, original_model)
        elif (precision == 'bf16'):
            invalidInputError(((accelerator == 'openvino') or (accelerator is None)), 'Accelerator {} is invalid for BF16.'.format(accelerator))
            invalidInputError((device == 'CPU'), "Device {} don't support bfloat16.".format(device))
            if (accelerator == 'openvino'):
                final_openvino_option = {'INFERENCE_PRECISION_HINT': 'bf16'}
                if (openvino_config is not None):
                    final_openvino_option.update(openvino_config)
                from bigdl.nano.deps.openvino.tf.model import KerasOpenVINOModel
                result = KerasOpenVINOModel(model, input_spec=input_spec, precision=precision, thread_num=thread_num, device=device, config=final_openvino_option, logging=logging, **kwargs)
            elif (accelerator is None):
                return BF16Model(model, custom_objects=custom_objects)
            return patch_compiled_and_attrs(result, original_model)
        # int8 path below: only static post-training quantization is supported.
        invalidInputError((approach == 'static'), "Only 'static' approach is supported now.")
        # INC requires labels; synthesize dummy ones when absent.
        if ((not isinstance(x, tf.data.Dataset)) and (y is None)):
            y = range(len(x))
        if isinstance(x, tf.data.Dataset):
            batch_data = next(iter(x))
            if (isinstance(batch_data, tf.Tensor) or (isinstance(batch_data, tuple) and (len(batch_data) != 2))):
                y = range(len(x))
                y = tf.data.Dataset.from_tensor_slices(y)
                x = tf.data.Dataset.zip((x, y))
        if (accelerator is None):
            # Plain INC quantization of the TF model.
            if isinstance(x, tf.data.Dataset):
                calib_dataset = x
            else:
                calib_dataset = tf.data.Dataset.from_tensor_slices((x, y))
            if batch:
                calib_dataset = calib_dataset.batch(batch)
            try_fake_inference(model, input_spec)
            if ((model.inputs is None) or (model.outputs is None)):
                INC_LESS_14 = compare_version('neural_compressor', operator.lt, '1.14')
                if (not INC_LESS_14):
                    # Subclassed models: derive input names from call() signature.
                    signature = inspect.signature(model.call)
                    input_names = []
                    for param in signature.parameters.values():
                        input_names.append(param.name)
                    if (inputs is None):
                        inputs = input_names
                    if (outputs is None):
                        outputs = 'outputs'
            result = inc_quantzie(model, dataloader=calib_dataset, eval_func=eval_func, metric=metric, framework='tensorflow', conf=conf, approach=approach, tuning_strategy=tuning_strategy, accuracy_criterion=accuracy_criterion, timeout=timeout, max_trials=max_trials, inputs=inputs, outputs=outputs)
        elif (accelerator == 'openvino'):
            from bigdl.nano.deps.openvino.tf.model import KerasOpenVINOModel
            if isinstance(model, KerasOpenVINOModel):
                openvino_model = model
            else:
                _precision = ('fp16' if (device != 'CPU') else 'fp32')
                if (device == 'VPUX'):
                    invalidInputError(('mean_value' in kwargs), 'If you want to quantize with openvino on VPUX device, you must specify mean_value for model optimizer function. For more details about model optimizer, you can see mo --help .')
                openvino_model = KerasOpenVINOModel(model, input_spec=input_spec, precision=_precision, thread_num=thread_num, device=device, config=openvino_config, logging=logging, **kwargs)
            if metric:
                if (not isinstance(accuracy_criterion, dict)):
                    accuracy_criterion = {'relative': 0.99, 'higher_is_better': True}
                drop_type = ('relative' if ('relative' in accuracy_criterion) else 'absolute')
                higher_is_better = accuracy_criterion.get('higher_is_better', None)
                maximal_drop = accuracy_criterion.get(drop_type, None)
            else:
                (drop_type, higher_is_better, maximal_drop) = (None, None, None)
            result = openvino_model.pot(x=x, y=y, metric=metric, higher_better=higher_is_better, drop_type=drop_type, maximal_drop=maximal_drop, max_iter_num=max_trials, sample_size=sample_size, config=openvino_config, thread_num=thread_num)
        elif (accelerator == 'onnxruntime'):
            from bigdl.nano.deps.onnxruntime.tensorflow.model import KerasONNXRuntimeModel
            if isinstance(model, KerasONNXRuntimeModel):
                onnx_model = model
            else:
                onnx_model = InferenceOptimizer.trace(model=model, accelerator='onnxruntime', input_spec=input_spec, thread_num=thread_num)
            method_map = {'qlinear': 'onnxrt_qlinearops', 'integer': 'onnxrt_integerops', None: 'onnxrt_qlinearops'}
            framework = method_map.get(method, None)
            result = inc_quantzie(onnx_model, dataloader=(x, y), eval_func=eval_func, metric=metric, framework=framework, thread_num=thread_num, conf=conf, approach=approach, tuning_strategy=tuning_strategy, accuracy_criterion=accuracy_criterion, timeout=timeout, max_trials=max_trials, inputs=inputs, outputs=outputs, onnx_option='tensorflow', onnxruntime_session_options=onnxruntime_session_options)
            result._inputs_dtypes = onnx_model._inputs_dtypes
            result._mode = 'arg'
        else:
            invalidInputError(False, 'Accelerator {} is invalid.'.format(accelerator))
        return patch_compiled_and_attrs(result, original_model)

    def save(model: Model, path):
        """Save an accelerated or plain Keras model plus nano metadata to `path`."""
        import yaml
        path = Path(path)
        # NOTE(review): mkdir's `parents` expects a bool; passing `path.parent`
        # (a truthy Path) behaves like parents=True — confirm intent.
        path.mkdir(parents=path.parent, exist_ok=True)
        if hasattr(model, '_save'):
            # Accelerated wrappers implement their own serialization.
            model._save(path)
        else:
            meta_path = (Path(path) / 'nano_model_meta.yml')
            with open(meta_path, 'w+') as f:
                metadata = {'ModelType': 'KerasModel', 'checkpoint': 'saved_weight.ckpt'}
                yaml.safe_dump(metadata, f)
            checkpoint_path = (path / metadata['checkpoint'])
            model.save(checkpoint_path)

    def load(path, model: Optional[Model]=None, device=None, custom_objects=None):
        """Load a model saved by `save`, dispatching on the recorded ModelType."""
        import yaml
        path = Path(path)
        invalidInputError(path.exists(), "{} doesn't exist.".format(path))
        meta_path = (path / 'nano_model_meta.yml')
        invalidInputError(meta_path.exists(), 'File {} is required to load model.'.format(str(meta_path)))
        with open(meta_path, 'r') as f:
            metadata = yaml.safe_load(f)
        model_type = metadata.get('ModelType', None)
        if (model_type == 'KerasOpenVINOModel'):
            result = load_openvino_model(path, framework='tensorflow', device=device)
            return patch_attrs(result, model)
        if (model_type == 'KerasONNXRuntimeModel'):
            result = load_onnxruntime_model(path, framework='tensorflow')
            return patch_attrs(result, model)
        if (model_type == 'KerasQuantizedModel'):
            result = load_inc_model(path, model, framework='tensorflow')
            return patch_attrs(result, model)
        # Fall through: a plain Keras checkpoint.
        checkpoint_path = metadata.get('checkpoint', None)
        invalidInputError((checkpoint_path is not None), "Key 'checkpoint' must be specified.")
        checkpoint_path = (path / metadata['checkpoint'])
        model = keras.models.load_model(checkpoint_path, custom_objects=custom_objects)
        return model
def process_datasets(config, api_config, max_suffix_length=0):
    """Process every dataset found in `config`, updating a console progress
    bar; returns the running max suffix length used for bar padding."""
    datasets = __filter_datasets_from_config(config)
    total_steps = len(datasets) * 3
    for idx, dataset in enumerate(datasets):
        max_suffix_length = _print_progress_bar((idx + len(datasets)), total_steps, ('Process ' + dataset.name), max_suffix_length)
        __process_dataset(config, dataset, api_config)
    return max_suffix_length
class AutoTokenizerTest(unittest.TestCase):
def test_tokenizer_from_pretrained(self):
    """BERT and GPT-2 hub checkpoints load as the expected tokenizer classes."""
    bert_names = (n for n in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if ('japanese' not in n))
    cases = [
        (bert_names, (BertTokenizer, BertTokenizerFast)),
        (GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(), (GPT2Tokenizer, GPT2TokenizerFast)),
    ]
    for names, expected_classes in cases:
        for checkpoint in names:
            tok = AutoTokenizer.from_pretrained(checkpoint)
            self.assertIsNotNone(tok)
            self.assertIsInstance(tok, expected_classes)
            self.assertGreater(len(tok), 0)
def test_tokenizer_from_pretrained_identifier(self):
    """A small-model identifier resolves to a BERT tokenizer with vocab 12."""
    tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
    self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
    self.assertEqual(tokenizer.vocab_size, 12)
def test_tokenizer_from_model_type(self):
    """An unknown identifier falls back to the config's model type (RoBERTa)."""
    tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
    self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
    self.assertEqual(tokenizer.vocab_size, 20)
def test_tokenizer_from_tokenizer_class(self):
    """An explicit tokenizer_class in the config overrides the model type."""
    config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
    self.assertIsInstance(config, RobertaConfig)
    # Config is Roberta, but tokenizer_class points at BERT.
    tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
    self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
    self.assertEqual(tokenizer.vocab_size, 12)
def test_tokenizer_from_type(self):
    """tokenizer_type= selects the slow tokenizer class from local vocab files."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        shutil.copy('./tests/fixtures/vocab.txt', os.path.join(tmp_dir, 'vocab.txt'))
        tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='bert', use_fast=False)
        self.assertIsInstance(tokenizer, BertTokenizer)
    with tempfile.TemporaryDirectory() as tmp_dir:
        shutil.copy('./tests/fixtures/vocab.json', os.path.join(tmp_dir, 'vocab.json'))
        shutil.copy('./tests/fixtures/merges.txt', os.path.join(tmp_dir, 'merges.txt'))
        tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='gpt2', use_fast=False)
        self.assertIsInstance(tokenizer, GPT2Tokenizer)
# NOTE(review): this bare name looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_tokenizer_from_type_fast(self):
    """tokenizer_type= selects the fast tokenizer class by default."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        shutil.copy('./tests/fixtures/vocab.txt', os.path.join(tmp_dir, 'vocab.txt'))
        tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='bert')
        self.assertIsInstance(tokenizer, BertTokenizerFast)
    with tempfile.TemporaryDirectory() as tmp_dir:
        shutil.copy('./tests/fixtures/vocab.json', os.path.join(tmp_dir, 'vocab.json'))
        shutil.copy('./tests/fixtures/merges.txt', os.path.join(tmp_dir, 'merges.txt'))
        tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type='gpt2')
        self.assertIsInstance(tokenizer, GPT2TokenizerFast)
def test_tokenizer_from_type_incorrect_name(self):
    """An unrecognized tokenizer_type raises ValueError."""
    with pytest.raises(ValueError):
        AutoTokenizer.from_pretrained('./', tokenizer_type='xxx')
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_tokenizer_identifier_with_correct_config(self):
    """A cased community checkpoint reports do_lower_case=False and max len 512."""
    for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
        tokenizer = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        # Slow and fast tokenizers expose the flag in different places.
        if isinstance(tokenizer, BertTokenizer):
            self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
        else:
            self.assertEqual(tokenizer.do_lower_case, False)
        self.assertEqual(tokenizer.model_max_length, 512)
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_tokenizer_identifier_non_existent(self):
    """A non-existent repo id raises EnvironmentError with a clear message."""
    for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
        with self.assertRaisesRegex(EnvironmentError, 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier'):
            _ = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def test_model_name_edge_cases_in_mappings(self):
    """Every class name registered in TOKENIZER_MAPPING resolves via
    tokenizer_class_from_name without raising."""
    registered_names = [
        tok.__name__
        for pair in TOKENIZER_MAPPING.values()
        for tok in pair  # (slow, fast) — order preserved
        if tok is not None
    ]
    for name in registered_names:
        tokenizer_class_from_name(name)
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_from_pretrained_use_fast_toggle(self):
    """use_fast=False yields the slow class; the default yields the fast one."""
    self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased', use_fast=False), BertTokenizer)
    self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased'), BertTokenizerFast)
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_do_lower_case(self):
    """do_lower_case=False on an uncased vocab makes cased words unknown."""
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=False)
    sample = 'Hello, world. How are you?'
    tokens = tokenizer.tokenize(sample)
    # 'Hello' is not in the lowercase vocab, so it maps to [UNK].
    self.assertEqual('[UNK]', tokens[0])
    tokenizer = AutoTokenizer.from_pretrained('microsoft/mpnet-base', do_lower_case=False)
    tokens = tokenizer.tokenize(sample)
    self.assertEqual('[UNK]', tokens[0])
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_PreTrainedTokenizerFast_from_pretrained(self):
    """A tokenizer.json-only repo loads as bare PreTrainedTokenizerFast with
    the expected config values."""
    tokenizer = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
    self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
    self.assertEqual(tokenizer.model_max_length, 512)
    self.assertEqual(tokenizer.vocab_size, 30000)
    self.assertEqual(tokenizer.unk_token, '[UNK]')
    self.assertEqual(tokenizer.padding_side, 'right')
    self.assertEqual(tokenizer.truncation_side, 'right')
def test_auto_tokenizer_from_local_folder(self):
    """save_pretrained/from_pretrained round-trip preserves class and vocab."""
    tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
    self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)
        tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
    self.assertIsInstance(tokenizer2, tokenizer.__class__)
    self.assertEqual(tokenizer2.vocab_size, 12)
def test_auto_tokenizer_fast_no_slow(self):
    """A model with no fast tokenizer (CTRL) still loads its slow class."""
    tokenizer = AutoTokenizer.from_pretrained('ctrl')
    self.assertIsInstance(tokenizer, CTRLTokenizer)
def test_get_tokenizer_config(self):
    """get_tokenizer_config returns the repo's tokenizer_config contents, and
    save_pretrained records the tokenizer_class for later lookup."""
    config = get_tokenizer_config('bert-base-cased')
    # The commit hash varies by fetch; drop it before comparing.
    _ = config.pop('_commit_hash', None)
    self.assertEqual(config, {'do_lower_case': False})
    # A repo without a tokenizer_config.json yields an empty dict.
    config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
    self.assertDictEqual(config, {})
    tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)
        config = get_tokenizer_config(tmp_dir)
    self.assertEqual(config['tokenizer_class'], 'BertTokenizer')
def test_new_tokenizer_registration(self):
    """Registering a custom config/tokenizer pair makes AutoTokenizer resolve
    it; re-registering an existing config raises; registry is cleaned up."""
    try:
        AutoConfig.register('custom', CustomConfig)
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
        # Re-registering an already-mapped config must fail.
        with self.assertRaises(ValueError):
            AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)
        tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertIsInstance(new_tokenizer, CustomTokenizer)
    finally:
        # Undo registration so other tests see a pristine mapping.
        if ('custom' in CONFIG_MAPPING._extra_content):
            del CONFIG_MAPPING._extra_content['custom']
        if (CustomConfig in TOKENIZER_MAPPING._extra_content):
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
# NOTE(review): bare '_tokenizers' looks like a truncated @require_tokenizers
# decorator mangled during extraction — confirm against upstream.
_tokenizers
def test_new_tokenizer_fast_registration(self):
    """Slow/fast custom tokenizers can be registered separately or together;
    AutoTokenizer resolves to the right one depending on use_fast."""
    try:
        AutoConfig.register('custom', CustomConfig)
        # Registering only the slow class leaves the fast slot empty.
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
        self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
        # Adding the fast class afterwards completes the pair.
        AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
        self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
        del TOKENIZER_MAPPING._extra_content[CustomConfig]
        # Registering both at once also works.
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast)
        self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))
        # Re-registering an already-mapped config must fail.
        with self.assertRaises(ValueError):
            AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertIsInstance(new_tokenizer, CustomTokenizerFast)
            new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
            self.assertIsInstance(new_tokenizer, CustomTokenizer)
    finally:
        # Undo registration so other tests see a pristine mapping.
        if ('custom' in CONFIG_MAPPING._extra_content):
            del CONFIG_MAPPING._extra_content['custom']
        if (CustomConfig in TOKENIZER_MAPPING._extra_content):
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        """Load a tokenizer whose class is defined in remote code
        (trust_remote_code=True) and check it survives a save/reload cycle."""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        if is_tokenizers_available():
            # With the tokenizers backend installed, the fast variant is chosen.
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizerFast')
            # Force the slow implementation and repeat the round-trip.
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=True, use_fast=False)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
                self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer')
                self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            # Without the tokenizers backend only the slow class exists.
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
            self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer')
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        """Same remote-code loading as above, but for a repo that stores its
        custom-class info in the legacy configuration format."""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            # Fast variant is preferred when the tokenizers backend exists.
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast')
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=True, use_fast=False)
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
        else:
            self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer')
    def test_repo_not_found(self):
        """A nonexistent repo id must raise an EnvironmentError with a clear message."""
        with self.assertRaisesRegex(EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = AutoTokenizer.from_pretrained('bert-base')
    def test_revision_not_found(self):
        """An invalid git revision must raise an EnvironmentError naming the revision."""
        with self.assertRaisesRegex(EnvironmentError, 'aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)'):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        """Once cached, reloading a tokenizer should cost exactly one HEAD request
        (the freshness check) and no GET / other requests."""
        # First load populates the local cache (network traffic not counted).
        _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
def run_exp(exp_config: str, run_type: str, base_config='', ckpt_path='', eval_viz=False, debug=False, train_split=False, gt_semantics=False, run_id=None, run_suffix=None, wipe_only=False, deterministic=False, record_all=False, skip_log=False, simple_eval=False, opts=None) -> None:
    """Configure and launch a habitat-style training or evaluation run.

    Builds a run config from `base_config` + `exp_config` (+ CLI `opts`),
    rewrites output directories per variant/run-suffix, then dispatches to
    the registered trainer's `train()` or `eval()`.

    Args:
        exp_config: path to the experiment YAML; its stem becomes the variant name.
        run_type: 'train' or 'eval'.
        base_config: optional base YAML merged before `exp_config`.
        ckpt_path: checkpoint to resume/evaluate ('' means none/pretrained default).
        eval_viz: record videos for a short (30-episode) eval.
        debug: single-process, verbose, fixed-seed mode.
        train_split: evaluate on the training split instead of val.
        gt_semantics: use ground-truth semantic sensors at eval.
        run_id: if set, seeds and output dirs are namespaced per run (multi-seed).
        run_suffix: optional sub-variant; reroutes config path and output dirs.
        wipe_only: delete run artifacts and exit.
        deterministic / record_all / skip_log / simple_eval: eval behavior flags.
        opts: extra config overrides passed to `get_config`.
    """
    # A run suffix reroutes the experiment YAML into a sibling subdirectory.
    if (run_suffix is not None):
        (exp_dir, exp_yaml) = os.path.split(exp_config)
        exp_config = os.path.join(exp_dir, run_suffix, exp_yaml)
    config = get_config([base_config, exp_config], opts)
    variant_name = os.path.split(exp_config)[1].split('.')[0]
    config.defrost()
    # --- Output-directory layout (two schemes, toggled by RUN_FOLDER_MODE) ---
    if (run_suffix is not None):
        if RUN_FOLDER_MODE:
            config.TENSORBOARD_DIR = os.path.join(config.TENSORBOARD_DIR, run_suffix, variant_name)
            config.CHECKPOINT_FOLDER = os.path.join(config.CHECKPOINT_FOLDER, run_suffix, variant_name)
            config.VIDEO_DIR = os.path.join(config.VIDEO_DIR, run_suffix, variant_name)
        variant_name = f'{variant_name}-{run_suffix}'
    if (not RUN_FOLDER_MODE):
        config.TENSORBOARD_DIR = os.path.join(config.TENSORBOARD_DIR, variant_name)
        config.CHECKPOINT_FOLDER = os.path.join(config.CHECKPOINT_FOLDER, variant_name)
        config.VIDEO_DIR = os.path.join(config.VIDEO_DIR, variant_name)
    config.LOG_FILE = os.path.join(config.LOG_FILE, f'{variant_name}.log')
    if wipe_only:
        # Deletes run artifacts and exits the process.
        wipe_and_exit(config)
    if debug:
        config.DEBUG = True
        config.LOG_INTERVAL = 1
        config.NUM_PROCESSES = 1
    if train_split:
        config.EVAL.SPLIT = 'train'
    if deterministic:
        config.EVAL.DETERMINISTIC = True
    config.RL.PPO.POLICY.EVAL_GT_SEMANTICS = gt_semantics
    # Label used to tag diagnostic outputs, e.g. 'eval_gt_True'.
    diagnostic_label = ('train_gt_' if train_split else 'eval_gt_')
    diagnostic_label = f'{diagnostic_label}{config.RL.PPO.POLICY.EVAL_GT_SEMANTICS}'
    eval_stats_dir = ''
    # --- Eval-specific configuration ---
    if (run_type == 'eval'):
        config.TRAINER_NAME = 'ppo'
        if (not debug):
            config.NUM_PROCESSES = 6
        if skip_log:
            config.NUM_PROCESSES = 1
        map_cfg = config.TASK_CONFIG.TASK.TOP_DOWN_MAP
        map_cfg.MAP_RESOLUTION = 1200
        log_diagnostics = []
        # NOTE(review): hard-coded shared-filesystem output paths below.
        eval_stats_dir = osp.join(f'/srv/share/jye72/objectnav_eval/', variant_name)
        if eval_viz:
            # Short visual eval: few episodes, videos written to disk.
            config.TEST_EPISODE_COUNT = 30
            config.VIDEO_OPTION = ['disk']
        else:
            config.VIDEO_OPTION = []
            log_diagnostics = [Diagnostics.basic, Diagnostics.episode_info]
        if record_all:
            # Detailed eval: fixed episode set, extra measurements, full
            # per-timestep diagnostics.
            eval_stats_dir = osp.join(f'/srv/share/jye72/objectnav_detailed/', variant_name)
            config.TEST_EPISODE_COUNT = 300
            if train_split:
                config.EVAL.SPLIT = f'train_{config.TEST_EPISODE_COUNT}'
            else:
                config.EVAL.SPLIT = f'val_{config.TEST_EPISODE_COUNT}'
            config.VIDEO_DIR = osp.join('/srv/share/jye72/vis/videos/objectnav_detailed/', variant_name)
            config.TASK_CONFIG.TASK.MEASUREMENTS.append('GOAL_OBJECT_VISIBLE_PIXELS')
            config.TASK_CONFIG.TASK.MEASUREMENTS.append('REGION_LEVEL_INFO')
            log_diagnostics = [Diagnostics.basic, Diagnostics.internal_activations, Diagnostics.observations, Diagnostics.actions, Diagnostics.episode_info, Diagnostics.weights, Diagnostics.d2g, Diagnostics.room_cat, Diagnostics.visit_count, Diagnostics.collisions_t, Diagnostics.coverage_t, Diagnostics.sge_t]
    # --- Sensors/measurements required by the policy or aux tasks ---
    if ((run_type == 'train') or (config.RL.PPO.POLICY.USE_SEMANTICS and config.RL.PPO.POLICY.EVAL_GT_SEMANTICS)):
        if ((config.ENV_NAME == 'ExploreThenNavRLEnv') or ('SemanticGoalExists' in config.RL.AUX_TASKS.tasks)):
            config.TASK_CONFIG.TASK.MEASUREMENTS.append('GOAL_OBJECT_VISIBLE_PIXELS')
        if (config.RL.COVERAGE_TYPE == 'VIEW'):
            config.TASK_CONFIG.TASK.MEASUREMENTS.append('TOP_DOWN_MAP')
        # Attach whatever sim/task sensors the aux tasks declare they need.
        train_sensors = config.RL.AUX_TASKS.required_sensors
        train_sim_sensors = list(filter((lambda x: (x in SIM_SENSORS)), train_sensors))
        if (not config.RL.POLICY.TRAIN_PRED_SEMANTICS):
            config.SENSORS.extend(train_sim_sensors)
        train_task_sensors = list(filter((lambda x: (x in TASK_SENSORS)), train_sensors))
        config.TASK_CONFIG.TASK.SENSORS.extend(train_task_sensors)
    # Separate gt/pred-semantics eval artifacts so they never overwrite each other.
    if ((run_type == 'eval') and config.RL.PPO.POLICY.USE_SEMANTICS):
        if config.RL.PPO.POLICY.EVAL_GT_SEMANTICS:
            config.TENSORBOARD_DIR = osp.join(config.TENSORBOARD_DIR, 'gt_sem')
            config.VIDEO_DIR = osp.join(config.VIDEO_DIR, 'gt_sem')
        else:
            config.TENSORBOARD_DIR = osp.join(config.TENSORBOARD_DIR, 'pred_sem')
            config.VIDEO_DIR = osp.join(config.VIDEO_DIR, 'pred_sem')
            print('Running evaluation with semantic predictions')
    # Rewrite the checkpoint path so its filename is keyed by variant name.
    if (ckpt_path is not None):
        (ckpt_dir, ckpt_file) = os.path.split(ckpt_path)
        (_, *ckpt_file_others) = ckpt_file.split('.')
        # Checkpoint files look like '<name>.<num>.pth'; second-to-last piece
        # of the remainder is the checkpoint number.
        ckpt_num = ckpt_file_others[(- 2)]
        eval_stats_dir = osp.join(eval_stats_dir, ckpt_num)
        ckpt_file = '.'.join([variant_name, *ckpt_file_others])
        ckpt_path = os.path.join(config.CHECKPOINT_FOLDER, ckpt_file)
    # ======================= Single-run path (no run_id) =======================
    if (run_id is None):
        random.seed(config.TASK_CONFIG.SEED)
        np.random.seed(config.TASK_CONFIG.SEED)
        trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
        assert (trainer_init is not None), f'{config.TRAINER_NAME} is not supported'
        trainer = trainer_init(config)
        if (run_type == 'train'):
            if (ckpt_path is None):
                ckpt_path = config.RL.POLICY.PRETRAINED_CKPT
            if (ckpt_path != ''):
                # Resume: derive update counter from the checkpoint index.
                (ckpt_dir, ckpt_file) = os.path.split(ckpt_path)
                ckpt_index = ckpt_file.split('.')[1]
                ckpt = int(ckpt_index)
                start_updates = ((ckpt * config.CHECKPOINT_INTERVAL) + 1)
                trainer.train(ckpt_path=ckpt_path, ckpt=ckpt, start_updates=start_updates)
            elif (not DO_PRESERVE_RUNS):
                # Fresh run, clobbering allowed: wipe previous artifacts.
                if os.path.exists(config.TENSORBOARD_DIR):
                    print('Removing tensorboard directory...')
                    shutil.rmtree(config.TENSORBOARD_DIR, ignore_errors=True)
                if os.path.exists(config.CHECKPOINT_FOLDER):
                    print('Removing checkpoint folder...')
                    shutil.rmtree(config.CHECKPOINT_FOLDER, ignore_errors=True)
                if os.path.exists(config.LOG_FILE):
                    print('Removing log file...')
                    # NOTE(review): rmtree on a log *file* is a no-op even with
                    # ignore_errors=True — probably meant os.remove; confirm.
                    shutil.rmtree(config.LOG_FILE, ignore_errors=True)
                trainer.train()
            else:
                # Fresh run, preservation mode: refuse to overwrite artifacts.
                if (os.path.exists(config.TENSORBOARD_DIR) or os.path.exists(config.CHECKPOINT_FOLDER) or os.path.exists(config.LOG_FILE)):
                    print(f'TB dir exists: {os.path.exists(config.TENSORBOARD_DIR)}')
                    print(f'Ckpt dir exists: {os.path.exists(config.CHECKPOINT_FOLDER)}')
                    print(f'Log file exists: {os.path.exists(config.LOG_FILE)}')
                    print('Run artifact exists, please clear manually')
                    exit(1)
                trainer.train()
        else:
            if debug:
                # Fully pin all RNGs and disable episode shuffling for
                # reproducible debugging evals.
                seed = 7
                random.seed(seed)
                np.random.seed(seed)
                torch.manual_seed(seed)
                torch.random.manual_seed(seed)
                config.defrost()
                config.RANDOM_SEED = seed
                config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
                config.freeze()
            trainer.eval(ckpt_path, log_diagnostics=log_diagnostics, output_dir=eval_stats_dir, label=diagnostic_label, skip_log=skip_log, simple_eval=simple_eval)
        return
    # ================== Multi-seed path (run_id namespaces dirs) ==================
    run_prefix = f'run_{run_id}'
    seed = run_id
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(config.TASK_CONFIG.SEED)
    tb_dir = os.path.join(config.TENSORBOARD_DIR, run_prefix)
    ckpt_dir = os.path.join(config.CHECKPOINT_FOLDER, run_prefix)
    (log_dir, log_file) = os.path.split(config.LOG_FILE)
    log_file_extended = f'{run_prefix}--{log_file}'
    log_file_path = os.path.join(log_dir, log_file_extended)
    config.TASK_CONFIG.SEED = seed
    config.TENSORBOARD_DIR = tb_dir
    config.CHECKPOINT_FOLDER = ckpt_dir
    config.LOG_FILE = log_file_path
    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert (trainer_init is not None), f'{config.TRAINER_NAME} is not supported'
    trainer = trainer_init(config)
    if (run_type == 'train'):
        if ((ckpt_path is None) and (config.RL.POLICY.PRETRAINED_CKPT == '')):
            # Fresh training run for this seed.
            # NOTE(review): existence checks use the bare filename
            # `log_file_extended`, not `log_file_path` — likely a bug; confirm.
            if (DO_PRESERVE_RUNS and (os.path.exists(tb_dir) or os.path.exists(ckpt_dir) or os.path.exists(log_file_extended))):
                print(f'TB dir exists: {os.path.exists(tb_dir)}')
                print(f'Ckpt dir exists: {os.path.exists(ckpt_dir)}')
                print(f'Log file exists: {os.path.exists(log_file_extended)}')
                print('Run artifact exists, please clear manually')
                exit(1)
            else:
                shutil.rmtree(tb_dir, ignore_errors=True)
                shutil.rmtree(ckpt_dir, ignore_errors=True)
                if os.path.exists(log_file_extended):
                    os.remove(log_file_extended)
            trainer.train()
        else:
            # Resume training for this seed from a checkpoint.
            if (ckpt_path is None):
                ckpt_path = config.RL.POLICY.PRETRAINED_CKPT
            (ckpt_dir, ckpt_file) = os.path.split(ckpt_path)
            ckpt_index = ckpt_file.split('.')[1]
            if osp.exists(ckpt_path):
                true_path = ckpt_path
            else:
                # Fall back to the per-run checkpoint naming scheme.
                true_path = os.path.join(ckpt_dir, run_prefix, f'{run_prefix}.{ckpt_index}.pth')
            ckpt = int(ckpt_index)
            start_updates = ((ckpt * config.CHECKPOINT_INTERVAL) + 1)
            trainer.train(ckpt_path=true_path, ckpt=ckpt, start_updates=start_updates)
    else:
        # Multi-seed eval: checkpoints live under the per-run subfolder.
        (ckpt_dir, ckpt_file) = os.path.split(ckpt_path)
        ckpt_index = ckpt_file.split('.')[1]
        true_path = os.path.join(ckpt_dir, run_prefix, f'{run_prefix}.{ckpt_index}.pth')
        trainer.eval(true_path, log_diagnostics=log_diagnostics, output_dir=eval_stats_dir, label=diagnostic_label)
def make_dataset(root, label):
    """Build a list of (image_path, class_index) pairs from a label file.

    Each line of `label` is expected to be '<relative_path> <class_index>';
    entries whose path is not an image (per `is_image_file`) are skipped.

    Args:
        root: directory prepended to each relative image path.
        label: path to the whitespace-separated label file.

    Returns:
        List of (absolute_path, int_class) tuples.
    """
    images = []
    # Context manager guarantees the label file is closed even on error
    # (the original left the handle open).
    with open(label) as labeltxt:
        for line in labeltxt:
            data = line.strip().split(' ')
            if is_image_file(data[0]):
                path = os.path.join(root, data[0])
                gt = int(data[1])
                images.append((path, gt))
    return images
class SegmentationDataSet1(data.Dataset):
    """Paired image/mask dataset for semantic segmentation.

    Reads an image and its target mask from disk per index, optionally
    applies a joint transform, and returns float32/int64 tensors.
    """

    def __init__(self, inputs: list, targets: list, transform=None):
        self.inputs = inputs
        self.targets = targets
        self.transform = transform
        # Dtypes expected by typical segmentation losses (float inputs,
        # integer class-index targets).
        self.inputs_dtype = torch.float32
        self.targets_dtype = torch.long

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index: int):
        # Load the image/mask pair for this index.
        x = imread(str(self.inputs[index]))
        y = imread(str(self.targets[index]))
        # Joint transform (e.g. paired augmentation), when provided.
        if self.transform is not None:
            x, y = self.transform(x, y)
        x = torch.from_numpy(x).type(self.inputs_dtype)
        y = torch.from_numpy(y).type(self.targets_dtype)
        return (x, y)
def get_top_n_labels(n, hist=None):
    """Return up to `n` labels drawn from the most frequent buckets.

    Args:
        n: number of labels to return.
        hist: mapping of count -> list of labels; when None, the
            distribution is computed via `calculate_label_distribution()`.

    Returns:
        The first `n` labels, walking buckets from highest to lowest count.
    """
    if hist is None:
        # Only compute the (potentially expensive) distribution when the
        # caller did not supply one. The previous `hist or ...` fallback
        # also recomputed when an explicitly-passed histogram was empty.
        hist = calculate_label_distribution()
    answer = []
    # Walk buckets from most to least frequent, collecting their labels.
    for _count, labels in sorted(hist.items(), reverse=True):
        answer.extend(labels)
        if len(answer) >= n:
            break
    return answer[:n]
def main(args):
    """Interactive translation loop: read sentences from stdin/file, translate
    with a fairseq model ensemble, and print S-/H-/P-/A- formatted output.

    Args:
        args: parsed fairseq generation namespace (path, beam, nbest,
            buffer_size, max_sentences, fp16, cpu, ...).
    """
    utils.import_user_module(args)
    if (args.buffer_size < 1):
        args.buffer_size = 1
    if ((args.max_tokens is None) and (args.max_sentences is None)):
        args.max_sentences = 1
    assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam'
    assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size'
    print(args)
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    task = tasks.setup_task(args)
    # Load the model ensemble ('path' may be a ':'-separated list).
    print('| loading model(s) from {}'.format(args.path))
    (models, _model_args) = checkpoint_utils.load_model_ensemble(args.path.split(':'), arg_overrides=eval(args.model_overrides), task=task)
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    # Prepare each model for inference (beamable MM, fp16, GPU placement).
    for model in models:
        model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment)
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    generator = task.build_generator(args)
    # Optional dictionary for replacing <unk> tokens via alignments.
    align_dict = utils.load_align_dict(args.replace_unk)
    max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models])
    if (args.buffer_size > 1):
        print('| Sentence buffer size:', args.buffer_size)
    print('| Type the input sentence and press return:')
    # start_id keeps output ids globally increasing across buffers.
    start_id = 0
    for inputs in buffered_read(args.input, args.buffer_size):
        results = []
        for batch in make_batches(inputs, args, task, max_positions):
            src_tokens = batch.src_tokens
            src_lengths = batch.src_lengths
            if use_cuda:
                src_tokens = src_tokens.cuda()
                src_lengths = src_lengths.cuda()
            sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
            translations = task.inference_step(generator, models, sample)
            for (i, (id, hypos)) in enumerate(zip(batch.ids.tolist(), translations)):
                src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
                results.append(((start_id + id), src_tokens_i, hypos))
        # Emit in original input order (batching may have reordered).
        for (id, src_tokens, hypos) in sorted(results, key=(lambda x: x[0])):
            if (src_dict is not None):
                src_str = src_dict.string(src_tokens, args.remove_bpe)
                print('S-{}\t{}'.format(id, src_str))
            # Print the top-nbest hypotheses with scores (and alignments).
            for hypo in hypos[:min(len(hypos), args.nbest)]:
                (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=(hypo['alignment'].int().cpu() if (hypo['alignment'] is not None) else None), align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe)
                print('H-{}\t{}\t{}'.format(id, hypo['score'], hypo_str))
                print('P-{}\t{}'.format(id, ' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
                if args.print_alignment:
                    print('A-{}\t{}'.format(id, ' '.join(map((lambda x: str(utils.item(x))), alignment))))
        start_id += len(inputs)
def any_broadcast(data, root_rank, max_size=4096):
    """Broadcast an arbitrary picklable object from `root_rank` to all
    Horovod ranks via a fixed-size CUDA byte buffer.

    Layout: bytes [0:2] hold the payload size as (size // 255, size % 255),
    bytes [2:size+2] hold the pickled payload.

    Args:
        data: any picklable object (only root_rank's value matters).
        root_rank: Horovod rank that supplies the data.
        max_size: buffer size in bytes; payload + 2 must fit.

    Returns:
        The unpickled object, identical on every rank.

    Raises:
        ValueError: if the encoded payload exceeds `max_size`.
    """
    # Lazily (re)allocate the cached CUDA buffer. The original checked the
    # never-assigned attribute `_in_buffer` (and compared an int against
    # torch.Size), so the buffer was reallocated on every call; check the
    # attribute actually stored, and compare element counts.
    if ((not hasattr(any_broadcast, '_buffer')) or (max_size != any_broadcast._buffer.numel())):
        any_broadcast._buffer = torch.cuda.ByteTensor(max_size)
    buffer_ = any_broadcast._buffer
    enc = pickle.dumps(data)
    enc_size = len(enc)
    if ((enc_size + 2) > max_size):
        raise ValueError('encoded data exceeds max_size: {}'.format((enc_size + 2)))
    # Two-byte header encodes sizes up to 255*256 - 1.
    assert (max_size < (255 * 256))
    buffer_[0] = (enc_size // 255)
    buffer_[1] = (enc_size % 255)
    buffer_[2:(enc_size + 2)] = torch.ByteTensor(list(enc))
    hvd.broadcast_(buffer_, root_rank)
    # Decode using the header written by root_rank (local enc_size may differ).
    size = ((255 * buffer_[0].item()) + buffer_[1].item())
    bytes_list = bytes(buffer_[2:(size + 2)].tolist())
    result = pickle.loads(bytes_list)
    return result
class ResBlock(nn.Module):
    """Residual block: three (3x3 conv, BN, LeakyReLU) stages plus a 1x1
    convolutional skip projection, summed at the output."""

    def __init__(self, nFin, nFout):
        super(ResBlock, self).__init__()
        self.conv_block = nn.Sequential()
        # Stage channel pairs: first stage maps nFin -> nFout, the rest
        # stay at nFout. Module names match the original layout.
        stage_channels = [(nFin, nFout), (nFout, nFout), (nFout, nFout)]
        for stage, (c_in, c_out) in enumerate(stage_channels, start=1):
            self.conv_block.add_module('ConvL%d' % stage, nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False))
            self.conv_block.add_module('BNorm%d' % stage, nn.BatchNorm2d(c_out))
            self.conv_block.add_module('LRelu%d' % stage, nn.LeakyReLU(0.1, inplace=True))
        # 1x1 projection so the skip path matches the output channel count.
        self.skip_layer = nn.Conv2d(nFin, nFout, kernel_size=1, stride=1)

    def forward(self, x):
        shortcut = self.skip_layer(x)
        return shortcut + self.conv_block(x)
class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution: 1x1 pointwise conv followed by a 3x3
    depthwise conv (groups == channels), then BatchNorm and ReLU."""

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        # Pointwise projection to the target channel count.
        self.conv1 = nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)
        # Depthwise 3x3: one filter per channel, spatial size preserved.
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=False, groups=out_channels)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn(self.conv2(self.conv1(x))))
        return out
def rmse_bootstrap(y, y_pred, target=1, m=10000):
    """Point estimate and bootstrap standard error of the RMSE.

    Args:
        y: true values (indexable by integer arrays).
        y_pred: predicted values, same length as `y`.
        target: passed through to `rmse`.
        m: number of bootstrap resamples.

    Returns:
        Tuple (rmse(y, y_pred, target), std of the m bootstrap RMSEs).
    """
    # NOTE(review): seeding the *global* NumPy RNG is a side effect on
    # callers; kept for backward-compatible reproducibility.
    np.random.seed(1)
    # Hoisted out of the loop: the index array is invariant across the
    # m resamples (the original rebuilt it every iteration).
    idx = np.arange(len(y))
    errors = []
    for _ in range(m):
        sel = np.random.choice(idx, len(idx), replace=True)
        errors.append(rmse(y[sel], y_pred[sel], target))
    return (rmse(y, y_pred, target), np.std(errors))
class UNetMidBlockCrossAttnMotion(nn.Module):
    """UNet mid-block with cross-attention and temporal ("motion") layers.

    Structure: an initial ResNet, then `num_layers` repetitions of
    (spatial Transformer w/ cross-attention -> temporal Transformer -> ResNet).
    Used for video diffusion, where `num_frames` frames share the batch dim.
    """

    def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, dual_cross_attention: float=False, use_linear_projection: float=False, upcast_attention: float=False, attention_type: str='default', temporal_num_attention_heads: int=1, temporal_cross_attention_dim: Optional[int]=None, temporal_max_seq_length: int=32):
        super().__init__()
        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        # Fall back to a group count that divides the channel dim.
        resnet_groups = (resnet_groups if (resnet_groups is not None) else min((in_channels // 4), 32))
        # There is always one more ResNet than attention layer (the leading one).
        resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)]
        attentions = []
        motion_modules = []
        for _ in range(num_layers):
            if (not dual_cross_attention):
                attentions.append(Transformer2DModel(num_attention_heads, (in_channels // num_attention_heads), in_channels=in_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type))
            else:
                attentions.append(DualTransformer2DModel(num_attention_heads, (in_channels // num_attention_heads), in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups))
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            # Temporal transformer attends across frames with sinusoidal
            # positional embeddings, up to temporal_max_seq_length frames.
            motion_modules.append(TransformerTemporalModel(num_attention_heads=temporal_num_attention_heads, attention_head_dim=(in_channels // temporal_num_attention_heads), in_channels=in_channels, norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, activation_fn='geglu'))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        self.motion_modules = nn.ModuleList(motion_modules)
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, num_frames: int=1) -> torch.FloatTensor:
        """Run leading ResNet, then each (attn -> motion -> resnet) triple."""
        lora_scale = (cross_attention_kwargs.get('scale', 1.0) if (cross_attention_kwargs is not None) else 1.0)
        hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
        blocks = zip(self.attentions, self.resnets[1:], self.motion_modules)
        for (attn, resnet, motion_module) in blocks:
            if (self.training and self.gradient_checkpointing):
                def create_custom_forward(module, return_dict=None):
                    # Wrapper so torch.utils.checkpoint can call modules that
                    # take a return_dict keyword.
                    def custom_forward(*inputs):
                        if (return_dict is not None):
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                ckpt_kwargs: Dict[(str, Any)] = ({'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {})
                # NOTE(review): the attention call itself is not checkpointed
                # here (only motion and resnet are) — confirm intentional.
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0]
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states, temb, **ckpt_kwargs)
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs)
            else:
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0]
                hidden_states = motion_module(hidden_states, num_frames=num_frames)[0]
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
        return hidden_states
class SetTransformer(nn.Module):
    """Set Transformer: a permutation-invariant encoder of two ISAB blocks
    followed by a PMA pooling decoder with two SAB blocks and a linear head."""

    def __init__(self, dim_input, num_outputs, dim_output, num_inds=32, dim_hidden=128, num_heads=4, ln=False):
        super(SetTransformer, self).__init__()
        # Encoder: induced set attention keeps cost linear in set size.
        encoder_layers = [
            ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln),
            ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln),
        ]
        self.enc = nn.Sequential(*encoder_layers)
        # Decoder: pool to num_outputs seeds, refine, project to dim_output.
        decoder_layers = [
            PMA(dim_hidden, num_heads, num_outputs, ln=ln),
            SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
            SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
            nn.Linear(dim_hidden, dim_output),
        ]
        self.dec = nn.Sequential(*decoder_layers)

    def forward(self, X):
        encoded = self.enc(X)
        return self.dec(encoded)
class Parser(BaseParser):
    """Feed parser for JSON payloads, driven by JMESPath selectors."""

    def __post_init__(self):
        # Decode the raw payload once; all selectors query this tree.
        self.data = json.loads(self.content)

    def _parse(self, selector: str) -> List[Dict[(str, str)]]:
        # jmespath returns None on no match; normalize to an empty list.
        matches = jmespath.search(selector, self.data)
        return matches if matches else []

    def _raw_urls(self) -> List[Dict[(str, str)]]:
        if not self.follower:
            return []
        return self._parse(self.follower)

    def entries(self) -> List[RawFeedEntry]:
        selected = self._parse(cast(str, self.selector))
        return [RawFeedEntry(item) for item in selected]
class Factory(BaseFactory):
    """Decoder factory producing 64x64 images through an hourglass network."""

    # NOTE(review): defined without `self` and with no @staticmethod decorator,
    # yet called as self.pt_defaults_scope_value() below — a decorator was
    # likely stripped from this snippet; confirm against the original source.
    def pt_defaults_scope_value():
        # prettytensor defaults applied around network construction.
        return {'activation_fn': default_activation.current_value, 'batch_normalize': True, 'learned_moments_update_rate': 0.0003, 'variance_epsilon': 0.001, 'scale_after_normalization': True}
    # Feature dimensionality per image patch.
    default_patch_feature_dim = 8

    def __init__(self, recon_dist_param_num=1, options=None):
        super().__init__(recon_dist_param_num, options)
        # Output channel count; defaults to RGB when not configured.
        if ('image_channels' in options):
            self.image_channels = options['image_channels']
        else:
            self.image_channels = 3

    def image_size(self):
        # (height, width) of generated images.
        return (64, 64)

    def input_feature_dim(self):
        # Dimensionality of the latent feature fed to feature2image.
        return 64

    def feature2image(self, feature_tensor):
        """Decode a feature tensor into image-space reconstruction parameters."""
        # One output channel group per reconstruction-distribution parameter.
        output_channels = (3 * self.recon_dist_param_num)
        # Hourglass specification: encoder depths with skip connections and
        # max-pool downsampling; decoder depths given where they differ.
        hgd = [{'type': 'conv2d', 'depth': 64, 'decoder_depth': output_channels, 'decoder_activation_fn': None}, {'type': 'conv2d', 'depth': 64, 'decoder_depth': 32}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 128, 'decoder_depth': 64}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 256}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 512}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 512}]
        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            output_tensor = hourglass(feature_tensor, hgd, net_type=(self.options['hourglass_type'] if ('hourglass_type' in self.options) else None), extra_highlevel_feature=None)
        return output_tensor
    # Reuse the generic implementation for feature rotation.
    rotate_dominating_features_if_necessary = GenericDecoderFactory.rotate_dominating_features_if_necessary
class SynapseGroup():
    """Group of synapse entries with a derived synMem word-count cost model.

    Cost reflects how many synaptic-memory words the group occupies relative
    to the hardware maximum (16384 words of 64 bits each).

    NOTE(review): `synEntries` is defined twice below (getter-shaped then
    setter-shaped), and the trailing no-arg methods read like accessors —
    @property/@x.setter decorators were likely stripped from this snippet;
    as written, the second `synEntries` def silently shadows the first.
    Confirm against the original source.
    """
    __slots__ = ['id', '_synEntries', '_maxNumBitsPerWord', '_numSyn', '_numSynEntries', '_numSynMemWords', '_maxNumWords', '_maxNumSynMemWords', '_cost']

    def __init__(self, groupId, synEntries):
        # Hardware limits: synMem capacity (words) and word width (bits).
        self._maxNumSynMemWords = 16384
        self._maxNumBitsPerWord = 64
        self.id = groupId
        # List (per neuron) of lists of synapse entries.
        self._synEntries = synEntries
        # Derived metrics, populated by _updateCost().
        self._numSyn = None
        self._numSynEntries = None
        self._numSynMemWords = None
        self._maxNumWords = None
        self._cost = None
        self._updateCost()

    def _updateCost(self):
        """Recompute synapse/entry counts and the synMem word cost."""
        self._numSyn = 0
        self._numSynEntries = 0
        numBitsOfNeurons = []
        # Total bits per neuron determine how many words each neuron needs.
        for synEntriesOfNeuron in self._synEntries:
            self._numSynEntries += len(synEntriesOfNeuron)
            numBitsOfNeuron = 0
            for synEntry in synEntriesOfNeuron:
                numBitsOfNeuron += synEntry.numBits
                self._numSyn += synEntry.numSyn
            numBitsOfNeurons.append(numBitsOfNeuron)
        numBitsOfNeurons = np.asarray(numBitsOfNeurons, int)
        numSynMemWords = np.ceil((numBitsOfNeurons / self._maxNumBitsPerWord))
        # An extra word is needed when a neuron's bits end exactly on a word
        # boundary or leave fewer than 5 spare bits (remainder >= 59).
        remainder = (numBitsOfNeurons % self._maxNumBitsPerWord)
        incNumSynMemWords = np.logical_or((remainder == 0), (remainder >= 59))
        self._maxNumWords = int(np.max((numSynMemWords + incNumSynMemWords)))
        # All neurons are padded to the widest neuron's word count.
        numNeurons = len(self._synEntries)
        self._numSynMemWords = (self._maxNumWords * numNeurons)
        self._cost = (self._numSynMemWords / self._maxNumSynMemWords)

    def synEntries(self):
        # Accessor for the raw synapse entries (see class NOTE re: decorators).
        return self._synEntries

    def synEntries(self, synEntries):
        # Setter-shaped variant: replaces entries and refreshes the cost.
        self._synEntries = synEntries
        self._updateCost()

    def numSyn(self):
        # Total number of synapses across all entries.
        return self._numSyn

    def numSynEntries(self):
        # Total number of synapse entries across all neurons.
        return self._numSynEntries

    def numSynMemWords(self):
        # Total synMem words consumed by the group.
        return self._numSynMemWords

    def maxSynMemLen(self):
        # Padded per-neuron word count.
        return self._maxNumWords

    def cost(self):
        # Fraction of synMem capacity consumed.
        return self._cost
class _RepeatSampler(object):
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
(yield from iter(self.sampler)) |
class TwoPlayer_Env1(TwoPlayerSokobanEnv):
    """Two-player Sokoban variant: 7x7 room, 3 boxes, 200-step episode limit."""

    metadata = {'render.modes': ['human', 'rgb_array', 'tiny_human', 'tiny_rgb_array']}

    def __init__(self):
        room_shape = (7, 7)
        super(TwoPlayer_Env1, self).__init__(dim_room=room_shape, num_boxes=3, max_steps=200)
def test_arg_and_kwargs():
    """The bound functions must echo back positional and keyword arguments."""
    # *args of mixed types round-trips unchanged.
    args = ('arg1_value', 'arg2_value', 3)
    assert (m.args_function(*args) == args)
    # *args and **kwargs are returned as a (tuple, dict) pair.
    args = ('a1', 'a2')
    kwargs = dict(arg3='a3', arg4=4)
    assert (m.args_kwargs_function(*args, **kwargs) == (args, kwargs))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.