def construct_length_mask(seq_lengths):
max_sequence_length = max(seq_lengths)
mask = torch.zeros([len(seq_lengths), max_sequence_length]).bool()
for (line, length) in zip(mask, seq_lengths):
line[:length] = True
return mask |
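# Usage sketch (added for illustration; assumes PyTorch is installed):
# construct_length_mask turns per-sequence lengths into a [batch, max_len]
# boolean padding mask.
import torch
mask = construct_length_mask([3, 5, 2])
print(mask.shape)  # torch.Size([3, 5])
print(mask[0])     # tensor([ True,  True,  True, False, False])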
def dobldobl_estimated_distance():
from phcpy.phcpy2c3 import py2c_padcon_dobldobl_estimated_distance
return py2c_padcon_dobldobl_estimated_distance() |
def load_weights(output_folder, weight_load_name, num_layers):
weights = []
biases = []
for i in range(0, (num_layers + 1)):
weight_i = np.loadtxt(((((output_folder + weight_load_name) + '/w_') + str(i)) + '.txt'), delimiter=',')
w_i = tf.Variable(weight_i, dtype=tf.float32)
weights.append(w_i)
bias_i = np.loadtxt(((((output_folder + weight_load_name) + '/b_') + str(i)) + '.txt'), delimiter=',')
b_i = tf.Variable(bias_i, dtype=tf.float32)
biases.append(b_i)
return (weights, biases) |
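# (Added note) load_weights expects files named
# <output_folder><weight_load_name>/w_<i>.txt and b_<i>.txt for
# i = 0 .. num_layers, each a comma-separated array readable by np.loadtxt.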
def main():
m = build_low_latency_conv(41, 40)
m.summary()
m = build_tiny_conv(32, 40)
m.summary()
m = build_one()
m.summary() |
def get_bucketer(method, encoding_method=None, case_id_col=None, cat_cols=None, num_cols=None, n_clusters=None, random_state=None, n_neighbors=None):
if (method == 'cluster'):
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
clustering = KMeans(n_clusters, random_state=random_state)
return ClusterBasedBucketer(encoder=bucket_encoder, clustering=clustering)
elif (method == 'state'):
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
return StateBasedBucketer(encoder=bucket_encoder)
elif (method == 'single'):
return NoBucketer(case_id_col=case_id_col)
elif (method == 'prefix'):
return PrefixLengthBucketer(case_id_col=case_id_col)
elif (method == 'knn'):
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
return KNNBucketer(encoder=bucket_encoder, n_neighbors=n_neighbors)
else:
print('Invalid bucketer type')
return None |
def render_pose(cfg, i4d, dataset, epoch, specific_obj, pose):
basedir = cfg.basedir
expname = cfg.expname
dataloader = dataset.get_loader(num_workers=0)
savedir = os.path.join(basedir, expname, 'renderings', f'{specific_obj}_epoch_{epoch}_renderfactor_{cfg.render_factor}_batch_{cfg.fixed_batch}')
os.makedirs(savedir, exist_ok=True)
img_outpath = os.path.join(savedir, f'pose_{pose[0]}.png')
c2w = pose[1]
if os.path.exists(img_outpath):
return
dataloader.dataset.load_specific_input = specific_obj
dataloader.dataset.load_specific_rendering_pose = c2w
print(f'generating {dataloader.dataset.load_specific_input}, pose: {pose[0]}')
render_data = next(iter(dataloader))['complete']
render_and_save(i4d, dataset, render_data, savedir, img_outpath, True)
dataloader.dataset.load_specific_input = None
dataloader.dataset.load_specific_rendering_pose = None |
class DebugVisualizer():
def __init__(self):
plt.figure(1)
self.debug_lines = [plt.plot([], color='tab:orange')[0] for _ in range(4)]
self.debug_texts = [plt.text(0, 0, None, ha='center', va='center') for _ in range(4)]
def draw_debug_data(self, debug_data):
for i in range(4):
if (i > (len(debug_data) - 1)):
self.debug_lines[i].set_data([], [])
self.debug_texts[i].set_text(None)
else:
self.debug_lines[i].set_data([debug_data[i][1][0], debug_data[i][2][0]], [debug_data[i][1][1], debug_data[i][2][1]])
self.debug_texts[i].set_text(debug_data[i][0])
self.debug_texts[i].set_position((debug_data[i][1][0], debug_data[i][1][1]))
if (len(debug_data) > 4):
print(f'Warning: Found {len(debug_data)} debug lines but only first 4 will be drawn.') |
class RRDBNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4):
super(RRDBNet, self).__init__()
RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
self.RRDB_trunk = make_layer(RRDB_block_f, nb)
self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
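# (Added note) `upscale` is accepted for API compatibility, but the forward
# pass below always upsamples 4x via two nearest-neighbour 2x interpolations.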
def forward(self, x):
fea = self.conv_first(x)
trunk = self.trunk_conv(self.RRDB_trunk(fea))
fea = (fea + trunk)
fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest')))
fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest')))
out = self.conv_last(self.lrelu(self.HRconv(fea)))
return out |
class BaseContrastSpladeFinetuner():
def get_quadratic_increase_flop_factor(self, flop_factor):
if (self.state.epoch >= self.args.flop_increase_epoch_factor):
return flop_factor
else:
return (flop_factor * ((self.state.epoch / self.args.flop_increase_epoch_factor) ** 2))
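# (Added note) The factor above ramps quadratically from 0 to its full value
# over the first `flop_increase_epoch_factor` epochs, easing in the sparsity
# penalty during early training.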
def maybe_log_flop(self, q_factor, p_factor, q_flops, p_flops, rank_loss, scaled_flops):
if ((self.args.flop_log_steps > 0) and ((self.state.global_step % self.args.flop_log_steps) == 0)):
(q_factor, p_factor, q_flops, p_flops) = (float(q_factor), float(p_factor), float(q_flops), float(p_flops))
self.log({'q_factor': round(q_factor, 4), 'p_factor': round(p_factor, 4), 'q_flops': round(q_flops, 2), 'd_flops': round(p_flops, 2), 'rank_loss': round(float(rank_loss), 2), 'scaled_flops': round(float(scaled_flops), 2)})
@staticmethod
def _flops(*inputs):
sum_distrib = torch.sum(torch.abs(inputs[0]), dim=0)
tensor_num = len(inputs[0])
for tensor in inputs[1:]:
sum_distrib = (sum_distrib + torch.sum(torch.abs(tensor), dim=0))
tensor_num += len(tensor)
average_distrib = (sum_distrib / tensor_num)
return torch.sum((average_distrib ** 2))
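# (Added note) _flops implements the FLOPS regulariser used by SPLADE-style
# sparse retrievers: with a_j the mean absolute activation of vocabulary
# dimension j over the batch, the penalty is sum_j a_j**2.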
def compute_loss(self, model, inputs, return_outputs=False):
query_embeds = model(**inputs['query_input'])
doc_embeds = model(**inputs['doc_input'])
qids = self._prepare_input(inputs['qids']).contiguous()
docids = self._prepare_input(inputs['docids']).contiguous()
q_flops_loss_factor = self.get_quadratic_increase_flop_factor(self.args.q_flops_loss_factor)
p_flops_loss_factor = self.get_quadratic_increase_flop_factor(self.args.p_flops_loss_factor)
if (self.args.local_rank > (- 1)):
query_embeds = self._gather_tensor(query_embeds)
doc_embeds = self._gather_tensor(doc_embeds)
(qids, docids) = (self._gather_tensor(qids), self._gather_tensor(docids))
q_flops_loss = self._flops(query_embeds)
if ('neg_doc_input' not in inputs):
rank_loss = self.compute_inbatch_contrastive_loss(query_embeds, doc_embeds, qids, docids)
p_flops_loss = self._flops(doc_embeds)
flops_loss = ((q_flops_loss_factor * q_flops_loss) + (p_flops_loss_factor * p_flops_loss))
if (self.args.local_rank > (- 1)):
flops_loss = (flops_loss * dist.get_world_size())
self.maybe_log_flop(q_flops_loss_factor, p_flops_loss_factor, q_flops_loss, p_flops_loss, rank_loss, flops_loss)
loss = (rank_loss + flops_loss)
return ((loss, (query_embeds, doc_embeds)) if return_outputs else loss)
else:
neg_doc_embeds = model(**inputs['neg_doc_input'])
neg_docids = self._prepare_input(inputs['neg_docids']).contiguous()
if (self.args.local_rank > (- 1)):
neg_doc_embeds = self._gather_tensor(neg_doc_embeds)
neg_docids = self._gather_tensor(neg_docids)
rank_loss = self.compute_contrastive_loss(query_embeds, doc_embeds, neg_doc_embeds, qids, docids, neg_docids)
p_flops_loss = self._flops(doc_embeds, neg_doc_embeds)
flops_loss = ((q_flops_loss_factor * q_flops_loss) + (p_flops_loss_factor * p_flops_loss))
if (self.args.local_rank > (- 1)):
flops_loss = (flops_loss * dist.get_world_size())
loss = (rank_loss + flops_loss)
self.maybe_log_flop(q_flops_loss_factor, p_flops_loss_factor, q_flops_loss, p_flops_loss, rank_loss, flops_loss)
return ((loss, (query_embeds, doc_embeds, neg_doc_embeds)) if return_outputs else loss)
def compute_inbatch_contrastive_loss(self, query_embeds, doc_embeds, qids, docids):
labels = torch.arange(len(query_embeds), dtype=torch.long, device=query_embeds.device)
all_doc_embeds = doc_embeds
all_docids = docids
negative_mask = self._compute_negative_mask(qids, all_docids)
similarities = torch.matmul(query_embeds, all_doc_embeds.transpose(0, 1))
similarities = (similarities * self.args.inv_temperature)
similarities = (similarities - (10000.0 * negative_mask))
contrast_loss = F.cross_entropy(similarities, labels)
if (self.args.local_rank > (- 1)):
contrast_loss = (contrast_loss * dist.get_world_size())
return contrast_loss
def compute_contrastive_loss(self, query_embeds, doc_embeds, neg_doc_embeds, qids, docids, neg_docids):
labels = torch.arange(len(query_embeds), dtype=torch.long, device=query_embeds.device)
all_doc_embeds = torch.vstack((doc_embeds, neg_doc_embeds))
all_docids = torch.hstack((docids, neg_docids))
negative_mask = self._compute_negative_mask(qids, all_docids)
similarities = torch.matmul(query_embeds, all_doc_embeds.transpose(0, 1))
similarities = (similarities * self.args.inv_temperature)
similarities = (similarities - (10000.0 * negative_mask))
contrast_loss = F.cross_entropy(similarities, labels)
if (self.args.local_rank > (- 1)):
contrast_loss = (contrast_loss * dist.get_world_size())
return contrast_loss
@torch.no_grad()
def _compute_negative_mask(self, qids, docids):
negative_mask = torch.zeros((len(qids), len(docids)), dtype=torch.bool, device=qids.device)
for (i, qid) in enumerate(qids):
for d in self.qrels[qid.item()]:
negative_mask[i] = torch.logical_or(negative_mask[i], (docids == d))
negative_mask = negative_mask.type(torch.float32)
negative_mask.fill_diagonal_(0)
return negative_mask
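# (Added note) The mask above flags, for each query, every in-batch document
# listed as relevant in qrels so it is not treated as a negative; the diagonal
# is cleared to keep each query's own positive pair.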
def _gather_tensor(self, t: Tensor):
all_tensors = [torch.empty_like(t) for _ in range(dist.get_world_size())]
dist.all_gather(all_tensors, t)
all_tensors[self.args.local_rank] = t
all_tensors = torch.cat(all_tensors)
return all_tensors
def floating_point_ops(self, inputs: Dict[(str, Union[(torch.Tensor, Any)])]):
return 0 |
class BertConfig(PretrainedConfig):
model_type = 'bert'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, position_embedding_type='absolute', **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = gradient_checkpointing
self.position_embedding_type = position_embedding_type |
class iCIFAR100(iCIFAR10):
base_dataset = datasets.cifar.CIFAR100
base_dataset_hierarchy = cifar_info.CIFAR100
common_transforms = [transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]
class_order = [87, 0, 52, 58, 44, 91, 68, 97, 51, 15, 94, 92, 10, 72, 49, 78, 61, 14, 8, 86, 84, 96, 18, 24, 32, 45, 88, 11, 4, 67, 69, 66, 77, 47, 79, 93, 29, 50, 57, 83, 17, 81, 41, 12, 37, 59, 25, 20, 80, 73, 1, 28, 6, 46, 62, 82, 53, 9, 31, 75, 38, 63, 33, 74, 27, 22, 36, 3, 16, 21, 60, 19, 70, 90, 89, 43, 5, 42, 65, 76, 40, 30, 23, 85, 2, 95, 56, 48, 71, 64, 98, 13, 99, 7, 34, 55, 54, 26, 35, 39]
class_order_super = [4, 95, 55, 30, 72, 73, 1, 67, 32, 91, 62, 92, 70, 54, 82, 10, 61, 28, 9, 16, 53, 83, 51, 0, 57, 87, 86, 40, 39, 22, 25, 5, 94, 84, 20, 18, 6, 7, 14, 24, 88, 97, 3, 43, 42, 17, 37, 12, 68, 76, 71, 60, 33, 23, 49, 38, 21, 15, 31, 19, 75, 66, 34, 63, 64, 45, 99, 26, 77, 79, 46, 98, 11, 2, 35, 93, 78, 44, 29, 27, 80, 65, 74, 50, 36, 52, 96, 56, 47, 59, 90, 58, 48, 13, 8, 69, 81, 41, 89, 85] |
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor, mask):
return (F.normalize(tensor, self.mean, self.std), mask) |
def evaluate(args, model):
dev_dataset = SequenceDataset(TextTokenIdsCache(args.preprocess_dir, f'{args.mode}-query'), args.max_seq_length)
collate_fn = get_collate_function(args.max_seq_length)
batch_size = args.pergpu_eval_batch_size
if (args.n_gpu > 1):
batch_size *= args.n_gpu
dev_dataloader = DataLoader(dev_dataset, batch_size=batch_size, collate_fn=collate_fn)
if (args.n_gpu > 1):
model = torch.nn.DataParallel(model)
qembedding_memmap = np.memmap(args.qmemmap_path, dtype='float32', shape=(len(dev_dataset), 768), mode='w+')
with torch.no_grad():
for (step, (batch, qoffsets)) in enumerate(tqdm(dev_dataloader)):
batch = {k: v.to(args.model_device) for (k, v) in batch.items()}
model.eval()
embeddings = model(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'], is_query=True)
embeddings = embeddings.detach().cpu().numpy()
qembedding_memmap[qoffsets] = embeddings
return qembedding_memmap |
def load_gptq_model():
model_name_or_path = 'TheBloke/falcon-7b-instruct-GPTQ'
model_basename = 'gptq_model-4bit-64g'
use_triton = False
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(model_name_or_path, model_basename=model_basename, use_safetensors=True, trust_remote_code=True, device='cuda:0', use_triton=use_triton, quantize_config=None)
return (tokenizer, model) |
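# Usage sketch (added; assumes a CUDA GPU plus the auto-gptq and transformers
# packages): generate a short completion with the quantized model.
(tokenizer, model) = load_gptq_model()
inputs = tokenizer('What does GPTQ quantization do?', return_tensors='pt').to('cuda:0')
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))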
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return ((x * F.relu6((x + 3.0), inplace=self.inplace)) / 6.0) |
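# Sanity-check sketch (added): h-swish(x) = x * relu6(x + 3) / 6, so it is 0
# at the origin and matches the identity once x + 3 saturates relu6. Assumes
# the usual torch/nn/F imports for the class above.
import torch
act = Hswish(inplace=False)
print(act(torch.tensor([-4.0, 0.0, 4.0])))  # tensor([-0., 0., 4.])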
def rotate_points(points, bbox):
tl_corner = (bbox[0], bbox[2])
distances = []
for point in points:
distances.append(get_distance(point, tl_corner))
min_index = np.argsort(distances)[0]
return (points[min_index:] + points[:min_index]) |
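# (Added note) rotate_points cyclically rotates the list so the point nearest
# the box's top-left corner comes first; indexing (bbox[0], bbox[2]) assumes an
# (x_min, x_max, y_min, y_max) box layout and a Euclidean get_distance helper.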
@register_charset('fr')  # decorator name partially reconstructed (assumed)
class FrCharSet(BaseCharset):
_CHARS = u'abcdefghijklmnopqrstuvwxyz'
_FEATURES = ['capitalization'] |
def test_audio_datamodule_prepare_download_archive(fs, mocker):
mocked_download = mocker.patch(f'{TESTED_MODULE}.data_utils.download_file_r2')
mocked_extract = mocker.patch(f'{TESTED_MODULE}.extract_archive')
data = AudioDataModule()
data.prepare_data()
assert (mocked_download.call_args_list == [mock.call(data.archive, data.url, data.bucket)])
assert (mocked_extract.call_args_list == [mock.call(data.archive, data.data_dir)]) |
class MemorySummary(NamedTuple):
sequential: List[MemoryState]
cumulative: List[MemoryState]
current: List[MemoryState]
total: Memory |
class Estimator(object):
@staticmethod
def from_keras(*, model_creator: Optional[Callable]=None, config: Optional[Dict]=None, verbose: bool=False, workers_per_node: int=1, compile_args_creator: Optional[Callable]=None, backend: str='ray', cpu_binding: bool=False, log_to_driver: bool=True, model_dir: Optional[str]=None, **kwargs) -> Union[('TensorFlow2Estimator', 'SparkTFEstimator', None)]:
if (backend in {'ray', 'horovod'}):
from bigdl.orca.learn.tf2.ray_estimator import TensorFlow2Estimator
return TensorFlow2Estimator(model_creator=model_creator, config=config, verbose=verbose, workers_per_node=workers_per_node, backend=backend, compile_args_creator=compile_args_creator, cpu_binding=cpu_binding)
elif (backend == 'spark'):
if cpu_binding:
invalidInputError(False, 'cpu_binding should not be True when using spark backend')
from bigdl.orca.learn.tf2.pyspark_estimator import SparkTFEstimator
return SparkTFEstimator(model_creator=model_creator, config=config, verbose=verbose, compile_args_creator=compile_args_creator, workers_per_node=workers_per_node, log_to_driver=log_to_driver, model_dir=model_dir, **kwargs)
else:
invalidInputError(False, f'Only horovod, ray and spark backends are supported for now, got backend: {backend}')
return None
@staticmethod
def latest_checkpoint(checkpoint_dir: str) -> str:
return get_latest_checkpoint(checkpoint_dir) |
def get_parser():
parser = argparse.ArgumentParser(description='RIASS')
parser.add_argument('--resume', dest='resume', action='store_true', help='whether to resume training an existing model (the one with name model_name will be used)')
parser.set_defaults(resume=False)
parser.add_argument('-epoch_resume', dest='epoch_resume', default=0, type=int, help='set epoch_resume if you want flags --finetune_after and --update_encoder to be properly activated (eg if you stop training for whatever reason at epoch 15, set epoch_resume to 15)')
parser.add_argument('-seed', dest='seed', default=123, type=int)
parser.add_argument('-batch_size', dest='batch_size', default=28, type=int)
parser.add_argument('-lr', dest='lr', default=0.001, type=float)
parser.add_argument('-lr_cnn', dest='lr_cnn', default=1e-06, type=float)
parser.add_argument('-optim_cnn', dest='optim_cnn', default='adam', choices=['adam', 'sgd', 'rmsprop'])
parser.add_argument('-momentum', dest='momentum', default=0.9, type=float)
parser.add_argument('-weight_decay', dest='weight_decay', default=1e-06, type=float)
parser.add_argument('-weight_decay_cnn', dest='weight_decay_cnn', default=1e-06, type=float)
parser.add_argument('-optim', dest='optim', default='adam', choices=['adam', 'sgd', 'rmsprop'])
parser.add_argument('-maxseqlen', dest='maxseqlen', default=10, type=int)
parser.add_argument('-gt_maxseqlen', dest='gt_maxseqlen', default=20, type=int)
parser.add_argument('-best_val_loss', dest='best_val_loss', default=1000, type=float)
parser.add_argument('--crop', dest='crop', action='store_true')
parser.set_defaults(crop=False)
parser.add_argument('--smooth_curves', dest='smooth_curves', action='store_true')
parser.set_defaults(smooth_curves=False)
parser.add_argument('-finetune_after', dest='finetune_after', default=0, type=int, help='epoch number to start finetuning. set -1 to not finetune.there is a patience term that can allow starting to fine tune earlier (does not apply if value is -1)')
parser.add_argument('--update_encoder', dest='update_encoder', action='store_true', help='used in sync with finetune_after. no need to activate.')
parser.set_defaults(update_encoder=False)
parser.add_argument('--transfer', dest='transfer', action='store_true')
parser.set_defaults(transfer=False)
parser.add_argument('-transfer_from', dest='transfer_from', default='model')
parser.add_argument('--curriculum_learning', dest='curriculum_learning', action='store_true')
parser.set_defaults(curriculum_learning=False)
parser.add_argument('-steps_cl', dest='steps_cl', default=1, type=int)
parser.add_argument('-min_steps', dest='min_steps', default=1, type=int)
parser.add_argument('-min_delta', dest='min_delta', default=0.0, type=float)
parser.add_argument('-class_loss_after', dest='class_loss_after', default=20, type=int, help='epoch number to start training the classification loss. set to -1 to not do it. A patience term can allow to start training with this loss (does not apply if value is -1)')
parser.add_argument('--use_class_loss', dest='use_class_loss', action='store_true')
parser.set_defaults(use_class_loss=False)
parser.add_argument('-stop_loss_after', dest='stop_loss_after', default=3000, type=int, help='epoch number to start training the stopping loss. set to -1 to not do it. A patience term can allow to start training with this loss (does not apply if value is -1)')
parser.add_argument('--use_stop_loss', dest='use_stop_loss', action='store_true')
parser.set_defaults(use_stop_loss=False)
parser.add_argument('-patience', dest='patience', default=15, type=int, help='patience term to activate flags such as use_class_loss, feed_prediction and update_encoder if their matching vars are not -1')
parser.add_argument('-patience_stop', dest='patience_stop', default=60, type=int, help='patience to stop training.')
parser.add_argument('-max_epoch', dest='max_epoch', default=4000, type=int)
parser.add_argument('-print_every', dest='print_every', default=10, type=int)
parser.add_argument('--log_term', dest='log_term', action='store_true', help='if activated, will show logs in stdout instead of log file.')
parser.set_defaults(log_term=False)
parser.add_argument('--visdom', dest='visdom', action='store_true')
parser.set_defaults(visdom=False)
parser.add_argument('-port', dest='port', default=8097, type=int, help='visdom port')
parser.add_argument('-server', dest='server', default='http://localhost', help='visdom server')  # default assumed; original value was corrupted
parser.add_argument('-class_weight', dest='class_weight', default=0.1, type=float)
parser.add_argument('-iou_weight', dest='iou_weight', default=1.0, type=float)
parser.add_argument('-stop_weight', dest='stop_weight', default=0.5, type=float)
parser.add_argument('-stop_balance_weight', dest='stop_balance_weight', default=0.5, type=float)
parser.add_argument('--augment', dest='augment', action='store_true')
parser.set_defaults(augment=False)
parser.add_argument('-rotation', dest='rotation', default=10, type=int)
parser.add_argument('-translation', dest='translation', default=0.1, type=float)
parser.add_argument('-shear', dest='shear', default=0.1, type=float)
parser.add_argument('-zoom', dest='zoom', default=0.7, type=float)
parser.add_argument('--cpu', dest='use_gpu', action='store_false')
parser.set_defaults(use_gpu=True)
parser.add_argument('-ngpus', dest='ngpus', default=1, type=int)
parser.add_argument('-base_model', dest='base_model', default='resnet101', choices=['resnet101', 'resnet50', 'resnet34', 'vgg16'])
parser.add_argument('-skip_mode', dest='skip_mode', default='concat', choices=['sum', 'concat', 'mul', 'none'])
parser.add_argument('-model_name', dest='model_name', default='model')
parser.add_argument('-log_file', dest='log_file', default='train.log')
parser.add_argument('-hidden_size', dest='hidden_size', default=128, type=int)
parser.add_argument('-kernel_size', dest='kernel_size', default=3, type=int)
parser.add_argument('-dropout', dest='dropout', default=0.0, type=float)
parser.add_argument('-dropout_stop', dest='dropout_stop', default=0.0, type=float)
parser.add_argument('-dropout_cls', dest='dropout_cls', default=0.0, type=float)
parser.add_argument('-imsize', dest='imsize', default=256, type=int)
parser.add_argument('--resize', dest='resize', action='store_true')
parser.set_defaults(resize=False)
parser.add_argument('-num_classes', dest='num_classes', default=21, type=int)
parser.add_argument('-dataset', dest='dataset', default='pascal', choices=['pascal', 'cityscapes', 'leaves'])
parser.add_argument('-pascal_dir', dest='pascal_dir', default='/work/asalvador/dev/data/rsis/VOCAug/')
parser.add_argument('-cityscapes_dir', dest='cityscapes_dir', default='/gpfs/scratch/bsc31/bsc31429/CityScapes/')
parser.add_argument('-leaves_dir', dest='leaves_dir', default='/gpfs/scratch/bsc31/bsc31429/LeavesDataset/A1/')
parser.add_argument('-leaves_test_dir', dest='leaves_test_dir', default='/gpfs/scratch/bsc31/bsc31429/CVPPP2014_LSC_testing_data/A1/')
parser.add_argument('-num_workers', dest='num_workers', default=4, type=int)
parser.add_argument('-eval_split', dest='eval_split', default='test')
parser.add_argument('-mask_th', dest='mask_th', default=0.5, type=float)
parser.add_argument('-stop_th', dest='stop_th', default=0.5, type=float)
parser.add_argument('-class_th', dest='class_th', default=0.5, type=float)
parser.add_argument('-max_dets', dest='max_dets', default=100, type=int)
parser.add_argument('-min_size', dest='min_size', default=0.001, type=float)
parser.add_argument('-cat_id', dest='cat_id', default=(- 1), type=int)
parser.add_argument('--ignore_cats', dest='use_cats', action='store_false')
parser.add_argument('--display', dest='display', action='store_true')
parser.add_argument('--no_display_text', dest='no_display_text', action='store_true')
parser.add_argument('--all_classes', dest='all_classes', action='store_true')
parser.add_argument('--no_run_coco_eval', dest='no_run_coco_eval', action='store_true')
parser.add_argument('--display_route', dest='display_route', action='store_true')
parser.set_defaults(display=False)
parser.set_defaults(display_route=False)
parser.set_defaults(use_cats=True)
parser.set_defaults(all_classes=False)
parser.set_defaults(no_display_text=False)
parser.set_defaults(use_gt_cats=False)
parser.set_defaults(use_gt_masks=False)
parser.set_defaults(use_gt_stop=False)
return parser |
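# Usage sketch (added; not from the source): build the parser and override a
# couple of defaults from the command line.
parser = get_parser()
args = parser.parse_args(['-batch_size', '16', '--augment'])
print(args.batch_size, args.augment)  # 16 True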
def __save_loss(losses, file_path):
pd.DataFrame(data=losses, columns=['epoch', 'batch', 'train_loss', 'val_loss']).to_csv(file_path, index=False) |
class GrapplerOptimizer(GraphRewriterBase):
def __init__(self, model, input_output_names, opt_cfg):
super().__init__(model)
self.input_output_names = input_output_names
self.opt_cfg = opt_cfg
self.generic_optimizer = ('pruning', 'shape', 'dependency', 'debug_stripper', 'loop')
self.tf_2_optimizer = ('constfold', 'arithmetic', 'min_graph_nodes')
@dump_elapsed_time('Pass GrapplerOptimizer')
def do_transformation(self):
try:
g = tf.Graph()
with g.as_default():
tf.compat.v1.import_graph_def(self.model, name='')  # returns None without return_elements, so keep g bound to the Graph
meta_graph = saver.export_meta_graph(graph_def=self.model, graph=g, clear_devices=True)
fetch_collection = meta_graph_pb2.CollectionDef()
for fetch in self.input_output_names:
fetch_collection.node_list.value.append(fetch)
meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)
config = config_pb2.ConfigProto()
rewriter_config = config.graph_options.rewrite_options
for optimizer in self.generic_optimizer:
if ((optimizer in self.opt_cfg) and self.opt_cfg[optimizer]):
rewriter_config.optimizers.append(optimizer)
if version1_gt_version2(tf.version.VERSION, '2.2.0'):
for optimizer in self.tf_2_optimizer:
if ((optimizer in self.opt_cfg) and self.opt_cfg[optimizer]):
rewriter_config.optimizers.append(optimizer)
rewriter_config.min_graph_nodes = (- 1)
optimized_graph = tf_optimizer.OptimizeGraph(config, meta_graph)
return optimized_graph
except Exception as e:
self.logger.warning('Fail to run grappler pass due to {}.'.format(str(e)))
return self.model |
class BasicBlock2D(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, **kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, features):
x = self.conv(features)
x = self.bn(x)
x = self.relu(x)
return x |
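# Shape-check sketch (added; assumes the usual torch/nn imports): conv -> BN ->
# ReLU preserves spatial size when padding matches the kernel.
import torch
block = BasicBlock2D(3, 16, kernel_size=3, padding=1)
print(block(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 16, 32, 32])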
class Video():
def __init__(self, video_id):
self.posetrack_video_id = video_id
self.frames = []
def to_new(self):
result = {'images': [], 'annotations': []}
for image in self.frames:
image_json = image.to_new()
image_json['vid_id'] = self.posetrack_video_id
image_json['nframes'] = len(self.frames)
image_json['id'] = int(image.frame_id)
result['images'].append(image_json)
for (person_idx, person) in enumerate(image.people):
person_json = person.to_new()
person_json['image_id'] = int(image.frame_id)
person_json['id'] = ((int(image.frame_id) * 100) + person_idx)
result['annotations'].append(person_json)
result['categories'] = [{'supercategory': 'person', 'name': 'person', 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]], 'keypoints': POSETRACK18_LM_NAMES_COCO_ORDER, 'id': 1}]
return result
def to_old(self):
res = {'annolist': []}
for image in self.frames:
elem = {}
(im_rep, ir_list, imgnum) = image.to_old()
elem['image'] = [im_rep]
elem['imgnum'] = [imgnum]
if ir_list:
elem['ignore_regions'] = ir_list
elem['annorect'] = []
for person in image.people:
elem['annorect'].append(person.to_old())
if image.people:
elem['is_labeled'] = [1]
else:
elem['is_labeled'] = [0]
res['annolist'].append(elem)
return res
@classmethod
def from_old(cls, track_data):
assert ('annolist' in track_data.keys()), 'Wrong format!'
video = None
for image_info in track_data['annolist']:
image = Image.from_old(image_info)
if (not video):
video = Video(path.basename(path.dirname(image.posetrack_filename)).split('_')[0])
else:
assert (video.posetrack_video_id == path.basename(path.dirname(image.posetrack_filename)).split('_')[0])
video.frames.append(image)
return [video]
@classmethod
def from_new(cls, track_data):
image_id_to_can_info = {}
video_id_to_video = {}
assert (len(track_data['categories']) == 1)
assert (track_data['categories'][0]['name'] == 'person')
assert (len(track_data['categories'][0]['keypoints']) in [15, 17])
conversion_table = []
for lm_name in track_data['categories'][0]['keypoints']:
if (lm_name not in POSETRACK18_LM_NAMES):
conversion_table.append(None)
else:
conversion_table.append(POSETRACK18_LM_NAMES.index(lm_name))
for (lm_idx, lm_name) in enumerate(POSETRACK18_LM_NAMES):
assert (lm_idx in conversion_table), ('Landmark `%s` not found.' % lm_name)
videos = []
for image_id in [image['id'] for image in track_data['images']]:
image = Image.from_new(track_data, image_id)
video_id = path.basename(path.dirname(image.posetrack_filename)).split('_')[0]
if (video_id in video_id_to_video.keys()):
video = video_id_to_video[video_id]
else:
video = Video(video_id)
video_id_to_video[video_id] = video
videos.append(video)
video.frames.append(image)
for person_info in track_data['annotations']:
if (person_info['image_id'] != image_id):
continue
image.people.append(Person.from_new(person_info, conversion_table))
return videos |
def learn(*, env, num_epoch, seed=None, eval_env=None, replay_strategy='future', policy_save_interval=5, clip_return=True, demo_file=None, override_params=None, load_model=False, load_buffer=False, load_path=None, save_path=None, play_no_training=False, offline_train=False, mode=None, su_method='', **kwargs):
override_params = (override_params or {})
if (MPI is not None):
    rank = MPI.COMM_WORLD.Get_rank()
    num_cpu = MPI.COMM_WORLD.Get_size()
else:
    (rank, num_cpu) = (0, 1)  # single-process fallback (assumed) when MPI is unavailable
rank_seed = ((seed + (1000000 * rank)) if (seed is not None) else None)
set_global_seeds(rank_seed)
params = config.DEFAULT_PARAMS
env_name = env.spec.id
params['env_name'] = env_name
params['replay_strategy'] = replay_strategy
if (env_name in config.DEFAULT_ENV_PARAMS):
params.update(config.DEFAULT_ENV_PARAMS[env_name])
params.update(**override_params)
params.update(kwargs)
if ('num_epoch' in params):
num_epoch = params['num_epoch']
params['mode'] = mode
params['su_method'] = su_method
params = config.prepare_params(params)
params['rollout_batch_size'] = env.num_envs
random_init = params['random_init']
dump_params(logger, params)
if (rank == 0):
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
policy = config.configure_wgcsl(dims=dims, params=params, clip_return=clip_return, offline_train=offline_train)
if (load_path is not None):
if load_model:
tf_util.load_variables(os.path.join(load_path, 'policy_last.pkl'))
if load_buffer:
policy.buffer.load(os.path.join(load_path, 'buffer.pkl'))
rollout_params = {'exploit': False, 'use_target_net': False, 'use_demo_states': True, 'compute_Q': False, 'T': params['T']}
eval_params = {'exploit': True, 'use_target_net': params['test_with_polyak'], 'use_demo_states': False, 'compute_Q': True, 'T': params['T']}
for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
rollout_params[name] = params[name]
eval_params[name] = params[name]
eval_env = (eval_env or env)
rollout_worker = RolloutWorker(env, policy, dims, logger, monitor=True, **rollout_params)
evaluator = RolloutWorker(eval_env, policy, dims, logger, **eval_params)
if play_no_training:
num_episode = 20
policy.buffer.clear_buffer()
for _ in range(num_episode):
episode = evaluator.generate_rollouts()
policy.store_episode(episode)
return policy
return train(save_path=save_path, policy=policy, rollout_worker=rollout_worker, evaluator=evaluator, n_epochs=num_epoch, n_test_rollouts=params['n_test_rollouts'], n_cycles=params['n_cycles'], n_batches=params['n_batches'], policy_save_interval=policy_save_interval, demo_file=demo_file, random_init=random_init, play_no_training=play_no_training, offline_train=offline_train) |
def ycbcr2rgb(img):
img_type = img.dtype
img = (_convert_input_type_range(img) * 255)
out_img = ((np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, (- 0.00153632), 0.00791071], [0.00625893, (- 0.00318811), 0]]) * 255.0) + [(- 222.921), 135.576, (- 276.836)])
out_img = _convert_output_type_range(out_img, img_type)
return out_img |
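# (Added note) The 3x3 matrix in ycbcr2rgb is the inverse BT.601 YCbCr->RGB
# transform scaled by 1/255, and the offset vector undoes the [16, 128, 128]
# shifts; the constants match the BasicSR/mmcv reference implementation.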
def training_batch_item_task(batch_index, model, sess, train_data, is_training):
for index in batch_index:
(_, support_user, target_item) = fbne_data.batch_gen_item_task(train_data, index, setting.batch_size)
feed_dict = {model.support_user: support_user, model.target_item: target_item, model.training_phrase_item_task: is_training}
sess.run([model.loss_item_task, model.optimizer_item_task], feed_dict) |
@require_torch
@require_vision
class BlipImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
image_processing_class = (BlipImageProcessor if is_vision_available() else None)
def setUp(self):
self.image_processor_tester = BlipImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, 'do_resize'))
self.assertTrue(hasattr(image_processor, 'size'))
self.assertTrue(hasattr(image_processor, 'do_normalize'))
self.assertTrue(hasattr(image_processor, 'image_mean'))
self.assertTrue(hasattr(image_processor, 'image_std'))
self.assertTrue(hasattr(image_processor, 'do_convert_rgb'))
def test_batch_feature(self):
pass
def test_call_pil(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
def test_call_numpy(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
def test_call_pytorch(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'])) |
class HmEncoder(object):
def __init__(self, cols=None):
self.enc = HelmertEncoder(cols=cols, verbose=1, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value')
def fit(self, X):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.enc.fit(X)
def transform(self, X):
return self.enc.transform(X) |
class BehaviorCloning(OffPolicyAlgorithm):
def __init__(self, *args, grad_norm_clip: Optional[float]=None, bc_data: str='all', bc_all_steps: int=0, **kwargs) -> None:
super().__init__(*args, **kwargs)
assert ('encoder' in self.network.CONTAINERS)
assert ('actor' in self.network.CONTAINERS)
assert isinstance(self.action_space, gym.spaces.Box)
assert (bc_data in {'all', 'pos'})
self.bc_data = bc_data
self.bc_all_steps = bc_all_steps
self.grad_norm_clip = grad_norm_clip
def setup_optimizers(self) -> None:
params = itertools.chain(self.network.actor.parameters(), self.network.encoder.parameters())
groups = utils.create_optim_groups(params, self.optim_kwargs)
self.optim['actor'] = self.optim_class(groups)
def _get_bc_loss(self, obs, action):
z = self.network.encoder(obs)
dist = self.network.actor(z)
if isinstance(dist, torch.distributions.Distribution):
loss = (- dist.log_prob(action))
elif (torch.is_tensor(dist) and isinstance(self.processor.action_space, gym.spaces.Box)):
loss = torch.nn.functional.mse_loss(dist, action, reduction='none').sum(dim=(- 1))
elif (torch.is_tensor(dist) and isinstance(self.processor.action_space, gym.spaces.Discrete)):
loss = torch.nn.functional.cross_entropy(dist, action, ignore_index=IGNORE_INDEX, reduction='none')
else:
raise ValueError('Invalid Policy output')
return loss.mean()
def train_step(self, batch: Dict, step: int, total_steps: int) -> Dict:
if (isinstance(batch, dict) and ('label' in batch)):
if ((self.bc_data == 'pos') and (step >= self.bc_all_steps)):
prefer_1 = (batch['label'] <= 0.5)
prefer_2 = (batch['label'] >= 0.5)
obs = torch.cat((batch['obs_1'][prefer_1], batch['obs_2'][prefer_2]), dim=0)
action = torch.cat((batch['action_1'][prefer_1], batch['action_2'][prefer_2]), dim=0)
else:
obs = torch.cat((batch['obs_1'], batch['obs_2']), dim=0)
action = torch.cat((batch['action_1'], batch['action_2']), dim=0)
else:
assert ('obs' in batch)
assert ('action' in batch)
assert (self.bc_data != 'pos'), 'Cannot select only pos data for replay dataset.'
(obs, action) = (batch['obs'], batch['action'])
loss = self._get_bc_loss(obs, action)
self.optim['actor'].zero_grad(set_to_none=True)
loss.backward()
if (self.grad_norm_clip is not None):
torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.grad_norm_clip)
self.optim['actor'].step()
return dict(loss=loss.item())
def validation_step(self, batch: Any) -> Dict:
if (isinstance(batch, dict) and ('label' in batch)):
if (self.bc_data == 'pos'):
prefer_1 = (batch['label'] <= 0.5)
prefer_2 = (batch['label'] >= 0.5)
obs = torch.cat((batch['obs_1'][prefer_1], batch['obs_2'][prefer_2]), dim=0)
action = torch.cat((batch['action_1'][prefer_1], batch['action_2'][prefer_2]), dim=0)
else:
obs = torch.cat((batch['obs_1'], batch['obs_2']), dim=0)
action = torch.cat((batch['action_1'], batch['action_2']), dim=0)
else:
assert ('obs' in batch)
assert ('action' in batch)
assert (self.bc_data != 'pos'), 'Cannot select only pos data for replay dataset.'
(obs, action) = (batch['obs'], batch['action'])
with torch.no_grad():
loss = self._get_bc_loss(obs, action)
return dict(loss=loss.item())
def _get_train_action(self, obs: Any, step: int, total_steps: int):
batch = dict(obs=obs)
with torch.no_grad():
action = self.predict(batch, is_batched=False, sample=True)
return action |
def test_can_instantiate_from_loss_config(loss_cfg, parser):
cfg_string = read_cfg(loss_cfg)
parser.add_argument('cfg', type=Union[(Callable, torch.nn.Module)])
args = parser.parse_string(cfg_string)
assert ('class_path' in args.cfg), 'No class_path key in config root level'
class_path = args.cfg['class_path']
objs = parser.instantiate_classes(args)
assert isinstance(objs.cfg, import_class(class_path))
if isinstance(objs.cfg, torch.nn.Module):
assert hasattr(objs.cfg, 'forward'), 'Loss function must have a forward method.'
else:
assert isinstance(objs.cfg, Callable), 'Loss function must be callable.' |
def test_classification_metrics_integrated():
ground_truth = {'1': ['2', '3', '4'], '2': ['1', '3'], '3': ['1', '2'], '4': ['1']}
retrieved = {'1': ['2', '3'], '2': ['1'], '3': ['1'], '4': []}
expected_return = {'precision': np.array([0.5, 1.0]), 'recall': np.array([1.0, 0.5]), 'f1_score': np.array([0.66666667, 0.66666667]), 'support': np.array([2, 4])}
metrics = classification_metrics(ground_truth, retrieved)
assert isinstance(metrics, dict)
for (k, v) in metrics.items():
assert isinstance(v, np.ndarray)
np.testing.assert_almost_equal(metrics[k], expected_return[k]) |
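# (Added note) With precision [0.5, 1.0] and recall [1.0, 0.5], the harmonic
# mean gives f1 = 2*P*R / (P + R) = 2*0.5*1.0 / 1.5 = 0.66666667 for both
# classes, matching expected_return above.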
def _generate_teams() -> pd.DataFrame:
start_season = 1876
end_season = most_recent_season()
lahman_columns = ['yearID', 'lgID', 'teamID', 'franchID', 'divID', 'name', 'teamIDBR', 'teamIDlahman45', 'teamIDretro']
lahman_teams = lahman.teams_core().query('yearID >= @start_season')[lahman_columns]
fg_team_data = fangraphs.fg_team_batting_data(start_season, end_season, 'ALL', stat_columns=['AB'])
fg_columns = list(fg_team_data.columns.values)
unjoined_fangraphs_teams = fg_team_data.copy(deep=True)
unjoined_lahman_teams = lahman_teams.copy(deep=True)
unjoined_lahman_teams['manual_teamid'] = unjoined_lahman_teams.apply((lambda row: _manual_matches.get(row.franchID, (- 1))), axis=1)
lahman_columns += ['manual_teamid']
unjoined_lahman_teams['initials'] = unjoined_lahman_teams.apply((lambda row: re.sub('[^A-Z]', '', row['name'])), axis=1)
lahman_columns += ['initials']
unjoined_lahman_teams['city_start'] = unjoined_lahman_teams.apply((lambda row: row['name'][:3].upper()), axis=1)
lahman_columns += ['city_start']
joined: pd.DataFrame = None
for join_column in ['manual_teamid', 'teamID', 'franchID', 'teamIDBR', 'initials', 'city_start']:
joined_count = (len(joined.index) if (joined is not None) else 0)
if (join_column == 'manual_teamid'):
outer_joined = unjoined_lahman_teams.merge(unjoined_fangraphs_teams, how='outer', left_on=['yearID', join_column], right_on=['Season', 'teamIDfg'])
else:
outer_joined = unjoined_lahman_teams.merge(unjoined_fangraphs_teams, how='outer', left_on=['yearID', join_column], right_on=['Season', 'Team'])
found = outer_joined.query('not Season.isnull() and not yearID.isnull()')
joined = (pd.concat([joined, found]) if (joined is not None) else found)
unjoined = outer_joined.query('yearID.isnull() or Season.isnull()')
unjoined_lahman_teams = unjoined.query('Season.isnull()').drop(labels=fg_columns, axis=1)
unjoined_fangraphs_teams = unjoined.query('yearID.isnull()').drop(labels=lahman_columns, axis=1)
logger.info('Matched %s teams off of %s. %s teams remaining to match.', (len(joined.index) - joined_count), join_column, len(unjoined_lahman_teams.index))
joined_count = (len(joined.index) if (joined is not None) else 0)
unjoined_lahman_teams['close_match'] = unjoined_lahman_teams.apply((lambda row: _get_close_team_matches(row, unjoined_fangraphs_teams)), axis=1)
outer_joined = unjoined_lahman_teams.merge(unjoined_fangraphs_teams, how='outer', left_on=['yearID', 'close_match'], right_on=['Season', 'Team'])
joined = pd.concat([joined, outer_joined.query('not Season.isnull() and not yearID.isnull()')])
unjoined = outer_joined.query('(yearID.isnull() or Season.isnull()) and not (yearID.isnull() and Season.isnull())')
unjoined_lahman_teams = unjoined.query('Season.isnull()').drop(unjoined_fangraphs_teams.columns.values, axis=1)
unjoined_fangraphs_teams = unjoined.query('yearID.isnull()').drop(unjoined_lahman_teams.columns, axis=1)
logger.info('Matched %s teams off of close match. %s teams remaining to match.', (len(joined.index) - joined_count), len(unjoined_lahman_teams.index))
error_state = False
if (not unjoined_lahman_teams.empty):
logger.warning('When trying to join lahman data to Fangraphs, found %s rows of extraneous lahman data: %s', len(unjoined_lahman_teams.index), unjoined_lahman_teams.sort_values(['yearID', 'lgID', 'teamID', 'franchID']))
error_state = True
if (not unjoined_fangraphs_teams.empty):
this_year = date.today().year
if (not unjoined_fangraphs_teams[(unjoined_fangraphs_teams.Season.astype(int) < this_year)].empty):
logger.warning('When trying to join Fangraphs data to lahman, found %s rows of extraneous Fangraphs data: %s', len(unjoined_fangraphs_teams.index), unjoined_fangraphs_teams.sort_values(['Season', 'Team']))
error_state = True
if error_state:
raise Exception('Extraneous data was not matched. Aborting.')
joined = joined[['yearID', 'lgID', 'teamID', 'franchID', 'teamIDfg', 'teamIDBR', 'teamIDretro']]
joined = joined.assign(teamIDfg=joined['teamIDfg'].apply(int))
joined = joined.assign(yearID=joined['yearID'].apply(int))
joined = joined.sort_values(['yearID', 'lgID', 'teamID', 'franchID']).drop_duplicates()
joined = joined.reset_index(drop=True)
joined.to_csv(_DATA_FILENAME)
return joined |
def polar_gen_isic2018():
data_dir = '/raid/wjc/data/skin_lesion/isic2018_jpg_smooth/'
os.makedirs((data_dir + '/PolarImage'), exist_ok=True)
os.makedirs((data_dir + '/PolarLabel'), exist_ok=True)
path_list = os.listdir((data_dir + '/Label/'))
path_list.sort()
num = 0
for path in tqdm(path_list):
image_data = cv2.imread(os.path.join(data_dir, 'Image', path))
label_data = cv2.imread(os.path.join(data_dir, 'Label', path), cv2.IMREAD_GRAYSCALE)
center = centroid(image_data)
image_data = to_polar(image_data, center)
label_data = to_polar(label_data, center)
cv2.imwrite(((data_dir + '/PolarImage/') + path), image_data)
cv2.imwrite(((data_dir + '/PolarLabel/') + path), label_data) |
def nfsp_leduc_dqn_params(env: MultiAgentEnv) -> Dict[(str, Any)]:
return merge_dicts(GRL_DEFAULT_OPENSPIEL_POKER_DQN_PARAMS, {'exploration_config': {'type': ValidActionsEpsilonGreedy, 'initial_epsilon': 0.06, 'final_epsilon': 0.001, 'epsilon_timesteps': int(.0)}, 'num_gpus': float(os.getenv('WORKER_GPU_NUM', 0.0)), 'num_workers': 4, 'num_gpus_per_worker': float(os.getenv('WORKER_GPU_NUM', 0.0)), 'num_envs_per_worker': 32, 'learning_starts': 16000, 'rollout_fragment_length': 8, 'train_batch_size': 4096, 'model': merge_dicts(MODEL_DEFAULTS, {'fcnet_activation': 'relu', 'fcnet_hiddens': [128], 'custom_model': get_valid_action_fcn_class_for_env(env=env)})}) |
class PreprocessEnv(habitat.RLEnv):
def __init__(self, env, preprocessing_fn=None):
self.env = env
self.transform = None
self.observation_space = self.env.observation_space
if (preprocessing_fn is not None):
(self.transform, self.observation_space) = preprocessing_fn(self.env.observation_space)
def reset(self):
self.done = False
obs = self.env.reset()
obs = copy.deepcopy(obs)
if (self.transform is not None):
obs = self.transform(obs)
return self.wrap(obs)
def step(self, action):
action = (action[0] if isinstance(action, list) else action)
(obs, reward, self.done, info) = self.env.step(action)
obs = copy.deepcopy(obs)
if (self.transform is not None):
obs = self.transform(obs)
return (self.wrap(obs), np.array([reward], dtype=np.float32), np.array([self.done]), [info])
def wrap(self, x):
assert isinstance(x, dict)
for (k, v) in x.items():
if isinstance(v, torch.Tensor):
x[k] = v.unsqueeze(0)
elif isinstance(v, np.ndarray):
x[k] = np.expand_dims(v, axis=0)
elif isinstance(v, list):
x[k] = [x[k]]
else:
print(f'Habitat Single Env Wrapper: not wrapping {k}')
return x
def close(self):
self.env.close() |
def embedding(hparams, eval_loader, pred_loader, exp_dir, data_tag):
model_info = dict(hparams.model)
model = getattr(model_arch, model_info['type'])(**model_info['args'])
model.to(device)
checkpt = torch.load(((exp_dir + '/') + hparams.best_model), map_location=device)
model.load_state_dict(checkpt['state_dict'])
metrics = [getattr(model_metric, met) for met in hparams.metrics]
evaluating.embedding_driver(model, eval_loader, pred_loader, metrics, hparams, exp_dir, data_tag) |
class VCTreeLSTMContext(nn.Module):
def __init__(self, cfg, obj_classes, rel_classes, statistics, in_channels):
super(VCTreeLSTMContext, self).__init__()
self.cfg = cfg
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_classes = len(obj_classes)
if self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.USE_GT_BOX:
if self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.USE_GT_OBJECT_LABEL:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
self.embed_dim = self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.EMBED_DIM
obj_embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim)
self.obj_embed1 = nn.Embedding(self.num_obj_classes, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_classes, self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(obj_embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(obj_embed_vecs, non_blocking=True)
self.pos_embed = nn.Sequential(*[nn.Linear(9, 32), nn.BatchNorm1d(32, momentum=0.001), nn.Linear(32, 128), nn.ReLU(inplace=True)])
self.overlap_embed = nn.Sequential(*[nn.Linear(6, 128), nn.BatchNorm1d(128, momentum=0.001), nn.ReLU(inplace=True)])
self.box_embed = nn.Sequential(*[nn.Linear(9, 128), nn.BatchNorm1d(128, momentum=0.001), nn.ReLU(inplace=True)])
self.obj_dim = in_channels
self.dropout_rate = self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.CONTEXT_DROPOUT_RATE
self.hidden_dim = self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.CONTEXT_HIDDEN_DIM
self.nl_obj = self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.CONTEXT_OBJ_LAYER
self.nl_edge = self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.CONTEXT_REL_LAYER
assert ((self.nl_obj > 0) and (self.nl_edge > 0))
co_occour = statistics['pred_dist'].float().sum((- 1))
assert (co_occour.shape[0] == co_occour.shape[(- 1)])
assert (len(co_occour.shape) == 2)
self.bi_freq_prior = nn.Linear((self.num_obj_classes * self.num_obj_classes), 1, bias=False)
with torch.no_grad():
co_occour = (co_occour + co_occour.transpose(0, 1))
self.bi_freq_prior.weight.copy_(co_occour.view((- 1)).unsqueeze(0), non_blocking=True)
self.obj_reduce = nn.Linear(self.obj_dim, 128)
self.emb_reduce = nn.Linear(self.embed_dim, 128)
self.score_pre = nn.Linear((128 * 4), self.hidden_dim)
self.score_sub = nn.Linear(self.hidden_dim, self.hidden_dim)
self.score_obj = nn.Linear(self.hidden_dim, self.hidden_dim)
self.vision_prior = nn.Linear(((self.hidden_dim * 3) + 1), 1)
layer_init(self.obj_reduce, xavier=True)
layer_init(self.emb_reduce, xavier=True)
layer_init(self.score_pre, xavier=True)
layer_init(self.score_sub, xavier=True)
layer_init(self.score_obj, xavier=True)
self.obj_ctx_rnn = MultiLayer_BTreeLSTM(in_dim=((self.obj_dim + self.embed_dim) + 128), out_dim=self.hidden_dim, num_layer=self.nl_obj, dropout=(self.dropout_rate if (self.nl_obj > 1) else 0))
self.edge_ctx_rnn = MultiLayer_BTreeLSTM(in_dim=((self.embed_dim + self.hidden_dim) + self.obj_dim), out_dim=self.hidden_dim, num_layer=self.nl_edge, dropout=(self.dropout_rate if (self.nl_edge > 1) else 0))
if (self.mode != 'predcls'):
self.decoder_rnn = DecoderTreeLSTM(self.cfg, self.obj_classes, embed_dim=self.embed_dim, inputs_dim=(((self.hidden_dim + self.obj_dim) + self.embed_dim) + 128), hidden_dim=self.hidden_dim, dropout=self.dropout_rate)
self.average_ratio = 0.0005
self.effect_analysis = None
if self.effect_analysis:
self.register_buffer('untreated_dcd_feat', torch.zeros((((self.hidden_dim + self.obj_dim) + self.embed_dim) + 128)))
self.register_buffer('untreated_obj_feat', torch.zeros(((self.obj_dim + self.embed_dim) + 128)))
self.register_buffer('untreated_edg_feat', torch.zeros((self.embed_dim + self.obj_dim)))
def obj_ctx(self, num_objs, obj_feats, proposals, obj_labels=None, vc_forest=None, ctx_average=False):
obj_feats = obj_feats.split(num_objs, dim=0)
obj_labels = (obj_labels.split(num_objs, dim=0) if (obj_labels is not None) else None)
obj_ctxs = []
obj_preds = []
obj_dists = []
for (i, (feat, tree, proposal)) in enumerate(zip(obj_feats, vc_forest, proposals)):
encod_rep = self.obj_ctx_rnn(tree, feat, len(proposal))
obj_ctxs.append(encod_rep)
if (self.mode != 'predcls'):
if ((not self.training) and self.effect_analysis and ctx_average):
decoder_inp = self.untreated_dcd_feat.view(1, (- 1)).expand(encod_rep.shape[0], (- 1))
else:
decoder_inp = torch.cat((feat, encod_rep), 1)
(obj_dist, obj_pred) = self.decoder_rnn(tree, decoder_inp, len(proposal))
else:
assert (obj_labels is not None)
obj_pred = obj_labels[i]
obj_dist = to_onehot(obj_pred, self.num_obj_classes)
obj_preds.append(obj_pred)
obj_dists.append(obj_dist)
obj_ctxs = cat(obj_ctxs, dim=0)
obj_preds = cat(obj_preds, dim=0)
obj_dists = cat(obj_dists, dim=0)
return (obj_ctxs, obj_preds, obj_dists)
def edge_ctx(self, num_objs, obj_feats, forest):
inp_feats = obj_feats.split(num_objs, dim=0)
edge_ctxs = []
for (feat, tree, num_obj) in zip(inp_feats, forest, num_objs):
edge_rep = self.edge_ctx_rnn(tree, feat, num_obj)
edge_ctxs.append(edge_rep)
edge_ctxs = cat(edge_ctxs, dim=0)
return edge_ctxs
def forward(self, x, proposals, boxes, rel_pair_idxs, logger=None, all_average=False, ctx_average=False):
num_objs = [len(b) for b in proposals]
if (self.training or self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.USE_GT_BOX):
obj_labels = cat([proposal.pred_classes for proposal in proposals], dim=0)
else:
obj_labels = None
if self.cfg.MODEL.ROI_SCENEGRAPH_HEAD.USE_GT_OBJECT_LABEL:
obj_embed = self.obj_embed1(obj_labels.long())
obj_logits = to_onehot(obj_labels, self.num_obj_classes)
else:
obj_logits = cat([proposal.pred_scores for proposal in proposals], dim=0).detach()
obj_embed = (F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight)
img_sizes = [proposal.image_size for proposal in proposals]
box_info = encode_box_info(boxes, img_sizes)
pos_embed = self.pos_embed(box_info)
batch_size = x.shape[0]
if (all_average and self.effect_analysis and (not self.training)):
obj_pre_rep = self.untreated_obj_feat.view(1, (- 1)).expand(batch_size, (- 1))
else:
obj_pre_rep = cat((x, obj_embed, pos_embed), (- 1))
box_inp = self.box_embed(box_info)
pair_inp = self.overlap_embed(get_overlap_info(boxes))
bi_inp = cat((self.obj_reduce(x.detach()), self.emb_reduce(obj_embed.detach()), box_inp, pair_inp), (- 1))
(bi_preds, vc_scores) = self.vctree_score_net(num_objs, bi_inp, obj_logits, proposals)
forest = generate_forest(vc_scores, proposals, self.mode)
vc_forest = arbForest_to_biForest(forest)
(obj_ctxs, obj_preds, obj_dists) = self.obj_ctx(num_objs, obj_pre_rep, proposals, obj_labels, vc_forest, ctx_average=ctx_average)
obj_embed2 = self.obj_embed2(obj_preds.long())
if ((all_average or ctx_average) and self.effect_analysis and (not self.training)):
obj_rel_rep = cat((self.untreated_edg_feat.view(1, (- 1)).expand(batch_size, (- 1)), obj_ctxs), dim=(- 1))
else:
obj_rel_rep = cat((obj_embed2, x, obj_ctxs), (- 1))
edge_ctx = self.edge_ctx(num_objs, obj_rel_rep, vc_forest)
if (self.training and self.effect_analysis):
self.untreated_obj_feat = self.moving_average(self.untreated_obj_feat, obj_pre_rep)
self.untreated_edg_feat = self.moving_average(self.untreated_edg_feat, cat((obj_embed2, x), (- 1)))
return (obj_dists, obj_preds, edge_ctx, bi_preds)
def moving_average(self, holder, input):
assert (len(input.shape) == 2)
with torch.no_grad():
holder = ((holder * (1 - self.average_ratio)) + (self.average_ratio * input.mean(0).view((- 1))))
return holder
def vctree_score_net(self, num_objs, roi_feat, roi_dist, proposals):
roi_dist = roi_dist.detach()
roi_dist = F.softmax(roi_dist, dim=(- 1))
roi_feat = F.relu(self.score_pre(roi_feat))
sub_feat = F.relu(self.score_sub(roi_feat))
obj_feat = F.relu(self.score_obj(roi_feat))
sub_feats = sub_feat.split(num_objs, dim=0)
obj_feats = obj_feat.split(num_objs, dim=0)
roi_dists = roi_dist.split(num_objs, dim=0)
bi_preds = []
vc_scores = []
for (sub, obj, dist, prp) in zip(sub_feats, obj_feats, roi_dists, proposals):
num_obj = sub.shape[0]
num_dim = sub.shape[(- 1)]
sub = sub.view(1, num_obj, num_dim).expand(num_obj, num_obj, num_dim)
obj = obj.view(num_obj, 1, num_dim).expand(num_obj, num_obj, num_dim)
sub_dist = dist.view(1, num_obj, (- 1)).expand(num_obj, num_obj, (- 1)).unsqueeze(2)
obj_dist = dist.view(num_obj, 1, (- 1)).expand(num_obj, num_obj, (- 1)).unsqueeze(3)
joint_dist = (sub_dist * obj_dist).view(num_obj, num_obj, (- 1))
co_prior = self.bi_freq_prior(joint_dist.view((num_obj * num_obj), (- 1))).view(num_obj, num_obj)
vis_prior = self.vision_prior(cat([(sub * obj), sub, obj, co_prior.unsqueeze((- 1))], dim=(- 1)).view((num_obj * num_obj), (- 1))).view(num_obj, num_obj)
joint_pred = (torch.sigmoid(vis_prior) * co_prior)
bi_preds.append(joint_pred)
vc_scores.append(torch.sigmoid(joint_pred))
return (bi_preds, vc_scores) |
def dump_class_labels(s_ids: dict, old_meta, new_meta):
with open(old_meta.class_labels, 'r') as infile, open(new_meta.class_labels, 'w') as outfile:
for line in infile:
(image_id, class_label_string) = line.strip('\n').split(',')
if (image_id in s_ids):
outfile.write(line) |
class InceptionI3d(snt.AbstractModule):
VALID_ENDPOINTS = ('Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool3d_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool3d_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Logits', 'Predictions')
def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d'):
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__(name=name)
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
def _build(self, inputs, is_training, dropout_keep_prob=1.0):
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
net = inputs
end_points = {}
end_point = 'Conv3d_1a_7x7'
net = Unit3D(output_channels=64, kernel_shape=[7, 7, 7], stride=[2, 2, 2], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_2a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Conv3d_2b_1x1'
net = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Conv3d_2c_3x3'
net = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name=end_point)(net, is_training=is_training)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_3a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=32, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=96, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_4a_3x3'
net = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=208, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=48, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=224, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=256, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=144, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=288, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'MaxPool3d_5a_2x2'
net = tf.nn.max_pool3d(net, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0a_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = Unit3D(output_channels=384, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
with tf.variable_scope('Branch_1'):
branch_1 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_1 = Unit3D(output_channels=384, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
with tf.variable_scope('Branch_2'):
branch_2 = Unit3D(output_channels=48, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
with tf.variable_scope('Branch_3'):
branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
end_points[end_point] = net
if (self._final_endpoint == end_point):
return (net, end_points)
end_point = 'Logits'
with tf.variable_scope(end_point):
net = tf.nn.avg_pool3d(net, ksize=[1, net.get_shape()[1], 7, 7, 1], strides=[1, 1, 1, 1, 1], padding=snt.VALID)
net2 = tf.nn.dropout(net, dropout_keep_prob)
logits = Unit3D(output_channels=self._num_classes, kernel_shape=[1, 1, 1], activation_fn=None, use_batch_norm=False, use_bias=True, name='Conv3d_0c_1x1')(net2, is_training=is_training)
if self._spatial_squeeze:
logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
end_points['logits_raw'] = logits
averaged_logits = tf.reduce_mean(logits, axis=1)
end_points[end_point] = averaged_logits
end_points['avg_pool3d'] = net
if (self._final_endpoint == end_point):
return (averaged_logits, end_points)
end_point = 'Predictions'
predictions = tf.nn.softmax(averaged_logits)
end_points[end_point] = predictions
return (predictions, end_points) |
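# A hedged usage sketch (editorial addition; assumes TF 1.x graph mode and
# dm-sonnet v1, which the module above is written against): build the graph on a
# dummy 64-frame RGB clip and fetch the averaged logits.
import numpy as np
import tensorflow as tf
rgb_input = tf.placeholder(tf.float32, shape=(1, 64, 224, 224, 3))
model = InceptionI3d(num_classes=400, final_endpoint='Logits')
logits, end_points = model(rgb_input, is_training=False, dropout_keep_prob=1.0)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
out = sess.run(logits, feed_dict={rgb_input: np.zeros((1, 64, 224, 224, 3), dtype=np.float32)})
print(out.shape)  # (1, 400) |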
def train_beta(model):
print('Starting initial training (with cropped images)')
num_epochs = 100
batch_size = 2
nframes = 14
nframes_val = 32
size = (480, 864)
def image_read(path):
pic = Image.open(path)
transform = tv.transforms.Compose([tv.transforms.Resize(size, interpolation=Image.BILINEAR), tv.transforms.ToTensor(), tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)])
return transform(pic)
def label_read(path):
if os.path.exists(path):
pic = Image.open(path)
transform = tv.transforms.Compose([tv.transforms.Resize(size, interpolation=Image.NEAREST), LabelToLongTensor()])
label = transform(pic)
else:
label = torch.LongTensor(1, *size).fill_(255)
return label
def random_object_sampler(lst):
return [random.choice(lst)]
def deterministic_object_sampler(lst):
return [lst[0]]
train_transform = dataset_loaders.JointCompose([dataset_loaders.JointRandomHorizontalFlip()])
train_set = torch.utils.data.ConcatDataset([DAVIS17V2(config['davis17_path'], '2017', 'train', image_read, label_read, train_transform, nframes, random_object_sampler, start_frame='random')])
val_set = YTVOSV2(config['ytvos_path'], 'train', 'val_joakim', 'JPEGImages', image_read, label_read, None, nframes_val, deterministic_object_sampler, start_frame='first')
sampler = torch.utils.data.WeightedRandomSampler((len(train_set) * [1]), 118, replacement=True)
train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=11)
val_loader = DataLoader(val_set, shuffle=False, batch_size=batch_size, num_workers=11)
print('Sets initiated with {} (train) and {} (val) samples.'.format(len(train_set), len(val_set)))
objective = nn.NLLLoss(ignore_index=255).cuda()
optimizer = torch.optim.Adam([param for param in model.parameters() if param.requires_grad], lr=1e-05, weight_decay=1e-06)
lr_sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.985)
trainer = trainers.VOSTrainer(model, optimizer, objective, lr_sched, train_loader, val_loader, use_gpu=True, workspace_dir=config['workspace_path'], save_name=(os.path.splitext(os.path.basename(__file__))[0] + '_beta'), checkpoint_interval=100, print_interval=25, debug=False)
trainer.load_checkpoint()
trainer.train(num_epochs) |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--datasets', nargs='+', default=penn.EVALUATION_DATASETS, help='The datasets to evaluate on')
parser.add_argument('--checkpoint', type=Path, help='The checkpoint file to evaluate')
parser.add_argument('--gpu', type=int, help='The index of the GPU to use for evaluation')
return parser.parse_known_args()[0] |
def _create_or_get_iterations_per_loop():
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if (len(iter_vars) == 1):
return iter_vars[0]
elif (len(iter_vars) > 1):
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(_ITERATIONS_PER_LOOP_VAR, initializer=init_ops.zeros_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True) |
def import_tinyImagenet_task():
try:
import sys
sys.path.insert(0, '/export/home/sicarbonnell/Recherche/_datasets')
from import_tinyImagenet import import_tinyImagenet
except ImportError:
raise ImportError('Our code does not provide the utilities to load the tinyImagenet dataset.')
(x_train, y_train, x_test, y_test) = import_tinyImagenet()
def get_model(weight_decay=0.001):
k = 32
model = VGG(input_shape=x_train.shape[1:], nbstages=5, nblayers=([2] * 5), nbfilters=[(1 * k), (2 * k), (4 * k), (8 * k), (16 * k)], nbclasses=y_train.shape[1], use_bias=False, batchnorm_training=False, kernel_initializer='he_uniform', batchnorm_momentum=0.9, weight_decay=weight_decay)
weights_location = (file_loc + 'saved_weights/initial_weights_tinyImagenet.h5')
if ('initial_weights_tinyImagenet.h5' not in os.listdir((file_loc + 'saved_weights'))):
model.save_weights(weights_location)
else:
model.load_weights(weights_location)
return model
return (x_train, y_train, x_test, y_test, get_model) |
def log_cfg(cfg: Dict, prefix: str='cfg') -> None:
logger = logging.getLogger(__name__)
for (k, v) in cfg.items():
if isinstance(v, dict):
p = '.'.join([prefix, k])
log_cfg(v, prefix=p)
else:
p = '.'.join([prefix, k])
logger.info('%34s : %s', p, v) |
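# A small usage sketch (editorial addition): nested dicts are flattened into
# dotted keys under the given prefix.
import logging
logging.basicConfig(level=logging.INFO)
log_cfg({'model': {'lr': 0.001, 'layers': 4}, 'seed': 42})
# Logs (right-aligned to 34 chars):
#   cfg.model.lr : 0.001
#   cfg.model.layers : 4
#   cfg.seed : 42 |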
def create_metadata_with_new_checkpoint_for_current_best_response(trainer: Trainer, player: int, save_dir: str, timesteps_training_br: int, episodes_training_br: int, active_policy_num: int=None, average_br_reward: float=None):
return {'checkpoint_path': save_policy_checkpoint(trainer=trainer, player=player, save_dir=save_dir, policy_id_to_save='best_response', checkpoint_name=f'player_{player}_policy_{active_policy_num}', additional_data={'policy_num': active_policy_num, 'timesteps_training_br': timesteps_training_br, 'episodes_training_br': episodes_training_br, 'average_br_reward': average_br_reward}), 'timesteps_training_br': timesteps_training_br, 'episodes_training_br': episodes_training_br, 'average_br_reward': average_br_reward} |
class StatusEnum(enum.Enum):
READY = 0
RUNNING = 1
COMPLETE = 2
ERROR = 3
SUSPENDED = 4 |
class _FunctionState(object):
_NORMAL_TRIGGER = 250
_TEST_TRIGGER = 400
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
if Match('T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = (base_trigger * (2 ** _VerboseLevel()))
if (self.lines_in_function > trigger):
error_level = int(math.log((self.lines_in_function / base_trigger), 2))
if (error_level > 5):
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level, ('Small and focused functions are preferred: %s has %d non-comment lines (error triggered by exceeding %d lines).' % (self.current_function, self.lines_in_function, trigger)))
def End(self):
self.in_a_function = False |
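# A worked example (editorial addition, assuming _VerboseLevel() == 0): a normal
# function triggers at 250 lines, and severity grows as log2 of the overshoot.
import math
base_trigger = 250          # _NORMAL_TRIGGER
lines_in_function = 1100
error_level = min(int(math.log(lines_in_function / base_trigger, 2)), 5)
print(error_level)  # 2, since 1100 lines is between 4x and 8x the trigger |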
def get_parser():
parser = argparse.ArgumentParser('SampleNet: Differentiable Point Cloud Sampling')
parser.add_argument('--skip-projection', action='store_true', help='Do not project points in training')
parser.add_argument('-in', '--num-in-points', type=int, default=1024, help='Number of input Points [default: 1024]')
parser.add_argument('-out', '--num-out-points', type=int, default=64, help='Number of output points [2, 1024] [default: 64]')
parser.add_argument('--bottleneck-size', type=int, default=128, help='bottleneck size [default: 128]')
parser.add_argument('--alpha', type=float, default=0.01, help='Simplification regularization loss weight [default: 0.01]')
parser.add_argument('--gamma', type=float, default=1, help='Lb constant regularization loss weight [default: 1]')
parser.add_argument('--delta', type=float, default=0, help='Lb linear regularization loss weight [default: 0]')
parser.add_argument('-gs', '--projection-group-size', type=int, default=8, help='Neighborhood size in Soft Projection [default: 8]')
parser.add_argument('--lmbda', type=float, default=0.01, help='Projection regularization loss weight [default: 0.01]')
return parser |
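# Usage sketch (editorial addition): parse a hypothetical command line and read
# back the sampling sizes and a loss weight.
parser = get_parser()
args = parser.parse_args(['--num-in-points', '2048', '-out', '32'])
print(args.num_in_points, args.num_out_points, args.alpha)  # 2048 32 0.01 |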
def make_dataset(classlist, labellist=None):
images = []
labels = []
classes = utils.readtextfile(classlist)
classes = [x.rstrip('\n') for x in classes]
classes.sort()
for i in range(len(classes)):
for fname in os.listdir(classes[i]):
if is_image_file(fname):
label = {}
label['class'] = os.path.split(classes[i])
images.append(fname)
labels.append(label)
if (labellist is not None):
label_dirs = utils.readtextfile(labellist)
label_dirs = [x.rstrip('\n') for x in label_dirs]
label_dirs.sort()
for i in range(len(label_dirs)):
for fname in os.listdir(label_dirs[i]):
if is_image_file(fname):
labels.append(os.path.split(label_dirs[i]))
return (images, labels) |
def parallel_download_s3_objects(s3_files, destination_filepaths, bucket_name, process_pool_size=None):
if (process_pool_size is None):
process_pool_size = cpu_count()
s3_and_destination = zip(s3_files, destination_filepaths)
with Pool(process_pool_size, init_s3_client) as proc:
results = proc.starmap(functools.partial(download_s3_object_to_path, bucket_name), s3_and_destination)
failed_files = [os.path.join('s3://', bucket_name, s3_file) for (result, s3_file) in zip(results, s3_files) if (not result)]
assert (len(failed_files) == 0), 'Failed downloading {}/{} files:\n{}'.format(len(failed_files), len(results), '\n'.join(failed_files)) |
def run(settings):
settings.device = 'cuda'
settings.description = 'TransT with default settings.'
settings.batch_size = 32
settings.num_workers = 4
settings.multi_gpu = True
settings.print_interval = 1
settings.normalize_mean = [0.485, 0.456, 0.406]
settings.normalize_std = [0.229, 0.224, 0.225]
settings.search_area_factor = 4.0
settings.template_area_factor = 2.0
settings.search_feature_sz = 32
settings.template_feature_sz = 16
settings.search_sz = (settings.search_feature_sz * 8)
settings.temp_sz = (settings.template_feature_sz * 8)
settings.center_jitter_factor = {'search': 3, 'template': 0}
settings.scale_jitter_factor = {'search': 0.25, 'template': 0}
settings.position_embedding = 'sine'
settings.hidden_dim = 256
settings.dropout = 0.1
settings.nheads = 8
settings.dim_feedforward = 2048
settings.featurefusion_layers = 4
settings.ratio = (1 / 8)
eotb_train = EOTB(settings.env.eotb_dir, split='train')
transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
data_processing_train = processing.TransTProcessing(search_area_factor=settings.search_area_factor, template_area_factor=settings.template_area_factor, search_sz=settings.search_sz, temp_sz=settings.temp_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint)
dataset_train = sampler.TransTSampler([eotb_train], [1], samples_per_epoch=(1000 * settings.batch_size), max_gap=100, processing=data_processing_train)
loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=0)
model = transt_models.transt_resnet50(settings)
if settings.multi_gpu:
model = MultiGPU(model, dim=0)
objective = transt_models.transt_loss(settings)
n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
print('number of params:', n_parameters)
actor = actors.TranstActor(net=model, objective=objective)
param_dicts = [{'params': [p for (n, p) in model.named_parameters() if (('backbone' not in n) and p.requires_grad)]}, {'params': [p for (n, p) in model.named_parameters() if (('backbone' in n) and p.requires_grad)], 'lr': 1e-05}]
optimizer = torch.optim.AdamW(param_dicts, lr=0.0001, weight_decay=0.0001)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 500)
trainer = LTRTrainer(actor, [loader_train], optimizer, settings, lr_scheduler, ratio=settings.ratio)
trainer.train(1000, load_latest=True, fail_safe=True) |
class M2M100Config(PretrainedConfig):
model_type = 'm2m_100'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs) |
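# Usage sketch (editorial addition; relies on HuggingFace transformers'
# PretrainedConfig, whose attribute_map resolves the aliased attribute names above):
config = M2M100Config(d_model=512, encoder_attention_heads=8)
print(config.hidden_size, config.num_attention_heads)  # 512 8
print(config.num_hidden_layers)  # 12, mirrors encoder_layers |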
def rbm(name, n_components=None, learning_rate=None, batch_size=None, n_iter=None, verbose=False, random_state=None):
def _name(msg):
return ('%s.%s_%s' % (name, 'rbm', msg))
rval = scope.sklearn_BernoulliRBM(n_components=(scope.int(hp.qloguniform((name + '.n_components'), low=np.log(0.51), high=np.log(999.5), q=1.0)) if (n_components is None) else n_components), learning_rate=(hp.lognormal((name + '.learning_rate'), np.log(0.01), np.log(10)) if (learning_rate is None) else learning_rate), batch_size=(scope.int(hp.qloguniform((name + '.batch_size'), np.log(1), np.log(100), q=1)) if (batch_size is None) else batch_size), n_iter=(scope.int(hp.qloguniform((name + '.n_iter'), np.log(1), np.log(1000), q=1)) if (n_iter is None) else n_iter), verbose=verbose, random_state=_random_state(_name('rstate'), random_state))
return rval |
class TFBackend():
def __init__(self, tf):
self._tf = tf
for k in dir(tf):
setattr(self, k, getattr(tf, k))
self.min = tf.minimum
self.max = tf.maximum
def with_same_type(self, x, other):
if (not self._tf.is_tensor(x)):
x = (self._tf.ones_like(other) * x)
return self._tf.cast(x, other.dtype)
def cast(self, x, dtype):
return self._tf.cast(x, dtype) |
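# Usage sketch (editorial addition; assumes TF2 eager execution): Python scalars
# are broadcast and cast to match the reference tensor's dtype.
import tensorflow as tf
backend = TFBackend(tf)
other = tf.zeros([2, 2], dtype=tf.float16)
x = backend.with_same_type(3, other)
print(x.dtype, x.shape)  # <dtype: 'float16'> (2, 2)
print(backend.min is tf.minimum)  # True: min/max are aliased in __init__ |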
class DistillationLoss(torch.nn.Module):
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module, distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert (distillation_type in ['none', 'soft', 'hard'])
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
outputs_kd = None
if (not isinstance(outputs, torch.Tensor)):
(outputs, outputs_kd) = outputs
base_loss = self.base_criterion(outputs, labels)
if (self.distillation_type == 'none'):
return base_loss
if (outputs_kd is None):
raise ValueError('When knowledge distillation is enabled, the model is expected to return a Tuple[Tensor, Tensor] with the output of the class_token and the dist_token')
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if (self.distillation_type == 'soft'):
T = self.tau
distillation_loss = ((F.kl_div(F.log_softmax((outputs_kd / T), dim=1), F.log_softmax((teacher_outputs / T), dim=1), reduction='sum', log_target=True) * (T * T)) / outputs_kd.numel())
elif (self.distillation_type == 'hard'):
distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
loss = ((base_loss * (1 - self.alpha)) + (distillation_loss * self.alpha))
return loss |
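# A minimal sketch (editorial addition; dummy modules, not the source's DeiT
# training setup) of the tuple contract: the student returns (class_logits, dist_logits).
import torch
import torch.nn as nn
teacher = nn.Linear(16, 10).eval()
criterion = DistillationLoss(nn.CrossEntropyLoss(), teacher, distillation_type='hard', alpha=0.5, tau=1.0)
inputs = torch.randn(4, 16)
outputs = (torch.randn(4, 10), torch.randn(4, 10))
labels = torch.randint(0, 10, (4,))
print(criterion(inputs, outputs, labels).item())  # weighted base CE + hard distillation loss |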
class Swinv2Config(PretrainedConfig):
model_type = 'swinv2'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
self.hidden_size = int((embed_dim * (2 ** (len(depths) - 1))))
self.pretrained_window_sizes = (0, 0, 0, 0) |
def load_data(location='/tmp/.zoo/dataset/mnist'):
(train_images, train_labels) = read_data_sets(location, 'train')
(test_images, test_labels) = read_data_sets(location, 'test')
return ((train_images, train_labels), (test_images, test_labels)) |
class MaskedLMConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={'help': 'colon separated path to data directories list, will be iterated upon during epochs in round-robin manner'})
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(default='none', metadata={'help': 'If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.'})
tokens_per_sample: int = field(default=1024, metadata={'help': 'max number of tokens per sample for LM dataset'})
mask_prob: float = field(default=0.15, metadata={'help': 'probability of replacing a token with mask'})
leave_unmasked_prob: float = field(default=0.1, metadata={'help': 'probability that a masked token is unmasked'})
random_token_prob: float = field(default=0.1, metadata={'help': 'probability of replacing a token with a random token'})
freq_weighted_replacement: bool = field(default=False, metadata={'help': 'sample random replacement words based on word frequencies'})
mask_whole_words: bool = field(default=False, metadata={'help': 'mask whole words; you may also want to set --bpe'})
mask_multiple_length: int = field(default=1, metadata={'help': 'repeat the mask indices multiple times'})
mask_stdev: float = field(default=0.0, metadata={'help': 'stdev of the mask length'})
shorten_method: SHORTEN_METHOD_CHOICES = field(default='none', metadata={'help': 'if not none, shorten sequences that exceed --tokens-per-sample'})
shorten_data_split_list: str = field(default='', metadata={'help': 'comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)'})
seed: int = II('common.seed')
include_target_tokens: bool = field(default=False, metadata={'help': 'include target tokens in model input. this is used for data2vec'}) |
def sum_space(sizes):
if isinstance(sizes, tuple):
if (len(sizes) == 0):
return 0
elif isinstance(sizes[0], int):
return np.prod(list(sizes))
else:
return sum_space(list(sizes))
elif isinstance(sizes, list):
return np.sum([sum_space(x) for x in sizes])
else:
return sizes |
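# Quick sanity checks (editorial addition, not from the source): plain ints pass
# through, shape tuples are multiplied out, and lists of spaces are summed.
import numpy as np
print(sum_space(7))                    # 7
print(sum_space((2, 3)))               # 6
print(sum_space([(2, 3), (4,), 5]))    # 6 + 4 + 5 = 15 |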
def sample_list_to_type(dtype, t):
if isinstance(t, Dict):
for (k, v) in t.items():
if isinstance(v, Tensor):
if v.is_floating_point():
t[k] = v.to(dtype)
return t
elif isinstance(t, List):
for (i, elem) in enumerate(t):
if isinstance(elem, Tensor):
if elem.is_floating_point():
t[i] = elem.to(dtype)
return t
else:
return to_type_original(dtype, t) |
class HMNetTrainer(DistributedTrainer):
def __init__(self, opt):
super().__init__(opt)
self.task = Task.setup_task(self.opt['TASK'], self.opt, self.saveFolder)
def is_gradient_accumulation_boundary(self):
return (((self.updates + 1) % self.grad_acc_steps) == 0)
def get_batch_generator(self, dataset_label):
batch_generator = self.task.batch_gen(self.opt, dataset_label=dataset_label, model_config=self.module.config, tokenizer=self.module.tokenizer, world_size=self.opt['world_size'], rank=self.opt['rank'], seed=self.seed)
if isinstance(batch_generator, BaseBatchGen):
batch_generator = batch_generator.iterator
self.log(f"Loaded data on rank {self.opt['rank']}.")
return batch_generator
def set_up_model(self):
try:
model_module = importlib.import_module(('summertime.model.third_party.HMNet.Models.Networks.' + self.opt['MODEL']))
model_class = getattr(model_module, self.opt['MODEL'])
self.module = model_class(self.opt)
except Exception as e:
self.log(e)
self.log('ERROR: Model {} is unknown'.format(self.opt['MODEL']))
assert False
pytorch_total_params = sum((p.numel() for p in self.module.parameters() if p.requires_grad))
self.log('Total trainable parameters: {}'.format(pytorch_total_params))
try:
criterion_module = importlib.import_module(('summertime.model.third_party.HMNet.Models.Criteria.' + self.opt['CRITERION']))
criterion_class = getattr(criterion_module, self.opt['CRITERION'])
self.criterion = criterion_class(self.opt, self.module)
except Exception as e:
self.log(e)
self.log('ERROR: Criterion {} is unknown'.format(self.opt['CRITERION']))
assert False
self.module.to(self.opt['device'])
def get_optimizer_params_config(self, optimizer_class):
optimizer_parameters = {}
sig = inspect.signature(optimizer_class)
for param_name in sig.parameters.keys():
if (param_name == 'lr'):
optimizer_parameters[param_name] = self.opt['START_LEARNING_RATE']
if ((param_name not in ['params', 'lr']) and (param_name.upper() in self.opt)):
optimizer_parameters[param_name] = self.opt[param_name.upper()]
return optimizer_parameters
def get_lr_scheduler_params_config(self, lr_scheduler_class):
lr_scheduler_parameters = {}
sig = inspect.signature(lr_scheduler_class)
for param_name in sig.parameters.keys():
if ((param_name not in ['optimizer']) and (param_name.upper() in self.opt)):
lr_scheduler_parameters[param_name] = self.opt[param_name.upper()]
return lr_scheduler_parameters
def set_up_optimizer_and_lr_scheduler(self):
parameters = self.module.get_training_parameters()
try:
optimizer_class = getattr(optim, self.opt['OPTIMIZER'])
self.log('Using pytorch native optimizier: {}'.format(self.opt['OPTIMIZER']))
except AttributeError:
try:
optimizer_module = importlib.import_module(('summertime.model.third_party.HMNet.Models.Optimizers.' + self.opt['OPTIMIZER']))
optimizer_class = getattr(optimizer_module, self.opt['OPTIMIZER'])
self.log('Using custom optimizer: {}'.format(self.opt['OPTIMIZER']))
except Exception as e:
self.log(e)
self.log('ERROR: Optimizer {} is unknown'.format(self.opt['OPTIMIZER']))
assert False
optimizer_parameters = self.get_optimizer_params_config(optimizer_class)
self.log(f'Optimizer parameters: {optimizer_parameters}')
self.optimizer = optimizer_class(parameters, **optimizer_parameters)
self.optimizer.zero_grad()
try:
lr_scheduler_class = getattr(lr_scheduler, self.opt['LR_SCHEDULER'])
self.log('Using pytorch native lr scheduler: {}'.format(self.opt['LR_SCHEDULER']))
except AttributeError:
try:
lr_scheduler_module = importlib.import_module(('summertime.model.third_party.HMNet.Models.Optimizers.' + self.opt['LR_SCHEDULER']))
lr_scheduler_class = getattr(lr_scheduler_module, self.opt['LR_SCHEDULER'])
self.log('Using custom lr scheduler: {}'.format(self.opt['LR_SCHEDULER']))
except Exception as e:
self.log(e)
self.log('ERROR: LR Scheduler {} is unknown'.format(self.opt['LR_SCHEDULER']))
assert False
lr_scheduler_parameters = self.get_lr_scheduler_params_config(lr_scheduler_class)
self.log(f'Lr scheduler parameters: {lr_scheduler_parameters}')
self.lr_scheduler = lr_scheduler_class(self.optimizer, **lr_scheduler_parameters)
def initialize_fp16_DDP(self):
self.network = WrappedModel(self.module, self.criterion)
self.network.to(self.opt['device'])
if self.opt['fp16']:
from apex import amp
(self.network, self.optimizer) = amp.initialize(self.network, self.optimizer, opt_level=self.opt['fp16_opt_level'])
if (self.opt['world_size'] > 1):
self.network = torch.nn.parallel.DistributedDataParallel(self.network, device_ids=[self.opt['local_rank']], output_device=self.opt['local_rank'], find_unused_parameters=True)
self.log(f"Wrapped model with DDP on rank {self.opt['rank']}.")
assert (self.module is self.network.module.model)
else:
assert (self.module is self.network.model)
def eval(self):
if (self.opt['rank'] == 0):
self.log('')
self.log('Evaluating model ... ')
self.set_up_model()
for eval_dataset in ['dev', 'test']:
batch_generator_eval = self.get_batch_generator(eval_dataset)
self.task.evaluator.reset_best_score(set_high=True)
(result, score, got_better_score) = self.task.evaluator.eval_batches(self.module, batch_generator_eval, self.saveFolder, eval_dataset)
if (self.opt['rank'] == 0):
self.log('{0} results breakdown\n{1}'.format(eval_dataset, result))
def eval_return_results(self):
if (self.opt['rank'] == 0):
self.log('')
self.log('Evaluating model ... ')
self.set_up_model()
for eval_dataset in ['test']:
batch_generator_eval = self.get_batch_generator(eval_dataset)
self.task.evaluator.reset_best_score(set_high=True)
(result, score, got_better_score) = self.task.evaluator.eval_batches(self.module, batch_generator_eval, self.saveFolder, eval_dataset)
if (self.opt['rank'] == 0):
self.log('{0} results breakdown\n{1}'.format(eval_dataset, result))
return result
def train(self):
self.log(f"train on rank {self.opt['rank']}")
if (self.opt['rank'] == 0):
self.log('')
self.log('Initializing model...')
self.set_up_model()
self.network = None
self.train_batch_generator = self.get_batch_generator('train')
if isinstance(self.train_batch_generator, iterators.CheckpointableIterator):
self.updates_per_epoch = self.opt['UPDATES_PER_EPOCH']
else:
self.updates_per_epoch = len(self.train_batch_generator)
self.updates = 0
self.optim_steps = 0
self.start_epoch_idx = 0
self.start_batch_idx = 0
self.set_up_optimizer_and_lr_scheduler()
self.initialize_fp16_DDP()
if ('RESUME' in self.opt):
self.load_checkpoint()
numEpochs = self.opt['MAX_NUM_EPOCHS']
self.train_loss = AverageMeter()
self.acc_loss = 0.0
save_a_checkpoint = False
for epoch in range(self.start_epoch_idx, numEpochs):
self.current_epoch_idx = epoch
self.log('Epoch {}'.format(epoch))
startTime = datetime.now()
for (batch_idx, batch) in enumerate(self.train_batch_generator):
if (self.current_epoch_idx == self.start_epoch_idx):
if isinstance(self.train_batch_generator, iterators.CheckpointableIterator):
batch_idx += self.start_batch_idx
elif (batch_idx < self.start_batch_idx):
continue
self.current_batch_idx = batch_idx
if (('SAVE_PER_UPDATE_NUM' in self.opt) and (((self.updates + 1) % self.opt['SAVE_PER_UPDATE_NUM']) == 0)):
assert self.is_gradient_accumulation_boundary()
save_a_checkpoint = True
self.update(batch)
if save_a_checkpoint:
if (self.task.evaluator is not None):
evaluate_label = ('update_' + str(self.updates))
eval_dataset = 'dev'
batches = self.get_batch_generator(eval_dataset)
(result, score, got_better_score) = self.task.evaluator.eval_batches(self.module, batches, self.saveFolder, evaluate_label)
self.tb_log_scalar('Eval/score', score, self.updates)
if got_better_score:
self.log('Got new better score on rank-{0} evaluator, at updates {1}'.format(self.opt['rank'], self.updates))
self.log('Updates {0} - {1}: Current Score: {2:.3f} (best Score: {3:.3f})'.format(self.updates, eval_dataset, score, self.task.evaluator.best_score))
self.log('Current results breakdown\n{0}'.format(result))
self.log('Best results breakdown\n{0}'.format(self.task.evaluator.best_res))
self.save_checkpoint(self.updates)
save_a_checkpoint = False
if (((batch_idx % 10) == 0) or ((epoch == 0) and (batch_idx <= 50)) or ('DEBUG' in self.opt)):
if (self.opt['rank'] == 0):
batch_size = batch['encoder_input_ids'].shape[0]
self.log('epochs[{0:6}] updates[{1:6}] bsz[{2:d}] train loss[{3:.5f}] avg train loss[{4:.5f}] learning rate[{5:.5e}] remaining[{6}]'.format(epoch, self.updates, batch_size, self.train_loss.val, self.train_loss.avg, self.lr_scheduler.get_lr()[0], str((((datetime.now() - startTime) / (batch_idx + 1)) * ((self.updates_per_epoch - batch_idx) - 1))).split('.')[0]))
self.tb_log_scalar('Loss/train_val', self.train_loss.val, self.updates)
self.tb_log_scalar('Loss/train_avg', self.train_loss.avg, self.updates)
self.tb_log_scalar('Learning Rate/lr', self.lr_scheduler.get_lr()[0], self.updates)
if (isinstance(self.train_batch_generator, iterators.CheckpointableIterator) and ((batch_idx + 1) == self.updates_per_epoch)):
break
self.log(('This epoch takes' + str((datetime.now() - startTime))))
self.log('PROGRESS: {0:.2f}%'.format(((100.0 * (epoch + 1)) / numEpochs)))
self.log(('Config file is at ' + self.opt['confFile']))
if ('DEBUG' in self.opt):
break
def update(self, batch):
self.network.train()
if isinstance(batch, tuple):
batch = tuple((t.to(self.opt['device']) for t in batch))
elif isinstance(batch, list):
batch = [t.to(self.opt['device']) for t in batch]
elif isinstance(batch, dict):
for k in batch:
if torch.is_tensor(batch[k]):
batch[k] = batch[k].to(self.opt['device'])
else:
assert torch.is_tensor(batch)
batch = batch.to(self.opt['device'])
skip_gradient_sync = False
if ((self.opt['world_size'] > 1) and (not self.is_gradient_accumulation_boundary())):
if (not self.opt['fp16']):
if self.high_pytorch_version:
skip_gradient_sync = True
if skip_gradient_sync:
with self.network.no_sync():
loss = self.network(batch)
else:
loss = self.network(batch)
if (self.grad_acc_steps > 1):
loss = (loss / self.grad_acc_steps)
self.acc_loss += loss
def backward(loss_tensor):
if self.opt['fp16']:
from apex import amp
with amp.scale_loss(loss_tensor, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss_tensor.backward()
if skip_gradient_sync:
with self.network.no_sync():
backward(loss)
else:
if (('DEBUG' in self.opt) and (self.opt['rank'] == 0)):
self.log('Performing synchronized backward at step {0}'.format(self.optim_steps))
backward(loss)
if self.is_gradient_accumulation_boundary():
if (self.opt['world_size'] > 1):
torch.distributed.all_reduce(self.acc_loss, torch.distributed.ReduceOp.SUM)
self.acc_loss /= self.opt['world_size']
self.train_loss.update(self.acc_loss.data, 1)
self.acc_loss = 0.0
if ('GRAD_CLIPPING' in self.opt):
if self.opt['fp16']:
from apex import amp
torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.opt['GRAD_CLIPPING'])
else:
torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.opt['GRAD_CLIPPING'])
self.optim_steps += 1
self.optimizer.step()
self.optimizer.zero_grad()
self.lr_scheduler.step()
self.updates += 1
def save_checkpoint(self, tag):
self.log('Saving checkpoint...')
resume_epoch_idx = self.current_epoch_idx
resume_batch_idx = (self.current_batch_idx + 1)
if (resume_batch_idx == self.updates_per_epoch):
resume_batch_idx = 0
resume_epoch_idx += 1
if self.opt['fp16']:
from apex import amp
if (self.opt['rank'] == 0):
save_dir = os.path.join(self.saveFolder, str(tag))
os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'training_states.pt')
state = {'network': self.network.state_dict(), 'optimizer': self.optimizer.state_dict(), 'lr_scheduler': self.lr_scheduler.state_dict(), 'amp': (amp.state_dict() if self.opt['fp16'] else None), 'optim_steps': self.optim_steps, 'updates': self.updates, 'updates_per_epoch': self.updates_per_epoch, 'start_epoch_idx': resume_epoch_idx, 'start_batch_idx': resume_batch_idx}
torch.save(state, save_path)
if (self.opt['world_size'] > 1):
torch.distributed.barrier()
save_dir = os.path.join(self.saveFolder, str(tag))
assert os.path.isdir(save_dir)
random_state_path = os.path.join(save_dir, 'random_state_rank_{:04d}'.format(self.opt['rank']))
random_state = {'random': random.getstate(), 'numpy_random': np.random.get_state(), 'torch_random': torch.get_rng_state(), 'torch_cuda_random': (torch.cuda.get_rng_state(device=self.opt['device']) if self.use_cuda else None)}
torch.save(random_state, random_state_path)
if isinstance(self.train_batch_generator, iterators.CheckpointableIterator):
batch_generator_file_path = os.path.join(save_dir, 'batch_generator_checkpoint_rank_{:04d}'.format(self.opt['rank']))
batch_generator_state = self.train_batch_generator.getstate()
torch.save(batch_generator_state, batch_generator_file_path)
else:
self.log('Batch generator is not checkpointable. Cannot save to checkpoint.')
if (self.opt['rank'] == 0):
self.module.save_pretrained(save_dir)
if (self.opt['rank'] == 0):
checkpoint_location = {'checkpoint_tag': str(tag), 'checkpoint_path': os.path.relpath(self.saveFolder, start=self.opt['datadir'])}
json.dump(checkpoint_location, open(os.path.join(self.opt['datadir'], (self.opt['basename'] + '_resume_checkpoint.json')), 'w', encoding='utf-8'))
self.log(f'Finished saving checkpoint and model to {save_dir}.')
def load_model(self, model_path):
self.module = self.module.from_pretrained(model_path)
self.module.to(self.opt['device'])
def load_checkpoint(self):
try:
checkpoint_location = json.load(open(os.path.join(self.opt['datadir'], (self.opt['basename'] + '_resume_checkpoint.json')), encoding='utf-8'))
checkpoint_path = os.path.join(self.opt['datadir'], checkpoint_location['checkpoint_path'], checkpoint_location['checkpoint_tag'])
tag = checkpoint_location['checkpoint_tag']
if (not os.path.isdir(checkpoint_path)):
if (self.opt['rank'] == 0):
self.log('Checkpoint path {} not exist. Continue without loading checkpoint'.format(checkpoint_path))
return
except Exception:
if (self.opt['rank'] == 0):
self.log(f'''Cannot find checkpoint path from {(self.opt['basename'] + '_resume_checkpoint.json')}.
Make sure {os.path.join(self.opt['datadir'], (self.opt['basename'] + '_resume_checkpoint.json'))} exists.
Continue without loading checkpoint''')
return
if (self.opt['rank'] == 0):
json.dump(checkpoint_location, open(os.path.join(self.saveFolder, 'resumed_checkpoint.json'), 'w', encoding='utf-8'))
self.log(f'Loading checkpoint from {checkpoint_path}...')
load_path = os.path.join(checkpoint_path, 'training_states.pt')
state = torch.load(load_path, map_location=self.opt['device'])
self.network.load_state_dict(state['network'])
self.optimizer.load_state_dict(state['optimizer'])
self.lr_scheduler.load_state_dict(state['lr_scheduler'])
if self.opt['fp16']:
from apex import amp
amp.load_state_dict(state['amp'])
self.optim_steps = state['optim_steps']
self.updates = state['updates']
self.start_epoch_idx = state['start_epoch_idx']
self.start_batch_idx = state['start_batch_idx']
assert (self.updates_per_epoch == state['updates_per_epoch'])
assert (self.start_batch_idx < self.updates_per_epoch)
random_state_path = os.path.join(checkpoint_path, 'random_state_rank_{:04d}'.format(self.opt['rank']))
random_state = torch.load(random_state_path, map_location='cpu')
random.setstate(random_state['random'])
np.random.set_state(random_state['numpy_random'])
torch.set_rng_state(random_state['torch_random'])
if self.use_cuda:
torch.cuda.set_rng_state(random_state['torch_cuda_random'], device=self.opt['device'])
if (('RESET_DATA_LOADER' not in self.opt) and isinstance(self.train_batch_generator, iterators.CheckpointableIterator)):
batch_generator_file_path = os.path.join(checkpoint_path, 'batch_generator_checkpoint_rank_{:04d}'.format(self.opt['rank']))
batch_generator_state = torch.load(batch_generator_file_path, map_location='cpu')
self.train_batch_generator.setstate(batch_generator_state)
else:
self.log("No need to resume batch generator or batch generator is not checkpointable. Didn't load from checkpoint.")
self.log(f'Finished loading checkpoint from {checkpoint_path}.') |
def video_to_imgs(video_name='demo_output.mp4', image_dir='./images/'):
video_capture = VideoCapture(video_name)
number = 0
while True:
(flag, frame) = video_capture.read()
if (flag is False):
break
(h, w) = (frame.shape[0], frame.shape[1])
if (((h % 4) != 0) or ((w % 4) != 0)):
NW = int(((w // 4) * 4))
NH = int(((h // 4) * 4))
frame = cv2.resize(frame, (NW, NH))
imwrite(((image_dir + str(number)) + '.jpg'), frame)
number += 1 |
class AsyncRlEval(AsyncRlBase):
_eval = True
def initialize_logging(self):
self._traj_infos = list()
self._last_eval_time = 0.0
super().initialize_logging()
self.pbar = ProgBarCounter(self.log_interval_itrs)
def log_diagnostics(self, itr, sampler_itr, throttle_time):
if (not self._traj_infos):
logger.log('WARNING: had no complete trajectories in eval.')
steps_in_eval = sum([info['Length'] for info in self._traj_infos])
logger.record_tabular('StepsInEval', steps_in_eval)
logger.record_tabular('TrajsInEval', len(self._traj_infos))
logger.record_tabular('CumEvalTime', self.ctrl.eval_time.value)
super().log_diagnostics(itr, sampler_itr, throttle_time)
self._traj_infos = list() |
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert (output_mask.shape == (3, 28, 28))
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks) |
class DownsampleLayer(nn.Module):
def __init__(self, channels, norm_layer='LN'):
super().__init__()
self.conv = nn.Conv2d(channels, (2 * channels), kernel_size=3, stride=2, padding=1, bias=False)
self.norm = build_norm_layer((2 * channels), norm_layer, 'channels_first', 'channels_last')
def forward(self, x):
x = self.conv(x.permute(0, 3, 1, 2))
x = self.norm(x)
return x |
def test_dissipativeforce_method_inputAsQuantity():
from galpy.potential import ChandrasekharDynamicalFrictionForce
from galpy.util import conversion
(ro, vo) = ((8.0 * units.kpc), 220.0)
pot = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=(1.2 / 8.0), ro=ro, vo=vo)
potu = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=(1.2 / 8.0))
assert (numpy.fabs((pot.Rforce((1.1 * ro), (0.1 * ro), phi=(10.0 * units.deg), t=(10.0 * units.Gyr), v=((numpy.array([10.0, 200.0, (- 20.0)]) * units.km) / units.s), use_physical=False) - potu.Rforce(1.1, 0.1, phi=((10.0 / 180.0) * numpy.pi), v=(numpy.array([10.0, 200.0, (- 20.0)]) / vo)))) < (10.0 ** (- 4.0))), 'Potential method Rforce does not return the correct value when input is Quantity'
assert (numpy.fabs((pot.zforce((1.1 * ro), (0.1 * ro), phi=(10.0 * units.deg), t=(10.0 * units.Gyr), v=((numpy.array([10.0, 200.0, (- 20.0)]) * units.km) / units.s), use_physical=False) - potu.zforce(1.1, 0.1, phi=((10.0 / 180.0) * numpy.pi), v=(numpy.array([10.0, 200.0, (- 20.0)]) / vo)))) < (10.0 ** (- 4.0))), 'Potential method zforce does not return the correct value when input is Quantity'
assert (numpy.fabs((pot.phitorque((1.1 * ro), (0.1 * ro), phi=(10.0 * units.deg), t=(10.0 * units.Gyr), v=((numpy.array([10.0, 200.0, (- 20.0)]) * units.km) / units.s), use_physical=False) - potu.phitorque(1.1, 0.1, phi=((10.0 / 180.0) * numpy.pi), v=(numpy.array([10.0, 200.0, (- 20.0)]) / vo)))) < (10.0 ** (- 4.0))), 'Potential method phitorque does not return the correct value when input is Quantity'
return None |
class LinspaceRange(Range[float]):
def __init__(self, start: float, end: float, n: int, name: Optional[str]=None, dtype=None) -> None:
self.n = n
self.start = start
self.end = end
self.dtype = dtype
super().__init__(name)
def values(self) -> np.ndarray:
return np.linspace(self.start, self.end, self.n, dtype=self.dtype) |
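# Usage sketch (editorial addition): a named range yielding n evenly spaced values.
r = LinspaceRange(0.0, 1.0, 5, name='dropout')
print(r.values())  # [0.   0.25 0.5  0.75 1.  ] |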
def get_embedding(args):
print('{}, Building augmented embedding'.format(datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S')))
aux = []
for ebd in args.auxiliary:
if (ebd == 'pos'):
aux.append(POS(args))
else:
raise ValueError('Invalid argument for auxiliary ebd')
if (args.cuda != (- 1)):
aux = [a.cuda(args.cuda) for a in aux]
model = AUX(aux, args)
if (args.cuda != (- 1)):
return model.cuda(args.cuda)
else:
return model |
class NopModule(MsfModule):
def __init__(self, rpc, nop):
super(NopModule, self).__init__(rpc, 'nop', nop) |
def make_atom14_masks(protein: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
restype_atom14_to_atom37_list = []
restype_atom37_to_atom14_list = []
restype_atom14_mask_list = []
for rt in rc.restypes:
atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
atom_name_to_idx14 = {name: i for (i, name) in enumerate(atom_names)}
restype_atom37_to_atom14_list.append([(atom_name_to_idx14[name] if (name in atom_name_to_idx14) else 0) for name in rc.atom_types])
restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
restype_atom14_to_atom37_list.append(([0] * 14))
restype_atom37_to_atom14_list.append(([0] * 37))
restype_atom14_mask_list.append(([0.0] * 14))
restype_atom14_to_atom37 = torch.tensor(restype_atom14_to_atom37_list, dtype=torch.int32, device=protein['aatype'].device)
restype_atom37_to_atom14 = torch.tensor(restype_atom37_to_atom14_list, dtype=torch.int32, device=protein['aatype'].device)
restype_atom14_mask = torch.tensor(restype_atom14_mask_list, dtype=torch.float32, device=protein['aatype'].device)
protein_aatype = protein['aatype'].to(torch.long)
residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
residx_atom14_mask = restype_atom14_mask[protein_aatype]
protein['atom14_atom_exists'] = residx_atom14_mask
protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long()
residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long()
restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein['aatype'].device)
for (restype, restype_letter) in enumerate(rc.restypes):
restype_name = rc.restype_1to3[restype_letter]
atom_names = rc.residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = rc.atom_order[atom_name]
restype_atom37_mask[(restype, atom_type)] = 1
residx_atom37_mask = restype_atom37_mask[protein_aatype]
protein['atom37_atom_exists'] = residx_atom37_mask
return protein |
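# A hedged call sketch (editorial addition; assumes `rc` is OpenFold/AlphaFold's
# residue_constants module, as the function above does): aatype holds residue-type
# indices, with 20 reserved for unknown residues.
import torch
protein = {'aatype': torch.tensor([0, 1, 20])}
protein = make_atom14_masks(protein)
print(protein['atom14_atom_exists'].shape)  # torch.Size([3, 14])
print(protein['atom37_atom_exists'].shape)  # torch.Size([3, 37]) |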
def AusElectricity_train(sample):
if sample:
return {'class_balance': (lambda r: True), 'weight_decay': (lambda r: 0.0), 'lr': (lambda r: (10 ** r.uniform((- 5), (- 3)))), 'batch_size': (lambda r: int((2 ** r.uniform(3, 5))))}
else:
return {'class_balance': (lambda r: True), 'weight_decay': (lambda r: 0), 'lr': (lambda r: (10 ** (- 4))), 'batch_size': (lambda r: 2)} |
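# Usage sketch (editorial addition): each config entry is a sampler taking a
# random.Random-like object, so hyperparameters are drawn lazily per trial.
import random
cfg = AusElectricity_train(sample=True)
r = random.Random(0)
print(cfg['lr'](r), cfg['batch_size'](r))  # e.g. an lr in [1e-5, 1e-3] and a batch size in [8, 32] |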
class BertChecker(BertPreTrainedModel):
def __init__(self, config, logic_lambda=0.0, prior='nli', m=8, temperature=1):
super().__init__(config)
self.num_labels = config.num_labels
self.hidden_size = config.hidden_size
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self._lambda = logic_lambda
self.prior = prior
self.temperature = temperature
self._step = 0
self.linear_self_attn = nn.Linear(self.hidden_size, 1, bias=False)
self.linear_m_attn = nn.Linear((self.hidden_size * 2), 1, bias=False)
self.var_hidden_size = (self.hidden_size // 4)
z_hid_size = (self.num_labels * m)
self.linear_P_theta = nn.Linear(((self.hidden_size * 2) + z_hid_size), self.var_hidden_size)
y_hid_size = self.var_hidden_size
self.linear_Q_phi = nn.Linear(((self.hidden_size * 2) + y_hid_size), self.var_hidden_size)
self.classifier = ClassificationHead(self.var_hidden_size, self.num_labels, config.hidden_dropout_prob)
self.z_clf = self.classifier
self.init_weights()
def forward(self, claim_input_ids, claim_attention_mask, claim_token_type_ids, qa_input_ids_list, qa_attention_mask_list, qa_token_type_ids_list, nli_labels=None, labels=None):
self._step += 1
_zero = torch.tensor(0.0).to(claim_input_ids.device)
global_output = self.bert(claim_input_ids, attention_mask=claim_attention_mask, token_type_ids=claim_token_type_ids)[0]
global_output = self.self_select(global_output)
_qa_input_ids_list = qa_input_ids_list.transpose(1, 0)
_qa_attention_mask_list = qa_attention_mask_list.transpose(1, 0)
_qa_token_type_ids_list = qa_token_type_ids_list.transpose(1, 0)
local_output_list = []
for (_inp, _attn, _token_ids) in zip(_qa_input_ids_list, _qa_attention_mask_list, _qa_token_type_ids_list):
_local_output = self.bert(_inp, attention_mask=_attn, token_type_ids=_token_ids)[0]
_local_output = self.self_select(_local_output)
local_output_list.append(_local_output)
local_outputs = torch.stack(local_output_list, 0)
local_outputs = local_outputs.transpose(1, 0).contiguous()
(neg_elbo, loss, logic_loss) = (_zero, _zero, _zero)
mask = attention_mask_to_mask(qa_attention_mask_list)
(local_outputs_w, m_attn) = self.local_attn(global_output, local_outputs, mask)
local_outputs = torch.cat([local_outputs, global_output.unsqueeze(1).repeat(1, local_outputs.size(1), 1)], (- 1))
if (labels is not None):
labels_onehot = F.one_hot(labels, num_classes=self.num_labels).to(torch.float)
y_star_emb = get_label_embeddings(labels_onehot, self.classifier.out_proj.weight)
z = self.Q_phi(local_outputs, y_star_emb)
z_softmax = z.softmax((- 1))
z_gumbel = F.gumbel_softmax(z, tau=temperature_annealing(self.temperature, self._step), dim=(- 1), hard=True)
y = self.P_theta(global_output, local_outputs_w, z_gumbel)
mask = mask.to(torch.int)
y_z = soft_logic(z_softmax, mask)
logic_loss = F.kl_div(y.log_softmax((- 1)), y_z)
elbo_neg_p_log = F.cross_entropy(y.view((- 1), self.num_labels), labels.view((- 1)))
if (self.prior == 'nli'):
prior = nli_labels.softmax(dim=(- 1))
elif (self.prior == 'uniform'):
prior = torch.tensor(([(1 / self.num_labels)] * self.num_labels)).to(y)
prior = prior.unsqueeze(0).unsqueeze(0).repeat(mask.size(0), mask.size(1), 1)
elif (self.prior == 'logic'):
prior = build_pseudo_labels(labels, m_attn)
else:
raise NotImplementedError(self.prior)
elbo_kl = F.kl_div(z_softmax.log(), prior)
neg_elbo = (elbo_kl + elbo_neg_p_log)
loss = (((1 - abs(self._lambda)) * neg_elbo) + (abs(self._lambda) * logic_loss))
else:
if (self.prior == 'nli'):
z = nli_labels
elif (self.prior == 'uniform'):
prior = torch.tensor(([(1 / self.num_labels)] * self.num_labels)).to(local_outputs)
z = prior.unsqueeze(0).unsqueeze(0).repeat(mask.size(0), mask.size(1), 1)
else:
z = torch.rand([local_outputs.size(0), local_outputs.size(1), self.num_labels]).to(local_outputs)
z_softmax = z.softmax((- 1))
for i in range(3):
z = z_softmax.argmax((- 1))
z = F.one_hot(z, num_classes=self.num_labels).to(torch.float)
y = self.P_theta(global_output, local_outputs_w, z)
y = y.softmax((- 1))
y_emb = get_label_embeddings(y, self.classifier.out_proj.weight)
z = self.Q_phi(local_outputs, y_emb)
z_softmax = z.softmax((- 1))
return (loss, (neg_elbo, logic_loss), y, m_attn, (z_softmax, mask))
def Q_phi(self, X, y):
y_expand = y.unsqueeze(1).repeat(1, X.size(1), 1)
z_hidden = self.linear_Q_phi(torch.cat([y_expand, X], dim=(- 1)))
z_hidden = torch.tanh(z_hidden)
z = self.z_clf(z_hidden)
return z
def P_theta(self, X_global, X_local, z):
b = z.size(0)
_logits = torch.cat([X_local, X_global, z.reshape(b, (- 1))], dim=(- 1))
_logits = self.dropout(_logits)
_logits = self.linear_P_theta(_logits)
_logits = torch.tanh(_logits)
y = self.classifier(_logits)
return y
def self_select(self, h_x):
w = self.dropout(self.linear_self_attn(h_x).squeeze((- 1))).softmax((- 1))
return torch.einsum('blh,bl->bh', h_x, w)
def local_attn(self, global_output, local_outputs, mask):
m = local_outputs.size(1)
scores = self.linear_m_attn(torch.cat([global_output.unsqueeze(1).repeat(1, m, 1), local_outputs], dim=(- 1))).squeeze((- 1))
mask = (1 - mask)
scores = scores.masked_fill(mask.to(torch.bool), (- 1e+16))
attn = F.softmax(scores, (- 1))
return (torch.einsum('bm,bmh->bh', attn, local_outputs), attn) |
class XceptionA(nn.Module):
def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
super(XceptionA, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(3, 8, 3, 2, 1, bias=False), norm_layer(8), nn.ReLU(True))
self.enc2 = Enc(8, 48, 4, norm_layer=norm_layer)
self.enc3 = Enc(48, 96, 6, norm_layer=norm_layer)
self.enc4 = Enc(96, 192, 4, norm_layer=norm_layer)
self.fca = FCAttention(192, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(192, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.enc2(x)
x = self.enc3(x)
x = self.enc4(x)
x = self.fca(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
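A hedged usage sketch: `Enc` and `FCAttention` are defined elsewhere in this codebase, so this only runs once those blocks are importable alongside torch:
import torch
model = XceptionA(num_classes=10)
logits = model(torch.randn(2, 3, 224, 224))  # expected shape: [2, 10] |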
def optuna_init_optimizers(self, methods, space, sampler='TPESampler', sampler_opts=None, **create_study_opts):
import optuna
if isinstance(sampler, str):
if (sampler_opts is None):
sampler_opts = {}
sampler = getattr(optuna.samplers, sampler)(**sampler_opts)
optuna.logging.set_verbosity(optuna.logging.WARNING)
self._study = optuna.create_study(sampler=sampler, **create_study_opts)
self._retrieve_params = make_retriever(methods, space) |
class NATSpeechToTextDataset(SpeechToTextDataset):
def __getitem__(self, index: int) -> SpeechToTextDatasetItem:
has_concat = self.dataset_transforms.has_transform(ConcatAugment)
if has_concat:
concat = self.dataset_transforms.get_transform(ConcatAugment)
indices = concat.find_indices(index, self.n_frames, self.n_samples)
source = self._get_source_audio((indices if has_concat else index))
source = self.pack_frames(source)
target = None
if (self.tgt_texts is not None):
tokenized = self.get_tokenized_tgt_text((indices if has_concat else index))
target = self.tgt_dict.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()
bos = torch.LongTensor([self.tgt_dict.bos()])
target = torch.cat((bos, target), 0)
speaker_id = None
if (self.speaker_to_id is not None):
speaker_id = self.speaker_to_id[self.speakers[index]]
return SpeechToTextDatasetItem(index=index, source=source, target=target, speaker_id=speaker_id)
def collater(self, samples: List[SpeechToTextDatasetItem], return_order: bool=False) -> Dict:
if (len(samples) == 0):
return {}
indices = torch.tensor([x.index for x in samples], dtype=torch.long)
sources = [x.source for x in samples]
has_NOAug = self.dataset_transforms.has_transform(NoisyOverlapAugment)
if (has_NOAug and self.cfg.use_audio_input):
NOAug = self.dataset_transforms.get_transform(NoisyOverlapAugment)
sources = NOAug(sources)
frames = _collate_frames(sources, self.cfg.use_audio_input)
n_frames = torch.tensor([x.size(0) for x in sources], dtype=torch.long)
(n_frames, order) = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
(target, target_lengths) = (None, None)
ntokens = None
if (self.tgt_texts is not None):
target = fairseq_data_utils.collate_tokens([x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False)
target = target.index_select(0, order)
target_lengths = torch.tensor([x.target.size(0) for x in samples], dtype=torch.long).index_select(0, order)
ntokens = sum((x.target.size(0) for x in samples))
speaker = None
if (self.speaker_to_id is not None):
speaker = torch.tensor([s.speaker_id for s in samples], dtype=torch.long).index_select(0, order).view((- 1), 1)
net_input = {'src_tokens': frames, 'src_lengths': n_frames}
out = {'id': indices, 'net_input': net_input, 'speaker': speaker, 'target': target, 'target_lengths': target_lengths, 'ntokens': ntokens, 'nsentences': len(samples)}
if return_order:
out['order'] = order
return out |
def test_emission_matrix(model, X):
e = model._emission_matrix(X)
assert_array_almost_equal(e, [[[(- 4.3782), (- 3.6372)], [(- 7.2354), (- 2.7799)], [(- 21.0449), (- 4.2237)], [(- 24.8544), (- 5.2129)], [(- 1.9973), (- 4.6479)]], [[(- 42.9497), (- 7.7994)], [(- 1.5211), (- 3.9812)], [(- 17.7116), (- 3.9011)], [(- 1.0449), (- 3.3146)], [(- 13.902), (- 3.425)]]], 4) |
class IBertConfig(PretrainedConfig):
model_type = 'ibert'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', quant_mode=False, force_dequant='none', **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.quant_mode = quant_mode
self.force_dequant = force_dequant |
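A usage sketch, assuming transformers' PretrainedConfig base class is importable: any field left out falls back to the defaults above, so enabling quantization is a one-liner.
config = IBertConfig(quant_mode=True, num_hidden_layers=6)
print(config.hidden_size, config.quant_mode)  # 768 True |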
class OwlViTProcessor(ProcessorMixin):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'OwlViTImageProcessor'
tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
if ('feature_extractor' in kwargs):
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = (image_processor if (image_processor is not None) else feature_extractor)
if (image_processor is None):
raise ValueError('You need to specify an `image_processor`.')
if (tokenizer is None):
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(image_processor, tokenizer)
def __call__(self, text=None, images=None, query_images=None, padding='max_length', return_tensors='np', **kwargs):
if ((text is None) and (query_images is None) and (images is None)):
raise ValueError('You have to specify at least one text or query image or image. All three cannot be none.')
if (text is not None):
if (isinstance(text, str) or (isinstance(text, List) and (not isinstance(text[0], List)))):
encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
elif (isinstance(text, List) and isinstance(text[0], List)):
encodings = []
max_num_queries = max([len(t) for t in text])
for t in text:
if (len(t) != max_num_queries):
t = (t + ([' '] * (max_num_queries - len(t))))
encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
encodings.append(encoding)
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
if (return_tensors == 'np'):
input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
elif ((return_tensors == 'jax') and is_flax_available()):
import jax.numpy as jnp
input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
elif ((return_tensors == 'pt') and is_torch_available()):
import torch
input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
elif ((return_tensors == 'tf') and is_tf_available()):
import tensorflow as tf
input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
else:
raise ValueError('Target return tensor type could not be returned')
encoding = BatchEncoding()
encoding['input_ids'] = input_ids
encoding['attention_mask'] = attention_mask
if (query_images is not None):
encoding = BatchEncoding()
query_pixel_values = self.image_processor(query_images, return_tensors=return_tensors, **kwargs).pixel_values
encoding['query_pixel_values'] = query_pixel_values
if (images is not None):
image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
if ((text is not None) and (images is not None)):
encoding['pixel_values'] = image_features.pixel_values
return encoding
elif ((query_images is not None) and (images is not None)):
encoding['pixel_values'] = image_features.pixel_values
return encoding
elif ((text is not None) or (query_images is not None)):
return encoding
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def post_process(self, *args, **kwargs):
return self.image_processor.post_process(*args, **kwargs)
def post_process_object_detection(self, *args, **kwargs):
return self.image_processor.post_process_object_detection(*args, **kwargs)
def post_process_image_guided_detection(self, *args, **kwargs):
return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor |
def sample_mixture_normal(mean, logvar, pi):
(b, c, h, w, n_mixtures) = tuple(map(int, pi.size()))
pi = pi.view((((b * c) * h) * w), n_mixtures)
sampled_pi = torch.multinomial(pi, num_samples=1).view((- 1))
mean = mean.view((((b * c) * h) * w), n_mixtures)
mean = mean[(torch.arange((((b * c) * h) * w)), sampled_pi)].view(b, c, h, w)
logvar = logvar.view((((b * c) * h) * w), n_mixtures)
logvar = logvar[(torch.arange((((b * c) * h) * w)), sampled_pi)].view(b, c, h, w)
y = sample_normal(mean, logvar)
return y |
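A self-contained sketch of the component-selection step above: `pi` is flattened to [b*c*h*w, n_mixtures], `multinomial` picks one component per location, and advanced indexing gathers the matching mean (`sample_normal` itself is assumed to be defined elsewhere as mean + std * noise):
import torch
(b, c, h, w, n_mixtures) = (1, 1, 2, 2, 3)
pi = torch.softmax(torch.randn(b * c * h * w, n_mixtures), dim=-1)
picked = torch.multinomial(pi, num_samples=1).view(-1)  # one component id per location
mean = torch.randn(b * c * h * w, n_mixtures)
selected = mean[(torch.arange(b * c * h * w), picked)].view(b, c, h, w)
print(selected.shape)  # torch.Size([1, 1, 2, 2]) |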
def read_text(text_file):
for line in text_file:
parts = line.strip().split()
if (len(parts) < 1):
raise RuntimeError('Did not get enough columns; line {0} in {1}'.format(line, text_file.name))
elif (len(parts) == 1):
logger.warning('Empty transcript for utterance %s in %s', parts[0], text_file.name)
(yield (parts[0], []))
else:
(yield (parts[0], parts[1:]))
text_file.close() |
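A runnable demo of the generator above using an in-memory file; lines with at least two columns never touch `text_file.name`, so a plain StringIO suffices:
import io
f = io.StringIO('utt1 hello world\nutt2 foo bar\n')
for utt_id, words in read_text(f):
    print(utt_id, words)
# utt1 ['hello', 'world']
# utt2 ['foo', 'bar'] |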
class ONNXModel(BaseModel):
def __init__(self, model, **kwargs):
self._model = (model if (not isinstance(model, str)) else onnx.load(model, load_external_data=False))
self._model_path = (None if (not isinstance(model, str)) else model)
self.check_is_large_model()
if (self._is_large_model and (self._model_path is None) and (not kwargs.get('ignore_warning', False))):
logger.warning('Model size > 2GB. Please use model path instead of onnx model object to quantize')
if (self._is_large_model and isinstance(model, str) and kwargs.get('load_external_data', True)):
from onnx.external_data_helper import load_external_data_for_model
load_external_data_for_model(self._model, os.path.dirname(self._model_path))
self._config = None
if (isinstance(model, str) and os.path.exists(Path(model).parent.joinpath('config.json').as_posix())):
from transformers import PretrainedConfig
self._config = PretrainedConfig.from_pretrained(Path(model).parent.as_posix())
self.node_name_counter = {}
self._output_name_to_node = {}
self._input_name_to_nodes = {}
self._get_input_name_to_nodes(self._model.graph.node)
self._get_output_name_to_node(self._model.graph.node)
self._graph_info = {}
self._get_graph_info()
self._q_config = None
def check_is_large_model(self):
init_size = 0
for init in self._model.graph.initializer:
if (init.HasField('data_location') and (init.data_location == onnx.TensorProto.EXTERNAL)):
self._is_large_model = True
return
try:
init_bytes = init.SerializeToString()
init_size += sys.getsizeof(init_bytes)
except Exception as e:
if ('exceeds maximum protobuf size of 2GB' in str(e)):
self._is_large_model = True
return
else:
raise e
if (init_size > MAXIMUM_PROTOBUF):
self._is_large_model = True
return
self._is_large_model = False
@property
def is_large_model(self):
return self._is_large_model
@property
def model_path(self):
return self._model_path
@model_path.setter
def model_path(self, path):
self._model_path = path
@property
def framework(self):
return 'onnxruntime'
@property
def q_config(self):
return self._q_config
@q_config.setter
def q_config(self, q_config):
self._q_config = q_config
@property
def hf_config(self):
return self._config
@property
def model(self):
return self._model
@model.setter
def model(self, model):
self._model = model
self._graph_info = {}
self._get_graph_info()
self._output_name_to_node = {}
self._input_name_to_nodes = {}
self._get_input_name_to_nodes(self._model.graph.node)
self._get_output_name_to_node(self._model.graph.node)
def input(self):
return [i.name for i in self._model.graph.input]
def output(self):
return [i.name for i in self._model.graph.output]
def update(self):
self._graph_info = {}
self._get_graph_info()
self._output_name_to_node = {}
self._input_name_to_nodes = {}
self._get_input_name_to_nodes(self._model.graph.node)
self._get_output_name_to_node(self._model.graph.node)
@property
def graph_info(self):
return self._graph_info
def _get_graph_info(self):
for node in self._model.graph.node:
self.graph_info.update({node.name: node.op_type})
def save(self, root):
if ((os.path.split(root)[0] != '') and (not os.path.exists(os.path.split(root)[0]))):
raise ValueError('"root" directory does not exists.')
if self.is_large_model:
from onnx.external_data_helper import load_external_data_for_model
load_external_data_for_model(self._model, os.path.split(self._model_path)[0])
onnx.save_model(self._model, root, save_as_external_data=True, all_tensors_to_one_file=True, location=(root.split('/')[(- 1)] + '_data'), size_threshold=1024, convert_attribute=False)
else:
onnx.save(self._model, root)
if (self._config is not None):
model_type = ('' if (not hasattr(self._config, 'model_type')) else getattr(self._config, 'model_type'))
setattr(self._config.__class__, 'model_type', model_type)
output_config_file = Path(root).parent.joinpath('config.json').as_posix()
self._config.to_json_file(output_config_file, use_diff=False)
def nodes(self):
return self._model.graph.node
def initializer(self):
return self._model.graph.initializer
def graph(self):
return self._model.graph
def ir_version(self):
return self._model.ir_version
def opset_import(self):
return self._model.opset_import
def remove_node(self, node):
if (node in self._model.graph.node):
self._model.graph.node.remove(node)
def remove_nodes(self, nodes_to_remove):
for node in nodes_to_remove:
self.remove_node(node)
def add_node(self, node):
self._model.graph.node.extend([node])
def add_nodes(self, nodes_to_add):
self._model.graph.node.extend(nodes_to_add)
def add_initializer(self, tensor):
if (ortq.find_by_name(tensor.name, self._model.graph.initializer) is None):
self._model.graph.initializer.extend([tensor])
def add_initializers(self, tensors):
for tensor in tensors:
self.add_initializer(tensor)
def get_initializer(self, name):
for tensor in self._model.graph.initializer:
if (tensor.name == name):
return tensor
return None
def get_initializer_share_num(self, name):
num = 0
if (self.get_initializer(name) is None):
return num
for node in self.nodes():
if (name in node.input):
num += 1
return num
def get_node(self, name):
for node in self._model.graph.node:
if (node.name == name):
return node
return None
def remove_initializer(self, tensor):
if (tensor in self._model.graph.initializer):
self._model.graph.initializer.remove(tensor)
def remove_initializers(self, init_to_remove):
for initializer in init_to_remove:
self.remove_initializer(initializer)
def set_initializer(self, tensor, array, raw=False):
old_tensor = self.get_initializer(tensor)
self.remove_initializer(old_tensor)
dims = old_tensor.dims
data_type = old_tensor.data_type
new_tensor = (onnx.helper.make_tensor(tensor, data_type, dims, array.flatten().tolist()) if (not raw) else onnx.helper.make_tensor(tensor, data_type, dims, array.tobytes(), raw=raw))
self.add_initializer(new_tensor)
@property
def input_name_to_nodes(self):
return self._input_name_to_nodes
def _get_input_name_to_nodes(self, nodes):
for node in nodes:
attrs = [attr for attr in node.attribute if ((attr.type == onnx.AttributeProto.GRAPH) or (attr.type == onnx.AttributeProto.GRAPHS))]
if (len(attrs) > 0):
for attr in attrs:
self._get_input_name_to_nodes(attr.g.node)
for input_name in node.input:
if (len(input_name.strip()) != 0):
if (input_name not in self._input_name_to_nodes):
self._input_name_to_nodes[input_name] = [node]
else:
self._input_name_to_nodes[input_name].append(node)
@property
def output_name_to_node(self):
return self._output_name_to_node
def _get_output_name_to_node(self, nodes):
for node in nodes:
attrs = [attr for attr in node.attribute if ((attr.type == onnx.AttributeProto.GRAPH) or (attr.type == onnx.AttributeProto.GRAPHS))]
if (len(attrs) > 0):
for attr in attrs:
self._get_output_name_to_node(attr.g.node)
for output_name in node.output:
if (len(output_name.strip()) != 0):
self._output_name_to_node[output_name] = node
def get_siblings(self, node):
siblings = []
for parent in self.get_parents(node):
for child in self.get_children(parent):
if (child.name != node.name):
siblings.append(child)
return siblings
def get_children(self, node, input_name_to_nodes=None):
if (input_name_to_nodes is None):
input_name_to_nodes = self._input_name_to_nodes
children = []
for output in node.output:
if (output in input_name_to_nodes):
for child in input_name_to_nodes[output]:
children.append(child)
return children
def get_parents(self, node, output_name_to_node=None):
if (output_name_to_node is None):
output_name_to_node = self._output_name_to_node
parents = []
for input in node.input:
if (input in output_name_to_node):
parents.append(output_name_to_node[input])
return parents
def get_parent(self, node, idx, output_name_to_node=None):
if (output_name_to_node is None):
output_name_to_node = self._output_name_to_node
if (len(node.input) <= idx):
return None
input = node.input[idx]
if (input not in output_name_to_node):
return None
return output_name_to_node[input]
def find_node_by_name(self, node_name, new_nodes_list, graph):
graph_nodes_list = list(graph.node)
graph_nodes_list.extend(new_nodes_list)
node = ortq.find_by_name(node_name, graph_nodes_list)
return node
def find_nodes_by_initializer(self, graph, initializer):
nodes = []
for node in graph.node:
for node_input in node.input:
if (node_input == initializer.name):
nodes.append(node)
return nodes
def get_scale_zero(self, tensor):
if (not tensor.endswith('_quantized')):
logger.debug('Tensor {} in the quantized graph is not quantized.'.format(tensor))
return (None, None)
def _searcher(tensor_name):
node = self._input_name_to_nodes[tensor_name][0]
parent = (self._output_name_to_node[tensor_name] if (tensor_name in self._output_name_to_node) else None)
direct_int8 = ['Reshape', 'Transpose', 'Squeeze', 'Unsqueeze', 'MaxPool', 'Pad', 'Split']
if ((parent is not None) and (parent.op_type in direct_int8)):
fp32_tensor_name = parent.input[0].replace('_quantized', '').replace('_QuantizeLinear', '').replace('_QuantizeInput', '')
elif (node.op_type in ['Gather']):
fp32_tensor_name = node.output[0].replace('_quantized', '').replace('_QuantizeLinear', '').replace('_QuantizeInput', '')
else:
fp32_tensor_name = tensor_name.replace('_quantized', '').replace('_QuantizeLinear', '').replace('_QuantizeInput', '')
scale = (fp32_tensor_name + '_scale')
scale_tensor = self.get_initializer(scale)
zo = (fp32_tensor_name + '_zero_point')
zo_tensor = self.get_initializer(zo)
if ((scale_tensor is None) or (zo_tensor is None)):
if (parent is not None):
(scale_tensor, zo_tensor) = _searcher(parent.input[0])
return (scale_tensor, zo_tensor)
node = self._input_name_to_nodes[tensor][0]
if (((node.op_type == 'QLinearConv') and (tensor == node.input[(- 1)])) or ((node.op_type == 'QGemm') and (tensor == node.input[(- 3)]))):
return (None, None)
else:
(scale_tensor, zo_tensor) = _searcher(tensor)
assert scale_tensor, 'missing scale for tensor {}'.format(tensor)
assert zo_tensor, 'missing zero point for tensor {}'.format(tensor)
return (scale_tensor, zo_tensor)
def save_model_to_file(self, output_path, use_external_data_format=False):
from onnx.external_data_helper import convert_model_to_external_data
if use_external_data_format:
convert_model_to_external_data(self._model, all_tensors_to_one_file=True, location=(Path(output_path).name + '.data'))
onnx.save_model(self._model, output_path)
@staticmethod
def replace_node_input(node, old_input_name, new_input_name):
assert (isinstance(old_input_name, str) and isinstance(new_input_name, str))
for j in range(len(node.input)):
if (node.input[j] == old_input_name):
node.input[j] = new_input_name
def replace_input_of_all_nodes(self, old_input_name, new_input_name, white_optype=[], black_optype=[]):
if (len(white_optype) > 0):
for node in self.model.graph.node:
if (node.op_type in white_optype):
ONNXModel.replace_node_input(node, old_input_name, new_input_name)
else:
for node in self.model.graph.node:
if (node.op_type not in black_optype):
ONNXModel.replace_node_input(node, old_input_name, new_input_name)
@staticmethod
def replace_node_output(node, old_output_name, new_output_name):
assert (isinstance(old_output_name, str) and isinstance(new_output_name, str))
for j in range(len(node.output)):
if (node.output[j] == old_output_name):
node.output[j] = new_output_name
def replace_output_of_all_nodes(self, old_output_name, new_output_name, white_optype=[], black_optype=[]):
if (len(white_optype) > 0):
for node in self.model.graph.node:
if (node.op_type in white_optype):
ONNXModel.replace_node_output(node, old_output_name, new_output_name)
else:
for node in self.model.graph.node:
if (node.op_type not in black_optype):
ONNXModel.replace_node_output(node, old_output_name, new_output_name)
def remove_unused_nodes(self):
unused_nodes = []
nodes = self.nodes()
for node in nodes:
if ((node.op_type == 'Constant') and (node.output[0] not in self.output()) and (node.output[0] not in self._input_name_to_nodes)):
unused_nodes.append(node)
elif ((node.op_type == 'QuantizeLinear') and (len(self.get_children(node)) == 1) and (self.get_children(node)[0].op_type == 'DequantizeLinear') and (node.input[0] not in self._output_name_to_node) and (self.get_children(node)[0].output[0] not in self._input_name_to_nodes)):
unused_nodes.append(node)
unused_nodes.extend(self.get_children(node))
else:
unused = True
for output in node.output:
if ((output in self._input_name_to_nodes) or (output in self.output())):
unused = False
break
for input in node.input:
if (self.get_initializer(input) is not None):
continue
elif ((input in self._output_name_to_node) or (input in self.input())):
unused = False
break
if unused:
unused_nodes.append(node)
self.remove_nodes(unused_nodes)
unused_weights = []
for w in self._model.graph.initializer:
if ((w.name not in self._input_name_to_nodes) and (w.name not in self.output())):
unused_weights.append(w)
for graph_input in self.graph().input:
if (graph_input.name == w.name):
self.graph().input.remove(graph_input)
self.remove_initializers(unused_weights)
self.update()
def topological_sort(self, enable_subgraph=False):
import copy
from collections import deque
from functools import reduce
if (not enable_subgraph):
input_name_to_nodes = {}
output_name_to_node = {}
for node in self.model.graph.node:
for input_name in node.input:
if (len(input_name.strip()) != 0):
if (input_name not in input_name_to_nodes):
input_name_to_nodes[input_name] = [node]
else:
input_name_to_nodes[input_name].append(node)
for output_name in node.output:
if (len(output_name.strip()) != 0):
output_name_to_node[output_name] = node
else:
input_name_to_nodes = self._input_name_to_nodes
output_name_to_node = self._output_name_to_node
all_nodes = {}
q = deque()
wait = deque()
for inp in self.model.graph.input:
q.extend(input_name_to_nodes[inp.name])
for n in self.model.graph.node:
if all([((i not in output_name_to_node) and (i not in self.input())) for i in n.input]):
q.append(n)
while q:
n = q.popleft()
if (not all([(output_name_to_node[i].name in all_nodes) for i in n.input if (i in output_name_to_node)])):
if (n not in wait):
wait.append(n)
continue
all_nodes[n.name] = n
for out in n.output:
if (out in input_name_to_nodes):
q.extend([i for i in input_name_to_nodes[out] if ((i.name not in all_nodes) and (i not in q))])
if ((len(q) == 0) and (len(wait) != 0)):
q = copy.deepcopy(wait)
wait.clear()
nodes = [i[1] for i in all_nodes.items()]
assert (len(list(set([n.name for n in nodes]))) == len(list(set([n.name for n in self.model.graph.node]))))
self.model.graph.ClearField('node')
self.model.graph.node.extend(nodes)
def get_nodes_chain(self, start, stop, result_chain=None):
from collections import deque
from onnx import NodeProto
if (result_chain is None):
result_chain = []
start_node = deque()
for node in start:
if isinstance(node, str):
start_node.append(node)
elif isinstance(node, NodeProto):
start_node.append(node.name)
else:
assert False, "'get_nodes_chain' function only support list[string]or list[NodeProto] params"
stop_node = []
for node in stop:
if isinstance(node, str):
stop_node.append(node)
elif isinstance(node, NodeProto):
stop_node.append(node.name)
else:
assert False, "'get_nodes_chain' function only support list[string]or list[NodeProto] params"
while start_node:
node_name = start_node.popleft()
if (node_name in stop_node):
continue
if (node_name not in result_chain):
result_chain.append(node_name)
else:
continue
node = ortq.find_by_name(node_name, list(self.model.graph.node))
for parent in self.get_parents(node):
start_node.append(parent.name)
return result_chain
def find_split_node_for_layer_wise_quantization(self):
start_nodes = []
for node in self._model.graph.node:
(start_node, qkv_nodes_list) = (None, None)
if (node.op_type == 'SkipLayerNormalization'):
start_node = node
qkv_nodes_list = [self.match_parent_path(start_node, ['MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [None, 0, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [1, 1, 0, 0, 0])]
if (node.op_type == 'Add'):
start_node = node
qkv_nodes_list = [self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [0, None, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [1, None, 0, 0, 0]), self.match_parent_path(start_node, ['Reshape', 'Gemm', 'Reshape', 'Reshape', 'Transpose', 'MatMul'], [None, 0, 0, 0, 0, 0], output_name_to_node=self.output_name_to_node, return_indice=[]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [0, None, 0, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [1, None, 0, 0, 0, 0]), self.match_parent_path(start_node, ['MatMul', 'Mul', 'MatMul', 'Mul', 'Div', 'Add'], [None, 0, None, 0, None, 0]), self.match_parent_path(start_node, ['MatMul', 'Mul', 'MatMul', 'SimplifiedLayerNormalization', 'Add'], [None, 0, None, 0, 0])]
if (not start_node):
continue
if (not any(qkv_nodes_list)):
continue
start_nodes.append(start_node)
return start_nodes
def find_qkv_in_attention(self, find_all=False):
qkv = []
for node in self._model.graph.node:
if (node.op_type == 'Attention'):
qkv.append([node.name])
continue
(start_node, qkv_nodes_list) = (None, None)
if (node.op_type == 'SkipLayerNormalization'):
start_node = node
qkv_nodes_list = [self.match_parent_path(start_node, ['MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [None, 0, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [1, 1, 0, 0, 0])]
if (node.op_type == 'Add'):
start_node = node
qkv_nodes_list = [self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [0, None, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'], [1, None, 0, 0, 0]), self.match_parent_path(start_node, ['Reshape', 'Gemm', 'Reshape', 'Reshape', 'Transpose', 'MatMul'], [None, 0, 0, 0, 0, 0], output_name_to_node=self.output_name_to_node, return_indice=[]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [0, None, 0, 0, 0, 0]), self.match_parent_path(start_node, ['Add', 'MatMul', 'Reshape', 'Transpose', 'Reshape', 'MatMul'], [1, None, 0, 0, 0, 0])]
if (not start_node):
continue
if (not any(qkv_nodes_list)):
continue
qkv_nodes = [qkv for qkv in qkv_nodes_list if (qkv is not None)][(- 1)]
other_inputs = []
for input in start_node.input:
if (input not in self.output_name_to_node):
continue
if (input == qkv_nodes[0].output[0]):
continue
other_inputs.append(input)
if (len(other_inputs) != 1):
continue
root_input = other_inputs[0]
input_name_to_nodes = self.input_name_to_nodes
children = input_name_to_nodes[root_input]
children_types = [child.op_type for child in children]
if (children_types.count('MatMul') == 3):
qkv.append([child.name for child in children if (child.op_type == 'MatMul')])
if (not find_all):
break
return qkv
def find_ffn_matmul(self, attention_index, attention_matmul_list, block_len):
ffn_matmul = []
for idx in range(len(attention_index)):
if (idx != (len(attention_index) - 1)):
index = attention_index[(idx + 1)]
if ((index - 2) >= 0):
ffn_matmul.append([attention_matmul_list[(index - 2)], attention_matmul_list[(index - 1)]])
else:
index = attention_index[idx]
if (((index + block_len) - 1) < len(attention_matmul_list)):
ffn_matmul.append([attention_matmul_list[((index + block_len) - 2)], attention_matmul_list[((index + block_len) - 1)]])
return ffn_matmul
def export(self, save_path, conf):
from neural_compressor.config import ONNXQlinear2QDQConfig
from neural_compressor.experimental.export import onnx_qlinear_to_qdq
if isinstance(conf, ONNXQlinear2QDQConfig):
(add_nodes, remove_nodes, inits) = onnx_qlinear_to_qdq(self._model, self._input_name_to_nodes)
self.add_nodes(add_nodes)
self.remove_nodes(remove_nodes)
self.add_initializers(inits)
self.update()
self.remove_unused_nodes()
self.topological_sort()
self.save(save_path)
else:
logger.warning('Unsupported config for export, only ONNXQlinear2QDQConfig is supported!')
exit(0)
def add_tensors_to_outputs(self, tensor_names):
added_outputs = []
for tensor in tensor_names:
if (tensor not in self.output()):
added_tensor = onnx.helper.ValueInfoProto()
added_tensor.name = tensor
added_outputs.append(added_tensor)
self._model.graph.output.extend(added_outputs)
def remove_tensors_from_outputs(self, tensor_names):
removed_outputs = []
for tensor in tensor_names:
if (tensor in self.output()):
removed_outputs.append(self._model.graph.output[self.output().index(tensor)])
for output in removed_outputs:
self._model.graph.output.remove(output)
def match_first_parent(self, node, parent_op_type, output_name_to_node, exclude=[]):
for (i, input) in enumerate(node.input):
if (input in output_name_to_node):
parent = output_name_to_node[input]
if ((parent.op_type == parent_op_type) and (parent not in exclude)):
return (parent, i)
return (None, None)
def match_parent(self, node, parent_op_type, input_index=None, output_name_to_node=None, exclude=[], return_indice=None):
assert (node is not None)
assert ((input_index is None) or (input_index >= 0))
if (output_name_to_node is None):
output_name_to_node = self._output_name_to_node
if (input_index is None):
(parent, index) = self.match_first_parent(node, parent_op_type, output_name_to_node, exclude)
if (return_indice is not None):
return_indice.append(index)
return parent
if (input_index >= len(node.input)):
return None
parent = self.get_parent(node, input_index, output_name_to_node)
if ((parent is not None) and (parent.op_type == parent_op_type) and (parent not in exclude)):
return parent
return None
def match_parent_path(self, node, parent_op_types, parent_input_index, output_name_to_node=None, return_indice=None):
assert (len(parent_input_index) == len(parent_op_types))
if (output_name_to_node is None):
output_name_to_node = self._output_name_to_node
current_node = node
matched_parents = []
for (i, op_type) in enumerate(parent_op_types):
matched_parent = self.match_parent(current_node, op_type, parent_input_index[i], output_name_to_node, exclude=[], return_indice=return_indice)
if (matched_parent is None):
return None
matched_parents.append(matched_parent)
current_node = matched_parent
return matched_parents
def is_smoothquant_model(self):
for init in self.model.graph.initializer:
if ('_smooth_scale' in init.name):
return True
return False
def find_split_nodes(self):
split_nodes = self.find_split_node_for_layer_wise_quantization()
return split_nodes
def split_model_with_node(self, split_node_name, path_of_model_to_split, shape_infer=True, save_both_split_models=True):
split_model_part_1 = onnx.ModelProto()
split_model_part_1.CopyFrom(self._model)
split_model_part_1.graph.ClearField('node')
split_model_part_2 = onnx.ModelProto()
split_model_part_2.CopyFrom(self._model)
split_model_part_2.graph.ClearField('node')
split_node_output = None
part_idx = 1
for node in self._model.graph.node:
if (part_idx == 1):
split_model_part_1.graph.node.append(node)
elif (part_idx == 2):
split_model_part_2.graph.node.append(node)
if (node.name == split_node_name):
split_node_output = node.output
part_idx = 2
assert (len(split_node_output) == 1), 'Only support split at node with 1 output tensor, while current split node {} has {} output tensors'.format(split_node_name, len(split_node_output))
split_tensor_name = split_node_output[0]
if shape_infer:
try:
from neural_compressor.adaptor.ox_utils.util import infer_shapes
self._model = infer_shapes(self._model, auto_merge=True, base_dir=os.path.dirname(self._model_path))
except Exception as e:
logger.error("Shape infer fails for layer-wise quantization. We would recommend checking the graph optimization level of your model and setting it to 'DISABLE_ALL' or 'ENABLE_BASIC', as this may help avoid this error.")
raise e
(split_tensor_type, split_tensor_shape) = self._get_output_type_shape_by_tensor_name(split_tensor_name)
split_tensor = onnx.helper.make_tensor_value_info(split_tensor_name, split_tensor_type, split_tensor_shape)
split_model_part_1 = ONNXModel(split_model_part_1, ignore_warning=True)
split_model_part_2 = ONNXModel(split_model_part_2, ignore_warning=True)
split_model_part_1._remove_unused_input_output()
split_model_part_2._remove_unused_input_output()
split_model_part_1.model.graph.output.append(split_tensor)
split_model_part_2.model.graph.input.append(split_tensor)
insert_output_for_model_1 = []
insert_input_for_model_2 = []
for output in split_model_part_1.output_name_to_node.keys():
if (output in split_model_part_2.input_name_to_nodes.keys()):
(output_type, output_shape) = self._get_output_type_shape_by_tensor_name(output)
output_tensor = onnx.helper.make_tensor_value_info(output, output_type, output_shape)
if (output_tensor not in split_model_part_1.model.graph.output):
insert_output_for_model_1.append(output_tensor)
if (output_tensor not in split_model_part_2.model.graph.input):
insert_input_for_model_2.append(output_tensor)
for output in insert_output_for_model_1:
split_model_part_1.model.graph.output.append(output)
for input in insert_input_for_model_2:
split_model_part_2.model.graph.input.append(input)
split_model_part_1.remove_unused_init()
split_model_part_2.remove_unused_init()
split_model_part_1.update()
split_model_part_2.update()
dir_of_model_to_split = os.path.dirname(path_of_model_to_split)
split_model_part_1.load_model_initializer_by_tensor(dir_of_model_to_split)
split_model_part_1_path = os.path.join(dir_of_model_to_split, 'split_model_part_1.onnx')
split_model_part_1.model_path = split_model_part_1_path
split_model_part_1._save_split_model(split_model_part_1_path)
split_model_part_1.check_is_large_model()
logger.debug('save split model part 1 to {} for layer wise quantization'.format(split_model_part_1_path))
if save_both_split_models:
split_model_part_2.load_model_initializer_by_tensor(dir_of_model_to_split)
split_model_part_2_path = os.path.join(dir_of_model_to_split, 'split_model_part_2.onnx')
split_model_part_2.model_path = split_model_part_2_path
split_model_part_2._save_split_model(split_model_part_2_path)
split_model_part_2.check_is_large_model()
logger.debug('save split model part 2 to {} for layer wise quantization'.format(split_model_part_2_path))
return (split_model_part_1, split_model_part_2)
def _save_split_model(self, save_path):
if os.path.exists((save_path + '_data')):
os.remove((save_path + '_data'))
onnx.save_model(self._model, save_path, save_as_external_data=True, all_tensors_to_one_file=True, location=(save_path.split('/')[(- 1)] + '_data'), size_threshold=1024, convert_attribute=False)
def _get_output_type_shape_by_tensor_name(self, tensor_name):
elem_type = onnx.TensorProto.FLOAT
shape = None
for output in self._model.graph.value_info:
if (output.name == tensor_name):
elem_type = output.type.tensor_type.elem_type
shape = [(dim.dim_value if dim.HasField('dim_value') else (- 1)) for dim in output.type.tensor_type.shape.dim]
break
return (elem_type, shape)
def _remove_unused_input_output(self):
remove_outputs = []
remove_inputs = []
for output in self._model.graph.output:
if (output.name not in self.output_name_to_node.keys()):
remove_outputs.append(output)
for input in self._model.graph.input:
if (input.name not in self.input_name_to_nodes.keys()):
remove_inputs.append(input)
for output in remove_outputs:
self._model.graph.output.remove(output)
for input in remove_inputs:
self._model.graph.input.remove(input)
def remove_unused_init(self):
removed_inits = []
for init in self._model.graph.initializer:
if (init.name not in self.input_name_to_nodes.keys()):
removed_inits.append(init)
self.remove_initializers(removed_inits)
def load_model_initializer_by_tensor(self, data_path=None):
from onnx.external_data_helper import load_external_data_for_tensor
if (data_path is None):
data_path = os.path.dirname(self._model_path)
for init in self._model.graph.initializer:
if (init.HasField('data_location') and (init.data_location == onnx.TensorProto.EXTERNAL)):
load_external_data_for_tensor(init, data_path)
def write_external_data_to_new_location(self, external_data_location='external.data', overwrite=False):
from onnx.external_data_helper import convert_model_to_external_data, write_external_data_tensors
if (overwrite and os.path.exists(os.path.join(os.path.dirname(self._model_path), external_data_location))):
os.remove(os.path.join(os.path.dirname(self._model_path), external_data_location))
self.load_model_initializer_by_tensor()
convert_model_to_external_data(self._model, location=external_data_location)
write_external_data_tensors(self._model, filepath=os.path.dirname(self._model_path))
def merge_split_models(self, to_merge_model):
to_merge_model.write_external_data_to_new_location()
self.add_nodes([node for node in to_merge_model.nodes()])
self.add_initializers([init for init in to_merge_model.initializer()])
self.update()
for output in to_merge_model.graph().output:
if (output.name not in self.output()):
self._model.graph.output.append(output)
remove_output = []
for output in self._model.graph.output:
if (output.name in to_merge_model.input()):
remove_output.append(output)
for output in remove_output:
self._model.graph.output.remove(output)
for input in to_merge_model.graph().input:
if ((input.name not in self.input()) and (input.name not in self.output()) and (input.name not in self.output_name_to_node.keys())):
self._model.graph.input.append(input)
def re_org_output(self, origin_output):
outputs = {}
tmp_remove = []
for output in self._model.graph.output:
outputs[output.name] = output
tmp_remove.append(output)
for output in tmp_remove:
self._model.graph.output.remove(output)
for out_name in origin_output:
self._model.graph.output.append(outputs[out_name]) |
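A hedged end-to-end sketch of the traversal helpers above on a two-node graph; it assumes this module's globals (onnx, BaseModel, logger, MAXIMUM_PROTOBUF) are in scope, and the node names are made up:
import onnx
from onnx import helper, TensorProto
# Build Relu -> Identity and wrap it; empty initializer list keeps init cheap.
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 4])
relu = helper.make_node('Relu', ['X'], ['mid'], name='relu0')
ident = helper.make_node('Identity', ['mid'], ['Y'], name='id0')
m = ONNXModel(helper.make_model(helper.make_graph([relu, ident], 'demo', [X], [Y])))
print([n.name for n in m.get_children(relu)])   # ['id0']
print([n.name for n in m.get_parents(ident)])   # ['relu0'] |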
def weights_from_ranking(rankings):
assert (len(rankings) > 0), 'rankings must be non-empty'
if isinstance(rankings[0], int):
rankings = [rankings]
rankings_num = len(rankings)
rankings_len = len(rankings[0])
assert all(((len(rankings[i]) == rankings_len) for i in range(rankings_num)))
total_score = []
for i in range(rankings_len):
total_score.append(mul((ranking[i] for ranking in rankings)))
total_ranking = {i: r for (r, i) in enumerate(np.argsort(np.array(total_score)))}
if (rankings_num == 1):
assert all(((total_ranking[i] == rankings[0][i]) for i in total_ranking.keys()))
weights = ([0.0] * rankings_len)
for i in range(rankings_len):
weights[i] = (1.0 / (total_ranking[i] + 1))
return weights |
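A numeric demo, assuming `mul` is a helper defined elsewhere that multiplies an iterable (e.g. functools.reduce(operator.mul, xs)): the item ranked best overall gets weight 1, the next 1/2, then 1/3, and so on.
print(weights_from_ranking([0, 2, 1]))  # [1.0, 0.3333..., 0.5] |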
class AutoPipelineForImage2Image(ConfigMixin):
config_name = 'model_index.json'
def __init__(self, *args, **kwargs):
raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_pipe(pipeline)` methods.')
@classmethod
def from_pretrained(cls, pretrained_model_or_path, **kwargs):
config = cls.load_config(pretrained_model_or_path)
orig_class_name = config['_class_name']
if ('controlnet' in kwargs):
orig_class_name = config['_class_name'].replace('Pipeline', 'ControlNetPipeline')
image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name)
return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs)
@classmethod
def from_pipe(cls, pipeline, **kwargs):
original_config = dict(pipeline.config)
original_cls_name = pipeline.__class__.__name__
image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name)
(expected_modules, optional_kwargs) = _get_signature_keys(image_2_image_cls)
pretrained_model_name_or_path = original_config.pop('_name_or_path', None)
passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if (k in kwargs)}
original_class_obj = {k: pipeline.components[k] for (k, v) in pipeline.components.items() if ((k in expected_modules) and (k not in passed_class_obj))}
passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if (k in kwargs)}
original_pipe_kwargs = {k: original_config[k] for (k, v) in original_config.items() if ((k in optional_kwargs) and (k not in passed_pipe_kwargs))}
additional_pipe_kwargs = [k[1:] for k in original_config.keys() if (k.startswith('_') and (k[1:] in optional_kwargs) and (k[1:] not in passed_pipe_kwargs))]
for k in additional_pipe_kwargs:
original_pipe_kwargs[k] = original_config.pop(f'_{k}')
image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs}
unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": original_config[k] for (k, v) in original_config.items() if (k not in image_2_image_kwargs)}
missing_modules = ((set(expected_modules) - set(pipeline._optional_components)) - set(image_2_image_kwargs.keys()))
if (len(missing_modules) > 0):
raise ValueError(f'Pipeline {image_2_image_cls} expected {expected_modules}, but only {set((list(passed_class_obj.keys()) + list(original_class_obj.keys())))} were passed')
model = image_2_image_cls(**image_2_image_kwargs)
model.register_to_config(_name_or_path=pretrained_model_name_or_path)
model.register_to_config(**unused_original_config)
return model |
def get_colorize_data(sz: int, bs: int, crappy_path: Path, good_path: Path, random_seed: int=None, keep_pct: float=1.0, num_workers: int=8, stats: tuple=imagenet_stats, xtra_tfms=[]) -> ImageDataBunch:
src = ImageImageList.from_folder(crappy_path, convert_mode='RGB').use_partial_data(sample_pct=keep_pct, seed=random_seed).split_by_rand_pct(0.1, seed=random_seed)
data = src.label_from_func((lambda x: (good_path / x.relative_to(crappy_path)))).transform(get_transforms(max_zoom=1.2, max_lighting=0.5, max_warp=0.25, xtra_tfms=xtra_tfms), size=sz, tfm_y=True).databunch(bs=bs, num_workers=num_workers, no_check=True).normalize(stats, do_y=True)
data.c = 3
return data |
def get_name_bias_stats(links, attr_dict1, attr_dict2, cfg):
num_same = 0
num_close = 0
num_diff = 0
for ii in range(len(links)):
ent_name1 = get_name(links[ii][0], attr_dict1, cfg['dataset'])
ent_name2 = get_name(links[ii][1], attr_dict2, cfg['dataset'])
score = calc_edit_distance(ent_name1, ent_name2)
if (score == 1.0):
num_same += 1
elif (score == 0.0):
num_diff += 1
else:
num_close += 1
ratio = (' same%.2f close%.2f diff%.2f' % ((num_same / len(links)), (num_close / len(links)), (num_diff / len(links))))
return ratio |
def rewrite_logs(d):
new_d = {}
eval_prefix = 'eval_'
eval_prefix_len = len(eval_prefix)
for (k, v) in d.items():
if k.startswith(eval_prefix):
new_d[('eval/' + k[eval_prefix_len:])] = v
else:
new_d[('train/' + k)] = v
return new_d |
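A quick demo of the split: keys with the `eval_` prefix are re-rooted under `eval/`, everything else under `train/`.
print(rewrite_logs({'eval_loss': 0.4, 'loss': 0.9, 'epoch': 2}))
# {'eval/loss': 0.4, 'train/loss': 0.9, 'train/epoch': 2} |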
def match_function_multi_input_api_call(code):
ret = []
matches = re.finditer('\\([^)(]+,[^)(]+\\)', code)
for match in matches:
matched_code = match.group()
sc = code.split(matched_code)
if (len(sc) != 2):
continue
matched_code = matched_code[1:(- 1)]
for (t_prefix, t_suffix) in _match_function_multi_input_api_call_generate_template(matched_code):
ret.append(((sc[0] + t_prefix), (t_suffix + sc[1])))
return ret |
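The template helper above is defined elsewhere; this sketch exercises only the matching step, where the pattern grabs an innermost parenthesized, comma-separated argument list:
import re
code = 'out = torch.add(x, y) * 2'
print([m.group() for m in re.finditer('\\([^)(]+,[^)(]+\\)', code)])  # ['(x, y)'] |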
def build_net(net_name, input_tfs, reuse=False):
net = None
if (net_name == fc_2layers_1024units.NAME):
net = fc_2layers_1024units.build_net(input_tfs, reuse)
elif (net_name == fc_3layers_512units_branch_inputs.NAME):
net = fc_3layers_512units_branch_inputs.build_net(input_tfs, reuse)
elif (net_name == fc_2layers_512units.NAME):
net = fc_2layers_512units.build_net(input_tfs, reuse)
elif (net_name == fc_2layers_16units.NAME):
net = fc_2layers_16units.build_net(input_tfs, reuse)
else:
assert False, ('Unsupported net: ' + net_name)
return net |
def ReadFileSL(x_axis, tthread, batchInterval, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
(w, h) = (2, len(x_axis))
y = [[] for _ in range(w)]
for abort_ratio in x_axis:
inputEvents = (tthread * batchInterval)
op_gs_path = getPathSL('OPGSA', inputEvents, tthread, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity)
lines = open(op_gs_path).readlines()
throughput = lines[0].split(': ')[1]
y[0].append(float(throughput))
for abort_ratio in x_axis:
inputEvents = (tthread * batchInterval)
op_gs_path = getPathSL('OPGS', inputEvents, tthread, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity)
lines = open(op_gs_path).readlines()
throughput = lines[0].split(': ')[1]
y[1].append(float(throughput))
print(y)
return y |
def require_sentencepiece(test_case):
if (not is_sentencepiece_available()):
return unittest.skip('test requires SentencePiece')(test_case)
else:
return test_case |
def convert_to_npy(npz_file):
if (not os.path.isfile((npz_file[:(- 3)] + 'npy'))):
a = np.load(npz_file)['data']
np.save((npz_file[:(- 3)] + 'npy'), a) |
class SharedValue(object):
def __init__(self, data) -> None:
sc = OrcaContext.get_spark_context()
self.broadcast_data = sc.broadcast(data)
self._value = None
@property
def value(self):
self._value = self.broadcast_data.value
return self._value
def unpersist(self):
self.broadcast_data.unpersist() |
def est_accuracy(mal_visible, t):
args = gv.args
delta_other_prev = None
if (len(mal_visible) >= 1):
mal_prev_t = mal_visible[(- 1)]
print(('Loading from previous iteration %s' % mal_prev_t))
delta_other_prev = np.load((gv.dir_name + ('ben_delta_t%s.npy' % mal_prev_t)), allow_pickle=True)
delta_other_prev = (delta_other_prev / (t - mal_prev_t))
print(('Divisor: %s' % (t - mal_prev_t)))
if (len(mal_visible) >= 3):
mal_prev_prev_t = mal_visible[(- 2)]
if (mal_prev_prev_t >= args.mal_delay):
delta_other_prev_prev = np.load((gv.dir_name + ('ben_delta_t%s.npy' % mal_prev_prev_t)), allow_pickle=True)
ben_delta_diff = (delta_other_prev - delta_other_prev_prev)
est_accuracy_l2 = 0.0
for i in range(len(ben_delta_diff)):
est_accuracy_l2 += np.linalg.norm(ben_delta_diff[i])
print(('Accuracy of estimate on round %s: %s' % (mal_prev_prev_t, est_accuracy_l2)))
write_dict = {}
write_dict['t'] = mal_prev_prev_t
write_dict['est_accuracy_l2'] = est_accuracy_l2
file_write(write_dict, purpose='est_accuracy_log')
return delta_other_prev |
class ParameterScheduler(_Scheduler):
def __init__(self, step=0, mode='train', **schedulers):
super(ParameterScheduler, self).__init__(step)
self.schedulers = schedulers
self.mode = mode
def train(self):
self.mode = 'train'
for scheduler in self.schedulers.values():
scheduler.train()
def eval(self):
self.mode = 'val'
for scheduler in self.schedulers.values():
scheduler.eval()
def step(self, require_zero_grad=False):
params_dic = {}
for (key, scheduler) in self.schedulers.items():
params_dic[key] = scheduler.step()
return params_dic |
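A hedged usage sketch with a stand-in child scheduler (the `_Scheduler` base class is assumed importable from this module); each wrapped scheduler only needs train, eval, and step:
class ConstantScheduler:
    def __init__(self, value):
        self.value = value
    def train(self):
        pass
    def eval(self):
        pass
    def step(self):
        return self.value

sched = ParameterScheduler(lr=ConstantScheduler(0.001), kl_weight=ConstantScheduler(0.5))
print(sched.step())  # {'lr': 0.001, 'kl_weight': 0.5} |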
class Albadi2018(dataset.Dataset):
name = 'albadi2018'
url = '
hash = '7f7d87384b4b715655ec0e2d329bc234bbc965ad116290f2e2d0b11e26e272b3'
files = [{'name': 'albadi2018ar_train.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}, {'name': 'albadi2018ar_test.csv', 'language': 'ar', 'type': 'test', 'platform': 'twitter'}]
license = 'UNKNOWN'
@classmethod
def process(cls, tmp_file_path, dataset_folder, api_config):
file_dir = helpers.unzip_file(tmp_file_path)
train_file = helpers.download_tweets_for_csv(os.path.join(file_dir, 'Arabic_hatespeech-master/train.csv'), 'id', api_config)
test_file = helpers.download_tweets_for_csv(os.path.join(file_dir, 'Arabic_hatespeech-master/test.csv'), 'id', api_config)
helpers.copy_file(train_file, os.path.join(dataset_folder, 'albadi2018ar_train.csv'))
helpers.copy_file(test_file, os.path.join(dataset_folder, 'albadi2018ar_test.csv'))
@classmethod
def unify_row(cls, row):
labels = []
if (row['hate'] == 1):
labels.append('hate')
else:
labels.append('noHate')
row['labels'] = labels
row = row.drop(['hate'])
return row |
class PairedDataset(Dataset):
def __init__(self, files_a: Tuple[str], files_b: Tuple[str], transform_fn: Callable, normalize_fn: Callable, corrupt_fn: Optional[Callable]=None, preload: bool=True, preload_size: Optional[int]=0, verbose=True):
assert (len(files_a) == len(files_b))
self.preload = preload
self.data_a = files_a
self.data_b = files_b
self.verbose = verbose
self.corrupt_fn = corrupt_fn
self.transform_fn = transform_fn
self.normalize_fn = normalize_fn
logger.info(f'Dataset has been created with {len(self.data_a)} samples')
if preload:
preload_fn = partial(self._bulk_preload, preload_size=preload_size)
if (files_a == files_b):
self.data_a = self.data_b = preload_fn(self.data_a)
else:
(self.data_a, self.data_b) = map(preload_fn, (self.data_a, self.data_b))
self.preload = True
def _bulk_preload(self, data: Iterable[str], preload_size: int):
jobs = [delayed(self._preload)(x, preload_size=preload_size) for x in data]
jobs = tqdm(jobs, desc='preloading images', disable=(not self.verbose))
return Parallel(n_jobs=cpu_count(), backend='threading')(jobs)
@staticmethod
def _preload(x: str, preload_size: int):
img = _read_img(x)
if preload_size:
(h, w, *_) = img.shape
h_scale = (preload_size / h)
w_scale = (preload_size / w)
scale = max(h_scale, w_scale)
img = cv2.resize(img, fx=scale, fy=scale, dsize=None)
assert (min(img.shape[:2]) >= preload_size), f'weird img shape: {img.shape}'
return img
def _preprocess(self, img, res):
def transpose(x):
return np.transpose(x, (2, 0, 1))
return map(transpose, self.normalize_fn(img, res))
def __len__(self):
return len(self.data_a)
    def __getitem__(self, idx):
        a, b = self.data_a[idx], self.data_b[idx]
        if not self.preload:
            # Images were not cached in memory, so read them from disk now.
            a, b = map(_read_img, (a, b))
        # Joint spatial transform, optional corruption of the input only,
        # then normalization and HWC -> CHW transposition.
        a, b = self.transform_fn(a, b)
        if self.corrupt_fn is not None:
            a = self.corrupt_fn(a)
        a, b = self._preprocess(a, b)
        return {'a': a, 'b': b}
    @staticmethod
    def from_config(config, g_name=None):
config = deepcopy(config)
(files_a, files_b) = map((lambda x: sorted(glob(config[x], recursive=True))), ('files_a', 'files_b'))
transform_fn = aug.get_transforms(size=config['size'], scope=config['scope'], crop=config['crop'])
normalize_fn = aug.get_normalize()
corrupt_fn = aug.get_corrupt_function(config['corrupt'])
hash_fn = hash_from_paths
verbose = config.get('verbose', True)
data = subsample(data=zip(files_a, files_b), bounds=config.get('bounds', (0, 1)), hash_fn=hash_fn, verbose=verbose)
(files_a, files_b) = map(list, zip(*data))
return PairedDataset(files_a=files_a, files_b=files_b, preload=config['preload'], preload_size=config['preload_size'], corrupt_fn=corrupt_fn, normalize_fn=normalize_fn, transform_fn=transform_fn, verbose=verbose) |
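# Hypothetical usage sketch: every key and value below is an assumption about
# the expected config layout, inferred from the lookups in from_config above.
config = {
    'files_a': './data/blurred/**/*.png',  # glob for degraded inputs
    'files_b': './data/sharp/**/*.png',    # glob for ground-truth targets
    'size': 256,                           # crop size for aug.get_transforms
    'scope': 'geometric',
    'crop': 'random',
    'corrupt': [],                         # no extra corruption of inputs
    'preload': False,
    'preload_size': 0,
    'bounds': (0, 0.9),                    # fraction range kept by subsample
    'verbose': True,
}
dataset = PairedDataset.from_config(config)
sample = dataset[0]  # {'a': CHW input array, 'b': CHW target array} |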
class Preprocessor():
def __init__(self, config_dir, save_config_dir=None, verbose=True):
self.config_dir = config_dir
self.verbose = verbose
(self.vocab, self.vocab_dict) = self.__load_list_file(FILE_VOCAB, offset=1, verbose=verbose)
(self.tags, self.tags_dict) = self.__load_list_file(FILE_TAGS, verbose=verbose)
if save_config_dir:
self.__save_config(save_config_dir)
        self.PAD_IDX = 0
        self.__adjust_vocab()
        # Resolve the OOV index after the vocab has been extended, so unknown
        # characters map to the OOV entry rather than to the last real word.
        self.OOV_IDX = self.vocab_dict[OOV]
def __load_list_file(self, file_name, offset=0, verbose=False):
file_path = join(self.config_dir, file_name)
if (not exists(file_path)):
raise ValueError('"{}" file does not exist.'.format(file_path))
else:
elements = load_json_file(file_path)
elements_dict = {w: (idx + offset) for (idx, w) in enumerate(elements)}
if verbose:
print('config {} loaded'.format(file_path))
return (elements, elements_dict)
def __adjust_vocab(self):
self.vocab.insert(0, PAD)
self.vocab_dict[PAD] = 0
self.vocab.append(OOV)
self.vocab_dict[OOV] = (len(self.vocab) - 1)
    def __save_config(self, dst_dir):
        char_file = join(dst_dir, FILE_VOCAB)
        save_json_file(self.vocab, char_file)
        tag_file = join(dst_dir, FILE_TAGS)
        save_json_file(self.tags, tag_file)
        if self.verbose:
            print('tag dict file => {}'.format(tag_file))
            print('vocab file => {}'.format(char_file))
    def __cache_file_path(self, corpus_dir, max_seq_len):
        return join(corpus_dir, FILE_DATASET_CACHE.format(max_seq_len))
def load_dataset(self, corpus_dir, val_split, test_split, max_seq_len):
ds_path = self.__cache_file_path(corpus_dir, max_seq_len)
if (not exists(ds_path)):
(xs, ys) = self.__build_corpus(corpus_dir, max_seq_len)
else:
print('loading dataset {} ...'.format(ds_path))
dataset = np.load(ds_path)
(xs, ys) = (dataset['xs'], dataset['ys'])
(xs, ys) = map(torch.tensor, (xs, ys))
total_count = len(xs)
assert (total_count == len(ys))
val_count = int((total_count * val_split))
test_count = int((total_count * test_split))
train_count = ((total_count - val_count) - test_count)
assert ((train_count > 0) and (val_count > 0))
indices = np.cumsum([0, train_count, val_count, test_count])
datasets = [(xs[s:e], ys[s:e]) for (s, e) in zip(indices[:(- 1)], indices[1:])]
print('datasets loaded:')
for ((xs_, ys_), name) in zip(datasets, ['train', 'val', 'test']):
print('\t{}: {}, {}'.format(name, xs_.shape, ys_.shape))
return datasets
def decode_tags(self, batch_tags):
batch_tags = [[self.tags[t] for t in tags] for tags in batch_tags]
return batch_tags
def sent_to_vector(self, sentence, max_seq_len=0):
max_seq_len = (max_seq_len if (max_seq_len > 0) else len(sentence))
vec = [self.vocab_dict.get(c, self.OOV_IDX) for c in sentence[:max_seq_len]]
return (vec + ([self.PAD_IDX] * (max_seq_len - len(vec))))
def tags_to_vector(self, tags, max_seq_len=0):
max_seq_len = (max_seq_len if (max_seq_len > 0) else len(tags))
vec = [self.tags_dict[c] for c in tags[:max_seq_len]]
return (vec + ([0] * (max_seq_len - len(vec))))
def __build_corpus(self, corpus_dir, max_seq_len):
file_path = join(corpus_dir, FILE_DATASET)
(xs, ys) = ([], [])
with open(file_path, encoding='utf8') as f:
for (idx, line) in tqdm(enumerate(f), desc='parsing {}'.format(file_path)):
fields = line.strip().split('\t')
if (len(fields) != 2):
raise ValueError('format error in line {}, tabs count: {}'.format((idx + 1), (len(fields) - 1)))
(sentence, tags) = fields
                try:
                    if sentence[0] == '[':
                        # Allow JSON-encoded token lists as well as raw strings.
                        sentence = json.loads(sentence)
                        tags = json.loads(tags)
                    # Validate alignment before appending, so the cached arrays
                    # never contain misaligned sentence/tag pairs.
                    if len(sentence) != len(tags):
                        raise ValueError('"sentence length({})" != "tags length({})" in line {}'.format(len(sentence), len(tags), idx + 1))
                    xs.append(self.sent_to_vector(sentence, max_seq_len=max_seq_len))
                    ys.append(self.tags_to_vector(tags, max_seq_len=max_seq_len))
                except Exception as e:
                    raise ValueError('exception raised when parsing line {}\n\t{}\n\t{}'.format(idx + 1, line, e))
(xs, ys) = (np.asarray(xs), np.asarray(ys))
cache_file = self.__cache_file_path(corpus_dir, max_seq_len)
np.savez(cache_file, xs=xs, ys=ys)
print('dataset cache({}, {}) => {}'.format(xs.shape, ys.shape, cache_file))
return (xs, ys) |
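# Hypothetical usage sketch (the config directory, its files, and the 'B'/'E'
# tags are assumptions): characters map to vocab ids with PAD/OOV handling,
# and tag sequences round-trip through tags_to_vector/decode_tags.
pre = Preprocessor(config_dir='config', verbose=False)
vec = pre.sent_to_vector('你好世界', max_seq_len=8)
print(vec)  # four vocab ids (or OOV_IDX) followed by four PAD_IDX zeros
tag_vec = pre.tags_to_vector(['B', 'E', 'B', 'E'], max_seq_len=8)
print(pre.decode_tags([tag_vec[:4]]))  # [['B', 'E', 'B', 'E']] |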
def draw_circle_edge(ax: matplotlib.axes.Axes, v_coor: List[Tuple[float, float]], v_size: list,
                     e_list: List[Tuple[int, int]], e_color: list, e_fill_color: list, e_line_width: list):
    n_v = len(v_coor)
    line_paths, arc_paths, vertices = hull_layout(n_v, e_list, v_coor, v_size)
    # Draw the straight segments of each hyperedge hull as one compound path.
    for eidx, lines in enumerate(line_paths):
        pathdata = []
        for line in lines:
            if len(line) == 0:
                continue
            start_pos, end_pos = line
            pathdata.append((Path.MOVETO, start_pos.tolist()))
            pathdata.append((Path.LINETO, end_pos.tolist()))
        if not pathdata:
            continue
        codes, verts = zip(*pathdata)
        path = Path(verts, codes)
        ax.add_patch(PathPatch(path, linewidth=e_line_width[eidx], facecolor=e_fill_color[eidx], edgecolor=e_color[eidx]))
    # Draw the rounded corners. Arc takes full diameters rather than radii, and
    # Arc patches cannot be filled, so no facecolor is passed here.
    for eidx, arcs in enumerate(arc_paths):
        for arc in arcs:
            center, theta1, theta2, radius = arc
            ax.add_patch(matplotlib.patches.Arc((center[0], center[1]), 2 * radius, 2 * radius,
                                                theta1=theta1, theta2=theta2,
                                                linewidth=e_line_width[eidx], edgecolor=e_color[eidx])) |
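# Minimal self-contained sketch (not from the source) of the same
# Path/PathPatch pattern: MOVETO/LINETO code pairs render several disjoint
# segments as one patch, and an Arc patch draws a circular segment.
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import Arc, PathPatch

fig, ax = plt.subplots()
pathdata = [
    (Path.MOVETO, (0.1, 0.1)), (Path.LINETO, (0.9, 0.1)),
    (Path.MOVETO, (0.9, 0.3)), (Path.LINETO, (0.1, 0.3)),
]
codes, verts = zip(*pathdata)
ax.add_patch(PathPatch(Path(verts, codes), linewidth=2,
                       facecolor='none', edgecolor='tab:blue'))
# Width/height are diameters; theta1/theta2 are in degrees.
ax.add_patch(Arc((0.5, 0.2), 0.4, 0.4, theta1=0, theta2=180,
                 linewidth=2, edgecolor='tab:blue'))
plt.show() |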