code stringlengths 101 5.91M |
|---|
class PriorBox(object):
    """Generates SSD-style prior (default) boxes for every feature-map cell.

    Boxes are emitted as (cx, cy, w, h) in coordinates normalized by the
    frame size; ``forward`` returns them stacked into an (N, 4) tensor.
    """

    def __init__(self, config):
        super(PriorBox, self).__init__()
        frame_cfg = config['frame_work']
        self.frame_size = config['frame_size']
        self.num_priors = len(frame_cfg['aspect_ratios'])
        # Fall back to a single default variance when none is configured.
        self.variance = frame_cfg['variance'] or [0.1]
        self.feature_maps = frame_cfg['feature_maps']
        self.min_sizes = frame_cfg['min_sizes']
        self.max_sizes = frame_cfg['max_sizes']
        self.steps = frame_cfg['steps']
        self.aspect_ratios = frame_cfg['aspect_ratios']
        self.scales = frame_cfg['boxes_scales']
        self.clip = frame_cfg['clip']
        if any(v <= 0 for v in self.variance):
            raise ValueError('Variances must be greater than 0')

    def forward(self):
        """Build the full prior-box tensor, optionally clipped to [0, 1]."""
        boxes = []
        for level, grid in enumerate(self.feature_maps):
            cells = self.frame_size / self.steps[level]
            base = self.min_sizes[level] / self.frame_size
            size_span = self.max_sizes[level] - self.min_sizes[level]
            for row, col in product(range(grid), repeat=2):
                cx = (col + 0.5) / cells
                cy = (row + 0.5) / cells
                # Smallest square box for this level.
                boxes += [cx, cy, base, base]
                # Intermediate square boxes interpolated between min and max size.
                for frac in self.scales[level]:
                    side = (self.min_sizes[level] + size_span * frac) / self.frame_size
                    boxes += [cx, cy, side, side]
                # Geometric mean of this level's min and max relative sizes.
                mid = sqrt(base * (self.max_sizes[level] / self.frame_size))
                boxes += [cx, cy, mid, mid]
                # One pair of rectangular boxes per aspect ratio.
                for ratio in self.aspect_ratios[level]:
                    root = sqrt(ratio)
                    boxes += [cx, cy, base * root, base / root]
                    boxes += [cx, cy, base / root, base * root]
        output = torch.Tensor(boxes).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
def _nbool_correspond_ft_tf(u, v, w=None):
if ((u.dtype == v.dtype == bool) and (w is None)):
not_u = (~ u)
not_v = (~ v)
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
else:
dtype = np.result_type(int, u.dtype, v.dtype)
u = u.astype(dtype)
v = v.astype(dtype)
not_u = (1.0 - u)
not_v = (1.0 - v)
if (w is not None):
not_u = (w * not_u)
u = (w * u)
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
return (nft, ntf) |
class LNNP(LightningModule):
    """LightningModule wrapper around an atomistic prediction model.

    Handles model creation/loading, optimizer and LR-schedule setup, a
    combined energy (y) / force (dy) / denoising (pos) loss with optional
    EMA smoothing, and per-epoch aggregation and logging of losses.
    """
    def __init__(self, hparams, prior_model=None, mean=None, std=None):
        super(LNNP, self).__init__()
        self.save_hyperparameters(hparams)
        # Model source precedence: explicit checkpoint > pretrained weights > fresh model.
        if self.hparams.load_model:
            self.model = load_model(self.hparams.load_model, args=self.hparams)
        elif self.hparams.pretrained_model:
            self.model = load_model(self.hparams.pretrained_model, args=self.hparams, mean=mean, std=std)
        else:
            self.model = create_model(self.hparams, prior_model, mean, std)
        # Exponential-moving-average state for the y/dy losses (see step()).
        self.ema = None
        self._reset_ema_dict()
        # Per-stage lists of detached per-batch losses, reduced at epoch end.
        self.losses = None
        self._reset_losses_dict()
    def configure_optimizers(self):
        """Build the AdamW optimizer plus the configured LR scheduler.

        Raises:
            ValueError: for an unrecognized ``lr_schedule`` value.
        """
        optimizer = AdamW(self.model.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
        if (self.hparams.lr_schedule == 'cosine'):
            # Cosine annealing is stepped every optimizer step.
            scheduler = CosineAnnealingLR(optimizer, self.hparams.lr_cosine_length)
            lr_scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        elif (self.hparams.lr_schedule == 'reduce_on_plateau'):
            # Plateau scheduling monitors the epoch-level validation loss.
            scheduler = ReduceLROnPlateau(optimizer, 'min', factor=self.hparams.lr_factor, patience=self.hparams.lr_patience, min_lr=self.hparams.lr_min)
            lr_scheduler = {'scheduler': scheduler, 'monitor': 'val_loss', 'interval': 'epoch', 'frequency': 1}
        else:
            raise ValueError(f'Unknown lr_schedule: {self.hparams.lr_schedule}')
        return ([optimizer], [lr_scheduler])
    def forward(self, z, pos, batch=None):
        """Delegate straight to the wrapped model."""
        return self.model(z, pos, batch=batch)
    def training_step(self, batch, batch_idx):
        return self.step(batch, mse_loss, 'train')
    def validation_step(self, batch, batch_idx, *args):
        # Dataloader index 0 (or no index) is validation (MSE); any other
        # validation dataloader is treated as a test pass (L1).
        if ((len(args) == 0) or ((len(args) > 0) and (args[0] == 0))):
            return self.step(batch, mse_loss, 'val')
        return self.step(batch, l1_loss, 'test')
    def test_step(self, batch, batch_idx):
        return self.step(batch, l1_loss, 'test')
    def step(self, batch, loss_fn, stage):
        """Shared train/val/test step computing the combined weighted loss.

        The total loss is energy_weight*loss_y + force_weight*loss_dy +
        denoising_weight*loss_pos; each term is EMA-smoothed for train/val
        when the corresponding ema_alpha is < 1.
        """
        # Gradients are needed outside training only when forces (derivative)
        # must be computed via autograd.
        with torch.set_grad_enabled(((stage == 'train') or self.hparams.derivative)):
            (pred, noise_pred, deriv) = self(batch.z, batch.pos, batch.batch)
        denoising_is_on = (('pos_target' in batch) and (self.hparams.denoising_weight > 0) and (noise_pred is not None))
        (loss_y, loss_dy, loss_pos) = (0, 0, 0)
        if self.hparams.derivative:
            if ('y' not in batch):
                # Zero-valued hook keeping `pred` in the autograd graph so
                # DDP sees all parameters used.
                deriv = (deriv + (pred.sum() * 0))
            loss_dy = loss_fn(deriv, batch.dy)
            if ((stage in ['train', 'val']) and (self.hparams.ema_alpha_dy < 1)):
                if (self.ema[(stage + '_dy')] is None):
                    self.ema[(stage + '_dy')] = loss_dy.detach()
                # Exponential smoothing of the force loss.
                loss_dy = ((self.hparams.ema_alpha_dy * loss_dy) + ((1 - self.hparams.ema_alpha_dy) * self.ema[(stage + '_dy')]))
                self.ema[(stage + '_dy')] = loss_dy.detach()
            if (self.hparams.force_weight > 0):
                self.losses[(stage + '_dy')].append(loss_dy.detach())
        if ('y' in batch):
            if ((noise_pred is not None) and (not denoising_is_on)):
                # Same zero-valued trick for the unused denoising head.
                pred = (pred + (noise_pred.sum() * 0))
            if (batch.y.ndim == 1):
                batch.y = batch.y.unsqueeze(1)
            loss_y = loss_fn(pred, batch.y)
            if ((stage in ['train', 'val']) and (self.hparams.ema_alpha_y < 1)):
                if (self.ema[(stage + '_y')] is None):
                    self.ema[(stage + '_y')] = loss_y.detach()
                loss_y = ((self.hparams.ema_alpha_y * loss_y) + ((1 - self.hparams.ema_alpha_y) * self.ema[(stage + '_y')]))
                self.ema[(stage + '_y')] = loss_y.detach()
            if (self.hparams.energy_weight > 0):
                self.losses[(stage + '_y')].append(loss_y.detach())
        if denoising_is_on:
            if ('y' not in batch):
                noise_pred = (noise_pred + (pred.sum() * 0))
            # Denoising target is normalized the same way as the prediction.
            normalized_pos_target = self.model.pos_normalizer(batch.pos_target)
            loss_pos = loss_fn(noise_pred, normalized_pos_target)
            self.losses[(stage + '_pos')].append(loss_pos.detach())
        loss = (((loss_y * self.hparams.energy_weight) + (loss_dy * self.hparams.force_weight)) + (loss_pos * self.hparams.denoising_weight))
        self.losses[stage].append(loss.detach())
        if (stage == 'train'):
            # Per-step logging of every non-empty train metric plus LR/step.
            train_metrics = {(k + '_per_step'): v[(- 1)] for (k, v) in self.losses.items() if (k.startswith('train') and (len(v) > 0))}
            train_metrics['lr_per_step'] = self.trainer.optimizers[0].param_groups[0]['lr']
            train_metrics['step'] = self.trainer.global_step
            train_metrics['batch_pos_mean'] = batch.pos.mean().item()
            self.log_dict(train_metrics, sync_dist=True)
        return loss
    def optimizer_step(self, *args, **kwargs):
        """Linear LR warmup for the first ``lr_warmup_steps`` steps."""
        # Lightning may pass the optimizer positionally (index 2) or by keyword.
        optimizer = (kwargs['optimizer'] if ('optimizer' in kwargs) else args[2])
        if (self.trainer.global_step < self.hparams.lr_warmup_steps):
            lr_scale = min(1.0, (float((self.trainer.global_step + 1)) / float(self.hparams.lr_warmup_steps)))
            for pg in optimizer.param_groups:
                pg['lr'] = (lr_scale * self.hparams.lr)
        super().optimizer_step(*args, **kwargs)
        optimizer.zero_grad()
    def training_epoch_end(self, training_step_outputs):
        dm = self.trainer.datamodule
        if (hasattr(dm, 'test_dataset') and (len(dm.test_dataset) > 0)):
            # Reload val dataloaders around test-interval boundaries so the
            # extra test dataloader is attached/detached at the right epochs.
            should_reset = (((self.current_epoch % self.hparams.test_interval) == 0) or (((self.current_epoch - 1) % self.hparams.test_interval) == 0))
            if should_reset:
                self.trainer.reset_val_dataloader(self)
    def validation_epoch_end(self, validation_step_outputs):
        """Aggregate the collected per-batch losses and log epoch means."""
        if (not self.trainer.running_sanity_check):
            result_dict = {'epoch': self.current_epoch, 'lr': self.trainer.optimizers[0].param_groups[0]['lr'], 'train_loss': torch.stack(self.losses['train']).mean(), 'val_loss': torch.stack(self.losses['val']).mean()}
            if (len(self.losses['test']) > 0):
                result_dict['test_loss'] = torch.stack(self.losses['test']).mean()
            # Combined y+dy case (both energy and force losses collected).
            if ((len(self.losses['train_y']) > 0) and (len(self.losses['train_dy']) > 0)):
                result_dict['train_loss_y'] = torch.stack(self.losses['train_y']).mean()
                result_dict['train_loss_dy'] = torch.stack(self.losses['train_dy']).mean()
                result_dict['val_loss_y'] = torch.stack(self.losses['val_y']).mean()
                result_dict['val_loss_dy'] = torch.stack(self.losses['val_dy']).mean()
                if (len(self.losses['test']) > 0):
                    result_dict['test_loss_y'] = torch.stack(self.losses['test_y']).mean()
                    result_dict['test_loss_dy'] = torch.stack(self.losses['test_dy']).mean()
            # Energy-only fallbacks (redundant with the branch above when
            # both are present, but harmless: same values are re-assigned).
            if (len(self.losses['train_y']) > 0):
                result_dict['train_loss_y'] = torch.stack(self.losses['train_y']).mean()
            if (len(self.losses['val_y']) > 0):
                result_dict['val_loss_y'] = torch.stack(self.losses['val_y']).mean()
            if (len(self.losses['test_y']) > 0):
                result_dict['test_loss_y'] = torch.stack(self.losses['test_y']).mean()
            if (len(self.losses['train_pos']) > 0):
                result_dict['train_loss_pos'] = torch.stack(self.losses['train_pos']).mean()
            if (len(self.losses['val_pos']) > 0):
                result_dict['val_loss_pos'] = torch.stack(self.losses['val_pos']).mean()
            if (len(self.losses['test_pos']) > 0):
                result_dict['test_loss_pos'] = torch.stack(self.losses['test_pos']).mean()
            self.log_dict(result_dict, sync_dist=True)
        self._reset_losses_dict()
    def _reset_losses_dict(self):
        # Fresh accumulators for every stage/term combination.
        self.losses = {'train': [], 'val': [], 'test': [], 'train_y': [], 'val_y': [], 'test_y': [], 'train_dy': [], 'val_dy': [], 'test_dy': [], 'train_pos': [], 'val_pos': [], 'test_pos': []}
    def _reset_ema_dict(self):
        # EMA state only exists for train/val y and dy losses.
        self.ema = {'train_y': None, 'val_y': None, 'train_dy': None, 'val_dy': None}
def create_model(model_name: str, pretrained: str='', precision: str='fp32', device: torch.device=torch.device('cpu'), force_quick_gelu: bool=False):
    """Instantiate an EVA-CLIP model from a registered named config.

    Optionally loads checkpoint weights, moves the model to ``device``,
    converts weights to fp16, and attaches the OpenAI dataset image
    normalization statistics to the visual tower.

    Raises:
        RuntimeError: when ``model_name`` has no registered config.
    """
    model_name = model_name.replace('/', '-')  # config keys use '-' separators
    if model_name not in _MODEL_CONFIGS:
        logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
        raise RuntimeError(f'Model config for {model_name} not found.')
    logging.info(f'Loading {model_name} model config.')
    # Deep-copy so per-call tweaks never mutate the shared registry entry.
    model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
    if force_quick_gelu:
        model_cfg['quick_gelu'] = True
    model = EVA_CLIP(**model_cfg)
    if pretrained:
        load_checkpoint(model, pretrained)
    model.to(device=device)
    if precision == 'fp16':
        # fp16 weights are not supported on CPU.
        assert device.type != 'cpu'
        convert_weights_to_fp16(model)
    model.visual.image_mean = OPENAI_DATASET_MEAN
    model.visual.image_std = OPENAI_DATASET_STD
    return model
class CycleGANDAGModel(BaseModel):
    """CycleGAN variant whose discriminators score four views of each image.

    `transform_image` produces three extra transformed views; the DAG
    discriminators return one prediction per view, and both D and G losses
    add an auxiliary (0.2 / 4)-weighted term over those views.
    """
    # NOTE(review): upstream CycleGAN declares this as a @staticmethod; no
    # decorator is present here -- confirm how call sites invoke it.
    def modify_commandline_options(parser, is_train=True):
        """Add CycleGAN-specific CLI options; dropout is disabled by default."""
        parser.set_defaults(no_dropout=True)
        if is_train:
            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
        return parser
    def __init__(self, opt):
        """Build generators (always) and discriminators/losses (train only)."""
        BaseModel.__init__(self, opt)
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        # Identity outputs are only produced (and visualized) when the
        # identity loss is active.
        if (self.isTrain and (self.opt.lambda_identity > 0.0)):
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')
        self.visual_names = (visual_names_A + visual_names_B)
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:
            self.model_names = ['G_A', 'G_B']
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, (not opt.no_dropout), opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, (not opt.no_dropout), opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # DAG discriminators: return four predictions, one per image view.
            self.netD_A = networks.define_D_dag(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D_dag(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            if (opt.lambda_identity > 0.0):
                # Identity mapping only makes sense when domains share channels.
                assert (opt.input_nc == opt.output_nc)
            # Image pools stabilize D training with a history of fakes.
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
    def set_input(self, input):
        """Unpack a dataloader item and move images to the model device."""
        AtoB = (self.opt.direction == 'AtoB')
        self.real_A = input[('A' if AtoB else 'B')].to(self.device)
        self.real_B = input[('B' if AtoB else 'A')].to(self.device)
        self.image_paths = input[('A_paths' if AtoB else 'B_paths')]
    def forward(self):
        """Run both translation directions and their cycle reconstructions."""
        self.fake_B = self.netG_A(self.real_A)
        self.rec_A = self.netG_B(self.fake_B)
        self.fake_A = self.netG_B(self.real_B)
        self.rec_B = self.netG_A(self.fake_A)
    def backward_D_basic(self, netD, real, fake):
        """GAN loss for one discriminator over the base image + 3 views.

        Fakes are detached so no gradient reaches the generators; calls
        ``backward()`` and returns the combined loss.
        """
        (real_1, real_2, real_3) = transform_image(real)
        (fake_1, fake_2, fake_3) = transform_image(fake)
        # Base view uses the first of the four discriminator outputs.
        (pred_real, _, _, _) = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        (pred_fake, _, _, _) = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        loss_D = ((loss_D_real + loss_D_fake) * 0.5)
        (_, pred_real_1, _, _) = netD(real_1)
        loss_D_real_1 = self.criterionGAN(pred_real_1, True)
        (_, pred_fake_1, _, _) = netD(fake_1.detach())
        loss_D_fake_1 = self.criterionGAN(pred_fake_1, False)
        loss_D_1 = ((loss_D_real_1 + loss_D_fake_1) * 0.5)
        (_, _, pred_real_2, _) = netD(real_2)
        loss_D_real_2 = self.criterionGAN(pred_real_2, True)
        (_, _, pred_fake_2, _) = netD(fake_2.detach())
        loss_D_fake_2 = self.criterionGAN(pred_fake_2, False)
        loss_D_2 = ((loss_D_real_2 + loss_D_fake_2) * 0.5)
        (_, _, _, pred_real_3) = netD(real_3)
        loss_D_real_3 = self.criterionGAN(pred_real_3, True)
        (_, _, _, pred_fake_3) = netD(fake_3.detach())
        loss_D_fake_3 = self.criterionGAN(pred_fake_3, False)
        loss_D_3 = ((loss_D_real_3 + loss_D_fake_3) * 0.5)
        # Auxiliary DAG term: (0.2 / 4) times the sum over all four views
        # (the base loss is counted again inside the sum).
        loss_D = (loss_D + ((0.2 / 4) * (((loss_D + loss_D_1) + loss_D_2) + loss_D_3)))
        loss_D.backward()
        return loss_D
    def backward_D_A(self):
        # Query the pool so D_A occasionally sees historical fakes.
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
    def backward_D_B(self):
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
    def backward_G(self):
        """Generator loss: GAN (base + DAG views), cycle, and identity terms."""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        if (lambda_idt > 0):
            # G_A should be the identity map on domain-B images (and vice versa).
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = ((self.criterionIdt(self.idt_A, self.real_B) * lambda_B) * lambda_idt)
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = ((self.criterionIdt(self.idt_B, self.real_A) * lambda_A) * lambda_idt)
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0
        (self.fake_B_1, self.fake_B_2, self.fake_B_3) = transform_image(self.fake_B)
        (self.D_A_fake_B, _, _, _) = self.netD_A(self.fake_B)
        self.loss_G_A = self.criterionGAN(self.D_A_fake_B, True)
        (_, self.D_A_fake_B_1, _, _) = self.netD_A(self.fake_B_1)
        self.loss_G_A_1 = self.criterionGAN(self.D_A_fake_B_1, True)
        (_, _, self.D_A_fake_B_2, _) = self.netD_A(self.fake_B_2)
        self.loss_G_A_2 = self.criterionGAN(self.D_A_fake_B_2, True)
        (_, _, _, self.D_A_fake_B_3) = self.netD_A(self.fake_B_3)
        self.loss_G_A_3 = self.criterionGAN(self.D_A_fake_B_3, True)
        (self.fake_A_1, self.fake_A_2, self.fake_A_3) = transform_image(self.fake_A)
        (self.D_B_fake_A, _, _, _) = self.netD_B(self.fake_A)
        self.loss_G_B = self.criterionGAN(self.D_B_fake_A, True)
        (_, self.D_B_fake_A_1, _, _) = self.netD_B(self.fake_A_1)
        self.loss_G_B_1 = self.criterionGAN(self.D_B_fake_A_1, True)
        (_, _, self.D_B_fake_A_2, _) = self.netD_B(self.fake_A_2)
        self.loss_G_B_2 = self.criterionGAN(self.D_B_fake_A_2, True)
        (_, _, _, self.D_B_fake_A_3) = self.netD_B(self.fake_A_3)
        self.loss_G_B_3 = self.criterionGAN(self.D_B_fake_A_3, True)
        self.loss_cycle_A = (self.criterionCycle(self.rec_A, self.real_A) * lambda_A)
        self.loss_cycle_B = (self.criterionCycle(self.rec_B, self.real_B) * lambda_B)
        # Same (0.2 / 4)-weighted DAG view sums as in the D loss.
        self.loss_G = (((((((self.loss_G_A + ((0.2 / 4.0) * (((self.loss_G_A + self.loss_G_A_1) + self.loss_G_A_2) + self.loss_G_A_3))) + self.loss_G_B) + ((0.2 / 4.0) * (((self.loss_G_B + self.loss_G_B_1) + self.loss_G_B_2) + self.loss_G_B_3))) + self.loss_cycle_A) + self.loss_cycle_B) + self.loss_idt_A) + self.loss_idt_B)
        self.loss_G.backward()
    def optimize_parameters(self):
        """One optimization step: G first (with Ds frozen), then both Ds."""
        self.forward()
        self.set_requires_grad([self.netD_A, self.netD_B], False)
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()
        self.backward_D_A()
        self.backward_D_B()
        self.optimizer_D.step()
class FlavaTextConfig(PretrainedConfig):
    """Configuration for the FLAVA text encoder (BERT-style transformer).

    Stores vocabulary/embedding sizes and transformer hyper-parameters;
    extra keyword arguments are forwarded to ``PretrainedConfig``.
    """
    model_type = 'flava_text_model'
    def __init__(self, vocab_size: int=30522, type_vocab_size: int=2, max_position_embeddings: int=512, position_embedding_type: str='absolute', hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, pad_token_id: int=0, qkv_bias: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.position_embedding_type = position_embedding_type
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.pad_token_id = pad_token_id
    # FIX: the method takes `cls` but was not decorated, so calling
    # FlavaTextConfig.from_pretrained(path) would bind `path` to `cls`.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load this config from a pretrained-model name or path.

        When the checkpoint is a composite 'flava' config, the nested
        ``text_config`` section is extracted first.  A warning is logged if
        the stored model_type differs from ``cls.model_type``.
        """
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if (config_dict.get('model_type') == 'flava'):
            config_dict = config_dict['text_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def parallel_forward(model, *args, **kwargs):
    """Run ``model`` through DataParallel, capping the device count at the
    first positional argument's batch size."""
    gpu_count = torch.cuda.device_count()
    batch_size = args[0].size(0)
    # Never spread the batch over more devices than it has elements.
    device_ids = range(min(gpu_count, batch_size))
    return parallel.data_parallel(model, args, device_ids=device_ids, module_kwargs=kwargs)
def annotate_instance(image, mask, color, text_label, font_size=0.5, draw_bbox=True):
    """Overlay ``mask`` on ``image`` and draw its bounding box with a label.

    Returns the annotated image; when no bounding box can be derived from
    the mask, only the mask overlay is returned.  ``draw_bbox`` controls the
    white background rectangle behind the label text.
    """
    assert image.shape[:2] == mask.shape, 'Shape mismatch between image {} and mask {}'.format(image.shape, mask.shape)
    color = tuple(color)
    annotated = overlay_mask_on_image(image, mask, mask_color=color)
    bbox = bbox_from_mask(mask)
    if not bbox:
        # Empty mask: nothing to outline or label.
        return annotated
    xmin, ymin, xmax, ymax = bbox
    cv2.rectangle(annotated, (xmin, ymin), (xmax, ymax), color=color, thickness=2)
    (text_width, text_height), _ = cv2.getTextSize(text_label, cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness=1)
    # Anchor the label just inside the top-left corner of the box.
    text_x = int(xmin + 2)
    text_y = int(ymin + text_height + 2)
    bg_pt1 = (int(text_x), int(text_y + 2))
    bg_pt2 = (int(text_x + text_width + 2), int(text_y - text_height - 2))
    if draw_bbox:
        # Filled white rectangle behind the label for readability.
        cv2.rectangle(annotated, bg_pt1, bg_pt2, color=(255, 255, 255), thickness=-1)
    cv2.putText(annotated, text_label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, font_size, (0, 0, 0))
    return annotated
class Vector():
    """2-D displacement vector from point ``pa`` to point ``pb``.

    Point arguments only need integer-convertible ``x`` and ``y`` attributes.
    """

    def __init__(self, pa, pb):
        # Component-wise difference, coercing coordinates to int first.
        self.x = int(pb.x) - int(pa.x)
        self.y = int(pb.y) - int(pa.y)

    def __str__(self):
        """Render as 'x,y' with no spaces."""
        return f'{self.x},{self.y}'
def main():
    """Full experiment pipeline: configure, train, evaluate, then plot."""
    configs = collect_configurations()
    train(configs)
    # NOTE(review): `eval` shadows the builtin here -- presumably a
    # project-level evaluation routine returning a statistics file path;
    # confirm at its definition site.
    make_plots(eval(configs))
class Tardis(QtWidgets.QMainWindow):
    """Main TARDIS GUI window: status bar, menus, toolbars, and the stacked
    model-viewer widget.

    Runs in 'passive' mode unless a config is supplied, in which case
    'active' mode is selected (currently unavailable and raises).
    """
    def __init__(self, tablemodel, config=None, atom_data=None, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        # Icon assets live under the packaged gui/images directory.
        self.path = os.path.join(tardis.__path__[0], 'gui', 'images')
        self.mode = 'passive'
        if (config is not None):
            self.mode = 'active'
        # --- Status bar: convergence indicator + mode label ---
        statusbr = self.statusBar()
        lblstr = '<font color="red"><b>Calculation did not converge</b></font>'
        self.successLabel = QtWidgets.QLabel(lblstr)
        self.successLabel.setFrameStyle((QtWidgets.QFrame.StyledPanel | QtWidgets.QFrame.Sunken))
        statusbr.addPermanentWidget(self.successLabel)
        self.modeLabel = QtWidgets.QLabel('Passive mode')
        statusbr.addPermanentWidget(self.modeLabel)
        statusbr.showMessage(self.mode, 5000)
        statusbr.showMessage('Ready', 5000)
        # --- Actions ---
        quitAction = QtWidgets.QAction('&Quit', self)
        quitAction.setIcon(QtGui.QIcon(os.path.join(self.path, 'closeicon.png')))
        quitAction.triggered.connect(self.close)
        # View-switch actions start disabled; enabled once a model is shown.
        self.viewMdv = QtWidgets.QAction('View &Model', self)
        self.viewMdv.setIcon(QtGui.QIcon(os.path.join(self.path, 'mdvswitch.png')))
        self.viewMdv.setCheckable(True)
        self.viewMdv.setChecked(True)
        self.viewMdv.setEnabled(False)
        self.viewMdv.triggered.connect(self.switch_to_mdv)
        self.viewForm = QtWidgets.QAction('&Edit Model', self)
        self.viewForm.setIcon(QtGui.QIcon(os.path.join(self.path, 'formswitch.png')))
        self.viewForm.setCheckable(True)
        self.viewForm.setEnabled(False)
        self.viewForm.triggered.connect(self.switch_to_form)
        # --- Menus ---
        self.fileMenu = self.menuBar().addMenu('&File')
        self.fileMenu.addAction(quitAction)
        self.viewMenu = self.menuBar().addMenu('&View')
        self.viewMenu.addAction(self.viewMdv)
        self.viewMenu.addAction(self.viewForm)
        self.helpMenu = self.menuBar().addMenu('&Help')
        # --- Toolbars mirroring the menu actions ---
        fileToolbar = self.addToolBar('File')
        fileToolbar.setObjectName('FileToolBar')
        fileToolbar.addAction(quitAction)
        viewToolbar = self.addToolBar('View')
        viewToolbar.setObjectName('ViewToolBar')
        viewToolbar.addAction(self.viewMdv)
        viewToolbar.addAction(self.viewForm)
        # --- Central stacked widget: index 0 is the model viewer ---
        self.stackedWidget = QtWidgets.QStackedWidget()
        self.mdv = ModelViewer(tablemodel)
        self.stackedWidget.addWidget(self.mdv)
        if (self.mode == 'active'):
            # Active (editable) mode is not implemented yet.
            raise TemporarilyUnavaliable('The active mode is underdevelopment. Please use the passive mode for now.')
        self.setCentralWidget(self.stackedWidget)
    def show_model(self, model=None):
        """Load ``model`` into the viewer, refresh plots, and maximize."""
        if model:
            self.mdv.change_model(model)
            if model.converged:
                self.successLabel.setText('<font color="green">converged</font>')
            if (self.mode == 'active'):
                self.modeLabel.setText('Active Mode')
        self.mdv.fill_output_label()
        self.mdv.tableview.setModel(self.mdv.tablemodel)
        self.mdv.plot_model()
        self.mdv.plot_spectrum()
        self.showMaximized()
    def switch_to_mdv(self):
        """Show the model-viewer page; uncheck the form action."""
        self.stackedWidget.setCurrentIndex(0)
        self.viewForm.setChecked(False)
    def switch_to_form(self):
        """Show the edit-form page; uncheck the viewer action."""
        self.stackedWidget.setCurrentIndex(1)
        self.viewMdv.setChecked(False)
def register_all_pascal_voc(root):
    """Register the standard Pascal VOC 2007/2012 splits found under ``root``.

    Each split is registered via ``register_pascal_voc`` and tagged with the
    'pascal_voc' evaluator type in the MetadataCatalog.
    """
    splits = (
        ('voc_2007_trainval', 'VOC2007', 'trainval'),
        ('voc_2007_train', 'VOC2007', 'train'),
        ('voc_2007_val', 'VOC2007', 'val'),
        ('voc_2007_test', 'VOC2007', 'test'),
        ('voc_2012_trainval', 'VOC2012', 'trainval'),
        ('voc_2012_train', 'VOC2012', 'train'),
        ('voc_2012_val', 'VOC2012', 'val'),
    )
    for name, dirname, split in splits:
        # The dataset year is encoded in the split name itself.
        year = 2007 if '2007' in name else 2012
        register_pascal_voc(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = 'pascal_voc'
def get_rank():
    """Return this process's distributed rank, or 0 when torch.distributed
    has not been initialized (single-process runs)."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
def get_args():
    """Build and parse command-line arguments for NILM training runs.

    After parsing, dataset locations are overwritten with repository-relative
    defaults, preprocessing parameters are filled in by
    ``update_preprocessing_parameters``, and the device is upgraded to CUDA
    when available.
    """
    import json

    def _str2bool(value):
        # FIX: argparse's `type=bool` treats any non-empty string
        # (including 'False') as True; parse common spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')

    parser = argparse.ArgumentParser()
    parser.add_argument('--redd_location', type=str, default=None)
    parser.add_argument('--ukdale_location', type=str, default=None)
    parser.add_argument('--refit_location', type=str, default=None)
    parser.add_argument('--export_root', type=str, default='results')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda'])
    parser.add_argument('--dataset_code', type=str, default='refit', choices=['redd_lf', 'uk_dale', 'refit'])
    # FIX: `type=list` would split a CLI string into characters; accept one
    # or more values instead (defaults unchanged).
    parser.add_argument('--house_indicies', type=int, nargs='+', default=[1, 2, 3, 4, 5])
    parser.add_argument('--appliance_names', type=str, nargs='+', default=['Washing_Machine'])
    parser.add_argument('--sampling', type=str, default='6s')
    parser.add_argument('--normalize', type=str, default='mean', choices=['mean', 'minmax', 'none'])
    # FIX: `type=dict` would crash on any provided value; accept a JSON
    # object string instead (defaults unchanged).
    parser.add_argument('--c0', type=json.loads, default=None)
    parser.add_argument('--cutoff', type=json.loads, default=None)
    parser.add_argument('--threshold', type=json.loads, default=None)
    parser.add_argument('--min_on', type=json.loads, default=None)
    parser.add_argument('--min_off', type=json.loads, default=None)
    parser.add_argument('--window_size', type=int, default=480)
    parser.add_argument('--window_stride', type=int, default=120)
    parser.add_argument('--validation_size', type=float, default=0.1)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--output_size', type=int, default=1)
    parser.add_argument('--drop_out', type=float, default=0.1)
    parser.add_argument('--hidden', type=int, default=256)
    parser.add_argument('--heads', type=int, default=2)
    parser.add_argument('--n_layers', type=int, default=2)
    parser.add_argument('--pretrain', type=_str2bool, default=True)
    parser.add_argument('--mask_prob', type=float, default=0.25)
    parser.add_argument('--pretrain_num_epochs', type=int, default=10)
    parser.add_argument('--num_epochs', type=int, default=90)
    parser.add_argument('--tau', type=float, default=0.1)
    parser.add_argument('--optimizer', type=str, default='adam', choices=['sgd', 'adam', 'adamw'])
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--enable_lr_schedule', type=_str2bool, default=False)
    parser.add_argument('--weight_decay', type=float, default=0.0)
    parser.add_argument('--momentum', type=float, default=None)
    parser.add_argument('--decay_step', type=int, default=100)
    args = parser.parse_args()
    # Dataset paths are pinned to repository-relative locations regardless of
    # what was passed on the command line.
    args.ukdale_location = 'data/UK_Dale'
    args.redd_location = 'data/REDD'
    args.refit_location = 'data/Refit'
    args = update_preprocessing_parameters(args)
    if torch.cuda.is_available():
        # Prefer the first GPU whenever one is present.
        args.device = 'cuda:0'
    return args
class SparseMap3D(genpy.Message):
_md5sum = 'a20102f0b3a02e95070dab4140b78fb5'
_type = 'multi_map_server/SparseMap3D'
_has_header = True
_full_text = "Header header\nnav_msgs/MapMetaData info\nVerticalOccupancyGridList[] lists\n\n\n\nMSG: std_msgs/Header\n# Standard metadata for higher-level stamped data types.\n# This is generally used to communicate timestamped data \n# in a particular coordinate frame.\n# \n# sequence ID: consecutively increasing ID \nuint32 seq\n#Two-integer timestamp that is expressed as:\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n# time-handling sugar is provided by the client library\ntime stamp\n#Frame this data is associated with\n# 0: no frame\n# 1: global frame\nstring frame_id\n\n\nMSG: nav_msgs/MapMetaData\n# This hold basic information about the characterists of the OccupancyGrid\n\n# The time at which the map was loaded\ntime map_load_time\n# The map resolution [m/cell]\nfloat32 resolution\n# Map width [cells]\nuint32 width\n# Map height [cells]\nuint32 height\n# The origin of the map [m, m, rad]. This is the real-world pose of the\n# cell (0,0) in the map.\ngeometry_msgs/Pose origin\n\nMSG: geometry_msgs/Pose\n# A representation of pose in free space, composed of postion and orientation. \nPoint position\nQuaternion orientation\n\n\nMSG: geometry_msgs/Point\n# This contains the position of a point in free space\nfloat64 x\nfloat64 y\nfloat64 z\n\n\nMSG: geometry_msgs/Quaternion\n# This represents an orientation in free space in quaternion form.\n\nfloat64 x\nfloat64 y\nfloat64 z\nfloat64 w\n\n\nMSG: multi_map_server/VerticalOccupancyGridList\nfloat32 x\nfloat32 y\nint32[] upper\nint32[] lower\nint32[] mass\n\n\n"
__slots__ = ['header', 'info', 'lists']
_slot_types = ['std_msgs/Header', 'nav_msgs/MapMetaData', 'multi_map_server/VerticalOccupancyGridList[]']
    def __init__(self, *args, **kwds):
        """Constructor following genpy message conventions.

        Positional args fill the slots in declared order; keyword args fill
        them by name.  Any slot left unset (or when no args are given) is
        initialized to its default: empty Header, empty MapMetaData, [].
        """
        if (args or kwds):
            super(SparseMap3D, self).__init__(*args, **kwds)
            # Backfill defaults for slots the caller did not provide.
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            if (self.info is None):
                self.info = nav_msgs.msg.MapMetaData()
            if (self.lists is None):
                self.lists = []
        else:
            self.header = std_msgs.msg.Header()
            self.info = nav_msgs.msg.MapMetaData()
            self.lists = []
    def _get_types(self):
        """Internal genpy API: return the per-slot ROS type strings."""
        return self._slot_types
    def serialize(self, buff):
        """Serialize this message into ``buff`` in ROS wire format.

        Layout: header (seq/stamp + length-prefixed frame_id), map metadata,
        then a length-prefixed sequence of VerticalOccupancyGridList entries,
        each with x/y floats and three length-prefixed int32 arrays.
        Struct/Type errors are routed through genpy's _check_types.
        """
        try:
            _x = self
            buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            # Strings are UTF-8 encoded and length-prefixed on the wire.
            if (python3 or (type(_x) == unicode)):
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack(('<I%sB' % length), length, *_x))
            else:
                buff.write(struct.pack(('<I%ss' % length), length, _x))
            _x = self
            buff.write(_struct_2If2I7d.pack(_x.info.map_load_time.secs, _x.info.map_load_time.nsecs, _x.info.resolution, _x.info.width, _x.info.height, _x.info.origin.position.x, _x.info.origin.position.y, _x.info.origin.position.z, _x.info.origin.orientation.x, _x.info.origin.orientation.y, _x.info.origin.orientation.z, _x.info.origin.orientation.w))
            length = len(self.lists)
            buff.write(_struct_I.pack(length))
            for val1 in self.lists:
                _x = val1
                buff.write(_struct_2f.pack(_x.x, _x.y))
                # Each int32[] is written as a uint32 count + packed values.
                length = len(val1.upper)
                buff.write(_struct_I.pack(length))
                pattern = ('<%si' % length)
                buff.write(struct.pack(pattern, *val1.upper))
                length = len(val1.lower)
                buff.write(_struct_I.pack(length))
                pattern = ('<%si' % length)
                buff.write(struct.pack(pattern, *val1.lower))
                length = len(val1.mass)
                buff.write(_struct_I.pack(length))
                pattern = ('<%si' % length)
                buff.write(struct.pack(pattern, *val1.mass))
        except struct.error as se:
            self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
        except TypeError as te:
            self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))
    def deserialize(self, str):
        """Unpack this message from the bytes ``str`` (genpy-generated code;
        the parameter name shadowing the builtin is the generator's
        convention).

        Returns self; struct errors are re-raised as DeserializationError.
        """
        try:
            if (self.header is None):
                self.header = std_msgs.msg.Header()
            if (self.info is None):
                self.info = nav_msgs.msg.MapMetaData()
            if (self.lists is None):
                # Generated placeholder reset; `lists` is rebuilt below.
                self.lists = None
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            _x = self
            start = end
            end += 76
            (_x.info.map_load_time.secs, _x.info.map_load_time.nsecs, _x.info.resolution, _x.info.width, _x.info.height, _x.info.origin.position.x, _x.info.origin.position.y, _x.info.origin.position.z, _x.info.origin.orientation.x, _x.info.origin.orientation.y, _x.info.origin.orientation.z, _x.info.origin.orientation.w) = _struct_2If2I7d.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.lists = []
            # One VerticalOccupancyGridList per element: x/y floats plus
            # three length-prefixed int32 arrays (upper, lower, mass).
            for i in range(0, length):
                val1 = multi_map_server.msg.VerticalOccupancyGridList()
                _x = val1
                start = end
                end += 8
                (_x.x, _x.y) = _struct_2f.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                pattern = ('<%si' % length)
                start = end
                end += struct.calcsize(pattern)
                val1.upper = struct.unpack(pattern, str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                pattern = ('<%si' % length)
                start = end
                end += struct.calcsize(pattern)
                val1.lower = struct.unpack(pattern, str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                pattern = ('<%si' % length)
                start = end
                end += struct.calcsize(pattern)
                val1.mass = struct.unpack(pattern, str[start:end])
                self.lists.append(val1)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)
def serialize_numpy(self, buff, numpy):
    """Serialize message fields into buff, using numpy for array fields.

    :param buff: writable buffer (e.g. ``BytesIO``)
    :param numpy: the numpy module (genpy passes it in to avoid a hard import)
    """
    try:
        _x = self
        buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
        _x = self.header.frame_id
        length = len(_x)
        if (python3 or (type(_x) == unicode)):
            _x = _x.encode('utf-8')
            length = len(_x)
        if python3:
            buff.write(struct.pack(('<I%sB' % length), length, *_x))
        else:
            buff.write(struct.pack(('<I%ss' % length), length, _x))
        _x = self
        buff.write(_struct_2If2I7d.pack(_x.info.map_load_time.secs, _x.info.map_load_time.nsecs, _x.info.resolution, _x.info.width, _x.info.height, _x.info.origin.position.x, _x.info.origin.position.y, _x.info.origin.position.z, _x.info.origin.orientation.x, _x.info.origin.orientation.y, _x.info.origin.orientation.z, _x.info.origin.orientation.w))
        length = len(self.lists)
        buff.write(_struct_I.pack(length))
        for val1 in self.lists:
            _x = val1
            buff.write(_struct_2f.pack(_x.x, _x.y))
            length = len(val1.upper)
            buff.write(_struct_I.pack(length))
            # BUGFIX: ndarray.tostring() was deprecated and removed in
            # NumPy >= 1.23; tobytes() is the byte-identical replacement.
            # The unused "pattern = '<%si' % length" dead stores emitted by
            # the generator were dropped.
            buff.write(val1.upper.tobytes())
            length = len(val1.lower)
            buff.write(_struct_I.pack(length))
            buff.write(val1.lower.tobytes())
            length = len(val1.mass)
            buff.write(_struct_I.pack(length))
            buff.write(val1.mass.tobytes())
    except struct.error as se:
        self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))))
    except TypeError as te:
        self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x)))))
def deserialize_numpy(self, str, numpy):
    """Unpack serialized message in str into this instance, using numpy for
    the int32 array fields.

    :param str: byte array of serialized message, ``str``
    :param numpy: the numpy module (genpy passes it in to avoid a hard import)
    """
    try:
        # Lazily default sub-messages so a bare instance can deserialize.
        if (self.header is None):
            self.header = std_msgs.msg.Header()
        if (self.info is None):
            self.info = nav_msgs.msg.MapMetaData()
        if (self.lists is None):
            self.lists = None
        end = 0
        _x = self
        start = end
        end += 12
        # Header: seq, stamp.secs, stamp.nsecs -- three uint32 (12 bytes).
        (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs) = _struct_3I.unpack(str[start:end])
        start = end
        end += 4
        # Length-prefixed frame_id string.
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.header.frame_id = str[start:end].decode('utf-8')
        else:
            self.header.frame_id = str[start:end]
        _x = self
        start = end
        end += 76
        # MapMetaData block: same 76-byte layout as deserialize().
        (_x.info.map_load_time.secs, _x.info.map_load_time.nsecs, _x.info.resolution, _x.info.width, _x.info.height, _x.info.origin.position.x, _x.info.origin.position.y, _x.info.origin.position.z, _x.info.origin.orientation.x, _x.info.origin.orientation.y, _x.info.origin.orientation.z, _x.info.origin.orientation.w) = _struct_2If2I7d.unpack(str[start:end])
        start = end
        end += 4
        # Number of VerticalOccupancyGridList entries.
        (length,) = _struct_I.unpack(str[start:end])
        self.lists = []
        for i in range(0, length):
            val1 = multi_map_server.msg.VerticalOccupancyGridList()
            _x = val1
            start = end
            end += 8
            (_x.x, _x.y) = _struct_2f.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            # upper: length-prefixed int32 array, zero-copy view via frombuffer.
            pattern = ('<%si' % length)
            start = end
            end += struct.calcsize(pattern)
            val1.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            # lower: length-prefixed int32 array.
            pattern = ('<%si' % length)
            start = end
            end += struct.calcsize(pattern)
            val1.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            # mass: length-prefixed int32 array.
            pattern = ('<%si' % length)
            start = end
            end += struct.calcsize(pattern)
            val1.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
            self.lists.append(val1)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e)
def plot(net_name, load_path, plot_path):
    """Load a checkpoint for ``net_name`` and plot its decision boundary on a
    plane spanned by a triplet of images.

    Relies on module-level globals: ``args``, ``device``, ``testloader``,
    ``trainloader`` and helpers (get_model, get_random_images, get_noisy_images,
    AttackPGD, make_planeloader, decision_boundary, produce_plot_alt,
    simple_lapsed_time).

    :param net_name: architecture name, written into ``args.net``
    :param load_path: path of the state-dict checkpoint to load
    :param plot_path: output path for the produced plot (stored in ``args``)
    """
    print('')
    print(net_name)
    print(load_path)
    print(plot_path)
    start = time.time()
    args.net = net_name
    net = get_model(args, device)
    # DataParallel checkpoints are loaded through .module.
    if (torch.cuda.device_count() > 1):
        net.module.load_state_dict(torch.load(load_path))
    else:
        net.load_state_dict(torch.load(load_path))
    end = time.time()
    simple_lapsed_time('Time taken to train/load the model', (end - start))
    start = time.time()
    if (args.imgs is None):
        # Default: pick random test images.
        (images, labels) = get_random_images(testloader.dataset)
    elif ((- 1) in args.imgs):
        # Sentinel -1: derive noisy images from random test images.
        (dummy_imgs, _) = get_random_images(testloader.dataset)
        (images, labels) = get_noisy_images(torch.stack(dummy_imgs), testloader.dataset, net, device)
    elif ((- 10) in args.imgs):
        # Sentinel -10: first listed test image plus two synthetic images that
        # are zero except for one pixel (channel 0: top-left, then bottom-right).
        image_ids = args.imgs[0]
        images = [testloader.dataset[image_ids][0]]
        labels = [testloader.dataset[image_ids][1]]
        for i in list(range(2)):
            temp = torch.zeros_like(images[0])
            if (i == 0):
                temp[(0, 0, 0)] = 1
            else:
                temp[(0, (- 1), (- 1))] = 1
            images.append(temp)
            labels.append(0)
    else:
        # Explicit test-set indices.
        image_ids = args.imgs
        images = [testloader.dataset[i][0] for i in image_ids]
        labels = [testloader.dataset[i][1] for i in image_ids]
    print(labels)
    if args.adv:
        # Replace the images by their PGD-adversarial counterparts.
        adv_net = AttackPGD(net, trainloader.dataset)
        (adv_preds, imgs) = adv_net(torch.stack(images).to(device), torch.tensor(labels).to(device))
        images = [img.cpu() for img in imgs]
    planeloader = make_planeloader(images, args)
    preds = decision_boundary(args, net, planeloader, device)
    # NOTE(review): sampl_path is unused, and this line raises TypeError when
    # args.imgs is None (map over None) — confirm whether it can be removed.
    sampl_path = '_'.join(list(map(str, args.imgs)))
    args.plot_path = plot_path
    plot = produce_plot_alt(args.plot_path, preds, planeloader, images, labels, trainloader, temp=args.temp)
    end = time.time()
    simple_lapsed_time('Time taken to plot the image', (end - start))
def _get_func_info(func_module):
(module_name, func_name) = func_module.rsplit('.', 1)
module = import_module(module_name)
func = getattr(module, func_name)
func_sig = signature(func)
func_params = [p.name for p in func_sig.parameters.values() if (p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD))]
required_params = [p.name for p in func_sig.parameters.values() if ((p.default is p.empty) and (p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)))]
return (func, func_name, func_params, required_params) |
@pytest.mark.parametrize('current_line_length', (0, 20))
@pytest.mark.parametrize('operations_processed, percentage', ((0, '[ 0%]'), (1, '[100%]')))
def test_display_percentage(capsys, execution_context, after_execution, swagger_20, current_line_length, operations_processed, percentage):
    """The percentage is right-aligned to the terminal width and cyan-styled.

    NOTE(review): the two decorator lines were garbled in the source (bare
    ``.parametrize(...)`` with no target, a syntax error) — restored as
    ``@pytest.mark.parametrize``; requires ``pytest`` to be imported at the
    top of the file. Confirm against the original.
    """
    execution_context.current_line_length = current_line_length
    execution_context.operations_processed = operations_processed
    default.display_percentage(execution_context, after_execution)
    out = capsys.readouterr().out
    # Output pads to the terminal width; -1 accounts for the trailing newline.
    assert (((len(click.unstyle(out)) + current_line_length) - 1) == default.get_terminal_width())
    assert (out.strip() == strip_style_win32(click.style(percentage, fg='cyan')))
class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder used when the ``torch`` backend is unavailable.

    Any attempt to instantiate it calls ``requires_backends``, which reports
    the missing backend instead of failing with an opaque ImportError.
    """

    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
def make_plots(statistics_file):
    """Build bar plots and a LaTeX table comparing SSIM/LPIPS statistics.

    Reads the JSON statistics produced by the evaluation run and writes, next
    to ``statistics_file``:
    - ``ScreenVsWorldGrid-SSIM.<FILETYPE>``: a grid of bar charts, one row per
      transfer function (TF), columns SSIM and LPIPS;
    - ``screenVsWorldGrid-SSIM.tex``: a table with the best two entries per
      stat/stepsize bolded;
    - copies of the reference/result images for each TF, class and stepsize.

    Relies on module-level ``STEPSIZES`` and ``CONFIG_FILES``.

    :param statistics_file: path of the JSON stats file; its directory is
        used as the output folder
    """
    print('\n Make Plots')
    with open(statistics_file, 'r') as f:
        stats = json.load(f)
    output_folder = os.path.split(statistics_file)[0]
    FILETYPE = 'eps'
    latex = io.StringIO()
    LATEX_SHOW_STD = False  # include +/- std columns in the LaTeX table
    numStepsizes = len(STEPSIZES)
    numTFs = len(CONFIG_FILES)
    classNames = ['pos', 'dirP', 'dirF', 'world', 'world-pre']
    numClasses = len(classNames)
    def classTags(tf_idx):
        # Folder-name templates per class; '%d' is later filled with 1000*stepsize.
        return [f'run_screen_{tf_idx}_%d_plain', f'run_screen_{tf_idx}_%d_dirD', f'run_screen_{tf_idx}_%d_dirF', 'world_%d', 'world_pre_%d']
    statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
    statTags = ['ssim', 'lpips']
    # --- LaTeX table header: one column group per stepsize. ---
    latex.write(('\\begin{tabular}{{}rr%{}}\n' % ('c' * (len(statNames) * numStepsizes))))
    latex.write('\\toprule\n')
    latex.write('&')
    for (j, s) in enumerate(STEPSIZES):
        latex.write((' & \\multicolumn{%d}{c}{Stepsize %s}' % (len(statNames), ('%.0e' % s))))
    latex.write('\\\\\n')
    for j in range(len(STEPSIZES)):
        latex.write(('\\cmidrule(r){%d-%d}' % ((3 + (len(statNames) * j)), (2 + (len(statNames) * (j + 1))))))
    latex.write('\n TF & Input ')
    for j in range(len(STEPSIZES)):
        for s in statNames:
            latex.write((' & %s' % s))
    latex.write('\\\\\n')
    # --- Bar-chart grid: rows = TFs, columns = (SSIM, LPIPS). ---
    (fig, axs) = plt.subplots(numTFs, 2, squeeze=False, sharex=True, figsize=(6.4, (1 + (2 * numTFs))))
    # Horizontal offsets so the per-class bars sit side by side per stepsize.
    x_offset = np.linspace((- 0.3), (+ 0.3), numClasses, True)
    width = (x_offset[1] - x_offset[0])
    handles = []
    handle_names = []
    for row in range(numTFs):
        local_stat = stats[row]
        axs[(row, 0)].set_ylabel(('TF %d' % (row + 1)))
        for (k, (ax, stat, label)) in enumerate(zip(axs[row], statTags, statNames)):
            for (i, (cls, tag)) in enumerate(zip(classNames, classTags(row))):
                X = []
                Y = []
                err = []
                for (j, s) in enumerate(STEPSIZES):
                    X.append((j + x_offset[i]))
                    # Stats are stored as (mean, std) under the filled-in tag.
                    (y, e) = local_stat[(tag % (1000 * s))][stat]
                    Y.append(y)
                    err.append(e)
                h = ax.bar(X, Y, width=width, yerr=err)
                # Collect legend handles only once (from the SSIM column).
                if (stat == 'ssim'):
                    handles.append(h)
                    handle_names.append(cls)
            Xlabel = [('%.0e' % s) for s in STEPSIZES]
            ax.set_title(label)
            ax.set_xticks(np.arange(numStepsizes))
            ax.set_xticklabels(Xlabel)
            ax.set_xlabel('Stepsize')
        # --- LaTeX rows for this TF; bold the best two values per column. ---
        latex.write('\\cmidrule(r){1-2}')
        for j in range(len(STEPSIZES)):
            latex.write(('\\cmidrule(r){%d-%d}' % ((3 + (len(statNames) * j)), (2 + (len(statNames) * (j + 1))))))
        latex.write(('\\multirow{%d}{*}{TF %d}' % (numClasses, (row + 1))))
        tags = classTags(row)
        sorted_ssim = dict()
        sorted_lpips = dict()
        for (j, s) in enumerate(STEPSIZES):
            # SSIM: higher is better; LPIPS: lower is better.
            sorted_ssim[j] = list(sorted([local_stat[(tags[i] % (1000 * s))]['ssim'][0] for i in range(len(classNames))], reverse=True))
            sorted_lpips[j] = list(sorted([local_stat[(tags[i] % (1000 * s))]['lpips'][0] for i in range(len(classNames))]))
        best_stats = {'ssim': sorted_ssim, 'lpips': sorted_lpips}
        for i in range(len(classNames)):
            tag = tags[i]
            latex.write((' & %s' % classNames[i]))
            for (j, s) in enumerate(STEPSIZES):
                for (k, stat) in enumerate(statTags):
                    (y, e) = local_stat[(tag % (1000 * s))][stat]
                    # Bold if among the top two values for this stat/stepsize.
                    is_best = ((y == best_stats[stat][j][0]) or (y == best_stats[stat][j][1]))
                    if is_best:
                        if LATEX_SHOW_STD:
                            latex.write((' & $\\bm{%.2f}$ ($\\pm %.2f$)' % (y, e)))
                        else:
                            latex.write((' & $\\bm{%.4f}$' % y))
                    elif LATEX_SHOW_STD:
                        latex.write((' & $%.2f$ ($\\pm %.2f$)' % (y, e)))
                    else:
                        latex.write((' & $%.4f$' % y))
            latex.write(' \\\\\n')
        # Copy the reference and per-class result images beside the stats file.
        shutil.copyfile(os.path.join(output_folder, ('images_%d/reference/reference000.png' % row)), os.path.join(output_folder, ('%d_reference.png' % row)))
        for tag in classTags(row):
            for s in STEPSIZES:
                foldername = (tag % (1000 * s))
                filename = ('world000.png' if foldername.startswith('world') else 'screen000.png')
                shutil.copyfile(os.path.join(output_folder, ('images_%d/%s/%s' % (row, foldername, filename))), os.path.join(output_folder, ('%s_%s.png' % (row, foldername))))
    lgd = fig.legend(handles, handle_names, bbox_to_anchor=(0.65, 0.7), loc='lower center', borderaxespad=0.0)
    fig.savefig(os.path.join(output_folder, ('ScreenVsWorldGrid-SSIM.%s' % FILETYPE)), bbox_inches='tight', bbox_extra_artists=(lgd,))
    latex.write('\\bottomrule\n')
    latex.write('\\end{tabular}\n')
    latex = latex.getvalue()
    with open(os.path.join(output_folder, 'screenVsWorldGrid-SSIM.tex'), 'w') as f:
        f.write(latex)
    print(latex)
    print('Done')
    plt.show()
class VarValueRenaming():
    """Renumbers variables and values after constant/unreachable values are
    pruned from a SAS task.

    For each old variable index ``v``:
    - ``new_var_nos[v]`` is the new variable index, or ``None`` when the
      variable collapsed to a single value and was removed entirely;
    - ``new_values[v][old_value]`` is either the new value index, or one of
      the sentinels ``always_true`` / ``always_false`` for values that were
      proven constant.
    """

    def __init__(self):
        self.new_var_nos = []        # old var no -> new var no, or None if removed
        self.new_values = []         # old var no -> list of new values / sentinels
        self.new_sizes = []          # new var no -> new domain size
        self.new_var_count = 0
        self.num_removed_values = 0

    def dump(self):
        """Print a human-readable summary of the whole renaming."""
        old_var_count = len(self.new_var_nos)
        print(('variable count: %d => %d' % (old_var_count, self.new_var_count)))
        print(('number of removed values: %d' % self.num_removed_values))
        print('variable conversions:')
        for (old_var_no, (new_var_no, new_values)) in enumerate(zip(self.new_var_nos, self.new_values)):
            old_size = len(new_values)
            if (new_var_no is None):
                print(('variable %d [size %d] => removed' % (old_var_no, old_size)))
            else:
                new_size = self.new_sizes[new_var_no]
                print(('variable %d [size %d] => %d [size %d]' % (old_var_no, old_size, new_var_no, new_size)))
            for (old_value, new_value) in enumerate(new_values):
                if (new_value is always_false):
                    new_value = 'always false'
                elif (new_value is always_true):
                    new_value = 'always true'
                print((' value %d => %s' % (old_value, new_value)))

    def register_variable(self, old_domain_size, init_value, new_domain):
        """Record the renaming of one variable given its surviving domain.

        :param old_domain_size: original number of values
        :param init_value: the variable's value in the initial state
        :param new_domain: collection of old values that remain reachable
        """
        assert (1 <= len(new_domain) <= old_domain_size)
        assert (init_value in new_domain)
        if (len(new_domain) == 1):
            # Variable became constant: drop it; only the initial value holds.
            new_values_for_var = ([always_false] * old_domain_size)
            new_values_for_var[init_value] = always_true
            self.new_var_nos.append(None)
            self.new_values.append(new_values_for_var)
            self.num_removed_values += old_domain_size
        else:
            # Renumber surviving values densely; mark pruned ones always_false.
            new_value_counter = count()
            new_values_for_var = []
            for value in range(old_domain_size):
                if (value in new_domain):
                    new_values_for_var.append(next(new_value_counter))
                else:
                    self.num_removed_values += 1
                    new_values_for_var.append(always_false)
            new_size = next(new_value_counter)
            assert (new_size == len(new_domain))
            self.new_var_nos.append(self.new_var_count)
            self.new_values.append(new_values_for_var)
            self.new_sizes.append(new_size)
            self.new_var_count += 1

    def apply_to_task(self, task):
        """Rewrite every part of the SAS task in place using this renaming."""
        if DEBUG:
            self.dump()
        self.apply_to_variables(task.variables)
        self.apply_to_mutexes(task.mutexes)
        self.apply_to_init(task.init)
        self.apply_to_goals(task.goal.pairs)
        self.apply_to_operators(task.operators)
        self.apply_to_axioms(task.axioms)

    def apply_to_variables(self, variables):
        """Shrink the variable list: ranges, axiom layers and value names."""
        variables.ranges = self.new_sizes
        new_axiom_layers = ([None] * self.new_var_count)
        for (old_no, new_no) in enumerate(self.new_var_nos):
            if (new_no is not None):
                new_axiom_layers[new_no] = variables.axiom_layers[old_no]
        assert (None not in new_axiom_layers)
        variables.axiom_layers = new_axiom_layers
        self.apply_to_value_names(variables.value_names)

    def apply_to_value_names(self, value_names):
        """Rebuild value-name table, dropping names of removed propositions."""
        new_value_names = [([None] * size) for size in self.new_sizes]
        for (var_no, values) in enumerate(value_names):
            for (value, value_name) in enumerate(values):
                (new_var_no, new_value) = self.translate_pair((var_no, value))
                if (new_value is always_true):
                    if DEBUG:
                        print(('Removed true proposition: %s' % value_name))
                elif (new_value is always_false):
                    if DEBUG:
                        print(('Removed false proposition: %s' % value_name))
                else:
                    new_value_names[new_var_no][new_value] = value_name
        assert all(((None not in value_names) for value_names in new_value_names))
        value_names[:] = new_value_names

    def apply_to_mutexes(self, mutexes):
        """Translate mutex groups; keep only groups with >= 2 surviving facts."""
        new_mutexes = []
        for mutex in mutexes:
            new_facts = []
            for (var, val) in mutex.facts:
                (new_var_no, new_value) = self.translate_pair((var, val))
                if ((new_value is not always_true) and (new_value is not always_false)):
                    new_facts.append((new_var_no, new_value))
            if (len(new_facts) >= 2):
                mutex.facts = new_facts
                new_mutexes.append(mutex)
        mutexes[:] = new_mutexes

    def apply_to_init(self, init):
        """Translate the initial state to the new variable numbering."""
        init_pairs = list(enumerate(init.values))
        try:
            self.convert_pairs(init_pairs)
        except Impossible:
            # The initial state is reachable by definition, so every fact in
            # it must survive translation.
            assert False, 'Initial state impossible? Inconceivable!'
        new_values = ([None] * self.new_var_count)
        for (new_var_no, new_value) in init_pairs:
            new_values[new_var_no] = new_value
        assert (None not in new_values)
        init.values = new_values

    def apply_to_goals(self, goals):
        """Translate goal facts; an empty translated goal is trivially solved."""
        self.convert_pairs(goals)
        if (not goals):
            raise TriviallySolvable

    def apply_to_operators(self, operators):
        """Translate operators, dropping those that became inapplicable/no-ops."""
        new_operators = []
        num_removed = 0
        for op in operators:
            new_op = self.translate_operator(op)
            if (new_op is None):
                num_removed += 1
                if DEBUG:
                    print(('Removed operator: %s' % op.name))
            else:
                new_operators.append(new_op)
        print(('%d operators removed' % num_removed))
        operators[:] = new_operators

    def apply_to_axioms(self, axioms):
        """Translate axioms, dropping impossible or vacuous ones."""
        new_axioms = []
        num_removed = 0
        for axiom in axioms:
            try:
                self.apply_to_axiom(axiom)
            except (Impossible, DoesNothing):
                num_removed += 1
                if DEBUG:
                    print('Removed axiom:')
                    axiom.dump()
            else:
                new_axioms.append(axiom)
        print(('%d axioms removed' % num_removed))
        axioms[:] = new_axioms

    def translate_operator(self, op):
        """Return the translated operator, or None if it must be removed."""
        applicability_conditions = op.get_applicability_conditions()
        try:
            self.convert_pairs(applicability_conditions)
        except Impossible:
            # The operator can never be applicable.
            return None
        conditions_dict = dict(applicability_conditions)
        new_prevail_vars = set(conditions_dict)
        new_pre_post = []
        for entry in op.pre_post:
            new_entry = self.translate_pre_post(entry, conditions_dict)
            if (new_entry is not None):
                new_pre_post.append(new_entry)
                # Variables with a surviving effect are no longer prevail-only.
                new_var = new_entry[0]
                new_prevail_vars.discard(new_var)
        if (not new_pre_post):
            # All effects vanished: the operator does nothing.
            return None
        new_prevail = sorted(((var, value) for (var, value) in conditions_dict.items() if (var in new_prevail_vars)))
        return sas_tasks.SASOperator(name=op.name, prevail=new_prevail, pre_post=new_pre_post, cost=op.cost)

    def apply_to_axiom(self, axiom):
        """Translate one axiom in place; raise Impossible/DoesNothing to drop it."""
        self.convert_pairs(axiom.condition)
        (new_var, new_value) = self.translate_pair(axiom.effect)
        assert (new_value is not always_false)
        if (new_value is always_true):
            raise DoesNothing
        axiom.effect = (new_var, new_value)

    def translate_pre_post(self, pre_post_entry, conditions_dict):
        """Translate one (var, pre, post, cond) effect; None drops the effect."""
        (var_no, pre, post, cond) = pre_post_entry
        (new_var_no, new_post) = self.translate_pair((var_no, post))
        if (new_post is always_true):
            return None
        if (pre == (- 1)):
            new_pre = (- 1)
        else:
            (_, new_pre) = self.translate_pair((var_no, pre))
        assert (new_pre is not always_false), 'This function should only be called for operators whose applicability conditions are deemed possible.'
        if (new_post == new_pre):
            # Effect would not change the value.
            return None
        new_cond = list(cond)
        try:
            self.convert_pairs(new_cond)
        except Impossible:
            # Effect condition can never hold.
            return None
        for (cond_var, cond_value) in new_cond:
            if ((cond_var in conditions_dict) and (conditions_dict[cond_var] != cond_value)):
                # Effect condition contradicts the operator's precondition.
                return None
        assert (new_post is not always_false), 'if we survived so far, this effect can trigger (as far as our analysis can determine this), and then new_post cannot be always_false'
        assert (new_pre is not always_true), 'if this pre_post changes the value and can fire, new_pre cannot be always_true'
        return (new_var_no, new_pre, new_post, new_cond)

    def translate_pair(self, fact_pair):
        """Map an old (var, value) pair to (new_var, new_value-or-sentinel)."""
        (var_no, value) = fact_pair
        new_var_no = self.new_var_nos[var_no]
        new_value = self.new_values[var_no][value]
        return (new_var_no, new_value)

    def convert_pairs(self, pairs):
        """Translate a fact list in place, dropping always-true facts.

        :raises Impossible: if any fact translates to always_false
        """
        new_pairs = []
        for pair in pairs:
            (new_var_no, new_value) = self.translate_pair(pair)
            if (new_value is always_false):
                raise Impossible
            elif (new_value is not always_true):
                assert (new_var_no is not None)
                new_pairs.append((new_var_no, new_value))
        pairs[:] = new_pairs
class NormalTranslationDataset(datasets.TranslationDataset):
    """TranslationDataset variant with an on-disk cache of built examples.

    When ``load_dataset`` is true and ``<path>.processed.<prefix>.pt`` exists,
    examples are loaded from that torch pickle instead of re-reading the raw
    parallel files; otherwise they are built from the ``exts`` file pair and
    (if ``load_dataset``) saved for the next run.
    """

    def __init__(self, path, exts, fields, load_dataset=False, prefix='', **kwargs):
        """
        :param path: common path prefix of the source/target files
        :param exts: pair of extensions appended to ``path`` (src, trg)
        :param fields: (src_field, trg_field) pair, or list of (name, field)
        :param load_dataset: enable the processed-example cache
        :param prefix: discriminator embedded in the cache file name
        """
        if (not isinstance(fields[0], (tuple, list))):
            fields = [('src', fields[0]), ('trg', fields[1])]
        (src_path, trg_path) = tuple((os.path.expanduser((path + x)) for x in exts))
        if (load_dataset and os.path.exists((path + '.processed.{}.pt'.format(prefix)))):
            examples = torch.load((path + '.processed.{}.pt'.format(prefix)))
        else:
            examples = []
            with open(src_path) as src_file, open(trg_path) as trg_file:
                for (src_line, trg_line) in zip(src_file, trg_file):
                    (src_line, trg_line) = (src_line.strip(), trg_line.strip())
                    # Skip pairs where either side is empty.
                    if ((src_line != '') and (trg_line != '')):
                        examples.append(data.Example.fromlist([src_line, trg_line], fields))
            if load_dataset:
                torch.save(examples, (path + '.processed.{}.pt'.format(prefix)))
        # NOTE(review): super() is given datasets.TranslationDataset explicitly,
        # which SKIPS TranslationDataset.__init__ (it would re-read the files)
        # and calls its parent Dataset.__init__ directly — presumably
        # intentional; confirm.
        super(datasets.TranslationDataset, self).__init__(examples, fields, **kwargs)
def test_sdca_squared(bin_train_data):
    """Squared loss exposes no predict_proba and separates the binary set."""
    features, labels = bin_train_data
    model = SDCAClassifier(loss='squared', random_state=0)
    model.fit(features, labels)
    # Squared loss is not probabilistic, so no predict_proba attribute.
    assert not hasattr(model, 'predict_proba')
    assert model.score(features, labels) == 1.0
class NONLOCALBAN(BAN):
    """BAN head that fuses template and search features with a non-local
    cross-correlation before classification/regression via a CAR head."""

    def __init__(self, in_channels=256, out_channels=256, cls_out_channels=2):
        super().__init__()
        self.head = CARHead(in_channels, out_channels, cls_out_channels)

    def forward(self, z_f, x_f):
        """Return (cls, reg) predictions for template z_f vs. search x_f."""
        fused = non_local_xcorr(z_f, x_f)
        cls_score, reg_score = self.head(fused)
        return (cls_score, reg_score)
class DMA():
    """Parse per-core DMA register dump files and aggregate traffic statistics.

    Reads ``<dmaType>RegInfo_<core>.txt`` files from ``dirpath``, converts
    each command record into a dict, translates cycle counts to real time,
    and accumulates per-core totals: DMA cycles, DDR/L2 data sizes and
    bandwidths, and DDR burst-length statistics.
    """

    def __init__(self, dirpath, dmaType):
        # dirpath: directory containing the RegInfo dump files.
        # dmaType: 'CDMA', 'GDMA' or 'SDMA' (GDMA/SDMA share TDMA dump files).
        self.dirpath = dirpath
        self.dmaType = dmaType
        self.chipArgs = dict()   # header key/value attributes from the dump
        self.linecount = 0       # header lines to skip in per-core files
        self.actual_corenum = 0  # number of cores with non-empty dumps
        self.regList = []        # per-core lists of command record dicts
        self.total_time_dict = {'start': [], 'end': []}
        self.dma_cycle_list = []
        self.dma_ddr_total_datasize_list = []
        self.dma_l2_total_datasize_list = []
        self.dma_ddr_avg_bw_list = []
        self.dma_l2_avg_bw_list = []
        self.dma_ddr_avg_burst_length_list = []
        self.ddr_total_cycle = 0
        self.l2_total_cycle = 0
        self.total_burst_length = 0
        self.total_xact_cnt = 0
        self.frequency = 0       # DMA frequency in MHz, read from the header
        # Canonical column order for the resulting DataFrame.
        self.columns = ['Engine Id', 'Core Id', 'Cmd Id', 'Layer Id', 'Layer Name', 'Function Type', 'Function Name', 'DMA data size(B)', 'Start Cycle', 'End Cycle', 'Asic Cycle', 'Bandwidth', 'Direction', 'AvgBurstLength', 'Data Type', 'Non32ByteRatio', 'MaskWriteRatio', 'cmd_special_function', 'src_start_addr', 'dst_start_addr', 'src_shape', 'dst_shape', 'index_shape', 'src_nsize', 'src_csize', 'src_hsize', 'src_wsize', 'dst_nsize', 'dst_csize', 'dst_hsize', 'dst_wsize', 'src_nstride', 'src_cstride', 'src_wstride', 'src_hstride', 'dst_nstride', 'dst_cstride', 'dst_hstride', 'dst_wstride', 'nchw_copy', 'stride_enable', 'src_data_format', 'cmd_type', 'index_csize', 'index_hsize', 'index_cstride', 'index_hstride', 'mask_start_addr_h8', 'mask_start_addr_l32', 'mask_data_format', 'localmem_mask_h32', 'localmem_mask_l32', 'fill_constant_en', 'constant_value', 'index', 'cmd_short', 'cmd_id_dep', 'intr_en', 'Msg Id', 'Sd\\Wt Count']

    def dma_engine_type(self):
        """Return the engine-id string for this DMA type.

        GDMA/SDMA records live in TDMA dump files, so dmaType is rewritten.
        NOTE(review): falls through (returns None) for unrecognized types —
        confirm callers only pass CDMA/GDMA/SDMA.
        """
        if (self.dmaType == 'CDMA'):
            return '4'
        elif (self.dmaType == 'GDMA'):
            self.dmaType = 'TDMA'
            return '1'
        elif (self.dmaType == 'SDMA'):
            self.dmaType = 'TDMA'
            return '3'

    def process_file(self):
        """Parse the dump header and every per-core file.

        :returns: list of per-core DataFrames (empty list when core 0's file
            is missing or empty)
        """
        engineId = self.dma_engine_type()
        file_name = f'{self.dirpath}/{self.dmaType.lower()}RegInfo_0.txt'
        if (os.path.exists(file_name) and (os.path.getsize(file_name) > 0)):
            # Read the global header (key: value lines) up to the first
            # __<TYPE>_REG_INFO__ marker; remember how many lines it spans.
            with open(file_name, 'r') as f:
                lines = f.readlines()
                for line in lines:
                    self.linecount += 1
                    if ('\t' in line):
                        fields = line.split(': ')
                        attr = fields[0][1:]
                        val = fields[1][:(- 1)]
                        self.chipArgs[attr] = val
                    if (f'__{self.dmaType}_REG_INFO__' in line):
                        break
            self.frequency = int(self.chipArgs['DMA Frequency(MHz)'])
            coreNum = int(self.chipArgs['Core Num'])
            # Count cores that actually produced a non-empty dump.
            for coreId in range(int(coreNum)):
                curDmaRegFile = (((f'{self.dirpath}/{self.dmaType.lower()}RegInfo' + '_') + str(coreId)) + '.txt')
                if (os.path.exists(curDmaRegFile) and (os.path.getsize(curDmaRegFile) != 0)):
                    self.actual_corenum += 1
            dmaDf_list = []
            for coreId in range(self.actual_corenum):
                dmaDf_list.append(self.process_data(coreId, engineId))
            return dmaDf_list
        else:
            self.dma_cycle_list.append(0)
            return []

    def process_data(self, coreId, engineId):
        """Parse one core's dump into a DataFrame and update the aggregates.

        :param coreId: core index of the file to parse
        :param engineId: engine-id string; records for other engines are dropped
        :returns: pandas DataFrame with columns in ``self.columns`` order
        """
        curDmaRegFile = (((f'{self.dirpath}/{self.dmaType.lower()}RegInfo' + '_') + str(coreId)) + '.txt')
        new_reglist = []
        with open(curDmaRegFile) as f:
            # Skip the shared header measured in process_file().
            rows = f.readlines()[self.linecount:]
            # Collect the set of attribute names actually present.
            fieldSet = set()
            for row in rows:
                if ('\t' in row):
                    attr = row.split(': ')[0][1:]
                    fieldSet.add(attr)
            fieldList = (list(fieldSet) if (len(fieldSet) >= len(self.columns)) else self.columns)
            # Split the file into one dict per __REG_INFO__-delimited record.
            dmaRegDict = dict.fromkeys(fieldList, '')
            idx = 0
            for row in rows:
                if (f'__{self.dmaType}_REG_INFO__' in row):
                    if (idx != 0):
                        new_reglist.append(dmaRegDict)
                    dmaRegDict = dict.fromkeys(fieldList, '')
                else:
                    fields = row.split(': ')
                    attr = fields[0][1:]
                    val = fields[1][:(- 1)]
                    dmaRegDict[attr] = val
                idx += 1
            new_reglist.append(dmaRegDict)
        # Keep only records belonging to the requested engine.
        temp = []
        for reg_dict in new_reglist:
            if (reg_dict['Engine Id'] == engineId):
                temp.append(reg_dict)
        new_reglist = temp
        startTime = sys.maxsize
        endTime = 0
        DmaCycle = 0
        dmaDdrTotalDataSize = 0
        dmaL2TotalDataSize = 0
        dmaDdrCycle = 0
        dmaL2Cycle = 0
        dmaDdrBurstLength = 0
        dmaDdrXactCnt = 0
        totalInstRegList = []
        for i in range(len(new_reglist)):
            regDict = new_reglist[i]
            # cmd_type 6 entries carry no data type/direction.
            if (regDict['cmd_type'] == '6'):
                regDict['Data Type'] = 'None'
            if (int(regDict['cmd_type']) == 6):
                regDict['Direction'] = '-'
            if regDict['Asic Cycle'].isnumeric():
                DmaCycle += int(regDict['Asic Cycle'])
            # Accumulate DDR vs. L2 traffic separately.
            if (('DDR' in regDict['Direction']) and regDict['DMA data size(B)'].isnumeric()):
                dmaDdrTotalDataSize += int(regDict['DMA data size(B)'])
                dmaDdrCycle += float(regDict['Asic Cycle'])
                dmaDdrBurstLength += int(regDict['gmem_bl_sum'])
                dmaDdrXactCnt += int(regDict['gmem_xact_cnt'])
            elif (('L2' in regDict['Direction']) and regDict['DMA data size(B)'].isnumeric()):
                dmaL2TotalDataSize += int(regDict['DMA data size(B)'])
                dmaL2Cycle += float(regDict['Asic Cycle'])
            # Per-command ratios derived from global-memory transaction counters.
            if (int(regDict['gmem_xact_cnt']) > 0):
                regDict['AvgBurstLength'] = Decimal((int(regDict['gmem_bl_sum']) / int(regDict['gmem_xact_cnt']))).quantize(Decimal('0.00'))
                regDict['Non32ByteRatio'] = Decimal((int(regDict['gmem_n32Ba_sa_cnt']) / int(regDict['gmem_xact_cnt']))).quantize(Decimal('0.00'))
                regDict['MaskWriteRatio'] = Decimal((int(regDict['gmem_msk_wr_cnt']) / int(regDict['gmem_xact_cnt']))).quantize(Decimal('0.00'))
            else:
                regDict['AvgBurstLength'] = 0
                regDict['Non32ByteRatio'] = 0
                regDict['MaskWriteRatio'] = 0
            # Convert raw cycles to real time using the DMA frequency.
            regDict['Start Cycle'] = get_realtime_from_cycle(int(regDict['Start Cycle']), self.frequency)
            regDict['End Cycle'] = get_realtime_from_cycle(int(regDict['End Cycle']), self.frequency)
            startTime = min(startTime, int(regDict['Start Cycle']))
            endTime = max(endTime, int(regDict['End Cycle']))
            totalInstRegList.append(regDict)
        self.regList.append(totalInstRegList)
        self.total_time_dict['start'].append(startTime)
        self.total_time_dict['end'].append(endTime)
        self.dma_cycle_list.append(DmaCycle)
        self.dma_ddr_total_datasize_list.append(dmaDdrTotalDataSize)
        self.dma_l2_total_datasize_list.append(dmaL2TotalDataSize)
        # Average bandwidths in GB/s-like units: size/cycles * MHz / 1000.
        if (dmaDdrCycle > 0):
            dmaDdrTotalBandWidth = str(Decimal((((dmaDdrTotalDataSize / dmaDdrCycle) * self.frequency) / 1000)).quantize(Decimal('0.00')))
        else:
            dmaDdrTotalBandWidth = 0
        if (dmaL2Cycle > 0):
            dmaL2TotalBandWidth = str(Decimal((((dmaL2TotalDataSize / dmaL2Cycle) * self.frequency) / 1000)).quantize(Decimal('0.00')))
        else:
            dmaL2TotalBandWidth = 0
        self.ddr_total_cycle += dmaDdrCycle
        self.l2_total_cycle += dmaL2Cycle
        self.dma_ddr_avg_bw_list.append(dmaDdrTotalBandWidth)
        self.dma_l2_avg_bw_list.append(dmaL2TotalBandWidth)
        dmaDdrAvgBurstLength = (0 if (dmaDdrXactCnt == 0) else Decimal((dmaDdrBurstLength / dmaDdrXactCnt)).quantize(Decimal('0.00')))
        self.dma_ddr_avg_burst_length_list.append(dmaDdrAvgBurstLength)
        self.total_burst_length += dmaDdrBurstLength
        self.total_xact_cnt += dmaDdrXactCnt
        # Reorder/complete the DataFrame columns to the canonical layout.
        dmaDf = pd.DataFrame(totalInstRegList)
        new_df = pd.DataFrame()
        if (len(dmaDf) > 0):
            for column in self.columns:
                if (column in dmaDf.columns):
                    new_df[column] = dmaDf[column]
                else:
                    new_df[column] = None
            pre_clos = dmaDf.columns
            dmaDf = new_df
            # Render address/mask columns as hex strings.
            for col in self.columns:
                if ((('addr' in col) or ('mask' in col)) and (col in pre_clos)):
                    dmaDf[col] = intToHex(dmaDf[col].values)
        return dmaDf
def generate_doc(name, specs):
    """Render the reST doc entry listing the Cython signatures for *name*.

    :param name: function name (used in the :py:func: role and signatures)
    :param specs: iterable of 'incodes[*outparam_codes]->outcode' type specs,
        with single-character codes looked up in CY_TYPES
    :returns: the assembled reST snippet as one string
    """
    indent = ' ' * 4
    lines = ['- :py:func:`~scipy.special.{}`::\n'.format(name)]
    for spec in specs:
        incodes, outcodes = spec.split('->')
        code_groups = incodes.split('*')
        # Plain value arguments come before any pointer (output) arguments.
        arg_types = [CY_TYPES[code] for code in code_groups[0]]
        if len(code_groups) > 1:
            arg_types.extend('{} *'.format(CY_TYPES[code]) for code in code_groups[1])
        signature = '{} {}({})'.format(CY_TYPES[outcodes], name, ', '.join(arg_types))
        lines.append((2 * indent) + signature)
    # Terminate the block with a trailing blank line.
    lines[-1] = '{}\n'.format(lines[-1])
    return '\n'.join(lines)
@functional_datapipe('collate')
class CollatorIterDataPipe(MapperIterDataPipe):
    """IterDataPipe that collates samples from ``datapipe`` via ``collate_fn``.

    NOTE(review): the decorator line was garbled in the source
    (``_datapipe('collate')`` — a bare call with no ``@``/full name).
    Restored as torch's ``functional_datapipe`` registration, which this
    class shape matches; confirm against the original imports.
    """

    def __init__(self, datapipe: IterDataPipe, collate_fn: Callable=_utils.collate.default_collate, fn_args: Optional[Tuple]=None, fn_kwargs: Optional[Dict]=None) -> None:
        # Collation is just mapping collate_fn over the upstream pipe.
        super().__init__(datapipe, fn=collate_fn, fn_args=fn_args, fn_kwargs=fn_kwargs)
def hmdb(omninet, videos, targets=None, mode='train', return_str_preds=False, num_steps=1):
    """Run the HMDB action-recognition task through an OmniNet model.

    :param omninet: model exposing reset/encode_videos/decode_* APIs
    :param videos: batch of videos; first dimension is the batch size
    :param targets: ground-truth labels (required for loss/accuracy)
    :param mode: 'train'/'val' (decode against targets) or 'predict' (greedy)
    :param return_str_preds: return argmax class indices instead of raw logits
    :param num_steps: number of steps for greedy decoding
    :returns: (predictions, loss, acc); loss/acc are None when targets is None
    :raises ValueError: on an unknown mode (previously an unknown mode fell
        through and raised NameError on the unbound ``predictions``)
    """
    batch_size = videos.shape[0]
    omninet.reset(batch_size)
    omninet.encode_videos(videos, domain='IMAGE')
    if (mode in ['train', 'val']):
        predictions = omninet.decode_from_targets('HMDB', targets=targets)
    elif (mode == 'predict'):
        predictions = omninet.decode_greedy('HMDB', num_steps=num_steps)
    else:
        raise ValueError("mode must be 'train', 'val' or 'predict', got %r" % (mode,))
    if (targets is not None):
        (loss, acc) = calc_nll_loss_and_acc(predictions, targets)
    else:
        (loss, acc) = (None, None)
    if return_str_preds:
        # Collapse logits to class indices along the last dimension.
        predictions = predictions.argmax((- 1))
    return (predictions, loss, acc)
def relative_order_from_ring_generators(gens, check_is_integral=True, check_rank=True, is_maximal=None, allow_subfield=False, is_maximal_at=()):
    """Return the relative order generated as a ring by ``gens``.

    The computation passes to the absolute field: each generator is mapped
    into ``K.absolute_field('z')``, monomials in the generators (up to the
    degree of each generator's absolute minimal polynomial) are formed, and
    the absolute order they span is wrapped back into a RelativeOrder over K.

    :param gens: ring generators; must be integral unless check_is_integral
        is False
    :param check_is_integral: verify each generator is integral
    :param check_rank: forwarded to absolute_order_from_module_generators
    :param is_maximal: known maximality flag, forwarded
    :param allow_subfield: accepted for interface compatibility; not used in
        this body — presumably handled by callers/other variants; confirm
    :param is_maximal_at: primes at which maximality is known, forwarded
    :raises ValueError: if some generator is not integral
    """
    if (check_is_integral and (not each_is_integral(gens))):
        raise ValueError('each generator must be integral')
    gens = Sequence(gens)
    K = gens.universe()
    Kabs = K.absolute_field('z')
    (from_Kabs, to_Kabs) = Kabs.structure()
    module_gens = [to_Kabs(a) for a in gens]
    n = [a.absolute_minpoly().degree() for a in gens]
    # Monomials in the generators up to these degrees span the generated ring
    # as a module.
    absolute_order_module_gens = monomials(module_gens, n)
    abs_order = absolute_order_from_module_generators(absolute_order_module_gens, check_integral=False, check_is_ring=False, check_rank=check_rank, is_maximal=is_maximal, is_maximal_at=is_maximal_at)
    return RelativeOrder(K, abs_order, check=False)
def train():
    """Train the second-stage RefineGAN on top of a loaded first-stage GAN.

    Uses module-level configuration (TF_CONFIG, MODEL_CONFIG, EXP_CONFIG,
    TRAIN_CONFIG) and the first stage's latest checkpoint.
    """
    training_data = load_data()
    with tf.Session(config=TF_CONFIG) as sess:
        # First stage: restore the pretrained base GAN.
        base_gan = GAN(sess, MODEL_CONFIG)
        base_gan.init_all()
        base_gan.load_latest(EXP_CONFIG['first_stage_dir'])
        # Second stage: the refiner wraps the base GAN.
        refiner = RefineGAN(sess, MODEL_CONFIG, base_gan)
        refiner.init_all()
        if EXP_CONFIG['pretrained_dir'] is not None:
            refiner.load_latest(EXP_CONFIG['pretrained_dir'])
        refiner.train(training_data, TRAIN_CONFIG)
def enable_power_on_by_usb_plug_in(bledevice):
    """Synchronously run the async USB-plug-in power-on routine for *bledevice*.

    Uses ``asyncio.run`` instead of the deprecated
    ``get_event_loop().run_until_complete`` pattern (``get_event_loop``
    outside a running loop is deprecated since Python 3.10); a fresh event
    loop is created and torn down per call.
    """
    asyncio.run(aenable_power_on_by_usb_plug_in(bledevice))
def setup_environment():
    """Load a user-supplied environment module if one is configured.

    Reads the ``TORCH_DETECTRON_ENV_MODULE`` environment variable; when it is
    set to a non-empty value, delegates to ``setup_custom_environment`` with
    that module path. Otherwise this is a no-op (the original's redundant
    ``else: pass`` branch is removed).
    """
    custom_module_path = os.environ.get('TORCH_DETECTRON_ENV_MODULE')
    if custom_module_path:
        setup_custom_environment(custom_module_path)
@app.route('/api/spellcheck', methods=['POST', 'GET'])
def do_spellcheck():
    """Spellcheck the 'text' parameter (JSON body on POST, query arg on GET).

    Returns JSON mapping each whitespace-split token (separators included, as
    produced by the capturing split) to ``{'correct': bool, 'suggestions':
    [...]}``; suggestions are computed only for misspelled tokens.

    NOTE(review): the decorator line was garbled in the source (a bare
    ``('/api/spellcheck', methods=[...])`` tuple — a syntax error). Restored
    as a Flask route on an ``app`` object; confirm the application name.
    """
    if (request.method == 'POST'):
        text = request.json.get('text')
    else:
        text = request.args.get('text')
    # Guard: a missing 'text' parameter previously raised AttributeError
    # (None.strip()); treat it as empty input instead.
    text = (text or '').strip()
    result = {}
    # Iterate tokens directly instead of the original index loop; the
    # duplicate initial "result = {}" was also dropped.
    for word in regex.split('(\\s+)', text):
        isCorrect = spellchecker.spellcheck(word)
        suggestions = spellchecker.candidates(word) if (not isCorrect) else []
        result[word] = {'correct': isCorrect, 'suggestions': suggestions}
    return jsonify(result)
def main(args, config):
    """Run BLIP pre-training: distributed setup, data, model, optimize, save.

    :param args: parsed CLI namespace (gpu, seed, distributed, checkpoint,
        output_dir, num_workers, ...)
    :param config: dict of hyper-parameters (batch_size, image_size, vit*,
        queue_size, init_lr/min_lr/lr_decay_rate/weight_decay, max_epoch, ...)
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.gpu)
    # Distinct but reproducible seed per distributed rank.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    print('Creating dataset')
    datasets = [create_dataset('pretrain', config, min_scale=0.2)]
    print(('number of training samples: %d' % len(datasets[0])))
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    data_loader = create_loader(datasets, samplers, batch_size=[config['batch_size']], num_workers=[args.num_workers], is_trains=[True], collate_fns=[None])[0]
    print(('=' * 50))
    print('time now is: ')
    print(time.strftime('%Y/%m/%d %H:%M:%S'))
    print(('=' * 50))
    print('Creating model')
    model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
    model = model.cuda()
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
    start_epoch = 0
    # Optionally resume model/optimizer/epoch from a checkpoint.
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = (checkpoint['epoch'] + 1)
        print(('resume checkpoint from %s' % args.checkpoint))
    # Keep an un-wrapped handle for checkpointing when using DDP.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    print('Start training')
    start_time = time.time()
    for epoch in range(start_epoch, config['max_epoch']):
        # Stepwise LR decay once per epoch.
        step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
        train_stats = train(model, data_loader, optimizer, epoch, device, config)
        # Only rank 0 writes checkpoints and logs.
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
            save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(save_obj, os.path.join(args.output_dir, ('checkpoint_%02d.pth' % epoch)))
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
        # Keep ranks in lockstep between epochs.
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def filename_to_imagebind_modality(fn: str) -> str:
    """Map a filename's extension to an ImageBind ModalityType.

    '.wav' -> AUDIO; '.jpg'/'.png'/'.jpeg' -> VISION; anything else -> TEXT.
    Extension matching is case-sensitive, as in the original.
    """
    from imagebind.models.imagebind_model import ModalityType
    extension = os.path.splitext(fn)[1]
    if extension == '.wav':
        return ModalityType.AUDIO
    if extension in ('.jpg', '.png', '.jpeg'):
        return ModalityType.VISION
    return ModalityType.TEXT
class GradedModules(GradedModulesCategory):
    """Category of graded modules; placeholder method containers only."""

    class ParentMethods():
        # No extra methods beyond those inherited from the category framework.
        pass

    class ElementMethods():
        # No extra methods beyond those inherited from the category framework.
        pass
def test_ArrayBuilder_of_complex():
    """ArrayBuilder.complex accepts complex, int and float values and
    stores all of them as complex numbers."""

    def append_complex(b, value):
        b.complex(value)
        return b

    # (input value, value as it should appear in the snapshot)
    cases = [
        (1.0 + 0.1j, 1.0 + 0.1j),
        (2.0 + 0.2j, 2.0 + 0.2j),
        (2, 2.0 + 0j),
        (2.0, 2.0 + 0j),
    ]
    builder = ak.ArrayBuilder()
    expected = []
    for value, stored in cases:
        builder = append_complex(builder, value)
        expected.append(stored)
        assert builder.snapshot().to_list() == expected
def get_latest_price_for_worker_type_aws(worker_type, current_time, per_instance_type_spot_prices):
    """Return the cheapest spot price across availability zones for a GPU worker type.

    For each availability zone, the "latest" price is the most recent quote whose
    timestamp offset (seconds since the earliest quote for the instance type) does
    not exceed ``current_time``; if the first quote is already later than
    ``current_time`` it is still used as a best-effort price. The minimum over
    zones is returned, scaled by 1.5 for 'p100' workers.

    Args:
        worker_type: One of 'v100', 'p100', 'k80'.
        current_time: Offset in seconds from the earliest quote.
        per_instance_type_spot_prices: Mapping of instance type to a list of dicts
            with 'Timestamp' ('%Y-%m-%dT%H:%M:%S.000Z'), 'AvailabilityZone',
            'SpotPrice'. NOTE: the list for the chosen instance type is sorted
            in place (preserved from the original behavior).

    Returns:
        The minimum latest price (float), times 1.5 for 'p100'.

    Raises:
        ValueError: If ``worker_type`` is not recognized (the original code
            raised an obscure UnboundLocalError in this case).
    """
    worker_to_instance = {'v100': 'p3.2xlarge', 'p100': 'p2.xlarge', 'k80': 'p2.xlarge'}
    if worker_type not in worker_to_instance:
        raise ValueError('Unknown worker type: %r' % (worker_type,))
    instance_type = worker_to_instance[worker_type]
    fmt = '%Y-%m-%dT%H:%M:%S.000Z'
    records = per_instance_type_spot_prices[instance_type]
    # Sort once, chronologically (the original re-sorted the same list inside
    # the per-zone loop — redundant work with the same result).
    records.sort(key=lambda x: datetime.strptime(x['Timestamp'], fmt))
    origin = datetime.strptime(records[0]['Timestamp'], fmt)  # earliest quote
    latest_prices = []
    for availability_zone in set(x['AvailabilityZone'] for x in records):
        latest_price = None
        for x in records:
            if x['AvailabilityZone'] != availability_zone:
                continue
            offset = (datetime.strptime(x['Timestamp'], fmt) - origin).total_seconds()
            # Stop once quotes are newer than current_time — but only if we
            # already have at least one price for this zone.
            if offset > current_time and latest_price is not None:
                break
            latest_price = float(x['SpotPrice'])
        assert latest_price is not None
        latest_prices.append(latest_price)
    if worker_type == 'p100':
        return min(latest_prices) * 1.5
    return min(latest_prices)
class HLOptions(OptionsEnv):
    """Options environment that scores high-level choices with a
    discriminator-derived, discounted reward."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _after_choice(self):
        # Snapshot the current observation/mask and reset the return accumulator.
        self.obs = {'obs': np.copy(self.s), 'mask': np.copy(self.m)}
        self.r = 0
        self.steps = 0

    def _after_step(self):
        # Accumulate the discounted discriminator reward for the executed step.
        net = self.discriminator.discrim_net
        dev = net.device()
        reward = net.predict_reward_train(
            state=torch.tensor(self.s).unsqueeze(0).to(dev),
            action=torch.tensor([[self.a]]).to(dev),
            next_state=torch.tensor(self.s).unsqueeze(0).to(dev),
            done=torch.tensor(self.done).unsqueeze(0).to(dev),
        )
        self.r += (self.discount ** self.steps) * reward
        self.steps += 1

    def _transitions(self):
        # Single rollout transition for the high-level policy update.
        yield {
            'obs': self.obs,
            'action': self.ch.cpu(),
            'reward': self.r,
            'episode_start': self.episode_start,
            'value': self.value.detach(),
            'log_prob': self.log_prob.detach(),
            'done': self.done,
        }

    def sample_hl(self, policy, discriminator):
        """Sample with `policy`, using `discriminator` to shape rewards."""
        self.discriminator = discriminator
        return self.sample(policy)
class LogisticRegressionMaskOutput(mx.operator.CustomOp):
    """Sigmoid output op whose gradient is zeroed wherever the label equals
    ``ignore_label`` (masked logistic regression)."""

    def __init__(self, ignore_label):
        super(LogisticRegressionMaskOutput, self).__init__()
        self.ignore_label = ignore_label

    def forward(self, is_train, req, in_data, out_data, aux):
        # out = sigmoid(x)
        probs = 1.0 / (1.0 + nd.exp(-in_data[0]))
        self.assign(out_data[0], req[0], probs)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        probs = out_data[0].asnumpy()
        targets = in_data[1].asnumpy()
        # Standard (p - y) gradient, masked out at ignored positions.
        grad = (probs - targets) * (targets != self.ignore_label)
        self.assign(in_grad[0], req[0], grad)
def mask_tokens(inputs, tokenizer, args):
    """Prepare BERT-style masked-LM inputs/labels (80% [MASK] / 10% random / 10% kept).

    ``inputs`` is modified in place; labels are -1 at unmasked positions so
    the loss is only computed on masked tokens.
    """
    labels = inputs.clone()
    # Choose which positions participate in the MLM loss.
    mask_positions = torch.bernoulli(torch.full(labels.shape, args.mlm_probability)).bool()
    labels[~mask_positions] = -1
    # 80% of masked positions become the [MASK] token.
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & mask_positions
    inputs[replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # Half of the remaining 20% (i.e. 10% overall) become a random token.
    randomized = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & mask_positions & ~replaced
    random_ids = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[randomized] = random_ids[randomized]
    # The rest (10% overall) keep their original token.
    return inputs, labels
def random_normal(dims: Sequence[Dim], *, dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None, mean: Optional[Union[(int, float, Tensor)]]=0.0, stddev: Optional[Union[(int, float, Tensor)]]=1.0, seed: Optional[Union[(int, Sequence[int], numpy.ndarray)]]=None, algorithm: Optional[str]=None, explicit_state: Optional[Tensor]=None, auto_update_state: Optional[bool]=None, static: Optional[bool]=None, out: Optional[Tensor]=None):
    """Thin convenience wrapper over :func:`random` with ``distribution='normal'``.

    All other keyword arguments are forwarded unchanged.
    """
    forwarded = dict(
        dims=dims,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
        mean=mean,
        stddev=stddev,
        seed=seed,
        algorithm=algorithm,
        explicit_state=explicit_state,
        auto_update_state=auto_update_state,
        static=static,
        out=out,
    )
    return random(distribution='normal', **forwarded)
def stream(stream):
    """Context-manager generator that selects a CUDA stream for the enclosed block.

    If ``stream`` is None this is a no-op; otherwise the given stream is made
    current for the duration of the ``with`` body and the previous stream is
    restored afterwards, even on exception.

    NOTE(review): uses the private ``torch._C._cuda_setStream`` API; presumably
    meant to be wrapped with ``@contextlib.contextmanager`` — confirm at the
    (unseen) decorator site.
    """
    if (stream is None):
        # No stream requested: yield control without touching CUDA state.
        (yield)
        return
    prev_stream = current_stream()
    torch._C._cuda_setStream(stream._cdata)
    try:
        (yield)
    finally:
        # Always restore the previously current stream.
        torch._C._cuda_setStream(prev_stream._cdata)
def deserialize_model(package, strict=False):
    """Rebuild a model from a serialized ``package`` dict.

    The package holds the model class, constructor args/kwargs and a state
    dict. In non-strict mode, kwargs the constructor no longer accepts are
    dropped (with a warning) instead of raising, so older checkpoints keep
    loading after signature changes. Note: the package's kwargs dict is
    pruned in place in that case.
    """
    klass = package['class']
    kwargs = package['kwargs']
    if not strict:
        accepted = inspect.signature(klass).parameters
        for name in list(kwargs):
            if name not in accepted:
                logger.warning('Dropping inexistant parameter %s', name)
                del kwargs[name]
    model = klass(*package['args'], **kwargs)
    model.load_state_dict(package['state'])
    return model
class MSDataLoader(DataLoader):
    """DataLoader variant that records the super-resolution scale(s) and
    iterates via the project-specific ``_MSDataLoaderIter``.

    The worker count comes from ``args.n_threads`` rather than a
    ``num_workers`` parameter.
    """

    def __init__(self, args, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, collate_fn=default_collate, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None):
        loader_kwargs = dict(
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=args.n_threads,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        super(MSDataLoader, self).__init__(dataset, **loader_kwargs)
        self.scale = args.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
class BiLSTMCRF(BaseModel):
    """Sequence labeller: embeddings -> BiLSTM -> linear -> CRF decode/loss."""

    def __init__(self, embed, num_classes, num_layers=1, hidden_size=100, dropout=0.5, target_vocab=None):
        super().__init__()
        self.embed = get_embeddings(embed)
        # Inter-layer dropout is only meaningful when the LSTM is stacked.
        lstm_kwargs = dict(num_layers=num_layers, hidden_size=hidden_size, bidirectional=True, batch_first=True)
        if num_layers > 1:
            lstm_kwargs['dropout'] = dropout
        self.lstm = LSTM(self.embed.embedding_dim, **lstm_kwargs)
        self.dropout = nn.Dropout(dropout)
        # Bidirectional output is 2 * hidden_size wide.
        self.fc = nn.Linear(hidden_size * 2, num_classes)
        trans = None
        if target_vocab is not None:
            assert len(target_vocab) == num_classes, 'The number of classes should be same with the length of target vocabulary.'
            trans = allowed_transitions(target_vocab.idx2word, include_start_end=True)
        self.crf = ConditionalRandomField(num_classes, include_start_end_trans=True, allowed_transitions=trans)

    def _forward(self, words, seq_len=None, target=None):
        # Shared encoder; returns predictions when no target, CRF loss otherwise.
        encoded, _ = self.lstm(self.embed(words), seq_len=seq_len)
        scores = self.dropout(self.fc(encoded))
        logits = F.log_softmax(scores, dim=-1)
        mask = seq_len_to_mask(seq_len)
        if target is None:
            pred, _ = self.crf.viterbi_decode(logits, mask)
            return {C.OUTPUT: pred}
        return {C.LOSS: self.crf(logits, target, mask).mean()}

    def forward(self, words, seq_len, target):
        return self._forward(words, seq_len, target)

    def predict(self, words, seq_len):
        return self._forward(words, seq_len)
def COS(data_A, data_B):
    """Print side-by-side summary statistics (avg/std/median/min/max) for two arrays."""
    stats = (
        ('AVG ', np.average),
        ('STD ', np.std),
        ('MEDIAN ', np.median),
        ('MIN ', np.min),
        ('MAX ', np.max),
    )
    for label, fn in stats:
        print(label, fn(data_A), fn(data_B))
@pytest.mark.parametrize('n_actions, len_list, base_classifier, description', valid_input_of_ipw_learner_init)
def test_ipw_learner_init_using_valid_inputs(n_actions, len_list, base_classifier, description):
    """IPWLearner constructed from valid inputs registers as an offline policy.

    The dangling ``.parametrize(...)`` line was a stripped decorator (invalid
    syntax); the ``@pytest.mark.`` prefix is restored here.
    """
    ipw_learner = IPWLearner(n_actions=n_actions, len_list=len_list, base_classifier=base_classifier)
    assert ipw_learner.policy_type == PolicyType.OFFLINE
@pytest.mark.usefixtures('spark')
@pytest.fixture()
def gt_spark(spark):
    """Fixture: ground-truth (uid, iid) pairs as a Spark DataFrame.

    The dangling ``.usefixtures('spark')`` and ``()`` lines were stripped
    decorators; ``@pytest.mark.usefixtures`` and ``@pytest.fixture()`` are
    restored here (TODO confirm against the repository history).
    """
    return spark.createDataFrame(gt_data, schema=['uid', 'iid'])
def wrap_layout(content: T, behavior: (Mapping | None)=None, highlevel: bool=True, like: Any=None, allow_other: bool=False, attrs: (Mapping | None)=None) -> ((T | Array) | HighLevelRecord):
    """Wrap a low-level Content/Record into a high-level Array/Record.

    With ``highlevel=False`` the content is returned unchanged. When
    ``behavior`` is not given, it may be inherited from ``like``.
    ``allow_other`` permits non-layout objects to pass through untouched.
    """
    import awkward.highlevel
    from awkward.contents import Content
    from awkward.record import Record

    assert isinstance(content, (Content, Record)) or allow_other
    assert behavior is None or isinstance(behavior, Mapping)
    assert isinstance(highlevel, bool)

    if not highlevel:
        if isinstance(content, (Content, Record)) or allow_other:
            return content
        raise AssertionError

    if behavior is None and like is not None:
        behavior = behavior_of(like)
    if isinstance(content, Content):
        return awkward.highlevel.Array(content, behavior=behavior, attrs=attrs)
    if isinstance(content, Record):
        return awkward.highlevel.Record(content, behavior=behavior, attrs=attrs)
    if allow_other:
        return content
    raise AssertionError
def test_downsample():
    """Smoke test: the downsample command's argument parser can be built."""
    from topaz.commands import downsample
    parser = downsample.add_arguments()
class CacheCommand(Command):
    """pip's ``cache`` subcommand: inspect and manage the wheel cache.

    Dispatches the first positional argument to one of the handler methods
    (dir/info/list/remove/purge). Returns SUCCESS or ERROR exit codes.
    """
    ignore_require_venv = True
    usage = '\n        %prog dir\n        %prog info\n        %prog list [<pattern>]\n        %prog remove <pattern>\n        %prog purge\n    '

    def run(self, options, args):
        """Dispatch to the requested cache action; validate args first."""
        handlers = {'dir': self.get_cache_dir, 'info': self.get_cache_info, 'list': self.list_cache_items, 'remove': self.remove_cache_items, 'purge': self.purge_cache}
        # The cache directory may be disabled via --no-cache-dir.
        if (not options.cache_dir):
            logger.error('pip cache commands can not function since cache is disabled.')
            return ERROR
        if ((not args) or (args[0] not in handlers)):
            logger.error('Need an action (%s) to perform.', ', '.join(sorted(handlers)))
            return ERROR
        action = args[0]
        try:
            handlers[action](options, args[1:])
        except PipError as e:
            logger.error(e.args[0])
            return ERROR
        return SUCCESS

    def get_cache_dir(self, options, args):
        """Print the configured cache directory."""
        if args:
            raise CommandError('Too many arguments')
        logger.info(options.cache_dir)

    def get_cache_info(self, options, args):
        """Print cache location, total size and number of cached wheels."""
        if args:
            raise CommandError('Too many arguments')
        num_packages = len(self._find_wheels(options, '*'))
        cache_location = self._wheels_cache_dir(options)
        cache_size = filesystem.format_directory_size(cache_location)
        message = textwrap.dedent('\n            Location: {location}\n            Size: {size}\n            Number of wheels: {package_count}\n        ').format(location=cache_location, package_count=num_packages, size=cache_size).strip()
        logger.info(message)

    def list_cache_items(self, options, args):
        """List cached wheels matching an optional glob pattern ('*' default)."""
        if (len(args) > 1):
            raise CommandError('Too many arguments')
        if args:
            pattern = args[0]
        else:
            pattern = '*'
        files = self._find_wheels(options, pattern)
        if (not files):
            logger.info('Nothing cached.')
            return
        results = []
        for filename in files:
            wheel = os.path.basename(filename)
            size = filesystem.format_file_size(filename)
            results.append(' - {} ({})'.format(wheel, size))
        logger.info('Cache contents:\n')
        logger.info('\n'.join(sorted(results)))

    def remove_cache_items(self, options, args):
        """Delete cached wheels matching the required glob pattern."""
        if (len(args) > 1):
            raise CommandError('Too many arguments')
        if (not args):
            raise CommandError('Please provide a pattern')
        files = self._find_wheels(options, args[0])
        if (not files):
            raise CommandError('No matching packages')
        for filename in files:
            os.unlink(filename)
            logger.debug('Removed %s', filename)
        logger.info('Files removed: %s', len(files))

    def purge_cache(self, options, args):
        """Remove every cached wheel (equivalent to ``remove '*'``)."""
        if args:
            raise CommandError('Too many arguments')
        return self.remove_cache_items(options, ['*'])

    def _wheels_cache_dir(self, options):
        # Wheels live under <cache_dir>/wheels.
        return os.path.join(options.cache_dir, 'wheels')

    def _find_wheels(self, options, pattern):
        """Find .whl files under the wheel cache matching ``pattern``."""
        wheel_dir = self._wheels_cache_dir(options)
        # A bare name like 'requests' matches 'requests-*.whl'; a pattern that
        # already contains '-' is assumed to include the version part.
        pattern = (pattern + ('*.whl' if ('-' in pattern) else '-*.whl'))
        return filesystem.find_files(wheel_dir, pattern)
def test_cannot_read_outside_length_of_dotfiles():
    """export_digraph must raise IndexError for out-of-range tree indices."""
    train, _ = load_toy_cancer()
    clf = BoostedRDNClassifier(target='cancer', background=Background(modes=train.modes))
    clf.fit(train)
    for bad_index in (-10, -5, -1, 10):
        with pytest.raises(IndexError):
            export_digraph(clf, tree_index=bad_index)
def schema_encoding(preds_hidden, preds_len, pwords_hidden, pwords_len):
    """Encode a schema by max-pooling the masked predicate and word hidden states.

    Both sequences are length-masked, concatenated along the time axis, then
    reduced with an element-wise max over that axis.
    """
    preds_masked = seq_hidden_masking_before_pooling(seq_hidden_input=preds_hidden, len_input=preds_len)
    pwords_masked = seq_hidden_masking_before_pooling(seq_hidden_input=pwords_hidden, len_input=pwords_len)
    merged = tf.concat([preds_masked, pwords_masked], axis=1, name='masked_merge_hidden')
    return tf.reduce_max(merged, axis=1, name='schema_hidden')
def load_data(data_downsample, data_dirs, validate_only, render_only, **kwargs):
    """Build the train/test data dict for the run.

    Training data is skipped (set to None) when only validating; the test
    split becomes 'render' when only rendering.
    """
    od: Dict[(str, Any)] = {}
    if validate_only:
        od.update(tr_loader=None, tr_dset=None)
    else:
        od.update(init_tr_data(data_downsample, data_dirs, **kwargs))
    split = 'render' if render_only else 'test'
    od.update(init_ts_data(data_dirs, split=split, **kwargs))
    return od
def compute_line_coverage(trace: ExecutionTrace, subject_properties: SubjectProperties) -> float:
    """Return the fraction of known lines covered by the trace.

    By convention, coverage is 1.0 when the subject has no known lines.
    """
    total = len(subject_properties.existing_lines)
    if total == 0:
        coverage = 1.0
    else:
        coverage = len(trace.covered_line_ids) / total
    assert 0.0 <= coverage <= 1.0, 'Coverage must be in [0,1]'
    return coverage
def parse_arguments(argv=None):
    """Parse the ViT-training command-line options.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case ``sys.argv[1:]`` is used — fully backward compatible with the
            original zero-argument call, and makes the parser testable.

    Returns:
        Dict with keys 'classifier', 'position' and 'use_mp'.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-c', '--classifier', required=True, help='Using `cls` token or GAP for the vit representation.')
    ap.add_argument('-p', '--position', required=True, help='Learned or sincos for positional embedding.')
    # store_true flag: defaults to False when -m/--use-mp is absent.
    ap.add_argument('-m', '--use-mp', action='store_true', required=False, help='If we are using mixed-precision training.')
    args = vars(ap.parse_args(argv))
    return args
def evaluate(args, model, fold, output_file=None):
    """Run span-based NER evaluation and print/return seqeval metrics.

    Collects per-span logits over all batches, keeps the highest-scoring
    non-O label per span (greedy, non-overlapping), converts spans to BIO
    tags per sentence, then reports precision/recall/F1.

    Args:
        args: Run config; must provide ``device``.
        model: Model returning per-span logits.
        fold: Dataset split passed to ``load_examples``.
        output_file: Optional path for CoNLL-style "word gold pred" output.

    Returns:
        Dict with 'f1', 'precision' and 'recall'.
    """
    (dataloader, examples, features, processor) = load_examples(args, fold)
    label_list = processor.get_labels()
    # example_index -> {span: (max_logit, argmax_label_index)}
    all_predictions = defaultdict(dict)
    for batch in tqdm(dataloader, desc='Eval'):
        model.eval()
        # Everything except the bookkeeping feature indices goes to the model.
        inputs = {k: v.to(args.device) for (k, v) in batch.items() if (k != 'feature_indices')}
        with torch.no_grad():
            logits = model(**inputs)
        for (i, feature_index) in enumerate(batch['feature_indices']):
            feature = features[feature_index.item()]
            for (j, span) in enumerate(feature.original_entity_spans):
                if (span is not None):
                    # torch.max(dim=0) yields (max_logit, argmax_index) per span.
                    all_predictions[feature.example_index][span] = logits[(i, j)].detach().cpu().max(dim=0)
    assert (len(all_predictions) == len(examples))
    sent_words_list = []
    sent_labels_list = []
    sent_predictions_list = []
    for (example_index, example) in enumerate(examples):
        predictions = all_predictions[example_index]
        doc_results = []
        # Keep only spans predicted as a non-O label (index 0 is 'O').
        for (span, (max_logit, max_index)) in predictions.items():
            if (max_index != 0):
                doc_results.append((max_logit.item(), span, label_list[max_index.item()]))
        predicted_sequence = (['O'] * len(example.words))
        # Greedy decoding: highest-confidence spans first; a span is accepted
        # only if none of its tokens has been claimed already.
        for (_, span, label) in sorted(doc_results, key=(lambda o: o[0]), reverse=True):
            if all([(o == 'O') for o in predicted_sequence[span[0]:span[1]]]):
                predicted_sequence[span[0]] = ('B-' + label)
                if ((span[1] - span[0]) > 1):
                    predicted_sequence[(span[0] + 1):span[1]] = ([('I-' + label)] * ((span[1] - span[0]) - 1))
        # Split the document-level sequence back into sentences.
        for sent_index in range((len(example.sentence_boundaries) - 1)):
            (sent_start, sent_end) = example.sentence_boundaries[sent_index:(sent_index + 2)]
            sent_words_list.append(example.words[sent_start:sent_end])
            sent_predictions_list.append(predicted_sequence[sent_start:sent_end])
            sent_labels_list.append(example.labels[sent_start:sent_end])
    # Convert B- to I- when the entity type continues the previous one
    # (IOB1-style output — presumably to match the dataset's gold scheme; confirm).
    prev_type = None
    for sent_predictions in sent_predictions_list:
        for (n, label) in enumerate(sent_predictions):
            if ((label[0] == 'B') and (label[2:] != prev_type)):
                sent_predictions[n] = ('I' + label[1:])
            prev_type = label[2:]
    if output_file:
        with open(output_file, 'w') as f:
            for (sent_words, sent_predictions, sent_labels) in zip(sent_words_list, sent_predictions_list, sent_labels_list):
                for (word, prediction, label) in zip(sent_words, sent_predictions, sent_labels):
                    f.write(f'''{word} {label} {prediction}
''')
                f.write('\n')
    print(seqeval.metrics.classification_report(sent_labels_list, sent_predictions_list, digits=4))
    return dict(f1=seqeval.metrics.f1_score(sent_labels_list, sent_predictions_list), precision=seqeval.metrics.precision_score(sent_labels_list, sent_predictions_list), recall=seqeval.metrics.recall_score(sent_labels_list, sent_predictions_list))
class TrainTransform():
    """Training-time augmentation pipeline applied to (image, boxes, labels)."""

    def __init__(self, size):
        self.size = size
        steps = [
            ConvertFromInts(),
            PhotometricDistort(),
            Expand(),
            RandomSampleCrop(),
            RandomFlipping(),
            ToPercentCoords(),
            Resize(self.size),
            Normalize(),
            ToTensor(),
        ]
        self.augment = Compose(steps)

    def __call__(self, img, boxes, labels):
        return self.augment(img, boxes, labels)
def stochasticApproximation(G, Aobs, changestats_func_list, theta0, Zobs, sampler_func=basicALAAMsampler):
    """Three-phase stochastic approximation for ALAAM parameter estimation.

    Phase 1 estimates the covariance of the change statistics; Phase 2
    iteratively updates theta by Robbins-Monro steps (with a halving gain);
    Phase 3 re-estimates the covariance at the final theta to produce
    standard errors and convergence t-ratios.

    Args:
        G: Graph object (must provide numNodes()).
        Aobs: Observed attribute vector (copied, not modified).
        changestats_func_list: List of change-statistic functions.
        theta0: Initial parameter vector.
        Zobs: Observed statistics vector.
        sampler_func: MCMC sampler; moves are performed in place on a copy.

    Returns:
        (theta, std_error, t_ratio), or (None, None, None) when a covariance
        matrix is (numerically) singular — possibly a degenerate model.
    """
    epsilon = np.finfo(float).eps
    n = len(changestats_func_list)
    A = np.copy(Aobs)
    theta = np.copy(theta0)
    # Sampler iterations per estimation step, scaled with network size.
    iterationInStep = (10 * G.numNodes())
    phase1steps = (7 + (3 * n))
    numSubphases = 5
    a_initial = 0.01
    phase3steps = 1000
    burnin = int(round(((0.1 * phase3steps) * iterationInStep)))
    print('Phase 1 steps = ', phase1steps, 'iters per step = ', iterationInStep)
    start = time.time()
    # --- Phase 1: estimate covariance matrix D of the statistics at theta0 ---
    Z = np.copy(Zobs)
    Zmatrix = np.empty((phase1steps, n))
    for i in range(phase1steps):
        (acceptance_rate, changeTo1ChangeStats, changeTo0ChangeStats) = sampler_func(G, A, changestats_func_list, theta, performMove=True, sampler_m=iterationInStep)
        # Statistics are tracked incrementally from the sampler's change stats.
        Z += (changeTo1ChangeStats - changeTo0ChangeStats)
        Zmatrix[(i,)] = Z
    Zmean = np.mean(Zmatrix, axis=0)
    Zmean = np.reshape(Zmean, (1, len(Zmean)))
    theta = np.reshape(theta, (1, len(theta)))
    print('Zmean = ', Zmean)
    Zmatrix -= Zmean
    # Sample covariance of the simulated statistics.
    D = ((1.0 / phase1steps) * np.matmul(np.transpose(Zmatrix), Zmatrix))
    print('D = ')
    print(D)
    # Reciprocal condition number near machine eps => numerically singular.
    if ((1.0 / np.linalg.cond(D)) < epsilon):
        sys.stdout.write('Covariance matrix is singular: may be degenerate model\n')
        return (None, None, None)
    Dinv = np.linalg.inv(D)
    print('Phase 1 took', (time.time() - start), 's')
    # --- Phase 2: Robbins-Monro updates over numSubphases subphases ---
    print('Phase 2 subphases = ', numSubphases, ' iters per step = ', iterationInStep)
    start = time.time()
    a = a_initial
    for k in range(numSubphases):
        # Subphase length bounds grow geometrically with k.
        NkMin = int(round(((2.0 ** ((4.0 * k) / 3.0)) * (7 + n))))
        NkMax = (NkMin + 200)
        print('subphase', k, 'a = ', a, 'NkMin = ', NkMin, 'NkMax = ', NkMax, 'theta = ', theta)
        i = 0
        sumSuccessiveProducts = np.zeros(n)
        thetaSum = np.zeros((1, n))
        # Run until successive deviations anti-correlate (crossing criterion)
        # or the subphase hits its iteration cap.
        while ((i < NkMax) and ((i <= NkMin) or (not np.all((sumSuccessiveProducts < 0))))):
            oldZ = np.copy(Z)
            (acceptance_rate, changeTo1ChangeStats, changeTo0ChangeStats) = sampler_func(G, A, changestats_func_list, theta, performMove=True, sampler_m=iterationInStep)
            Z += (changeTo1ChangeStats - changeTo0ChangeStats)
            # Newton-like step scaled by the gain a.
            theta_step = (a * np.matmul(Dinv, (Z - Zobs)))
            theta -= theta_step
            thetaSum += theta
            sumSuccessiveProducts += ((Z - Zobs) * (oldZ - Zobs))
            i += 1
        if (k > 1):
            # Halve the gain in later subphases for convergence.
            a /= 2.0
        print('    subphase', k, 'finished after', i, 'iterations (acceptance rate =', acceptance_rate, ')')
        # Subphase estimate: average theta over its iterations.
        theta = (thetaSum / i)
    print('Phase 2 took', (time.time() - start), 's')
    # --- Phase 3: long run at final theta for std errors and t-ratios ---
    print('Phase 3 steps = ', phase3steps, 'iters per step = ', iterationInStep, 'burnin = ', burnin)
    start = time.time()
    Zmatrix = np.empty((phase3steps, n))
    # Burn-in run before collecting Phase 3 samples.
    (acceptance_rate, changeTo1ChangeStats, changeTo0ChangeStats) = sampler_func(G, A, changestats_func_list, theta, performMove=True, sampler_m=burnin)
    Z += (changeTo1ChangeStats - changeTo0ChangeStats)
    for i in range(phase3steps):
        (acceptance_rate, changeTo1ChangeStats, changeTo0ChangeStats) = sampler_func(G, A, changestats_func_list, theta, performMove=True, sampler_m=iterationInStep)
        Z += (changeTo1ChangeStats - changeTo0ChangeStats)
        Zmatrix[(i,)] = Z
    print('XXX Zmatrix = ')
    print(Zmatrix)
    Zmean = np.mean(Zmatrix, axis=0)
    Zmean = np.reshape(Zmean, (1, len(Zmean)))
    print('Phase 3 Zmean = ', Zmean)
    Zmatrix -= Zmean
    D = ((1.0 / phase3steps) * np.matmul(np.transpose(Zmatrix), Zmatrix))
    print('Phase 3 covariance matrix D = ')
    print(D)
    if ((1.0 / np.linalg.cond(D)) < epsilon):
        sys.stdout.write('Phase 3 covariance matrix is singular: may be degenerate model\n')
        return (None, None, None)
    # Diagonal of D (per-statistic variances) used for the t-ratios.
    D0 = np.copy(np.diag(D))
    Dinv = np.linalg.inv(D)
    D0inv = (1.0 / D0)
    std_error = np.sqrt(np.diag(Dinv))
    # Convergence t-ratio: deviation of simulated mean from observed, scaled.
    t_ratio = ((Zmean - Zobs) * np.sqrt(D0inv))
    print('Phase 3 took', (time.time() - start), 's')
    theta = np.reshape(theta, (n,))
    t_ratio = np.reshape(t_ratio, (n,))
    return (theta, std_error, t_ratio)
class cifar100(cifar10):
    """CIFAR-100 dataset: inherits cifar10's loading logic, overriding the
    archive metadata and adding fine/coarse label mappings.

    The original ``url`` line was a truncated string literal (invalid syntax);
    it is restored to the canonical CIFAR-100 download URL.
    """
    base_folder = 'cifar-100-python'
    # Canonical download location for the python-pickled CIFAR-100 archive.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    filename = 'cifar-100-python.tar.gz'
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']]
    test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']]
    meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
    # 100 fine classes -> 20 coarse superclasses, and the inverse mapping.
    map_fine_to_coarse = {49: 10, 33: 10, 72: 0, 51: 4, 71: 10, 92: 2, 15: 11, 14: 7, 23: 10, 0: 4, 75: 12, 81: 19, 69: 19, 40: 5, 43: 8, 97: 8, 70: 2, 53: 4, 29: 15, 21: 11, 16: 3, 39: 5, 8: 18, 20: 6, 61: 3, 41: 19, 93: 15, 56: 17, 73: 1, 58: 18, 11: 14, 25: 6, 37: 9, 63: 12, 24: 7, 22: 5, 17: 9, 4: 0, 6: 7, 9: 3, 57: 4, 2: 14, 32: 1, 52: 17, 42: 8, 77: 13, 27: 15, 65: 16, 7: 7, 35: 14, 82: 2, 66: 12, 90: 18, 67: 1, 91: 1, 10: 3, 78: 15, 54: 2, 89: 19, 18: 7, 13: 18, 50: 16, 26: 13, 83: 4, 47: 17, 95: 0, 76: 9, 59: 17, 85: 19, 19: 11, 46: 14, 1: 1, 74: 16, 60: 10, 64: 12, 45: 13, 36: 16, 87: 5, 30: 0, 99: 13, 80: 16, 28: 3, 98: 14, 12: 9, 94: 6, 68: 9, 44: 15, 31: 11, 79: 13, 34: 12, 55: 0, 62: 2, 96: 17, 84: 6, 38: 11, 86: 5, 5: 6, 48: 18, 3: 8, 88: 8}
    map_coarse_to_fine = {10: [49, 33, 71, 23, 60], 0: [72, 4, 95, 30, 55], 4: [51, 0, 53, 57, 83], 2: [92, 70, 82, 54, 62], 11: [15, 21, 19, 31, 38], 7: [14, 24, 6, 7, 18], 12: [75, 63, 66, 64, 34], 19: [81, 69, 41, 89, 85], 5: [40, 39, 22, 87, 86], 8: [43, 97, 42, 3, 88], 15: [29, 93, 27, 78, 44], 3: [16, 61, 9, 10, 28], 18: [8, 58, 90, 13, 48], 6: [20, 25, 94, 84, 5], 17: [56, 52, 47, 59, 96], 1: [73, 32, 67, 91, 1], 14: [11, 2, 35, 46, 98], 9: [37, 17, 76, 12, 68], 13: [77, 26, 45, 99, 79], 16: [65, 50, 74, 36, 80]}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Load human-readable class names from the meta pickle.
        self.coarse_label_names = unpickle(os.path.join(self.root, self.base_folder, self.meta['filename']))['coarse_label_names']
        self.fine_label_names = unpickle(os.path.join(self.root, self.base_folder, self.meta['filename']))['fine_label_names']

    def get_label_info(self, fine_label_original):
        """Return name, coarse index and coarse name for a fine label index."""
        return {'name': self.fine_label_names[fine_label_original], 'coarse': self.map_fine_to_coarse[fine_label_original], 'coarse_name': self.coarse_label_names[self.map_fine_to_coarse[fine_label_original]]}
class ResumeZhProcessor(QueryNERProcessor):
    """Processor for the Chinese Resume NER dataset (entity tag set plus 'O')."""

    def get_labels(self):
        # Same label list as before, built from a single space-separated string.
        return 'ORG LOC NAME RACE TITLE EDU PRO CONT O'.split()
class ByteMaskedForm(ByteMaskedMeta[Form], Form):
    """Form describing a ByteMaskedArray: a content masked by a byte array.

    Restores the ``@property`` decorators on mask/content/valid_when/
    is_identity_like/type and ``@classmethod`` on ``simplified`` that were
    stripped in transit — ``simplified`` takes ``cls`` and is invoked on the
    class (see the ``IndexedOptionForm.simplified('i64', ...)`` call below),
    so it cannot be a plain method.
    """
    _content: Form

    def __init__(self, mask, content, valid_when, *, parameters=None, form_key=None):
        if (not isinstance(mask, str)):
            raise TypeError(f"{type(self).__name__} 'mask' must be of type str, not {mask!r}")
        if (not isinstance(content, Form)):
            raise TypeError("{} all 'contents' must be Form subclasses, not {}".format(type(self).__name__, repr(content)))
        if (not isinstance(valid_when, bool)):
            raise TypeError("{} 'valid_when' must be bool, not {}".format(type(self).__name__, repr(valid_when)))
        self._mask = mask
        self._content = content
        self._valid_when = valid_when
        self._init(parameters=parameters, form_key=form_key)

    @property
    def mask(self):
        """Index-type string of the mask buffer."""
        return self._mask

    @property
    def content(self):
        """The wrapped content form."""
        return self._content

    @property
    def valid_when(self):
        """Mask byte value that marks a valid element."""
        return self._valid_when

    def copy(self, mask=UNSET, content=UNSET, valid_when=UNSET, *, parameters=UNSET, form_key=UNSET):
        """Return a copy, optionally overriding any field."""
        return ByteMaskedForm((self._mask if (mask is UNSET) else mask), (self._content if (content is UNSET) else content), (self._valid_when if (valid_when is UNSET) else valid_when), parameters=(self._parameters if (parameters is UNSET) else parameters), form_key=(self._form_key if (form_key is UNSET) else form_key))

    @classmethod
    def simplified(cls, mask, content, valid_when, *, parameters=None, form_key=None):
        """Construct, collapsing nested option/indexed/union contents."""
        if content.is_union:
            return content._union_of_optionarrays('i64', parameters)
        elif (content.is_indexed or content.is_option):
            return ak.forms.IndexedOptionForm.simplified('i64', content, parameters=parameters)
        else:
            return cls(mask, content, valid_when, parameters=parameters, form_key=form_key)

    @property
    def is_identity_like(self) -> bool:
        return False

    def __repr__(self):
        args = [repr(self._mask), repr(self._content), repr(self._valid_when), *self._repr_args()]
        return '{}({})'.format(type(self).__name__, ', '.join(args))

    def _to_dict_part(self, verbose, toplevel):
        return self._to_dict_extra({'class': 'ByteMaskedArray', 'mask': self._mask, 'valid_when': self._valid_when, 'content': self._content._to_dict_part(verbose, toplevel=False)}, verbose)

    @property
    def type(self):
        """OptionType over the content's type (simplified)."""
        return ak.types.OptionType(self._content.type, parameters=self._parameters).simplify_option_union()

    def _columns(self, path, output, list_indicator):
        self._content._columns(path, output, list_indicator)

    def _prune_columns(self, is_inside_record_or_union: bool) -> (Self | None):
        next_content = self._content._prune_columns(is_inside_record_or_union)
        if (next_content is None):
            return None
        else:
            return self.copy(content=next_content)

    def _select_columns(self, match_specifier: _SpecifierMatcher) -> Self:
        return self.copy(content=self._content._select_columns(match_specifier))

    def _column_types(self):
        return self._content._column_types()

    def __setstate__(self, state):
        if isinstance(state, dict):
            self.__dict__.update(state)
        else:
            # Legacy pickle layout: prefix the form key with the partition id.
            (has_identities, parameters, form_key, mask, content, valid_when) = state
            if (form_key is not None):
                form_key = ('part0-' + form_key)
            self.__init__(mask, content, valid_when, parameters=parameters, form_key=form_key)

    def _expected_from_buffers(self, getkey: Callable[([Form, str], str)], recursive: bool) -> Iterator[tuple[(str, DType)]]:
        (yield (getkey(self, 'mask'), index_to_dtype[self._mask]))
        if recursive:
            (yield from self._content._expected_from_buffers(getkey, recursive))

    def _is_equal_to(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        return (self._is_equal_to_generic(other, all_parameters, form_key) and (self._mask == other._mask) and (self._valid_when == other._valid_when) and self._content._is_equal_to(other._content, all_parameters, form_key))
class TestFromrecords(object):
    def test_fromrecords(self):
        """fromrecords infers int/str/float dtypes; str kind differs by Python version."""
        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], names='col1,col2,col3')
        assert_equal(r[0].item(), (456, 'dbe', 1.2))
        assert_equal(r['col1'].dtype.kind, 'i')
        if (sys.version_info[0] >= 3):
            assert_equal(r['col2'].dtype.kind, 'U')
            assert_equal(r['col2'].dtype.itemsize, 12)
        else:
            assert_equal(r['col2'].dtype.kind, 'S')
            assert_equal(r['col2'].dtype.itemsize, 3)
        assert_equal(r['col3'].dtype.kind, 'f')
    def test_fromrecords_0len(self):
        """fromrecords with an empty list and explicit dtype yields shape (0,)."""
        dtype = [('a', float), ('b', float)]
        r = np.rec.fromrecords([], dtype=dtype)
        assert_equal(r.shape, (0,))
    def test_fromrecords_2d(self):
        """2-D nested record input is field-split identically with dtype= or names=."""
        data = [[(1, 2), (3, 4), (5, 6)], [(6, 5), (4, 3), (2, 1)]]
        expected_a = [[1, 3, 5], [6, 4, 2]]
        expected_b = [[2, 4, 6], [5, 3, 1]]
        r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
        assert_equal(r1['a'], expected_a)
        assert_equal(r1['b'], expected_b)
        r2 = np.rec.fromrecords(data, names=['a', 'b'])
        assert_equal(r2['a'], expected_a)
        assert_equal(r2['b'], expected_b)
        assert_equal(r1, r2)
def test_method_array(self):
r = np.rec.array((b'abcdefg' * 100), formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, b'efg', ))
    def test_method_array2(self):
        """np.rec.array builds records from a list of tuples with explicit formats."""
        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
        assert_equal(r[1].item(), (2, 22.0, b'b'))
    def test_recarray_slices(self):
        """Strided slicing of a recarray preserves record access."""
        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
        assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
    def test_recarray_fromarrays(self):
        """fromarrays copies its inputs: mutating the source afterwards has no effect."""
        x1 = np.array([1, 2, 3, 4])
        x2 = np.array(['a', 'dd', 'xyz', '12'])
        x3 = np.array([1.1, 2, 3, 4])
        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
        assert_equal(r[1].item(), (2, 'dd', 2.0))
        x1[1] = 34
        # r.a still holds the original values, proving the copy.
        assert_equal(r.a, np.array([1, 2, 3, 4]))
    def test_recarray_fromfile(self):
        """fromfile and np.rec.array read identical records from a FITS test file."""
        data_dir = path.join(path.dirname(__file__), 'data')
        filename = path.join(data_dir, 'recarray_from_file.fits')
        fd = open(filename, 'rb')
        # Skip the two 2880-byte FITS header blocks before the binary table.
        fd.seek((2880 * 2))
        r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
        fd.seek((2880 * 2))
        r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
        fd.close()
        assert_equal(r1, r2)
    def test_recarray_from_obj(self):
        """fromarrays supports object-dtype columns alongside numeric ones."""
        count = 10
        a = np.zeros(count, dtype='O')
        b = np.zeros(count, dtype='f8')
        c = np.zeros(count, dtype='f8')
        for i in range(len(a)):
            a[i] = list(range(1, 10))
        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
        for i in range(len(a)):
            assert_((mine.date[i] == list(range(1, 10))))
            assert_((mine.data1[i] == 0.0))
            assert_((mine.data2[i] == 0.0))
    def test_recarray_repr(self):
        """repr of recarrays: structured dtype shown, plain dtypes named, no numpy.record leak."""
        a = np.array([(1, 0.1), (2, 0.2)], dtype=[('foo', '<i4'), ('bar', '<f8')])
        a = np.rec.array(a)
        assert_equal(repr(a), textwrap.dedent("        rec.array([(1, 0.1), (2, 0.2)],\n          dtype=[('foo', '<i4'), ('bar', '<f8')])"))
        a = np.array(np.ones(4, dtype='f8'))
        assert_(repr(np.rec.array(a)).startswith('rec.array'))
        # 'numpy.record' must not appear in the repr of a structured recarray.
        a = np.rec.array(np.ones(3, dtype='i4,i4'))
        assert_equal(repr(a).find('numpy.record'), (- 1))
        a = np.rec.array(np.ones(3, dtype='i4'))
        assert_((repr(a).find('dtype=int32') != (- 1)))
    def test_0d_recarray_repr(self):
        """0-d recarray and scalar record reprs, including legacy-1.13 printing."""
        arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
        assert_equal(repr(arr_0d), textwrap.dedent("        rec.array((1, 2., '2003'),\n          dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"))
        record = arr_0d[()]
        assert_equal(repr(record), "(1, 2., '2003')")
        # Legacy print mode changes the scalar-record repr; always restore it.
        try:
            np.set_printoptions(legacy='1.13')
            assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
        finally:
            np.set_printoptions(legacy=False)
    def test_recarray_from_repr(self):
        """eval(repr(...)) round-trips recarrays and record views faithfully."""
        a = np.array([(1, 'ABC'), (2, 'DEF')], dtype=[('foo', int), ('bar', 'S4')])
        recordarr = np.rec.array(a)
        recarr = a.view(np.recarray)
        recordview = a.view(np.dtype((np.record, a.dtype)))
        recordarr_r = eval(('numpy.' + repr(recordarr)), {'numpy': np})
        recarr_r = eval(('numpy.' + repr(recarr)), {'numpy': np})
        recordview_r = eval(('numpy.' + repr(recordview)), {'numpy': np})
        assert_equal(type(recordarr_r), np.recarray)
        assert_equal(recordarr_r.dtype.type, np.record)
        assert_equal(recordarr, recordarr_r)
        assert_equal(type(recarr_r), np.recarray)
        assert_equal(recarr_r.dtype.type, np.record)
        assert_equal(recarr, recarr_r)
        # A record view stays a plain ndarray (not a recarray) after round-trip.
        assert_equal(type(recordview_r), np.ndarray)
        assert_equal(recordview.dtype.type, np.record)
        assert_equal(recordview, recordview_r)
    def test_recarray_views(self):
        """View/dtype interplay: structured views get np.record, plain ones keep their scalar type."""
        a = np.array([(1, 'ABC'), (2, 'DEF')], dtype=[('foo', int), ('bar', 'S4')])
        b = np.array([1, 2, 3, 4, 5], dtype=np.int64)
        # np.rec.array and .view(np.recarray) agree on dtype.type.
        assert_equal(np.rec.array(a).dtype.type, np.record)
        assert_equal(type(np.rec.array(a)), np.recarray)
        assert_equal(np.rec.array(b).dtype.type, np.int64)
        assert_equal(type(np.rec.array(b)), np.recarray)
        assert_equal(a.view(np.recarray).dtype.type, np.record)
        assert_equal(type(a.view(np.recarray)), np.recarray)
        assert_equal(b.view(np.recarray).dtype.type, np.int64)
        assert_equal(type(b.view(np.recarray)), np.recarray)
        # Round-tripping through a non-structured view preserves recarray-ness.
        r = np.rec.array(np.ones(4, dtype='f4,i4'))
        rv = r.view('f8').view('f4,i4')
        assert_equal(type(rv), np.recarray)
        assert_equal(rv.dtype.type, np.record)
        # Nested structured fields come back as recarrays; subclasses propagate.
        r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'i4,i4')]))
        assert_equal(r['c'].dtype.type, np.record)
        assert_equal(type(r['c']), np.recarray)

        class C(np.recarray):
            pass

        c = r.view(C)
        assert_equal(type(c['c']), C)
        # Attribute access maps field dtypes: nested struct -> record, else scalar type.
        test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4', 2)), ('d', ('i8', 'i4,i4'))]
        r = np.rec.array([((1, 1), b'', [1, 1], 1), ((1, 1), b'', [1, 1], 1)], dtype=test_dtype)
        assert_equal(r.a.dtype.type, np.record)
        assert_equal(r.b.dtype.type, np.void)
        assert_equal(r.c.dtype.type, np.float32)
        assert_equal(r.d.dtype.type, np.int64)
        # Viewing with various dtypes from a recarray.
        r = np.rec.array(np.ones(4, dtype='i4,i4'))
        assert_equal(r.view('f4,f4').dtype.type, np.record)
        assert_equal(r.view(('i4', 2)).dtype.type, np.int32)
        assert_equal(r.view('V8').dtype.type, np.void)
        assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
        # Viewing back to ndarray strips the record type.
        arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
        for arr in arrs:
            rec = np.rec.array(arr)
            arr2 = rec.view((rec.dtype.fields or rec.dtype), np.ndarray)
            assert_equal(arr2.dtype.type, arr.dtype.type)
            assert_equal(type(arr2), type(arr))
def test_recarray_from_names(self):
ra = np.rec.array([(1, 'abc', 3., 0), (2, 'xy', 6., 1), (0, ' ', 0., 0)], names='c1, c2, c3, c4')
pa = np.rec.fromrecords([(1, 'abc', 3., 0), (2, 'xy', 6., 1), (0, ' ', 0., 0)], names='c1, c2, c3, c4')
assert_((ra.dtype == pa.dtype))
assert_((ra.shape == pa.shape))
for k in range(len(ra)):
assert_((ra[k].item() == pa[k].item()))
def test_recarray_conflict_fields(self):
    """Field names that clash with ndarray attributes/methods ('field',
    'shape', 'mean') must remain reachable via __getitem__."""
    ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2), (3, 'wrs', 1.3)], names='field, shape, mean')
    # Assigning to the 'mean' attribute writes the column data.
    ra.mean = [1.1, 2.2, 3.3]
    assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
    assert_((type(ra.mean) is type(ra.var)))
    # A valid shape tuple still reshapes the array...
    ra.shape = (1, 3)
    assert_((ra.shape == (1, 3)))
    # ...while non-shape data lands in the 'shape' field instead.
    ra.shape = ['A', 'B', 'C']
    assert_array_equal(ra['shape'], [['A', 'B', 'C']])
    # 'field' stays callable (the recarray method) after writing the column.
    ra.field = 5
    assert_array_equal(ra['field'], [[5, 5, 5]])
    assert_(isinstance(ra.field, collections_abc.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[(- 1)].b, 'bbb')
ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[(- 1)].b, 'bbb')
def test_recarray_stringtypes(self):
a = np.array([('abc ', 1), ('abc', 2)], dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal((a.foo[0] == a.foo[1]), False)
def test_recarray_returntypes(self):
    """Field access return types: structured sub-fields come back as
    recarray/np.record, plain fields as ndarray, and scalar rows as
    np.record supporting both attribute and item access."""
    qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
    a = np.rec.array([('abc ', (1, 1), 1, ('abcde', 'fgehi')), ('abc', (2, 3), 1, ('abcde', 'jklmn'))], dtype=[('foo', 'S4'), ('bar', [('A', int), ('B', int)]), ('baz', int), ('qux', qux_fields)])
    # Plain string field -> ndarray for both access styles.
    assert_equal(type(a.foo), np.ndarray)
    assert_equal(type(a['foo']), np.ndarray)
    # Structured field -> recarray with record dtype.
    assert_equal(type(a.bar), np.recarray)
    assert_equal(type(a['bar']), np.recarray)
    assert_equal(a.bar.dtype.type, np.record)
    # Offset-dict dtype behaves like a structured field too.
    assert_equal(type(a['qux']), np.recarray)
    assert_equal(a.qux.dtype.type, np.record)
    assert_equal(dict(a.qux.dtype.fields), qux_fields)
    # Plain integer field -> ndarray.
    assert_equal(type(a.baz), np.ndarray)
    assert_equal(type(a['baz']), np.ndarray)
    # Scalar rows of structured fields are np.record instances.
    assert_equal(type(a[0].bar), np.record)
    assert_equal(type(a[0]['bar']), np.record)
    assert_equal(a[0].bar.A, 1)
    assert_equal(a[0].bar['A'], 1)
    assert_equal(a[0]['bar'].A, 1)
    assert_equal(a[0]['bar']['A'], 1)
    assert_equal(a[0].qux.D, b'fgehi')
    assert_equal(a[0].qux['D'], b'fgehi')
    assert_equal(a[0]['qux'].D, b'fgehi')
    assert_equal(a[0]['qux']['D'], b'fgehi')
def test_zero_width_strings(self):
    """np.rec.fromarrays must cope with zero-width string columns ('|S'
    with itemsize 0), both with an inferred and an explicit dtype."""
    cols = [(['test'] * 3), ([''] * 3)]
    rec = np.rec.fromarrays(cols)
    assert_equal(rec['f0'], ['test', 'test', 'test'])
    assert_equal(rec['f1'], ['', '', ''])
    dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
    rec = np.rec.fromarrays(cols, dtype=dt)
    # Record itemsize is just the 4 bytes of f0; f1 occupies zero bytes.
    assert_equal(rec.itemsize, 4)
    assert_equal(rec['f0'], [b'test', b'test', b'test'])
    assert_equal(rec['f1'], [b'', b'', b''])
class Data():
    """Load a relation-extraction dataset (NYT / TACRED style JSON) and turn
    it into model-ready features: token ids, entity spans, NER tag ids and
    relation labels.

    NOTE(review): WordTokenizer, Instance, InputFeatures and LABEL_TO_ID are
    project types/constants defined elsewhere in this file or package.
    """

    def __init__(self, args, mode='train'):
        """Read the split selected by *mode* and populate self.processed_data."""
        if (mode == 'train'):
            data_file = args.train_data_file
        elif (mode == 'test'):
            data_file = args.test_data_file
        elif (mode == 'dev'):
            data_file = args.dev_data_file
        elif (mode == 'test_noise'):
            data_file = args.test_noise_file
        else:
            # Fail fast: an unknown mode previously surfaced later as a NameError.
            raise ValueError('unknown mode: {}'.format(mode))
        self.dataset = 'tacred'
        rel2id_file = args.rel2id_file
        self.max_len = args.max_len
        self.tokenizer = WordTokenizer(args.vocab_file)
        self.use_noise_label = args.noise_label
        self.create_label_dict()
        # (head, tail) -> set of gold relations, filled by the loaders.
        self.facts = defaultdict(set)
        print('Data Loading!-----')
        if (self.use_noise_label and (mode == 'test_noise')):
            data = self.load_data_nyt_arnor_ner_noise(data_file, rel2id_file)
        elif (self.dataset == 'nyt'):
            data = self.load_data_nyt_arnor_ner(data_file, rel2id_file)
        elif (self.dataset == 'tacred'):
            data = self.load_data_tacred_arnor_ner(data_file, rel2id_file)
        ori_data_len = len(data)
        print('Data Loaded!-----')
        print('Data Preprocessing!-----')
        features = self.preprocess(data)
        print('Data Preprocessed!-----')
        print('Processed Data List Creating!----')
        self.processed_data = []
        delete_index = []
        self.rel_num = defaultdict(int)
        for (idx, (item, feature)) in enumerate(zip(data, features)):
            # Items that were too long (feature is None) or have no relation
            # label are dropped but remembered in delete_index.
            if (feature is None):
                delete_index.append(idx)
                continue
            temp_item = {}
            temp_item['input_ids'] = feature.input_ids
            temp_item['e1_begin'] = feature.head_span[0]
            temp_item['e1_end'] = feature.head_span[1]
            temp_item['e2_begin'] = feature.tail_span[0]
            temp_item['e2_end'] = feature.tail_span[1]
            if (not item.relation):
                delete_index.append(idx)
                continue
            temp_item['rel'] = item.relation
            temp_item['D_rel'] = item.d_rel
            temp_item['ori_sentence'] = item.words
            temp_item['token_masks'] = feature.token_masks
            temp_item['bag_name'] = (item.head, item.tail, item.relation)
            temp_item['ner'] = item.ner
            if self.use_noise_label:
                temp_item['is_noise'] = item.is_noise
            self.rel_num[item.relation] += 1
            self.processed_data.append(temp_item)
        print('Processed Data List Created!----')
        print('Processed data has {} instances'.format(len(self.processed_data)))
        for (rel, num) in self.rel_num.items():
            print('{}: {}'.format(rel, num))

    def load_predenoise_labels(self, path, describe=''):
        """Load pre-computed denoising labels from '<path><describe>_labels.txt'."""
        with open(((path + describe) + '_labels.txt'), 'r') as f:
            labels = json.load(f)
        return labels

    @staticmethod
    def _find_span(words, phrase):
        """Return the inclusive (start, end) token span of *phrase* in *words*.

        Raises ValueError (from list.index) when no aligned occurrence exists
        and IndexError when *phrase* has no tokens.
        """
        phrase_tokens = phrase.split()
        pos = -1
        while True:
            pos = words.index(phrase_tokens[0], (pos + 1))
            if (' '.join(words[pos:(pos + len(phrase_tokens))]) == phrase):
                return (pos, ((pos + len(phrase_tokens)) - 1))

    @staticmethod
    def _print_examples(instances):
        """Echo the first few parsed instances for a quick sanity check."""
        print('***** print examples ******')
        for ins in instances[:5]:
            print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))

    def load_data_nyt_arnor_ner(self, data_file, rel2id_file, load_ner=True):
        """Parse an NYT-style JSON file into Instance objects.

        Entity spans are recovered by searching the entity surface form in
        the token sequence; items that cannot be aligned are skipped.
        """
        self.create_label_dict(rel2id_file)
        if load_ner:
            self.create_ner_dict()
        with open(data_file, 'r') as infile:
            data = json.load(infile)
        instances = []
        for item in data:
            words = item['sentence'].split(' ')
            if (len(words) > self.max_len):
                continue
            relation = item['relation']
            if (relation == 'None'):
                relation = 'NA'
            head = item['head']['word']
            tail = item['tail']['word']
            if (relation != 'NA'):
                self.facts[(head, tail)].add(relation)
            try:
                head_pos = self._find_span(words, head)
                tail_pos = self._find_span(words, tail)
            except (ValueError, IndexError):
                # Entity text could not be aligned to the tokens; drop item.
                continue
            head_type = item['head']['type']
            tail_type = item['tail']['type']
            if load_ner:
                ner = [self.ner2id[i] for i in item['stanford_ner']]
            else:
                ner = None
            instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner=ner))
        print('Original data has {} instances'.format(len(instances)))
        self._print_examples(instances)
        return instances

    def load_data_tacred_arnor_ner(self, data_file, rel2id_file, load_ner=True):
        """Parse a TACRED-style JSON file, replacing entity tokens with
        'SUBJ-<type>' / 'OBJ-<type>' placeholders (entity masking)."""
        if load_ner:
            self.create_ner_dict(file='data/tacred_ner2id.json')
        with open(data_file, 'r') as infile:
            data = json.load(infile)
        instances = []
        for item in data:
            words = item['sentence'].split(' ')
            if (len(words) > self.max_len):
                continue
            relation = item['relation']
            d_rel = item['D_relation']
            head = item['head']['word']
            tail = item['tail']['word']
            self.facts[(head, tail)].add(relation)
            head_pos = item['head']['pos']
            tail_pos = item['tail']['pos']
            head_type = item['head']['type']
            tail_type = item['tail']['type']
            (ss, se) = head_pos
            # Renamed from (os, oe): the old name shadowed the os module.
            (obj_start, obj_end) = tail_pos
            words[ss:(se + 1)] = ([('SUBJ-' + head_type)] * ((se - ss) + 1))
            words[obj_start:(obj_end + 1)] = ([('OBJ-' + tail_type)] * ((obj_end - obj_start) + 1))
            if load_ner:
                ner = [self.ner2id[i] for i in item['stanford_ner']]
            else:
                ner = None
            instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, d_rel=d_rel, ner=ner))
        print('Original data has {} instances'.format(len(instances)))
        self._print_examples(instances)
        return instances

    def load_data_nyt_arnor_ner_noise(self, data_file, rel2id_file, load_ner=True):
        """Like load_data_nyt_arnor_ner, but also keeps each item's
        'is_noise' flag for noise-label evaluation."""
        self.create_label_dict(rel2id_file)
        if load_ner:
            self.create_ner_dict()
        with open(data_file, 'r') as infile:
            data = json.load(infile)
        instances = []
        for item in data:
            words = item['sentence'].split(' ')
            if (len(words) > self.max_len):
                continue
            relation = item['relation']
            if (relation == 'None'):
                relation = 'NA'
            head = item['head']['word']
            tail = item['tail']['word']
            if (relation != 'NA'):
                self.facts[(head, tail)].add(relation)
            try:
                head_pos = self._find_span(words, head)
                tail_pos = self._find_span(words, tail)
            except (ValueError, IndexError):
                continue
            head_type = item['head']['type']
            tail_type = item['tail']['type']
            if load_ner:
                ner = [self.ner2id[i] for i in item['stanford_ner']]
            else:
                ner = None
            is_noise = item['is_noise']
            instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner=ner, is_noise=is_noise))
        print('Original data has {} instances'.format(len(instances)))
        self._print_examples(instances)
        return instances

    def create_ner_dict(self, file=None):
        """Load the NER-tag -> id mapping (defaults to data/ner2id.json)."""
        if (file is None):
            file = 'data/ner2id.json'
        with open(file, 'r') as f:
            self.ner2id = json.load(f)

    def preprocess(self, data, token_mask_id=0):
        """Convert Instance objects into InputFeatures.

        Returns a list index-aligned with *data*; entries that exceed
        max_len are kept as None placeholders.
        """
        features = []
        unk = 0
        for (idx, item) in enumerate(data):
            tokens = item.words
            if (len(tokens) > self.max_len):
                # Placeholder keeps data/features index-aligned.
                features.append(None)
                continue
            (input_ids, unk_num) = self.tokenizer.convert_tokens_to_ids(tokens, self.max_len, self.tokenizer.vocab['[PAD]'], self.tokenizer.vocab['[UNK]'], uncased=False)
            # Inclusive token positions become half-open [begin, end) spans.
            head_span = [item.headpos[0], (item.headpos[(- 1)] + 1)]
            tail_span = [item.tailpos[0], (item.tailpos[(- 1)] + 1)]
            token_masks = ([1] * len(input_ids))
            if (idx < 5):
                print('*** Example ***')
                print('tokens: {}'.format(' '.join(tokens)))
                print('E1 position:({}, {}), E2 position:({}, {})'.format(head_span[0], head_span[1], tail_span[0], tail_span[1]))
                print('token mask: {}'.format(str(token_masks)))
                print('input ids: {}'.format(str(input_ids)))
            features.append(InputFeatures(input_ids=input_ids, head_span=head_span, tail_span=tail_span, token_masks=token_masks))
            unk += unk_num
        print('Convert token to vocab id, unk token num: {}'.format(unk))
        return features

    def get_vocab_size(self):
        """Size of the tokenizer vocabulary."""
        return len(self.tokenizer.vocab)

    def create_label_dict(self, file=None):
        """Build relation<->label-id maps from *file*, or LABEL_TO_ID by default."""
        if (file is None):
            self.relId2labelId = LABEL_TO_ID
        else:
            with open(file, 'r') as f:
                self.relId2labelId = json.load(f)
        self.labelId2rel = {v: k for (k, v) in self.relId2labelId.items()}

    def get_labels(self):
        # NOTE(review): self.labels is never assigned anywhere in this class,
        # so this raises AttributeError as written -- confirm intended source.
        return self.labels

    def id2rel(self, id):
        """Map a label id back to its relation name (None when unknown)."""
        return self.labelId2rel.get(id, None)

    def rel2id(self, rel):
        """Map a relation name to its label id (None when unknown)."""
        return self.relId2labelId.get(rel, None)

    def get_label_num(self):
        """Number of distinct relation labels.

        (This method was defined twice with identical bodies; deduplicated.)
        """
        return len(self.relId2labelId)

    def posnum_to_posarray(self, posbegin, posend):
        """Relative-position encoding: signed distance of every position in
        [0, max_len) to the span [posbegin, posend], shifted by max_len so
        all values are non-negative."""
        if (posend < posbegin):
            posend = posbegin
        array1 = (np.arange(0, posbegin) - posbegin)
        array2 = np.zeros((posend - posbegin), dtype=np.int32)
        array3 = (np.arange(posend, self.max_len) - posend)
        posarray = (np.append(np.append(array1, array2), array3) + self.max_len)
        return posarray

    def batchify(self, noise_label=False):
        """Pad every processed item to max_len and pack it into LongTensors.

        Each element of the returned list is
        [head_pos, tail_pos, input_ids, input_masks, ori_token_masks,
         head_masks, tail_masks, ner_labels, labels(, D_labels)(, is_noise)].
        """
        batch_data = []
        PAD = self.tokenizer.vocab['[PAD]']
        ner_PAD = self.ner2id['[PAD]']
        for (i, item) in enumerate(self.processed_data):
            padding_size = (self.max_len - len(item['input_ids']))
            ori_token_masks = torch.LongTensor((item['token_masks'] + ([0] * padding_size)))
            head_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
            head_masks[item['e1_begin']:item['e1_end']] = 1
            head_masks = (head_masks * ori_token_masks)
            tail_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
            tail_masks[item['e2_begin']:item['e2_end']] = 1
            tail_masks = (tail_masks * ori_token_masks)
            head_pos = torch.LongTensor(self.posnum_to_posarray(item['e1_begin'], (item['e1_end'] - 1)))
            tail_pos = torch.LongTensor(self.posnum_to_posarray(item['e2_begin'], (item['e2_end'] - 1)))
            try:
                assert (head_pos.size(0) == self.max_len)
            except AssertionError:
                # Diagnostic only; the item is still appended below.
                print(item['e1_begin'], item['e1_end'])
            input_ids = torch.LongTensor((item['input_ids'] + ([PAD] * padding_size)))
            input_masks = torch.LongTensor((([1] * len(item['input_ids'])) + ([0] * padding_size)))
            labels = torch.LongTensor([self.relId2labelId[item['rel']]])
            if (self.dataset == 'tacred'):
                D_labels = torch.LongTensor([self.relId2labelId[item['D_rel']]])
            ner_labels = torch.LongTensor((item['ner'] + ([ner_PAD] * padding_size)))
            if (self.dataset == 'tacred'):
                batch_data.append([head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels, D_labels])
            else:
                batch_data.append([head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels])
            if noise_label:
                is_noise = item['is_noise']
                batch_data[(- 1)].append(is_noise)
        return batch_data

    def dumpData(self, save_path):
        """Pickle this Data object to *save_path*.

        Bug fix: the opened file was never bound and pickle.dump received the
        path string; it now dumps into the opened file handle.
        """
        with open(save_path, 'wb') as f:
            pickle.dump(self, f)
def register_Ns3RraaWifiManager_methods(root_module, cls):
    """PyBindGen registrations for ns3::RraaWifiManager.

    Auto-generated binding code: registers the copy and default constructors
    plus the public and private virtual methods of the RRAA rate-control
    algorithm. Keep the declarations and their order stable -- they mirror
    the C++ class declaration.
    """
    cls.add_constructor([param('ns3::RraaWifiManager const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetupMac', 'void', [param('ns3::Ptr< ns3::WifiMac > const', 'mac')], is_virtual=True)
    cls.add_method('SetupPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy > const', 'phy')], is_virtual=True)
    # Private virtuals (station lifecycle, tx vectors, report callbacks).
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoNeedRts', 'bool', [param('ns3::WifiRemoteStation *', 'st'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    return
class TestTVAE():
    """Unit tests for the TVAE synthesizer's verbose fitting loop."""
    # NOTE(review): the two bare strings below look like stripped
    # ``@patch('ctgan.synthesizers.tvae._loss_function')`` /
    # ``@patch('ctgan.synthesizers.tvae.tqdm')`` decorators -- the test method
    # receives ``tqdm_mock`` and ``loss_func_mock`` arguments that only make
    # sense with those patches applied. Confirm against the original source.
    ('ctgan.synthesizers.tvae._loss_function')
    ('ctgan.synthesizers.tvae.tqdm')
    def test_fit_verbose(self, tqdm_mock, loss_func_mock):
        """fit(verbose=True) must drive tqdm and format the running loss."""
        epochs = 1
        def mock_iter():
            # One "epoch" of iteration through the mocked progress bar.
            for i in range(epochs):
                (yield i)
        def mock_add(a, b):
            # Every loss addition yields a mock whose item() is 1.0.
            mock_loss = Mock()
            mock_loss.detach().cpu().item.return_value = 1.
            return mock_loss
        loss_mock = MagicMock()
        loss_mock.__add__ = mock_add
        loss_func_mock.return_value = (loss_mock, loss_mock)
        iterator_mock = MagicMock()
        iterator_mock.__iter__.side_effect = mock_iter
        tqdm_mock.return_value = iterator_mock
        synth = TVAE(epochs=epochs, verbose=True)
        train_data = pd.DataFrame({'col1': [0, 1, 2, 3, 4], 'col2': [10, 11, 12, 13, 14]})
        synth.fit(train_data)
        # Description is set once before training and once with the loss value.
        tqdm_mock.assert_called_once_with(range(epochs), disable=False)
        assert (iterator_mock.set_description.call_args_list[0] == call('Loss: 0.000'))
        assert (iterator_mock.set_description.call_args_list[1] == call('Loss: 1.235'))
        assert (iterator_mock.set_description.call_count == 2)
def _read_examples_hyp(path, hypotheses, label2synonym, prompt, icl_str=None):
    """Read one CSV split and build per-row option dicts.

    When *icl_str* is None the rows are formatted as training examples (no
    in-context prefix, no 'knn_premise'); otherwise *icl_str* (possibly the
    empty string) is prepended to every premise and 'knn_premise' keeps the
    bare premise.
    """
    examples = []
    with open(path) as fp:
        for row in csv.DictReader(fp):
            label = int(row['label'])
            premise = f"{row['text'].strip()}{prompt}"
            options = []
            for h in hypotheses:
                o = {'hypothesis': h, 'uncond_premise': prompt, 'uncond_hypothesis': h}
                if icl_str is None:
                    o['premise'] = premise
                else:
                    o['premise'] = (icl_str + premise)
                    o['knn_premise'] = premise
                options.append(o)
            # 'similarity' column is optional in the CSV.
            similarity = (float(row['similarity']) if ('similarity' in row) else None)
            examples.append({'options': options, 'label': label, 'sim': similarity, 'label2synonym': label2synonym, 'label_list': hypotheses})
    return examples


def load_examples_hyp(path, args):
    """Load the neutral-vs-partisan classification split at *path*.

    With args.k_shot > 0, the matching train split (path with 'dev' replaced
    by 'train') provides k in-context examples that are prepended to every
    evaluation premise.
    """
    hypotheses = [' neutral', ' partisan']
    label2synonym = {0: [' neutral', ' fair', ' objective'], 1: [' partisan', ' biased', ' unfair']}
    prompt = '\n neutral or partisan? Answer:'
    icl_str = ''
    if (args.k_shot > 0):
        train_examples = _read_examples_hyp(path.replace('dev', 'train'), hypotheses, label2synonym, prompt)
        icl_str = construct_icl_examples(train_examples, k=args.k_shot)
    examples = _read_examples_hyp(path, hypotheses, label2synonym, prompt, icl_str=icl_str)
    print('examples: ', examples[0]['options'][0]['premise'])
    return examples
# NOTE(review): the two lines preceding this test in the original were mangled
# decorator residue ("_on_pypy" and a bare ".skipif(...)", which is a syntax
# error). Reconstructed as a pytest marker; the exact original form of the
# PyPy-skip marker could not be recovered -- confirm against upstream.
@pytest.mark.skipif((not hasattr(m, 'NCVirt')), reason='NCVirt test broken on ICPC')
def test_move_support():
    """Virtual-method returns must honor C++ move semantics: the constructor
    stats at the end pin down exactly how many copies/moves occurred."""
    class NCVirtExt(m.NCVirt):
        def get_noncopyable(self, a, b):
            # Return a fresh non-copyable instance to C++.
            nc = m.NonCopyable((a * a), (b * b))
            return nc
        def get_movable(self, a, b):
            # Stored on self, so Python keeps a live reference.
            self.movable = m.Movable(a, b)
            return self.movable
    class NCVirtExt2(m.NCVirt):
        def get_noncopyable(self, a, b):
            # Stored on self: C++ cannot take ownership of a referenced object.
            self.nc = m.NonCopyable(a, b)
            return self.nc
        def get_movable(self, a, b):
            # Temporary result: C++ may move from it.
            return m.Movable(a, b)
    ncv1 = NCVirtExt()
    assert (ncv1.print_nc(2, 3) == '36')
    assert (ncv1.print_movable(4, 5) == '9')
    ncv2 = NCVirtExt2()
    assert (ncv2.print_movable(7, 7) == '14')
    # A still-referenced non-copyable cannot be handed over to C++.
    with pytest.raises(RuntimeError):
        ncv2.print_nc(9, 9)
    nc_stats = ConstructorStats.get(m.NonCopyable)
    mv_stats = ConstructorStats.get(m.Movable)
    assert (nc_stats.alive() == 1)
    assert (mv_stats.alive() == 1)
    del ncv1, ncv2
    assert (nc_stats.alive() == 0)
    assert (mv_stats.alive() == 0)
    assert (nc_stats.values() == ['4', '9', '9', '9'])
    assert (mv_stats.values() == ['4', '5', '7', '7'])
    assert (nc_stats.copy_constructions == 0)
    assert (mv_stats.copy_constructions == 1)
    assert (nc_stats.move_constructions >= 0)
    assert (mv_stats.move_constructions >= 0)
class Normalizer(mrl.Module):
    """mrl module wrapping a state normalizer.

    Toggles the normalizer's read-only mode based on training state and
    supports deferred ("lazy") checkpoint loading for normalizers whose
    state is only materialized after a first call.
    """

    def __init__(self, normalizer):
        super().__init__('state_normalizer', required_agent_modules=[], locals=locals())
        self.normalizer = normalizer
        # Folder to load state from on first __call__ (set by load() when the
        # normalizer has no state_dict yet).
        self.lazy_load = None

    def __call__(self, *args, **kwargs):
        # Statistics only update while training; evaluation uses frozen stats.
        if self.training:
            self.normalizer.read_only = False
        else:
            self.normalizer.read_only = True
        if (self.lazy_load is not None):
            # First call presumably initializes the normalizer's internal state
            # so the deferred load below has a state_dict to fill -- TODO confirm.
            self.normalizer(*args, **kwargs)
            self.load(self.lazy_load)
            print('LOADED NORMALIZER')
            self.lazy_load = None
        return self.normalizer(*args, **kwargs)

    def save(self, save_folder):
        """Pickle the normalizer's state_dict into <save_folder>/normalizer.pickle."""
        if (self.normalizer.state_dict() is not None):
            with open(os.path.join(save_folder, 'normalizer.pickle'), 'wb') as f:
                pickle.dump(self.normalizer.state_dict(), f)

    def load(self, save_folder):
        """Restore state from <save_folder>/normalizer.pickle.

        If the normalizer has no state_dict yet, defer loading until the
        first __call__ via self.lazy_load.
        """
        if (self.normalizer.state_dict() is not None):
            save_path = os.path.join(save_folder, 'normalizer.pickle')
            if os.path.exists(save_path):
                with open(save_path, 'rb') as f:
                    self.normalizer.load_state_dict(pickle.load(f))
            else:
                print('WARNING: No saved normalizer state to load.')
        else:
            self.lazy_load = save_folder
class Scatter(BenchmarkItem):
    """Benchmark item with scatter enabled and gather ("gether") disabled."""
    name = 'scatter'

    def __init__(self):
        # Keys kept verbatim ('gether' may be a typo of 'gather' -- confirm
        # with whatever consumes self._items before renaming).
        flags = dict(scatter=True, gether=False)
        self._items = flags
def test_fit_predict_on_pipeline_without_fit_predict():
    """Accessing fit_predict on a pipeline whose final step lacks it must
    raise AttributeError naming the offending estimator."""
    pipe = Pipeline([('scaler', StandardScaler()),
                     ('pca', PCA(svd_solver='full'))])
    with raises(AttributeError,
                match="'PCA' object has no attribute 'fit_predict'"):
        getattr(pipe, 'fit_predict')
def JvecAdjointTest_1D(sigmaHalf, formulation='PrimSec'):
    """Adjoint test for the 1D recursive NSEM simulation.

    Checks v.(J w) == w.(J^T v) for random v, w, to a tolerance scaled by
    the magnitude of vJw; returns True when the identity holds.

    NOTE(review): neither *sigmaHalf* nor *formulation* is used in the body
    -- the layered model below is hard-coded. Confirm whether intentional.
    """
    frequencies = np.logspace(0, 4, 21)
    # One plane-wave source per frequency, four receiver components each.
    receivers_list = [nsem.receivers.PointNaturalSource(component='real'), nsem.receivers.PointNaturalSource(component='imag'), nsem.receivers.PointNaturalSource(component='app_res'), nsem.receivers.PointNaturalSource(component='phase')]
    source_list = []
    for ii in range(0, len(frequencies)):
        source_list.append(nsem.sources.Planewave(receivers_list, frequencies[ii]))
    survey = nsem.survey.Survey(source_list)
    # Three conductivities over two finite layers (bottom layer is a halfspace).
    layer_thicknesses = np.array([200, 100])
    sigma_model = np.array([0.001, 0.01, 0.1])
    mapping = maps.Wires(('sigma', 3), ('thicknesses', 2))
    simulation = nsem.simulation_1d.Simulation1DRecursive(survey=survey, sigmaMap=mapping.sigma, thicknessesMap=mapping.thicknesses)
    m = np.r_[(sigma_model, layer_thicknesses)]
    u = simulation.fields(m)
    # Fixed seed keeps the test deterministic.
    np.random.seed(1983)
    v = np.random.rand(survey.nD)
    w = np.random.rand(len(m))
    vJw = v.dot(simulation.Jvec(m, w, u))
    wJtv = w.dot(simulation.Jtvec(m, v, u))
    # Tolerance scales with the order of magnitude of vJw, floored at FLR.
    tol = np.max([(TOL * (10 ** int(np.log10(np.abs(vJw))))), FLR])
    print(' vJw  wJtv  vJw - wJtv  tol  abs(vJw - wJtv) < tol')
    print(vJw, wJtv, (vJw - wJtv), tol, (np.abs((vJw - wJtv)) < tol))
    return (np.abs((vJw - wJtv)) < tol)
def main(args):
    """Entry point: launch one worker per local GPU (distributed mode) or a
    single worker on args.gpu."""
    # 'spawn' start method -- presumably needed for clean CUDA state in the
    # child processes; confirm against the training setup.
    mp.set_start_method('spawn')
    args.dist_url = f'tcp://{args.node}:{args.port}'
    print('Using url {}'.format(args.dist_url))
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # One process per GPU; world size equals the local GPU count.
        args.world_size = ngpus_per_node
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
class BanditPolicySimulator():
    """Drives a bandit *policy* against a simulated *environment*, recording
    selected actions, obtained and ground-truth rewards, and contexts, with
    optional per-action delayed reward feedback.

    NOTE(review): the annotated class attributes with defaults plus
    __post_init__ strongly suggest this was a ``@dataclass`` whose decorator
    was lost in extraction, and that selected_actions / obtained_rewards /
    ground_truth_rewards / contexts were ``@property`` accessors -- confirm
    against the original source.
    """
    policy: Any
    environment: BanditEnvironmentSimulator = None
    # Maps a future round index -> list of past rounds whose delayed rewards
    # become observable at that round.
    reward_round_lookup: defaultdict = None
    _selected_actions: List[int] = None
    _obtained_rewards: List[int] = None
    _ground_truth_rewards: List[np.ndarray] = None
    _contexts: List[np.ndarray] = None
    total_reward: int = 0
    rounds_played: int = 0
    current_round: BanditRound = None

    def selected_actions(self) -> np.ndarray:
        """History of chosen action indices as a 1-D array."""
        return np.asarray(self._selected_actions)

    def obtained_rewards(self) -> np.ndarray:
        """History of realized rewards as a 1-D array."""
        return np.asarray(self._obtained_rewards)

    def ground_truth_rewards(self) -> np.ndarray:
        """Per-round full reward vectors, stacked row-wise."""
        return np.vstack(self._ground_truth_rewards)

    def contexts(self) -> np.ndarray:
        """Per-round context vectors, stacked row-wise."""
        return np.vstack(self._contexts)

    def __post_init__(self):
        # Mutable state is created per instance here rather than as shared
        # class-level defaults.
        self._selected_actions = []
        self._obtained_rewards = []
        self._ground_truth_rewards = []
        self._contexts = []
        self.reward_round_lookup = defaultdict(list)

    def start_next_bandit_round(self, bandit_round: BanditRound=None) -> None:
        """Advance to *bandit_round*, or pull the next one from the environment."""
        if (not bandit_round):
            self.current_round = self.environment.next_bandit_round()
        else:
            self.current_round = bandit_round
        self.append_contexts(self.current_round.context)
        self.append_ground_truth_rewards(self.current_round.rewards)

    def append_ground_truth_rewards(self, ground_truth_rewards):
        self._ground_truth_rewards.append(ground_truth_rewards)

    def append_contexts(self, context):
        self._contexts.append(context)

    def step(self, bandit_round: BanditRound=None):
        """Play one full round: fetch it, act, collect reward, update policy."""
        self.start_next_bandit_round(bandit_round)
        self._step()

    def _step(self):
        selected_action = self.select_action()
        self._selected_actions.append(selected_action)
        reward_ = self.current_round.rewards[selected_action]
        self._obtained_rewards.append(reward_)
        self.total_reward += reward_
        delays = self.current_round.round_delays
        if (delays is None):
            # Immediate feedback: update the policy right away.
            self.update_policy(self.current_round.context, selected_action, reward_)
        else:
            # Delayed feedback: queue this round for the future round at which
            # its reward becomes observable, then flush whatever is due now.
            round_delay = delays[selected_action]
            self.reward_round_lookup[(round_delay + self.rounds_played)].append(self.rounds_played)
            available_rounds = self.reward_round_lookup.get(self.rounds_played, [])
            self.delayed_update_policy(available_rounds, self.rounds_played)
        self.rounds_played += 1

    def select_action(self):
        """Ask the policy for one action, passing context only when required."""
        if (self.policy.policy_type == PolicyType.CONTEXT_FREE):
            selected_action = self.policy.select_action()[0]
        elif (self.policy.policy_type == PolicyType.CONTEXTUAL):
            selected_action = self.policy.select_action(np.expand_dims(self.current_round.context, axis=0))[0]
        else:
            raise RuntimeError(f'Policy type {self.policy.policy_type} of policy {self.policy.policy_name} is unsupported')
        return selected_action

    def steps(self, n_rounds: int=None, batch_bandit_rounds: BanditRounds=None) -> None:
        """Play *n_rounds* from the environment and/or a provided batch of rounds."""
        if n_rounds:
            for _ in tqdm(range(n_rounds)):
                self.step()
        if batch_bandit_rounds:
            for bandit_round in tqdm(batch_bandit_rounds):
                self.step(bandit_round)

    def delayed_update_policy(self, available_rounds: List[int], current_round: int) -> None:
        """Apply updates for all queued rounds whose rewards are now observable."""
        for available_round_idx in available_rounds:
            available_action = self._selected_actions[available_round_idx]
            available_context = self._contexts[available_round_idx]
            available_rewards = self._obtained_rewards[available_round_idx]
            self.update_policy(available_context, available_action, available_rewards)
        self.reward_round_lookup.pop(current_round, None)

    def clear_delayed_queue(self):
        """Flush every still-pending delayed update (e.g. at end of a simulation)."""
        for (round_idx, available_rounds) in self.reward_round_lookup.copy().items():
            self.delayed_update_policy(available_rounds, current_round=round_idx)

    def update_policy(self, context: np.ndarray, action: int, reward: int) -> None:
        """Feed one (context, action, reward) observation to the policy."""
        if (self.policy.policy_type == PolicyType.CONTEXT_FREE):
            self.policy.update_params(action=action, reward=reward)
        elif (self.policy.policy_type == PolicyType.CONTEXTUAL):
            self.policy.update_params(action=action, reward=reward, context=np.expand_dims(context, axis=0))
def load_checkpoint(filepath, device):
    """Deserialize a torch checkpoint from *filepath*, mapping all tensors
    onto *device*, and return the resulting state dict."""
    assert os.path.isfile(filepath)
    print(f"Loading '{filepath}'")
    state = torch.load(filepath, map_location=device)
    print('Complete.')
    return state
# NOTE(review): the tuple expression below looks like a stripped Dash
# ``@app.callback(...)`` decorator (one Output, one Input, and a State list
# matching the function's parameters); confirm against the original source.
(Output('forecasting-select-features', 'options'), Input('forecasting-select-features-parent', 'n_clicks'), [State('forecasting-select-file', 'value'), State('forecasting-select-target', 'value'), State('forecasting-select-exog', 'value')])
def select_features(n_clicks, filename, target_name, exog_names):
    """Dash callback body: list candidate feature columns of the selected
    file, excluding the chosen target and exogenous columns."""
    options = []
    ctx = dash.callback_context
    prop_id = ctx.triggered_id
    if (prop_id == 'forecasting-select-features-parent'):
        if ((filename is not None) and (target_name is not None)):
            file_path = os.path.join(file_manager.data_directory, filename)
            # Only the header is needed to enumerate columns (nrows=2).
            df = ForecastModel().load_data(file_path, nrows=2)
            options += [{'label': s, 'value': s} for s in df.columns if (s not in ([target_name] + (exog_names or [])))]
    return options
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) *y* with a Savitzky-Golay filter.

    Fits a polynomial of degree *order* in a sliding window of *window_size*
    samples (odd, >= order + 2) and evaluates its *deriv*-th derivative,
    scaled by rate**deriv. Returns an array the same length as *y*.

    Fix: ``np.int`` (removed in NumPy >= 1.24) replaced by builtin ``int``,
    and the deprecated ``np.mat``/matrix ``.A`` API replaced by a plain
    ndarray with ``np.linalg.pinv``. Behavior is unchanged.
    """
    from math import factorial
    try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError('window_size and order have to be of type int')
    if (((window_size % 2) != 1) or (window_size < 1)):
        raise TypeError('window_size size must be a positive odd number')
    if (window_size < (order + 2)):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range((order + 1))
    half_window = ((window_size - 1) // 2)
    # Vandermonde design matrix of the local polynomial fit.
    b = np.array([[(k ** i) for i in order_range] for k in range((- half_window), (half_window + 1))])
    # Row *deriv* of the pseudo-inverse gives the convolution coefficients.
    m = ((np.linalg.pinv(b)[deriv] * (rate ** deriv)) * factorial(deriv))
    # Pad both ends with values mirrored about the endpoints to tame edges.
    firstvals = (y[0] - np.abs((y[1:(half_window + 1)][::(- 1)] - y[0])))
    lastvals = (y[(- 1)] + np.abs((y[((- half_window) - 1):(- 1)][::(- 1)] - y[(- 1)])))
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::(- 1)], y, mode='valid')
def DuadicCodeOddPair(F, S1, S2):
    """Construct the odd pair of duadic codes over *F* from the splitting
    (S1, S2) of n = len(S1) + len(S2) + 1; returns (C1, C2) cyclic codes.

    See the stopgap below: this implementation has known open correctness
    issues (trac ticket 25896).
    """
    from sage.misc.stopgap import stopgap
    stopgap('The function DuadicCodeOddPair has several issues which may cause wrong results', 25896)
    from .cyclic_code import CyclicCode
    n = ((len(S1) + len(S2)) + 1)
    if (not _is_a_splitting(S1, S2, n)):
        raise TypeError(('%s, %s must be a splitting of %s.' % (S1, S2, n)))
    q = F.order()
    # Smallest k with n | q^k - 1, so GF(q^k) contains a primitive n-th root.
    k = Mod(q, n).multiplicative_order()
    FF = GF((q ** k), 'z')
    z = FF.gen()
    # NOTE(review): '/' here relies on Sage's exact (rational) division, not
    # Python float division -- confirm if this code ever runs unpreparsed.
    zeta = (z ** (((q ** k) - 1) / n))
    P1 = PolynomialRing(FF, 'x')
    x = P1.gen()
    # Generator polynomials with the extra root at zeta^0 = 1 ("odd" pair).
    g1 = prod([(x - (zeta ** i)) for i in (S1 + [0])])
    g2 = prod([(x - (zeta ** i)) for i in (S2 + [0])])
    # Correction term j(x) = (1/n) * sum_i x^i.
    j = sum([((x ** i) / n) for i in range(n)])
    P2 = PolynomialRing(F, 'x')
    x = P2.gen()
    # Lift coefficients of g + j back down to the base field F.
    coeffs1 = [_lift2smallest_field(c)[0] for c in (g1 + j).coefficients(sparse=False)]
    coeffs2 = [_lift2smallest_field(c)[0] for c in (g2 + j).coefficients(sparse=False)]
    gg1 = P2(coeffs1)
    gg2 = P2(coeffs2)
    # Reduce to honest divisors of x^n - 1 before building the cyclic codes.
    gg1 = gcd(gg1, ((x ** n) - 1))
    gg2 = gcd(gg2, ((x ** n) - 1))
    C1 = CyclicCode(length=n, generator_pol=gg1)
    C2 = CyclicCode(length=n, generator_pol=gg2)
    return (C1, C2)
class ModuleDict(BaseModule, nn.ModuleDict):
    """nn.ModuleDict that also carries an ``init_cfg`` via BaseModule.

    Both bases are initialized explicitly (rather than via cooperative
    super()) because their __init__ signatures differ.
    """

    def __init__(self, modules: Optional[dict]=None, init_cfg: Optional[dict]=None):
        BaseModule.__init__(self, init_cfg)
        nn.ModuleDict.__init__(self, modules)
def petersen_graph() -> StellarGraph:
    """Build the Petersen graph as a StellarGraph with node features attached."""
    graph = nx.petersen_graph()
    features = node_features()
    return StellarGraph.from_networkx(graph, node_features=features)
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """Output container for encoder-decoder extractive QA models.

    All fields default to None; which ones are populated depends on the
    model configuration and call arguments.
    """
    loss: Optional[torch.FloatTensor] = None
    # Span-start / span-end scores over the decoder sequence.
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    # Cached decoder key/value states for fast autoregressive decoding.
    past_key_values: Optional[List[torch.FloatTensor]] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder-side outputs.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
class RGNNConv(ChainableGCNConv):
    """GCN convolution variant that aggregates neighbor messages with a
    robust mean (an entry of ROBUST_MEANS, e.g. soft k-medoid) instead of
    the plain weighted sum."""

    def __init__(self, mean='soft_k_medoid', mean_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs):
        """*mean* selects the robust aggregator; *mean_kwargs* configures it.

        Bug fix: the original used a mutable dict literal as the default for
        ``mean_kwargs`` (shared across all instances); a None sentinel with a
        per-instance default dict preserves the same effective defaults.
        """
        super().__init__(**kwargs)
        self._mean = ROBUST_MEANS[mean]
        if mean_kwargs is None:
            mean_kwargs = dict(k=64, temperature=1.0, with_weight_correction=True)
        self._mean_kwargs = mean_kwargs

    def message_and_aggregate(self, adj_t) -> torch.Tensor:
        # Fused sparse aggregation is intentionally unsupported; propagate()
        # performs the robust aggregation instead.
        raise NotImplementedError

    def propagate(self, edge_index: Union[(torch.Tensor, SparseTensor)], size=None, **kwargs) -> torch.Tensor:
        """Aggregate node features with the robust mean, optionally chunked."""
        x = kwargs['x']
        if (not isinstance(edge_index, SparseTensor)):
            # Prefer pre-normalized weights when the caller provides them.
            edge_weights = (kwargs['norm'] if ('norm' in kwargs) else kwargs['edge_weight'])
            A = SparseTensor.from_edge_index(edge_index, edge_weights, (x.size(0), x.size(0)))
            return self._mean(A, x, **self._mean_kwargs)

        def aggregate(edge_index: SparseTensor, x: torch.Tensor):
            return self._mean(edge_index, x, **self._mean_kwargs)

        if self.do_chunk:
            return chunked_message_and_aggregate(edge_index, x, n_chunks=self.n_chunks, aggregation_function=aggregate)
        else:
            return aggregate(edge_index, x)
class EvalHook(_EvalHook):
    """Single-GPU evaluation hook for mmseg runners.

    Extends the base ``_EvalHook`` to run ``single_gpu_test`` and cache the
    most recent raw results; ``pre_eval=True`` evaluates batch-by-batch,
    which keeps CPU memory usage low.
    """

    # Metrics where larger is better (used by the save-best logic).
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self, *args, by_epoch=False, efficient_test=False, pre_eval=False, **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.pre_eval = pre_eval
        # Raw outputs of the last evaluation; None until the first eval runs.
        self.latest_results = None
        if efficient_test:
            # efficient_test is accepted but ignored; pre_eval supersedes it.
            warnings.warn('DeprecationWarning: ``efficient_test`` for evaluation hook is deprecated, the evaluation hook is CPU memory friendly with ``pre_eval=True`` as argument for ``single_gpu_test()`` function')

    def _do_evaluate(self, runner):
        """Run evaluation if the interval check passes; optionally save the best ckpt."""
        if (not self._should_evaluate(runner)):
            return
        # Imported lazily -- presumably to avoid a circular import; confirm.
        from mmseg.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False, pre_eval=self.pre_eval)
        self.latest_results = results
        runner.log_buffer.clear()
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        if self.save_best:
            self._save_ckpt(runner, key_score)
def load_json_dataset(fpath, tokenizer, tag_fmt='IO', contiguous_only=False):
    """Load a JSONL (optionally gzip-compressed) NER corpus.

    Each line is a JSON object with 'name', 'sentences', and optionally
    'entities'. Absolute character offsets are stripped from each entity
    before constructing ``Annotation`` objects.

    Args:
        fpath: Path to a ``.json``/``.jsonl`` file, or a ``.gz`` of one.
        tokenizer: Tokenizer forwarded to ``NerDocumentDataset``.
        tag_fmt: Tagging scheme (e.g. 'IO'), forwarded to the dataset.
        contiguous_only: If True, drop annotations spanning more than one span.

    Returns:
        A ``NerDocumentDataset`` over the parsed documents and entities.
    """
    documents, entities = [], {}
    # Transparently support gzip-compressed input (suffix test is more robust
    # than splitting on '.').
    fopen = gzip.open if fpath.endswith('.gz') else open
    with fopen(fpath, 'rb') as fp:
        for line in fp:
            d = json.loads(line)
            doc = Document(d['name'], [Sentence(**s) for s in d['sentences']])
            documents.append(doc)
            entities[doc.name] = set()
            if 'entities' not in d:
                continue
            for entity in d['entities']:
                # Absolute offsets are not part of the Annotation schema.
                del entity['abs_char_start']
                del entity['abs_char_end']
                if 'doc_name' not in entity:
                    entity['doc_name'] = doc.name
                anno = Annotation(**entity)
                # Optionally skip discontiguous (multi-span) mentions.
                if len(anno.span) > 1 and contiguous_only:
                    continue
                # Reuse the instance built above instead of constructing a
                # second identical Annotation (the original rebuilt it here).
                entities[doc.name].add(anno)
    return NerDocumentDataset(documents, entities, tag_fmt=tag_fmt, tokenizer=tokenizer)
class Args():
    """Hard-coded configuration constants, mimicking an argparse namespace
    for a multi-domain image-translation GAN -- presumably DRIT-style; the
    field meanings below are inferred from names, confirm against the model.
    """
    concat = 1                      # NOTE(review): flag-like; exact meaning not visible here
    crop_size = 216                 # square crop size in pixels
    dis_norm = None                 # discriminator normalization layer (None = none)
    dis_scale = 3                   # number of discriminator scales
    dis_spectral_norm = False       # whether to apply spectral norm in the discriminator
    dataroot = 'data'               # root directory of the dataset
    gpu = 1                         # GPU id / count -- confirm usage at call site
    input_dim = 3                   # input channels (RGB)
    nThreads = 4                    # dataloader worker threads
    num_domains = 5                 # number of image domains
    nz = 8                          # latent (noise) vector dimension
    resume = 'gan_weights.pth'      # checkpoint file to load
def augment(image, label):
    """Deterministically augment an image: rotate 90 degrees, then mirror
    left-right. The label passes through unchanged."""
    rotated = tf.image.rot90(image)
    mirrored = tf.image.flip_left_right(rotated)
    return mirrored, label
class MediumPayloadCustomMode2():
    """Parsed 'medium payload, custom mode 2' packet: timestamp, Euler
    angles, free acceleration and magnetic field read in that wire order."""

    # Fixed size in bytes of one packet on the wire.
    SIZE = 34

    @staticmethod
    def from_reader(reader: _ResponseReader):
        """Consume SIZE bytes from *reader* and return a populated instance.

        Decorated as @staticmethod (the original bare function only worked
        when called via the class; an instance call would misbind *reader*).
        """
        assert (reader.remaining() >= MediumPayloadCustomMode2.SIZE)
        rv = MediumPayloadCustomMode2()
        # Field order must match the device's wire format.
        rv.timestamp = Timestamp.from_reader(reader)
        rv.euler = EulerAngles.from_reader(reader)
        rv.free_acceleration = FreeAcceleration.from_reader(reader)
        rv.magnetic_field = MagneticField.from_reader(reader)
        return rv

    @staticmethod
    def from_bytes(bites):
        """Parse one packet from a bytes-like object."""
        reader = _ResponseReader(bites)
        return MediumPayloadCustomMode2.from_reader(reader)

    def __repr__(self):
        return _pretty_print(self)
class PerColHeader(object):
    """Container for the 0th/25th/75th/100th percentile values of one column."""

    def __init__(self, header):
        # header is indexable; positions 0-3 hold the four percentile values.
        names = ('percentile_0', 'percentile_25', 'percentile_75', 'percentile_100')
        for position, attr in enumerate(names):
            setattr(self, attr, header[position])
def unpack_sim_data(result):
    """Flatten a nested quadrotor-simulation result dict into a pandas DataFrame.

    ``result`` must contain the nested dicts accessed below ('state',
    'control', 'flat', 'imu_measurements', 'mocap_measurements', 'imu_gt',
    'state_estimate'), each holding arrays aligned on the same number of
    timesteps -- presumably (N, k) shaped; confirm against the simulator.

    Returns:
        pd.DataFrame with one row per timestep. Column order is fixed by
        ``headers`` and MUST match the np.hstack order below exactly.
    """
    headers = ['time', 'x', 'y', 'z', 'xdot', 'ydot', 'zdot', 'qx', 'qy', 'qz', 'qw', 'wx', 'wy', 'wz', 'windx', 'windy', 'windz', 'r1', 'r2', 'r3', 'r4', 'xdes', 'ydes', 'zdes', 'xdotdes', 'ydotdes', 'zdotdes', 'xddotdes', 'yddotdes', 'zddotdes', 'xdddotdes', 'ydddotdes', 'zdddotdes', 'xddddotdes', 'yddddotdes', 'zddddotdes', 'yawdes', 'yawdotdes', 'ax', 'ay', 'az', 'ax_gt', 'ay_gt', 'az_gt', 'gx', 'gy', 'gz', 'mocap_x', 'mocap_y', 'mocap_z', 'mocap_xdot', 'mocap_ydot', 'mocap_zdot', 'mocap_qx', 'mocap_qy', 'mocap_qz', 'mocap_qw', 'mocap_wx', 'mocap_wy', 'mocap_wz', 'r1des', 'r2des', 'r3des', 'r4des', 'thrustdes', 'qxdes', 'qydes', 'qzdes', 'qwdes', 'mxdes', 'mydes', 'mzdes']
    # Time as an (N, 1) column so it can be hstacked with the matrices.
    time = result['time'].reshape((- 1), 1)
    # True vehicle state.
    state = result['state']
    x = state['x']
    v = state['v']
    q = state['q']
    w = state['w']
    wind = state['wind']
    rotor_speeds = state['rotor_speeds']
    # Controller commands.
    control = result['control']
    cmd_rotor = control['cmd_motor_speeds']
    cmd_thrust = control['cmd_thrust'].reshape((- 1), 1)
    cmd_q = control['cmd_q']
    cmd_moment = control['cmd_moment']
    # Desired (differentially flat) trajectory and its derivatives.
    flat = result['flat']
    x_des = flat['x']
    v_des = flat['x_dot']
    a_des = flat['x_ddot']
    j_des = flat['x_dddot']
    s_des = flat['x_ddddot']
    yaw_des = flat['yaw'].reshape((- 1), 1)
    yawdot_des = flat['yaw_dot'].reshape((- 1), 1)
    # Noisy IMU measurements.
    imu_measurements = result['imu_measurements']
    a_measured = imu_measurements['accel']
    w_measured = imu_measurements['gyro']
    # Motion-capture measurements.
    mocap_measurements = result['mocap_measurements']
    x_mc = mocap_measurements['x']
    v_mc = mocap_measurements['v']
    q_mc = mocap_measurements['q']
    w_mc = mocap_measurements['w']
    # Ground-truth IMU (noise-free).
    imu_actual = result['imu_gt']
    a_actual = imu_actual['accel']
    # Optional estimator output.
    state_estimate = result['state_estimate']
    filter_state = state_estimate['filter_state']
    covariance = state_estimate['covariance']
    if (filter_state.shape[1] > 0):
        # 3-sigma bounds from the covariance diagonal, one column per state.
        sd = (3 * np.sqrt(np.diagonal(covariance, axis1=1, axis2=2)))
        headers.extend([('xhat_' + str(i)) for i in range(filter_state.shape[1])])
        headers.extend([('sigma_' + str(i)) for i in range(filter_state.shape[1])])
        dataset = np.hstack((time, x, v, q, w, wind, rotor_speeds, x_des, v_des, a_des, j_des, s_des, yaw_des, yawdot_des, a_measured, a_actual, w_measured, x_mc, v_mc, q_mc, w_mc, cmd_rotor, cmd_thrust, cmd_q, cmd_moment, filter_state, sd))
    else:
        # No estimator state: stack only the base columns (sd is unused here).
        sd = []
        dataset = np.hstack((time, x, v, q, w, wind, rotor_speeds, x_des, v_des, a_des, j_des, s_des, yaw_des, yawdot_des, a_measured, a_actual, w_measured, x_mc, v_mc, q_mc, w_mc, cmd_rotor, cmd_thrust, cmd_q, cmd_moment))
    df = pd.DataFrame(data=dataset, columns=headers)
    return df
def test_profiler(cl):
    """Exercise the profiler: run a histogram pipeline on ``cl``, write the
    trace to a temp file, and compute statistics. Cleans up the trace file."""
    # Build a minimal pipeline: input video -> per-frame histogram ->
    # gather frame 0 -> named output stream.
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    ghist = cl.streams.Gather(hist, [[0]])
    output_op = cl.io.Output(ghist, [NamedStream(cl, '_ignore')])
    time_start = time.time()
    job_id = cl.run(output_op, PerfParams.estimate(), show_progress=False, cache_mode=CacheMode.Overwrite)
    print('Time', (time.time() - time_start))
    profile = cl.get_profile(job_id)
    # delete=False so the profiler can reopen the path after we close it.
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.trace')
    f.close()
    profile.write_trace(f.name)
    profile.statistics()
    # Remove the temp trace file ourselves (delete=False above).
    run(['rm', '-f', f.name])
def proper_subterms(term):
    """Return a lazy iterator over the subterms of each argument of *term*,
    deduplicated across arguments via a single shared seen-set."""
    seen = set()
    # term.args() is evaluated eagerly here, matching the original
    # generator-expression semantics; the chain itself stays lazy.
    arguments = term.args()
    return itertools.chain.from_iterable(subterms(arg, seen) for arg in arguments)
def _validate_state(state: State):
    """Sanity-check a pgx ``State``: env id, field dtypes, and that every
    non-private attribute belongs to the public API.

    Raises AssertionError (with the offending value in the message) on any
    violation. Intentionally assert-based: this is an internal validator.
    """
    assert (state.env_id in get_args(EnvId))
    assert (state.current_player.dtype == jnp.int32), state.current_player.dtype
    assert (state.terminated.dtype == jnp.bool_), state.terminated.dtype
    assert (state.rewards.dtype == jnp.float32), state.rewards.dtype
    assert (state.legal_action_mask.dtype == jnp.bool_), state.legal_action_mask.dtype
    # Set for O(1) membership; the original used a list.
    public_attributes = {'current_player', 'observation', 'rewards', 'terminated', 'truncated', 'legal_action_mask'}
    # Iterate keys only -- the values were never used (the original unpacked
    # .items() and discarded v).
    for attr_name in state.__dict__:
        if attr_name.startswith('_'):
            continue
        assert (attr_name in public_attributes), attr_name
def check_version(new_version):
    """Print an upgrade hint when *new_version* is newer than the installed
    genosolver (compared via ``version.parse``)."""
    installed = version.parse(__version__)
    latest = version.parse(new_version)
    if installed < latest:
        print("A new version of the GENO solver is available. You should consider upgrading it via 'pip install --upgrade genosolver'.")
def filter_logdirs(logdirs: list, beta: Optional[float]=None, group: Optional[str]=None, nlf: Optional[int]=None, merge_directions: Optional[bool]=None, framework: Optional[str]=None, latvolume: Optional[list[int]]=None) -> list[os.PathLike]:
    """Return the logdirs that satisfy ALL of the provided criteria.

    Each ``_match_*`` helper is expected to accept ``None`` as "criterion not
    set" and pass in that case. The original appended a logdir once per
    matching criterion, which both duplicated entries and implemented OR
    semantics; a filter with several criteria clearly intends conjunction.

    Args:
        logdirs: Candidate log directories.
        beta/group/nlf/merge_directions/framework/latvolume: Optional filter
            criteria; ``None`` means "do not filter on this attribute".

    Returns:
        The logdirs matching every supplied criterion, each listed once,
        in their original order.
    """
    matches = []
    for logdir in logdirs:
        criteria_met = (
            _match_beta(logdir, beta)
            and _match_group(logdir, group)
            and _match_nlf(logdir, nlf)
            and _match_merge_directions(logdir, merge_directions)
            and _match_framework(logdir, framework)
            and _match_latvolume(logdir, latvolume)
        )
        if criteria_met:
            matches.append(logdir)
    return matches
def handle_failed_request(api_type: str, response: Dict):
    """Raise ``AI21RequestError`` for a failed *api_type* call, folding any
    'detail' / 'Error' entries of *response* into the message."""
    fragments = [f'AI21 {api_type} API error -']
    if 'detail' in response:
        fragments.append(f"Detail: {response['detail']}")
    if 'Error' in response:
        fragments.append(f"Error: {response['Error']}")
    # Joining with single spaces reproduces the original concatenation.
    raise AI21RequestError(' '.join(fragments))
def main_test():
    """Evaluate a trained Net on the MNIST test set under channel noise,
    averaging accuracy over 20 runs.

    Relies on module-level globals ``args`` (hyper-parameters / paths),
    ``device``, and the ``test`` function -- presumably defined elsewhere in
    this file; confirm before reuse.
    """
    kwargs = {'num_workers': 1, 'pin_memory': True}
    # (0.1307, 0.3081) are the standard MNIST mean/std normalization constants.
    test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net(args).to(device)
    # Load only the model weights from the checkpoint dict.
    model.load_state_dict(torch.load(args.weights)['model'])
    accuracy = 0
    # Average over t stochastic evaluations (channel noise makes each run differ).
    t = 20
    for i in range(t):
        (acc, pruned_dim) = test(args, model, device, test_loader, args.channel_noise)
        accuracy += acc
    print('Noise level:', args.channel_noise, 'Test Accuracy:', (accuracy / t), 'Pruned dim:', pruned_dim, 'Activated dim:', (args.intermediate_dim - pruned_dim))
def register_Ns3LteRrcSapReestabUeIdentity_methods(root_module, cls):
    """Register Python bindings for ``ns3::LteRrcSap::ReestabUeIdentity``.

    Auto-generated pybindgen registration: declares the constructors and the
    public fields of the struct on the binding class ``cls``.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteRrcSap::ReestabUeIdentity const &', 'arg0')])
    # Public mutable uint16_t fields of the underlying struct.
    cls.add_instance_attribute('cRnti', 'uint16_t', is_const=False)
    cls.add_instance_attribute('physCellId', 'uint16_t', is_const=False)
    return
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the text dataset for training or evaluation.

    Chooses the eval or train file from *args* and wraps it in a
    line-by-line dataset when ``args.line_by_line`` is set, otherwise in a
    block-concatenated ``TextDataset``.
    """
    file_path = args.eval_data_file if evaluate else args.train_data_file
    dataset_cls = LineByLineTextDataset if args.line_by_line else TextDataset
    return dataset_cls(tokenizer, args, file_path=file_path, block_size=args.block_size)
def test_shapes():
    """Check that DirectlyParameterizedNormalDiag builds (num_data, latent_dim)
    means/stds with the documented initial values (0.01*randn and 1e-5)."""
    seed = 300
    expected_shape = (num_data, latent_dim)
    np.random.seed(seed)
    encoder = DirectlyParameterizedNormalDiag(num_data, latent_dim)
    assert np.all(tf.shape(encoder.means) == expected_shape)
    assert np.all(tf.shape(encoder.stds) == expected_shape)
    # Re-seed so the reference draw replays the constructor's random draw.
    np.random.seed(seed)
    expected_means = 0.01 * np.random.randn(num_data, latent_dim)
    expected_stds = 1e-05 * np.ones_like(expected_means)
    np.testing.assert_equal(encoder.means.numpy(), expected_means)
    np.testing.assert_allclose(encoder.stds.numpy(), expected_stds, rtol=1e-11)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.