code stringlengths 101 5.91M |
|---|
class energy_50_RL(nn.Module):
    """Actor-critic network over a 50x50 action grid.

    The input observation is flattened to 50*51 = 2550 values; `forward`
    returns `(value, action)` where `value` is a scalar in (0, 1) from the
    critic head and `action` is a row-wise softmax over the 50x50 logits.
    """
    def __init__(self):
        super(energy_50_RL, self).__init__()
        self.name = 'energy_RL'
        # Shared trunk: flattened observation -> 128-d feature vector.
        self.feature = nn.Sequential(
            nn.Linear(50 * 51, 128),
            nn.Softplus(),
            nn.Linear(128, 128),
            nn.Softplus(),
        )
        # Critic head: feature -> scalar value squashed to (0, 1).
        self.value = nn.Sequential(
            nn.Linear(128, 64),
            nn.Tanhshrink(),
            nn.Linear(64, 1),
            nn.Sigmoid(),
        )
        # Actor head: feature -> logits for the 50x50 action matrix.
        self.action = nn.Sequential(
            nn.Linear(128, 256),
            nn.Softplus(),
            nn.Linear(256, 50 * 50),
        )
        self.softmax = nn.Softmax(dim=1)
    def forward(self, x):
        feats = self.feature(x.flatten())
        state_value = self.value(feats)
        policy = self.softmax(self.action(feats).reshape(50, 50))
        return (state_value, policy)
class Config():
    """Hyper-parameter bundle for PIP-net style landmark training."""
    def __init__(self):
        # Detection head / backbone.
        self.det_head = 'pip'
        self.net_stride = 32
        self.backbone = 'resnet50'
        self.pretrained = True
        # Optimization schedule.
        self.batch_size = 16
        self.init_lr = 0.0001
        self.num_epochs = 60
        self.decay_steps = [30, 50]
        # Input / losses.
        self.input_size = 256
        self.criterion_cls = 'l2'
        self.criterion_reg = 'l1'
        self.cls_loss_weight = 10
        self.reg_loss_weight = 1
        # Landmarks: 19 points, 10 neighbors each.
        self.num_lms = 19
        self.num_nb = 10
        # Only save a checkpoint at the very end of training.
        self.save_interval = self.num_epochs
        # Hardware.
        self.use_gpu = True
        self.gpu_id = 2
def ssl_null(args, model_dict, optimizer_dict, lrer_dict, criterion_dict, task_func):
    """Assemble the SSL_NULL (plain supervised, no semi-supervision) algorithm.

    Each of the four element dicts must contain exactly one entry keyed
    'model'. NOTE(review): logger.log_err presumably aborts; if it merely
    logs, control still falls through to the build below -- confirm.
    """
    element_dicts = (model_dict, optimizer_dict, lrer_dict, criterion_dict)
    if not all(len(d) == 1 for d in element_dicts):
        logger.log_err('The len(element_dict) of SSL_NULL should be 1\n')
    elif list(model_dict.keys())[0] != 'model':
        logger.log_err("In SSL_NULL, the key of element_dict should be 'model',\nbut '{0}' is given\n".format(model_dict.keys()))
    model_funcs = [model_dict['model']]
    optimizer_funcs = [optimizer_dict['model']]
    lrer_funcs = [lrer_dict['model']]
    criterion_funcs = [criterion_dict['model']]
    algorithm = SSLNULL(args)
    algorithm.build(model_funcs, optimizer_funcs, lrer_funcs, criterion_funcs, task_func)
    return algorithm
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
    """Deserialize a saved checkpoint into a CheckpointState.

    Every tensor is remapped onto the CPU regardless of the device it was
    saved from, so the result loads on machines without GPUs.
    """
    print(f'Reading saved model from {model_file}')
    saved = torch.load(
        model_file,
        map_location=(lambda storage, location: default_restore_location(storage, 'cpu')))
    return CheckpointState(**saved)
def get_augmentation_v1(patch_size):
    """3-D augmentation pipeline: geometric jitter, plane dropout, resize to
    `patch_size`, axis flips/rotations, then intensity noise and gamma."""
    transforms = [
        Rotate((-15, 15), (0, 0), (0, 0), p=0.5),
        RandomCropFromBorders(crop_value=0.1, p=0.5),
        ElasticTransform((0, 0.25), interpolation=2, p=0.1),
        RandomDropPlane(plane_drop_prob=0.1, axes=(0, 1, 2), p=0.5),
        # Resize always runs so every sample ends up at patch_size.
        Resize(patch_size, interpolation=1, always_apply=True, p=1.0),
        Flip(0, p=0.5),
        Flip(1, p=0.5),
        Flip(2, p=0.5),
        RandomRotate90((1, 2), p=0.5),
        GaussianNoise(var_limit=(0, 5), p=0.5),
        RandomGamma(gamma_limit=(80, 120), p=0.5),
    ]
    return Compose(transforms, p=1.0)
def parse_args(argv):
    """Parse command-line options for the example compression training script."""
    p = argparse.ArgumentParser(description='Example training script.')
    # Model / data.
    p.add_argument('-m', '--model', default='bmshj2018-factorized', choices=models.keys(), help='Model architecture (default: %(default)s)')
    p.add_argument('-d', '--dataset', type=str, required=True, help='Training dataset')
    # Optimization.
    p.add_argument('-e', '--epochs', default=50, type=int, help='Number of epochs (default: %(default)s)')
    p.add_argument('-lr', '--learning-rate', default=0.0001, type=float, help='Learning rate (default: %(default)s)')
    p.add_argument('-n', '--num-workers', type=int, default=20, help='Dataloaders threads (default: %(default)s)')
    p.add_argument('--lambda', dest='lmbda', type=float, default=3, help='Bit-rate distortion parameter (default: %(default)s)')
    p.add_argument('--batch-size', type=int, default=8, help='Batch size (default: %(default)s)')
    p.add_argument('--test-batch-size', type=int, default=8, help='Test batch size (default: %(default)s)')
    p.add_argument('--aux-learning-rate', default=0.001, help='Auxiliary loss learning rate (default: %(default)s)')
    p.add_argument('--patch-size', type=int, nargs=2, default=(256, 256), help='Size of the patches to be cropped (default: %(default)s)')
    # Runtime / checkpointing.
    p.add_argument('--cuda', action='store_true', help='Use cuda')
    p.add_argument('--save', action='store_true', default=True, help='Save model to disk')
    p.add_argument('--seed', type=float, default=100, help='Set random seed for reproducibility')
    p.add_argument('--clip_max_norm', default=1.0, type=float, help='gradient clipping max norm (default: %(default)s')
    p.add_argument('--checkpoint', type=str, help='Path to a checkpoint')
    p.add_argument('--type', type=str, default='mse', help='loss type', choices=['mse', 'ms-ssim'])
    p.add_argument('--save_path', type=str, help='save_path')
    p.add_argument('--skip_epoch', type=int, default=0)
    p.add_argument('--N', type=int, default=128)
    p.add_argument('--lr_epoch', nargs='+', type=int)
    p.add_argument('--continue_train', action='store_true', default=True)
    return p.parse_args(argv)
def cbam_resnet101(**kwargs):
    """Build a 101-layer CBAM-attention ResNet via the shared `get_resnet` factory.

    Extra keyword arguments are forwarded unchanged to the factory.
    """
    return get_resnet(blocks=101, model_name='cbam_resnet101', **kwargs)
def get_splits(lines, line_counts):
    """Pool lines from several files, shuffle them globally, and carve the
    shuffled pool into consecutive splits of the requested sizes.

    Returns (splits, mappings): `splits[k]` holds line_counts[k] lines and
    `mappings[k]` records, via format_mappings, which source file and line
    each shuffled entry came from.
    """
    all_lines = []
    line_idx = []
    file_mappings = []
    for file_no, file_lines in enumerate(lines):
        all_lines.extend(file_lines)
        line_idx.extend(range(len(file_lines)))
        file_mappings.extend([file_no] * len(file_lines))
    # One shared permutation keeps the three parallel lists aligned.
    order = list(range(len(all_lines)))
    random.shuffle(order)
    all_lines = [all_lines[j] for j in order]
    line_idx = [line_idx[j] for j in order]
    file_mappings = [file_mappings[j] for j in order]
    splits = []
    mappings = []
    start = 0
    for count in line_counts:
        end = start + count
        splits.append(all_lines[start:end])
        mappings.append(format_mappings(line_idx[start:end], file_mappings[start:end]))
        start = end
    return (splits, mappings)
def model_creator_multiple_metrics(config):
    """Build a minimal single-Dense Keras regressor compiled with two metrics.

    `config` is accepted for creator-API compatibility but unused here.
    """
    layers = [tf.keras.layers.Dense(1)]
    model = tf.keras.models.Sequential(layers)
    model.compile(optimizer='sgd', loss='mse', metrics=['mse', 'mae'])
    return model
def test_he_normal_receptive_field():
    """He-normal samples for a receptive-field-shaped tensor should be
    roughly zero-mean with std near 0.1 (loose statistical bounds)."""
    from lasagne.init import HeNormal
    draw = HeNormal().sample((50, 50, 2))
    assert -0.01 < draw.mean() < 0.01
    assert 0.09 < draw.std() < 0.11
class ECABasicBlock(BasicBlock):
    """BasicBlock augmented with an ECA (efficient channel attention) layer
    applied to the residual branch before the skip connection."""
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, dimension=3):
        super(ECABasicBlock, self).__init__(inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, dimension=dimension)
        self.eca = ECALayer(planes, gamma=2, b=1)
    def forward(self, x):
        # conv -> norm -> relu -> conv -> norm -> channel attention.
        branch = self.relu(self.norm1(self.conv1(x)))
        branch = self.eca(self.norm2(self.conv2(branch)))
        # Project the identity when shapes differ, then add and activate.
        identity = x if self.downsample is None else self.downsample(x)
        branch += identity
        return self.relu(branch)
class XfunReTrainer(FunsdTrainer):
    """Trainer specialization for XFUN relation extraction.

    Adds 'relations' to the supervised label names and replaces the generic
    prediction loop with one that accumulates predicted relations/entities
    across batches and scores precision/recall/F1 against the gold relations.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Extra supervision signal pulled from each batch beside the base labels.
        self.label_names.append('relations')
    def prediction_step(self, model: nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[(Optional[float], Optional[torch.Tensor], Optional[torch.Tensor])]:
        """Run one no-grad forward pass and return (model outputs, label tuple).

        NOTE(review): unlike the base Trainer this returns the full output
        object, not (loss, logits, labels) -- the return annotation above no
        longer matches the actual value.
        """
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            if self.use_amp:
                # Mixed-precision inference when AMP is enabled.
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
        labels = tuple((inputs.get(name) for name in self.label_names))
        return (outputs, labels)
    def prediction_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> PredictionOutput:
        """Evaluate relation extraction over `dataloader`.

        NOTE(review): despite the PredictionOutput annotation this returns a
        plain metrics dict (precision/recall/f1 plus '<prefix>_loss').
        """
        if (not isinstance(dataloader.dataset, collections.abc.Sized)):
            raise ValueError('dataset must implement __len__')
        prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.args.prediction_loss_only)
        if (self.args.deepspeed and (not self.args.do_train)):
            logger.info('Detected the deepspeed argument but it will not be used for evaluation')
        model = self._wrap_model(self.model, training=False)
        # Optionally run evaluation in half precision when not mid-training.
        if ((not self.is_in_train) and self.args.fp16_full_eval):
            model = model.half().to(self.args.device)
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info('***** Running %s *****', description)
        logger.info('  Num examples = %d', num_examples)
        logger.info('  Batch size = %d', batch_size)
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Accumulate per-batch results by concatenation (these appear to be
        # list-like per document -- confirm against the model's output type).
        re_labels = None
        pred_relations = None
        entities = None
        for (step, inputs) in enumerate(dataloader):
            (outputs, labels) = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # labels[1] is the 'relations' entry appended in __init__.
            re_labels = (labels[1] if (re_labels is None) else (re_labels + labels[1]))
            pred_relations = (outputs.pred_relations if (pred_relations is None) else (pred_relations + outputs.pred_relations))
            entities = (outputs.entities if (entities is None) else (entities + outputs.entities))
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
        # Rebuild gold relations per document from entity spans/labels.
        # Assumes entities[b]['start'/'end'/'label'] are index-aligned lists
        # addressed by the head/tail entity ids -- confirm upstream schema.
        gt_relations = []
        for b in range(len(re_labels)):
            rel_sent = []
            for (head, tail) in zip(re_labels[b]['head'], re_labels[b]['tail']):
                rel = {}
                rel['head_id'] = head
                rel['head'] = (entities[b]['start'][rel['head_id']], entities[b]['end'][rel['head_id']])
                rel['head_type'] = entities[b]['label'][rel['head_id']]
                rel['tail_id'] = tail
                rel['tail'] = (entities[b]['start'][rel['tail_id']], entities[b]['end'][rel['tail_id']])
                rel['tail_type'] = entities[b]['label'][rel['tail_id']]
                # All gold relations share a single relation type.
                rel['type'] = 1
                rel_sent.append(rel)
            gt_relations.append(rel_sent)
        re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations))
        re_metrics = {'precision': re_metrics['ALL']['p'], 'recall': re_metrics['ALL']['r'], 'f1': re_metrics['ALL']['f1']}
        # NOTE(review): `outputs` here is from the LAST batch only, so this
        # loss is not averaged over the whole dataset.
        re_metrics[f'{metric_key_prefix}_loss'] = outputs.loss.mean().item()
        metrics = {}
        # Prefix every metric key with '<metric_key_prefix>_' exactly once.
        for key in list(re_metrics.keys()):
            if (not key.startswith(f'{metric_key_prefix}_')):
                metrics[f'{metric_key_prefix}_{key}'] = re_metrics.pop(key)
            else:
                metrics[f'{key}'] = re_metrics.pop(key)
        return metrics
    def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> Dict[(str, float)]:
        """Run the custom prediction loop and log/report its metrics."""
        if ((eval_dataset is not None) and (not isinstance(eval_dataset, collections.abc.Sized))):
            raise ValueError('eval_dataset must implement __len__')
        # Temporarily force non-distributed mode so get_eval_dataloader builds
        # an unsharded sampler, then restore the real rank.
        # NOTE(review): assumes torch.distributed is initialized -- confirm.
        self.args.local_rank = (- 1)
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        self.args.local_rank = torch.distributed.get_rank()
        start_time = time.time()
        metrics = self.prediction_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (self.compute_metrics is None) else None), ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
        n_samples = len((eval_dataset if (eval_dataset is not None) else self.eval_dataset))
        metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
        self.log(metrics)
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def create_optimizer(self, speedup_r=4.0):
        """Build the optimizer with four parameter groups.

        Groups are the cross-product of (weight-decay vs no-decay) and
        ('extractor' params at learning_rate * speedup_r vs everything else at
        the base learning_rate). Bias and LayerNorm params get no decay.
        """
        if (self.optimizer is None):
            decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
            decay_parameters = [name for name in decay_parameters if ('bias' not in name)]
            # Extractor params (excluding rel_classifier) train faster.
            speedup_parameters = [name for name in get_parameter_names(self.model, []) if (('extractor' in name) and ('rel_classifier' not in name))]
            optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n in speedup_parameters))], 'weight_decay': 0.0, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': self.args.learning_rate}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': 0.0, 'lr': self.args.learning_rate}]
            # NOTE(review): this assignment is dead -- it is immediately
            # overwritten by the if/else right below.
            optimizer_cls = (Adafactor if self.args.adafactor else AdamW)
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {'betas': (self.args.adam_beta1, self.args.adam_beta2), 'eps': self.args.adam_epsilon}
            if (self.sharded_ddp == ShardedDDPOption.SIMPLE):
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if is_sagemaker_mp_enabled():
            import smdistributed.modelparallel.torch as smp
            self.optimizer = smp.DistributedOptimizer(self.optimizer)
class AverageMeter(object):
    """Tracks the latest value and a running average of a series of numbers."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record a new observation `val`, counted with weight `n`.

        NOTE(review): `sum` accumulates `val` (not `val * n`) while `count`
        grows by `n`; confirm callers intend that weighting when n > 1.
        """
        self.val = val
        self.sum += val
        self.count += n
        self.avg = self.sum / self.count
    def __format__(self, spec):
        # Render as 'latest (average)' using the caller-supplied format spec.
        return '{self.val:{format}} ({self.avg:{format}})'.format(self=self, format=spec)
def testGetGeneralActivationBound():
    """Sanity-check that the linear bounds returned by
    getConvenientGeneralActivationBound enclose the activation on [l, u].

    Plots the activation and both bounding lines; the printed violation
    tensors should be empty and the printed minima should be True.
    """
    upper = torch.ones(1) * 5
    lower = torch.ones(1) * (-4)
    activation = 'relu'
    func = Activation[activation][0]
    (kl, bl, ku, bu) = getConvenientGeneralActivationBound(lower, upper, activation, use_constant=True)
    # Random probe points drawn uniformly from [lower, upper].
    x = torch.rand(1000) * (upper - lower) + lower
    func_x = func(x)
    lower_line = kl * x + bl
    upper_line = ku * x + bu
    plt.plot(x.numpy(), func_x.numpy(), '.')
    plt.plot(x.numpy(), lower_line.numpy(), '.')
    plt.plot(x.numpy(), upper_line.numpy(), '.')
    print((lower_line <= func_x).min())
    print(x[lower_line > func_x], lower_line[lower_line > func_x], func_x[lower_line > func_x])
    print((upper_line >= func_x).min())
    print(x[upper_line < func_x], upper_line[upper_line < func_x], func_x[upper_line < func_x])
def open_image(image, ext):
    """Load `image` according to its file extension.

    Parameters
    ----------
    image : str | np.ndarray
        Path to the file, or (for '.npy'/'.npz') an already-loaded array.
    ext : str
        Lower-case extension including the dot, e.g. '.jpg'.

    Returns a PIL RGB image for picture formats, a torch.Tensor (or the
    loaded object) for numpy formats, or whatever the PAPER_EXT loader
    produces. Raises NotImplementedError for unknown extensions.
    """
    # JPEG and PNG share the same load path.
    if ext in ('.jpg', '.jpeg', '.png'):
        return Image.open(image).convert('RGB')
    if ext == '.dcm':
        ds = pydicom.dcmread(image)
        # Apply DICOM windowing (VOI LUT) when the metadata provides it.
        if 'WindowWidth' in ds:
            img = apply_voi_lut(ds.pixel_array, ds).astype(float)
        else:
            img = ds.pixel_array.astype(float)
        # Clamp negatives and rescale to the 8-bit range.
        img = np.uint8((np.maximum(img, 0) / img.max()) * 255.0)
        return Image.fromarray(img).convert('RGB')
    if ext in ('.npy', '.npz'):
        if isinstance(image, str):
            image = np.load(image)
        # Plain arrays become tensors; other loaded objects (e.g. NpzFile)
        # are returned as-is.
        if isinstance(image, np.ndarray):
            image = torch.from_numpy(image)
        return image
    if ext in PAPER_EXT:
        # SECURITY: eval() resolves a loader name from PAPER_EXT. This is safe
        # only while PAPER_EXT is a trusted, hard-coded mapping -- never feed
        # it user-controlled extensions/values.
        return eval(PAPER_EXT[ext])(image)
    raise NotImplementedError('Image extension {} not implemented'.format(ext))
def find_latest_checkpoint(path, suffix='pth'):
    """Return the most recent checkpoint file under `path`, or None.

    Prefers an explicit 'latest.<suffix>' file; otherwise picks the
    '*.<suffix>' file with the largest trailing iteration number
    (filenames assumed to look like '<prefix>_<number>.<suffix>').
    """
    if not osp.exists(path):
        warnings.warn('The path of checkpoints does not exist.')
        return None
    latest_link = osp.join(path, f'latest.{suffix}')
    if osp.exists(latest_link):
        return latest_link
    candidates = glob.glob(osp.join(path, f'*.{suffix}'))
    if not candidates:
        warnings.warn('There are no checkpoints in the path.')
        return None
    best_count = -1
    best_path = None
    for ckpt in candidates:
        # Extract the iteration number between the last '_' and the extension.
        count = int(osp.basename(ckpt).split('_')[-1].split('.')[0])
        if count > best_count:
            best_count = count
            best_path = ckpt
    return best_path
def is_chunk_start(prev_tag, tag):
    """Return True when `tag` opens a new chunk given the preceding tag.

    Uses BIOES-style prefixes from split_tag: 'O' never starts a chunk; a
    type change always does; otherwise B/S open a chunk, and anything after
    E/S starts fresh because the previous chunk just closed.
    """
    (p_prev, t_prev) = split_tag(prev_tag)
    (p_cur, t_cur) = split_tag(tag)
    if p_cur == 'O':
        return False
    if p_prev == 'O':
        return p_cur != 'O'
    return (t_prev != t_cur) or (p_cur in ['B', 'S']) or (p_prev in ['E', 'S'])
class ChatCompletionChunkChoice(TypedDict):
    """One choice entry inside a streaming chat-completion chunk (appears to
    mirror the OpenAI streaming response schema -- confirm against the API)."""
    # Position of this choice within the chunk's choices array.
    index: int
    # Incremental delta carrying the newly streamed tokens/fields.
    delta: ChatCompletionChunkDelta
    # Why generation ended; None while the stream is still in progress.
    finish_reason: Optional[str]
class TestInitialStateBridge(BridgeTest):
    """Tests for InitialStateBridge across its supported bridge_input modes."""
    def _create_bridge(self, **kwargs):
        # kwargs are forwarded verbatim as the bridge's `params`.
        return InitialStateBridge(encoder_outputs=self.encoder_outputs, decoder_state_size=self.decoder_cell.state_size, params=kwargs, mode=tf.contrib.learn.ModeKeys.TRAIN)
    def _assert_correct_outputs(self, initial_state_):
        # The bridged state must structurally match the decoder's state spec.
        nest.assert_same_structure(initial_state_, self.decoder_cell.state_size)
    def test_with_final_state(self):
        self._assert_correct_outputs(self._run(bridge_input='final_state'))
    def test_with_outputs(self):
        self._assert_correct_outputs(self._run(bridge_input='outputs'))
    def test_with_activation_fn(self):
        self._assert_correct_outputs(self._run(bridge_input='final_state', activation_fn='tanh'))
def check_file_integrity(results_dir):
    """Assert that every sweep job described in sweep_config.json produced a
    log file under <results_dir>/logs, and that no extra files exist there."""
    with open(os.path.join(results_dir, 'sweep_config.json'), 'r') as fp:
        flags = json.load(fp)
    # Paths are irrelevant for reconstructing job names; neutralize them.
    flags['data_path'] = 'dummy'
    flags['save_path'] = 'dummy'
    (_, train_args) = hparams_sweep.make_args_list(flags)
    logs_dir = os.path.join(results_dir, 'logs')
    missing_names = []
    for args in tqdm.tqdm(train_args, desc=('Checking file integrity for folder ' + results_dir)):
        name = get_job_name(args) + '.json'
        if not os.path.exists(os.path.join(logs_dir, name)):
            missing_names.append(name)
    missing_files = len(missing_names)
    assert (missing_files == 0), ((str(missing_files) + ' sweep results are missing from the results directory:') + str(missing_names))
    assert (len(train_args) == len(os.listdir(logs_dir))), 'There are extra files in the logs directory'
class VGG(extractor.BaseModule):
    """VGG feature extractor assembled from a layer spec looked up in `cfgs`.

    `config` supplies the spec key ('cfg'), input channel count ('channels')
    and the batch-norm toggle ('batch_norm'). Output feature width is 512.
    """
    def __init__(self, config, name):
        super(VGG, self).__init__()
        self.name = name
        self.features = make_layers(
            cfgs[config['cfg']],
            batch_norm=config['batch_norm'],
            in_channels=config['channels'])
        self.n_features = 512
    def forward(self, x):
        return self.features(x)
    def _initialize_weights(self):
        # Kaiming for convs, unit-gain for BN, small-normal for linear layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def train(train_data, val_data, model, args):
    """Dispatch to MAML-based or regular training based on args.maml."""
    trainer = maml if args.maml else regular
    return trainer.train(train_data, val_data, model, args)
def create_annotation_info(annotation_id, image_id, category_info, binary_mask, score=None, image_size=None, tolerance=2, bounding_box=None):
    """Build a COCO-style annotation dict from a binary mask.

    Parameters mirror the COCO annotation fields; `tolerance` controls polygon
    simplification for non-crowd regions, and `bounding_box` is derived from
    the mask when not supplied. Returns None when the mask area is under one
    pixel or (for non-crowd regions) no polygon could be extracted.
    """
    if image_size is not None:
        binary_mask = resize_binary_mask(binary_mask, image_size)
    binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
    area = mask.area(binary_mask_encoded)
    if area < 1:
        # Degenerate mask -- nothing to annotate.
        return None
    if bounding_box is None:
        bounding_box = mask.toBbox(binary_mask_encoded)
    # COCO convention: crowd regions use RLE, single objects use polygons.
    if category_info['is_crowd']:
        is_crowd = 1
        segmentation = binary_mask_to_rle(binary_mask)
    else:
        is_crowd = 0
        segmentation = binary_mask_to_polygon(binary_mask, tolerance)
        if not segmentation:
            return None
    # Single dict construction instead of two near-identical literals; the
    # optional 'score' key is appended last, matching the original layout.
    annotation_info = {
        'id': annotation_id,
        'image_id': image_id,
        'category_id': category_info['id'],
        'iscrowd': is_crowd,
        'area': area.tolist(),
        'bbox': bounding_box.astype(int).tolist(),
        'segmentation': segmentation,
        'width': binary_mask.shape[1],
        'height': binary_mask.shape[0],
    }
    if score is not None:
        annotation_info['score'] = score
    return annotation_info
class DeepLabv3(nn.Module):
    """DeepLab v3 semantic segmentation network.

    Pipeline: backbone -> ASPP -> 1x1 classifier conv, with logits bilinearly
    upsampled back to the input resolution.
    """
    def __init__(self, backbone='resnet101', output_stride=16, num_classes=21, norm_layer=nn.BatchNorm2d, freeze_bn=False, bn_mom=0.05, aspp_depth=256, pretrained=True):
        super(DeepLabv3, self).__init__()
        self.aspp_depth = aspp_depth
        self.output_stride = output_stride
        self.norm_layer = norm_layer
        self.backbone = net_factory.get_backbone_net(output_stride=output_stride, pretrained=pretrained, norm_layer=norm_layer, bn_mom=bn_mom, root_beta=True)
        self.aspp = ASPP(backbone, output_stride, norm_layer, depth=self.aspp_depth, bn_mom=bn_mom)
        # 1x1 conv projecting ASPP features to per-class logits.
        self.last_conv = nn.Conv2d(self.aspp_depth, num_classes, kernel_size=1, stride=1)
        if freeze_bn:
            self.freeze_bn()
        self._init_weight()
    def forward(self, input):
        (features, _) = self.backbone(input)
        logits = self.last_conv(self.aspp(features))
        # Upsample to the input's spatial size.
        return F.interpolate(logits, size=input.size()[2:], mode='bilinear', align_corners=True)
    def freeze_bn(self):
        """Put every normalization layer into eval mode (frozen running stats)."""
        for layer in self.modules():
            if isinstance(layer, (self.norm_layer, nn.BatchNorm2d)):
                layer.eval()
    def _init_weight(self):
        """Kaiming-init convs and reset norms in the ASPP head and classifier."""
        for module in (self.last_conv, self.aspp):
            for layer in module.modules():
                if isinstance(layer, nn.Conv2d):
                    nn.init.kaiming_normal_(layer.weight, mode='fan_in', nonlinearity='relu')
                    if layer.bias is not None:
                        layer.bias.data.zero_()
                elif isinstance(layer, (self.norm_layer, nn.BatchNorm2d)):
                    layer.weight.data.fill_(1)
                    layer.bias.data.zero_()
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load weights from args.ckpt into `model` and evaluate one epoch.

    Results go to `eval_output_dir`; evaluation settings come from the
    module-level `cfg`. `to_cpu=dist_test` loads weights to CPU first in
    distributed runs -- presumably to avoid device contention; confirm.
    """
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()
    eval_utils.eval_one_epoch(cfg, model, test_loader, epoch_id, logger, dist_test=dist_test, result_dir=eval_output_dir, save_to_file=args.save_to_file)
def sample_lp_star(preds):
    """Sample a random layered logic program over the given predicates.

    Builds `graph_depth` levels of (predicate, truth-label) node pairs, wires
    each node to 1-3 supporting nodes in the previous level such that a node
    labelled 1 is only supported by label-1 nodes, then adds extra random
    rules across levels. Returns (rules, facts, query): rules is a list of
    (support_preds, pred) pairs, facts are the label-1 predicates of level 0,
    and query is one random predicate.

    NOTE(review): the variable called `head` holds the *supporting* side of
    each (head, lit) rule pair -- confirm the intended rule orientation with
    callers before renaming anything.
    """
    preds_ = preds[:]
    pred_num = len(preds)
    # Depth >= 2; width = predicates per level.
    graph_depth = random.randint(2, (pred_num // 2))
    width = (pred_num // graph_depth)
    # Leftover predicates are handed out one per later level via preds_0.
    preds_0 = preds_[:(pred_num % graph_depth)]
    preds_ = preds_[(pred_num % graph_depth):]
    rules = []
    levels = []
    # Level 0: random labels, but force at least one 0 and one 1.
    prev_level = [[x, random.randint(0, 1)] for x in preds_[:width]]
    (prev_level[0][1], prev_level[1][1]) = (0, 1)
    preds_ = preds_[width:]
    levels.append(prev_level)
    for d in range(0, (graph_depth - 1)):
        level = [[x, random.randint(0, 1)] for x in preds_[:width]]
        if (preds_0 != []):
            # NOTE(review): appended as a tuple while other nodes are lists;
            # it is never item-assigned afterwards (indices 0/1 below are
            # always lists since width >= 2), so this happens to work.
            level.append((preds_0[0], random.randint(0, 1)))
            preds_0 = preds_0[1:]
        (level[0][1], level[1][1]) = (0, 1)
        preds_ = preds_[width:]
        for node in level:
            (lit, label) = (node[0], node[1])
            head_nodes_cand = prev_level
            if (label == 1):
                # True nodes may only be supported by true antecedents.
                head_nodes_cand = [x for x in prev_level if (x[1] == 1)]
            head_num = random.randint(1, min(3, len(head_nodes_cand)))
            while True:
                head_nodes = random.sample(head_nodes_cand, head_num)
                # Reject supports that would derive a node labelled false.
                if (not (all([x[1] for x in head_nodes]) and (label == 0))):
                    break
            head = [x[0] for x in head_nodes]
            rules.append((head, lit))
        levels.append(level)
        prev_level = level
    # Extra random rules layered on top of the skeleton.
    rule_num = random.randint((0 * pred_num), (3 * pred_num))
    nodes = [x for y in levels for x in y]
    for _ in range(0, rule_num):
        tail_d = random.randint(0, (len(levels) - 2))
        tail_level = levels[tail_d]
        tail_node = random.sample([x for x in tail_level if (x[1] == 1)], 1)[0]
        tail = tail_node[0]
        head_cand = [x for y in levels[tail_d:] for x in y if (x[0] != tail)]
        head_num = random.randint(1, min(3, len(head_cand)))
        while True:
            head_nodes = random.sample(head_cand, head_num)
            # Require at least one false antecedent.
            if (not all([x[1] for x in head_nodes])):
                break
        # NOTE(review): this resample discards the choice the loop above just
        # accepted, so the all-true constraint is not actually enforced here;
        # looks unintentional -- confirm.
        head_nodes = random.sample(head_cand, head_num)
        head = [x[0] for x in head_nodes]
        rules.append((head, tail))
    facts = [x[0] for x in levels[0] if (x[1] == 1)]
    query = random.sample([x[0] for x in nodes], 1)[0]
    return (rules, facts, query)
# NOTE(review): this call looks like a fairseq architecture registration that
# lost its '@' (upstream: @register_model_architecture('masked_lm', 'xlm_base')).
# Left byte-identical pending confirmation.
_model_architecture('masked_lm', 'xlm_base')
def xlm_architecture(args):
    """Fill XLM-base hyper-parameter defaults into `args` for any value not
    already set, then apply the shared masked-LM base architecture defaults."""
    defaults = {
        'encoder_embed_dim': 1024,
        'share_encoder_input_output_embed': True,
        'no_token_positional_embeddings': False,
        'encoder_learned_pos': True,
        'num_segment': 1,
        'encoder_layers': 6,
        'encoder_attention_heads': 8,
        'encoder_ffn_embed_dim': 4096,
        'bias_kv': False,
        'zero_attn': False,
        'sent_loss': False,
        'activation_fn': 'gelu',
        'encoder_normalize_before': False,
        'pooler_activation_fn': 'tanh',
    }
    # getattr-with-default leaves explicitly-set values untouched.
    for key, value in defaults.items():
        setattr(args, key, getattr(args, key, value))
    base_architecture(args)
def rdata_to_csv_for_aq(file_rdata, file_csv_output, rdata_df):
    """Convert an .RData file to CSV by shelling out to an R helper script.

    `rdata_df` names the data frame inside the RData file to export; requires
    Rscript on PATH and the helper script at its repo-relative location.
    """
    call(['Rscript', '--vanilla', 'data_processing/rdata_to_csv_for_aq.r', file_rdata, file_csv_output, rdata_df])
def supported_features_mapping(*supported_features: str, onnx_config_cls: str=None) -> Dict[(str, Callable[([PretrainedConfig], OnnxConfig)])]:
    """Map each feature name to a factory that builds its OnnxConfig.

    `onnx_config_cls` is a dotted attribute path resolved against the
    `transformers` package. Features containing '-with-past' use the class's
    `with_past` constructor for the base task; others use `from_model_config`.
    Raises ValueError when no config class path is given.
    """
    if onnx_config_cls is None:
        raise ValueError('A OnnxConfig class must be provided')
    # Walk the dotted path down from the transformers package root.
    config_cls = transformers
    for attr_name in onnx_config_cls.split('.'):
        config_cls = getattr(config_cls, attr_name)
    mapping = {}
    for feature in supported_features:
        if '-with-past' in feature:
            base_task = feature.replace('-with-past', '')
            mapping[feature] = partial(config_cls.with_past, task=base_task)
        else:
            mapping[feature] = partial(config_cls.from_model_config, task=feature)
    return mapping
def action_invariance_constraint(logs, replay_dict, agent, ensemble_idx, a=None):
    """MSE between an action's log-probability under the original observation
    and under its augmented counterpart (an invariance consistency loss).

    When `a` is None the action is sampled from the original-observation
    policy. All policy evaluations run under no_grad. `logs` is unused here.
    """
    (orig_obs, _) = replay_dict['original_obs']
    (aug_obs, _) = replay_dict['augmented_obs']
    actor = agent.actors[ensemble_idx]
    with torch.no_grad():
        orig_dist = actor(agent.encoder(orig_obs))
        if a is None:
            a = orig_dist.sample()
        orig_logp = orig_dist.log_prob(a).sum(-1, keepdim=True)
        aug_dist = actor(agent.encoder(aug_obs))
        aug_logp = aug_dist.log_prob(a).sum(-1, keepdim=True)
    return F.mse_loss(orig_logp, aug_logp)
class D2vAudioConfig(D2vModalityConfig):
    """data2vec modality configuration for audio inputs."""
    type: Modality = Modality.AUDIO
    # Normalization mode for the convolutional feature extractor.
    extractor_mode: str = 'layer_norm'
    feature_encoder_spec: str = field(default='[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]', metadata={'help': 'string describing convolutional feature extraction layers in form of a python list that contains [(dim, kernel_size, stride), ...]'})
    conv_pos_width: int = field(default=95, metadata={'help': 'number of filters for convolutional positional embeddings'})
    conv_pos_groups: int = field(default=16, metadata={'help': 'number of groups for convolutional positional embedding'})
    conv_pos_depth: int = field(default=5, metadata={'help': 'depth of positional encoder network'})
    # Whether to layer-norm before the convolutional positional encoder.
    conv_pos_pre_ln: bool = False
class MaskedSoftmax(nn.Module):
    """Softmax along `dim` that optionally zeroes masked positions and
    renormalizes the remaining probability mass.

    The per-slice max is subtracted before exponentiation purely for
    numerical stability; it does not change the softmax result.
    """
    def __init__(self, dim):
        super(MaskedSoftmax, self).__init__()
        self.dim = dim
    def forward(self, logit, mask=None):
        shifted = logit - torch.max(logit, dim=self.dim, keepdim=True)[0]
        probs = F.softmax(shifted, dim=self.dim)
        if mask is None:
            return probs
        masked = probs * mask
        # Renormalize so surviving entries sum to 1 along `dim`.
        return masked / masked.sum(self.dim, keepdim=True)
def _grad_boosting_hp_space(name_func, learning_rate=None, n_estimators=None, subsample=None, min_samples_split=None, min_samples_leaf=None, max_depth=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'):
    """Hyperopt search space for gradient boosting.

    Each hyper-parameter left at None is replaced by its default sampler
    (named via `name_func`); explicitly supplied values are passed through
    unchanged. `verbose` and `max_leaf_nodes` are accepted for signature
    compatibility but not placed in the space.
    """
    def pick(value, sampler, label):
        # Caller-supplied value wins; otherwise draw from the default sampler.
        return sampler(name_func(label)) if value is None else value
    hp_space = dict(
        learning_rate=pick(learning_rate, _grad_boosting_learning_rate, 'learning_rate'),
        n_estimators=pick(n_estimators, _boosting_n_estimators, 'n_estimators'),
        subsample=pick(subsample, _grad_boosting_subsample, 'subsample'),
        min_samples_split=pick(min_samples_split, _trees_min_samples_split, 'min_samples_split'),
        min_samples_leaf=pick(min_samples_leaf, _trees_min_samples_leaf, 'min_samples_leaf'),
        max_depth=pick(max_depth, _trees_max_depth, 'max_depth'),
        init=init,
        random_state=_random_state(name_func('rstate'), random_state),
        max_features=pick(max_features, _trees_max_features, 'max_features'),
        warm_start=warm_start,
        presort=presort)
    return hp_space
def test_iterable():
    """A sampler must not be considered `in` a list of distinct samplers that
    merely share the same _value (membership is not value-equality here)."""
    samplers = []
    for _ in range(3):
        s = UniformFloatSampler()
        s._value = 0.5
        samplers.append(s)
    assert samplers[2] not in [samplers[0], samplers[1]]
def writeTrainValImageLabelPathPairsToTxtFile(data_home='../', useTrain=True, useVal=False):
    """Collect (image, label) absolute-path pairs from the 'train' and/or
    'valid' trees under `data_home` and write them, one 'img lbl' pair per
    line, to ./img_lbl_pair.txt.

    Each split is laid out as <split>/<patient>/Images/* with a parallel
    <split>/<patient>/Labels/* tree. Asserts that every image has a label.

    Improvements over the original: the duplicated train/val loops are a
    single helper, and a split directory is only listed when that split is
    actually requested (the original listed 'valid' even with useVal=False,
    crashing when it did not exist).
    """
    assert (useTrain or useVal), 'Error: None of the training set or the validation set is used.'
    all_img_path = []
    all_lbl_path = []
    if useTrain:
        _collect_img_lbl_pairs(osp.join(data_home, 'train'), all_img_path, all_lbl_path)
    if useVal:
        _collect_img_lbl_pairs(osp.join(data_home, 'valid'), all_img_path, all_lbl_path)
    assert (len(all_img_path) == len(all_lbl_path)), 'Image number and label number are not equal.'
    print('Number of image label pairs are:', len(all_img_path))
    with open('./img_lbl_pair.txt', 'w') as f:
        for (i, l) in zip(all_img_path, all_lbl_path):
            f.write(i + ' ' + l + '\n')

def _collect_img_lbl_pairs(split_home, img_acc, lbl_acc):
    """Append absolute (image, label) paths for every '<pd>/Images' file under
    split_home, asserting that each image and its mirrored label exist."""
    for pd in os.listdir(split_home):
        img_dir = osp.join(split_home, pd, 'Images')
        for img_p in os.listdir(img_dir):
            img_path = osp.abspath(osp.join(img_dir, img_p))
            # Label lives in a parallel 'Labels' tree with the same filename.
            label_path = osp.abspath(img_path.replace('Images', 'Labels'))
            assert osp.exists(img_path)
            assert osp.exists(label_path)
            img_acc.append(img_path)
            lbl_acc.append(label_path)
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename for a URL.

    The name is the hex sha256 of the URL; when an etag is supplied, a '.'
    and the hex sha256 of the etag are appended so different resource
    versions cache separately.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = filename + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer in character (per-kana/kanji) mode.

    NOTE(review): many string literals below are empty ('') -- the original
    Japanese characters appear to have been stripped by an encoding or
    extraction step. Restore them from the upstream transformers test file
    before relying on these assertions.
    """
    tokenizer_class = BertJapaneseTokenizer
    def setUp(self):
        super().setUp()
        # Write a minimal vocab file so from_pretrained-style loading works.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '', '', '', '', '', '', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        # Force the character-level subword tokenizer variant.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = ' \n'
        output_text = ' '
        return (input_text, output_text)
    # The three mixin tests below do not apply to this tokenizer; disable them.
    def test_pretokenized_inputs(self):
        pass
    def test_maximum_encoding_length_pair_input(self):
        pass
    def test_maximum_encoding_length_single_input(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='character')
        tokens = tokenizer.tokenize(' \n')
        self.assertListEqual(tokens, ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '', '', '', '', '', '']
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize(''), ['', '', '', '', ''])
        # Out-of-vocab characters map to [UNK].
        self.assertListEqual(tokenizer.tokenize(''), ['', '', '', '', '[UNK]'])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
        text = tokenizer.encode('', add_special_tokens=False)
        text_2 = tokenizer.encode('', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 and 3 are the [CLS] and [SEP] ids in the pretrained char vocab.
        assert (encoded_sentence == (([2] + text) + [3]))
        assert (encoded_pair == (((([2] + text) + [3]) + text_2) + [3]))
def init_seed(seed):
    """Seed every RNG in use (python, numpy, torch CPU and CUDA) and make
    cuDNN behave deterministically for reproducible runs.

    BUG FIX: the original set `torch.cuda.cudnn_enabled = False`, which is
    not a real torch attribute -- it silently created a new one and had no
    effect. cuDNN is configured through torch.backends.cudnn; disabling
    benchmark mode keeps kernel selection deterministic across runs.
    """
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe no-op on CPU-only builds (the seed is applied lazily).
    torch.cuda.manual_seed(seed)
class VehicleID(BaseImageDataset):
    """VehicleID_V1.0 vehicle re-identification dataset loader.

    Expected layout under ``root``:
        VehicleID_V1.0/image/                  -- jpg images
        VehicleID_V1.0/train_test_split/       -- train_list.txt and
                                                  test_list_{800,1600,2400}.txt

    For each test identity one random image is placed in the gallery and the
    remaining images in the query set (matching the original split logic).
    """
    dataset_dir = 'VehicleID_V1.0'
    # Supported gallery sizes mapped to their split files.
    _TEST_LISTS = {800: 'test_list_800.txt', 1600: 'test_list_1600.txt', 2400: 'test_list_2400.txt'}

    def __init__(self, root='', verbose=True, test_size=800, **kwargs):
        super(VehicleID, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.img_dir = osp.join(self.dataset_dir, 'image')
        self.split_dir = osp.join(self.dataset_dir, 'train_test_split')
        self.train_list = osp.join(self.split_dir, 'train_list.txt')
        self.test_size = test_size
        # Bug fix: the original only assigned self.test_list for the three known
        # sizes, so an unsupported test_size crashed with AttributeError on the
        # print below instead of the intended RuntimeError in check_before_run().
        if self.test_size in self._TEST_LISTS:
            self.test_list = osp.join(self.split_dir, self._TEST_LISTS[self.test_size])
        else:
            self.test_list = None
        print(self.test_list)
        self.check_before_run()
        (train, query, gallery) = self.process_split(relabel=True)
        self.train = train
        self.query = query
        self.gallery = gallery
        if verbose:
            print('=> VehicleID loaded')
            self.print_dataset_statistics(train, query, gallery)
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams, self.num_train_vids) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams, self.num_query_vids) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams, self.num_gallery_vids) = self.get_imagedata_info(self.gallery)

    def check_before_run(self):
        """Validate that all required files/dirs exist and test_size is supported."""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError('"{}" is not available'.format(self.dataset_dir))
        if not osp.exists(self.split_dir):
            raise RuntimeError('"{}" is not available'.format(self.split_dir))
        if not osp.exists(self.train_list):
            raise RuntimeError('"{}" is not available'.format(self.train_list))
        # test_size is checked before test_list so an unsupported size reports
        # the size itself rather than a missing file.
        if self.test_size not in [800, 1600, 2400]:
            raise RuntimeError('"{}" is not available'.format(self.test_size))
        if not osp.exists(self.test_list):
            raise RuntimeError('"{}" is not available'.format(self.test_list))

    def get_pid2label(self, pids):
        """Map raw vehicle ids to consecutive 0-based training labels."""
        pid_container = set(pids)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        return pid2label

    def parse_img_pids(self, nl_pairs, pid2label=None, cam=0):
        """Turn (name, pid) pairs into (img_path, pid, camid, viewid) tuples.

        VehicleID has no camera annotation: a synthetic camid distinguishes
        query (0) from gallery (1); viewid is a constant filler.
        """
        output = []
        for info in nl_pairs:
            name = info[0]
            pid = info[1]
            if pid2label is not None:
                pid = pid2label[pid]
            img_path = osp.join(self.img_dir, name + '.jpg')
            output.append((img_path, pid, cam, 1))
        return output

    def process_split(self, relabel=False):
        """Read the split files and build the train / query / gallery lists."""
        train_pid_dict = defaultdict(list)
        with open(self.train_list) as f_train:
            train_data = f_train.readlines()
            for data in train_data:
                (name, pid) = data.strip().split(' ')
                pid = int(pid)
                train_pid_dict[pid].append([name, pid])
        train_pids = list(train_pid_dict.keys())
        num_train_pids = len(train_pids)
        # Typo fix in the message: "but but got" -> "but got".
        assert num_train_pids == 13164, 'There should be 13164 vehicles for training, but got {}, please check the data'.format(num_train_pids)
        test_pid_dict = defaultdict(list)
        with open(self.test_list) as f_test:
            test_data = f_test.readlines()
            for data in test_data:
                (name, pid) = data.split(' ')
                pid = int(pid)  # int() tolerates the trailing newline left by split(' ')
                test_pid_dict[pid].append([name, pid])
        test_pids = list(test_pid_dict.keys())
        num_test_pids = len(test_pids)
        assert num_test_pids == self.test_size, 'There should be {} vehicles for testing, but got {}, please check the data'.format(self.test_size, num_test_pids)
        train_data = []
        query_data = []
        gallery_data = []
        train_pids = sorted(train_pids)
        for pid in train_pids:
            train_data.extend(train_pid_dict[pid])
        for pid in test_pids:
            imginfo = test_pid_dict[pid]
            # One random image per test id goes to the gallery, the rest to the query set.
            sample = random.choice(imginfo)
            imginfo.remove(sample)
            query_data.extend(imginfo)
            gallery_data.append(sample)
        train_pid2label = self.get_pid2label(train_pids) if relabel else None
        train = self.parse_img_pids(train_data, train_pid2label)
        query = self.parse_img_pids(query_data, cam=0)
        gallery = self.parse_img_pids(gallery_data, cam=1)
        return (train, query, gallery)
class HelperFunction(AbstractMetaFeature):
    """Meta-feature marker whose ``type_`` tags it as a helper function."""

    def __init__(self):
        super().__init__()
        self.type_ = 'HELPERFUNCTION'
def check(opt):
    """Validate that ``opt.task`` is supported for ``opt.model``.

    Raises AssertionError for an unsupported task of a known model, and
    NotImplementedError for an unknown model (same behavior as before).
    """
    supported_tasks = {
        'pix2pix': ['edges2shoes-r', 'map2sat', 'cityscapes', 'cityscapes_fast', 'edges2shoes-r_fast', 'map2sat_fast'],
        'cycle_gan': ['horse2zebra', 'horse2zebra_fast'],
        'gaugan': ['cityscapes', 'cityscapes_lite', 'coco_fast'],
        'munit': ['edges2shoes-r_fast'],
    }
    if opt.model not in supported_tasks:
        raise NotImplementedError('Unsupported model [%s]!' % opt.model)
    assert opt.task in supported_tasks[opt.model]
def _find_human_readable_labels(synsets, synset_to_human):
humans = []
for s in synsets:
assert (s in synset_to_human), ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans |
class ResGN(nn.Module):
    """Two stacked residual blocks: indim -> outdim, then outdim -> outdim."""

    def __init__(self, indim, outdim):
        super().__init__()
        # Attribute names kept so state_dict keys remain unchanged.
        self.res1 = ResBlock(indim, outdim)
        self.res2 = ResBlock(outdim, outdim)

    def forward(self, x):
        hidden = self.res1(x)
        return self.res2(hidden)
def csr_to_problem_nojit(l, x_val, x_ind, x_rowptr, prob_val, prob_ind, prob_rowptr):
    """Copy CSR rows into liblinear-style problem arrays (pure-python fallback).

    Feature indices are shifted to 1-based; the last two slots of each
    destination row (``prob_rowptr[i+1] - 2`` bound) are left untouched,
    presumably reserved for bias/terminator entries — confirm with the caller.
    """
    for row in range(l):
        src_lo, src_hi = x_rowptr[row], x_rowptr[row + 1]
        dst_lo = prob_rowptr[row]
        dst_hi = prob_rowptr[row + 1] - 2
        prob_ind[dst_lo:dst_hi] = x_ind[src_lo:src_hi] + 1
        prob_val[dst_lo:dst_hi] = x_val[src_lo:src_hi]
class Datagen_deepcom():
    """Batch generator for DeepCom-style code-summarization training.

    X holds paths to pickled trees (loaded via ``read_pickle``); Y holds the
    target token sequences. ``code_dic``/``nl_dic`` are token->id vocabularies
    for the code and natural-language sides respectively.
    """

    def __init__(self, X, Y, batch_size, code_dic, nl_dic, train=True):
        self.X = X
        self.Y = Y
        self.batch_size = batch_size
        self.code_dic = code_dic
        self.nl_dic = nl_dic
        self.train = train  # shuffle per epoch only in training mode

    def __len__(self):
        # Number of batches per epoch.
        return len(range(0, len(self.X), self.batch_size))

    def __call__(self, epoch=0):
        # Wrap gen() in a prefetching BackgroundGenerator (queue size 1) that
        # also reports len() via GeneratorLen.
        return GeneratorLen(BackgroundGenerator(self.gen(epoch), 1), len(self))

    def gen(self, epoch):
        """Yield (x, y, x_raw, y_raw) batches; shuffling is seeded by epoch."""
        if self.train:
            # Epoch-seeded permutation => reproducible per-epoch shuffling.
            np.random.seed(epoch)
            newindex = list(np.random.permutation(len(self.X)))
            X = [self.X[i] for i in newindex]
            Y = [self.Y[i] for i in newindex]
        else:
            X = [x for x in self.X]
            Y = [y for y in self.Y]
        for i in range(0, len(self.X), self.batch_size):
            x = X[i:(i + self.batch_size)]
            y = Y[i:(i + self.batch_size)]
            x_raw = [read_pickle(n) for n in x]  # load the pickled trees from disk
            # NOTE(review): y_raw maps y through nl_dic while y itself is padded
            # directly below — so y is apparently already numeric and nl_dic maps
            # ids to something else here; confirm the upstream data format.
            y_raw = [[self.nl_dic[t] for t in s] for s in y]
            x = [sequencing(n) for n in x_raw]
            x = [np.array([self.code_dic[t] for t in xx], 'int32') for xx in x]
            # Pad/truncate code sequences to at most 400 tokens, pad value -1.
            x = tf.constant(tf.keras.preprocessing.sequence.pad_sequences(x, min(max([len(s) for s in x]), 400), padding='post', truncating='post', value=(- 1.0)))
            x_raw = [traverse_label(n) for n in x_raw]
            # Pad/truncate targets to at most 100 tokens, pad value -1.
            y = tf.constant(tf.keras.preprocessing.sequence.pad_sequences(y, min(max([len(s) for s in y]), 100), padding='post', truncating='post', value=(- 1.0)))
            (yield (x, y, x_raw, y_raw))
def Swish(data, name=None):
    """Swish activation for an mxnet symbol: x * sigmoid(x).

    NOTE(review): ``name`` is resolved (auto-generated via GetLayerName when not
    given) but never applied to the produced symbol — presumably it was meant to
    name the op; confirm before relying on layer names. Left untouched here since
    GetLayerName.get may have side effects (e.g. a naming counter).
    """
    name = (GetLayerName.get('swish') if (name is None) else name)
    x = (data * mx.sym.sigmoid(data))
    return x
class Mine_estimator(nn.Module):
    """MINE mutual-information estimator wrapper.

    forward() returns the *negated* Donsker-Varadhan lower bound, so minimizing
    the returned loss maximizes the mutual-information estimate.
    """

    def __init__(self, input_dim=2048, hidden_dim=512):
        super(Mine_estimator, self).__init__()
        # Attribute name kept so state_dict keys remain unchanged.
        self.mine_model = Mine(input_dim, hidden_dim)

    def forward(self, X, Y):
        # Shuffle Y to sample from the product of marginals.
        Y_shuffled = Y[torch.randperm(len(Y))]
        joint_scores = self.mine_model(X, Y)
        marginal_scores = self.mine_model(X, Y_shuffled)
        mi_lower_bound = torch.mean(joint_scores) - torch.log(torch.mean(torch.exp(marginal_scores)))
        return -mi_lower_bound
def imagenet_beit_base_in22k_pretrained(output_dim):
    """BEiT-base/16 @224 pretrained on ImageNet-22k, head replaced to output_dim."""
    model = timm.create_model('beit_base_patch16_224_in22k', pretrained=True)
    return _vit_replace_fc(model, output_dim)
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, max_norm: float=0):
    """Run one DETR-style training epoch and return averaged metric values.

    Losses are weighted by ``criterion.weight_dict``; gradients are clipped to
    ``max_norm`` when positive. Returns {metric_name: global average}.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    # The criterion variant determines which classification-error meter exists.
    if hasattr(criterion, 'loss_labels'):
        metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    else:
        metric_logger.add_meter('obj_class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device)
        # 'filename' entries are metadata, not tensors, so they are dropped here.
        targets = [{k: v.to(device) for (k, v) in t.items() if (k != 'filename')} for t in targets]
        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Total loss: weighted sum over the losses that have a configured weight.
        losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
        # Reduce across distributed workers for logging only (not for backprop).
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss_value = losses_reduced_scaled.item()
        # Abort on NaN/inf loss rather than poisoning the weights.
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        if (max_norm > 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        if hasattr(criterion, 'loss_labels'):
            metric_logger.update(class_error=loss_dict_reduced['class_error'])
        else:
            metric_logger.update(obj_class_error=loss_dict_reduced['obj_class_error'])
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def set_seed(seed, use_cuda=True):
    """Seed python, numpy and torch RNGs; optionally seed all CUDA devices too."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if use_cuda:
        torch.cuda.manual_seed_all(seed)
def get_teacher(args, data_info):
    """Construct the teacher GAT model described by args and data_info.

    Per-layer attention heads: t_num_heads for each hidden layer, then
    t_num_out_heads for the output layer.
    """
    attn_heads = [args.t_num_heads] * args.t_num_layers + [args.t_num_out_heads]
    return GAT(data_info['g'], args.t_num_layers, data_info['num_feats'], args.t_num_hidden, data_info['n_classes'], attn_heads, F.elu, args.in_drop, args.attn_drop, args.alpha, args.residual)
class CudaBuildExt(setuptools_build_ext):
    """build_ext variant that switches to an 'nvidia' compiler when CUDA is present.

    Also monkey-patches ``ccompiler.new_compiler`` so that a
    DistutilsPlatformError during compiler creation falls back to constructing
    the platform compiler class (_UnixCCompiler / _MSVCCompiler) directly.
    """

    def run(self):
        if (CUDA is not None):
            def wrap_new_compiler(func):
                # Wrap distutils' factory; on platform-detection failure build
                # the compiler object explicitly instead of failing the build.
                def _wrap_new_compiler(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except errors.DistutilsPlatformError:
                        if (sys.platform != 'win32'):
                            CCompiler = _UnixCCompiler
                        else:
                            CCompiler = _MSVCCompiler
                        # NOTE(review): assumes 'dry_run' and 'force' were passed
                        # as keyword args by the caller — confirm for all callers.
                        return CCompiler(None, kwargs['dry_run'], kwargs['force'])
                return _wrap_new_compiler
            # Global, process-wide patch of the distutils factory.
            ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
            # Assumes a compiler named 'nvidia' was registered elsewhere — confirm.
            self.compiler = 'nvidia'
        setuptools_build_ext.run(self)
class PSRoIPool(nn.Module):
    """Position-sensitive RoI pooling as a module (wraps ``ps_roi_pool``)."""

    def __init__(self, output_size: int, spatial_scale: float):
        super(PSRoIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input: Tensor, rois: Tensor) -> Tensor:
        return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self) -> str:
        # e.g. "PSRoIPool(output_size=7, spatial_scale=0.0625)"
        return '{}(output_size={}, spatial_scale={})'.format(self.__class__.__name__, self.output_size, self.spatial_scale)
class ReacherBulletEnv_v1(ReacherBulletEnv):
    """Reacher environment variant using the Reacher_v1 robot.

    NOTE(review): _step always returns reward 0 — electricity_cost and
    stuck_joint_cost are computed but never added to self.rewards (hard-set to
    [0.0]). Presumably rewards are supplied externally in this variant; confirm
    before reusing.
    """

    def __init__(self):
        self.robot = Reacher_v1()
        # Calls the grandparent initializer directly with the custom robot.
        MJCFBaseBulletEnv.__init__(self, self.robot)

    def _step(self, a):
        assert (not self.scene.multiplayer)
        self.robot.apply_action(a)
        self.scene.global_step()  # advance the physics simulation one step
        state = self.robot.calc_state()
        potential_old = self.potential  # kept for parity with the parent; unused here
        self.potential = self.robot.calc_potential()
        # Penalty terms mirror the parent env but are not applied (see class note).
        electricity_cost = (((- 0.1) * (np.abs((a[0] * self.robot.theta_dot)) + np.abs((a[1] * self.robot.gamma_dot)))) - (0.01 * (np.abs(a[0]) + np.abs(a[1]))))
        stuck_joint_cost = ((- 0.1) if (np.abs((np.abs(self.robot.gamma) - 1)) < 0.01) else 0.0)
        self.rewards = [0.0]
        self.HUD(state, a, False)
        # (state, reward, done, info) — this variant never terminates on its own.
        return (state, sum(self.rewards), False, {})
def process_rollout(rollout, gamma, lambda_=1.0):
    """Compute discounted returns and GAE advantages for one rollout.

    Returns a Batch(states, actions, advantages, returns, terminal, features).
    """
    states = np.asarray(rollout.states)
    actions = np.asarray(rollout.actions)
    rewards = np.asarray(rollout.rewards)
    # Bootstrap both the value and reward sequences with the tail value rollout.r.
    values_ext = np.asarray(rollout.values + [rollout.r])
    rewards_ext = np.asarray(rollout.rewards + [rollout.r])
    returns = discount(rewards_ext, gamma)[:-1]
    # Generalized Advantage Estimation: discount the one-step TD residuals.
    td_residuals = rewards + gamma * values_ext[1:] - values_ext[:-1]
    advantages = discount(td_residuals, gamma * lambda_)
    initial_features = rollout.features[0]
    return Batch(states, actions, advantages, returns, rollout.terminal, initial_features)
def plot_scatter(x, y, c, s, xlab: str, ylab: str, colorlab: str, sizelab: str, markersize_rescaling: int, figsize=(7, 3)):
    """Symlog-symlog scatter plot with two legends.

    A hand-built color legend (High/Medium/Low, colored via the module-level
    ``cs`` palette) and a marker-size legend whose numeric labels are rescaled
    by ``markersize_rescaling``. Draws on the current pyplot state; returns None.
    """
    (fig, ax) = plt.subplots(dpi=500, figsize=figsize, facecolor='w')
    scatter = ax.scatter(x, y, c=c, s=s, alpha=1)
    plt.yscale('symlog')
    plt.xscale('symlog')
    # Proxy artists for the color legend; added via add_artist so the size
    # legend created below does not replace it.
    leg_els = [Line2D([0], [0], marker='o', color='w', label='High', markerfacecolor=cs[2], markersize=6), Line2D([0], [0], marker='o', color='w', label='Medium', markerfacecolor=cs[1], markersize=6), Line2D([0], [0], marker='o', color='w', label='Low', markerfacecolor=cs[0], markersize=6)]
    legend1 = ax.legend(handles=leg_els, loc='upper left', title=colorlab, fontsize=9)
    ax.add_artist(legend1)
    (handles, labels) = scatter.legend_elements(prop='sizes', alpha=1)
    l2 = []
    for i in range(len(labels)):
        s = labels[i]
        # Size labels look like '$\mathdefault{N}$': extract N and rescale it.
        num = (markersize_rescaling * int(s[(s.index('{') + 1):s.index('}')]))
        l2.append((('$\\mathdefault{' + str(num)) + '}$'))
    legend2 = ax.legend(handles, l2, loc='lower right', title=sizelab)
    plt.xlabel(xlab)
    plt.ylabel(ylab)
def move_dataset():
    """Return a shell snippet copying dataset zips to $SLURM_TMPDIR on the cluster.

    Outside the cluster (on_cc() false) an empty string is returned.
    """
    if not on_cc():
        return ''
    from contrastyou import DATA_PATH
    return (f' find {DATA_PATH} ' + "-name '*.zip' -exec cp {} $SLURM_TMPDIR \\;")
def parse_uri(path):
    """Split a URI into (scheme, remainder).

    Only the FILE and FAKE schemes from TFConstants are accepted; any other
    prefix raises ValueError (same message as before).
    """
    cleaned = path.strip()
    if cleaned.startswith(TFConstants.FILE_SCHEME()):
        matched_scheme = TFConstants.FILE_SCHEME()
    elif cleaned.startswith(TFConstants.FAKE_SCHEME()):
        matched_scheme = TFConstants.FAKE_SCHEME()
    else:
        raise ValueError('Wrong path provided: %s' % cleaned)
    return (matched_scheme, cleaned[len(matched_scheme):])
class T5Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based T5 tokenizer with trailing sentinel ("extra id") tokens.

    The last ``extra_ids`` ids of the vocabulary are reserved for sentinel
    tokens ``<extra_id_0> ... <extra_id_{extra_ids-1}>``, indexed from the end
    of the vocab (``<extra_id_0>`` gets the highest id).

    NOTE(review): several methods here look like they lost decorators in
    extraction (``vocab_size`` is read without parentheses in get_vocab(), so it
    is presumably a @property; ``_eventually_correct_t5_max_length`` takes no
    self/cls, presumably a @staticmethod). Confirm against the original file.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # Auto-generate sentinel tokens unless the caller supplied their own;
        # when both are given their counts must agree.
        if ((extra_ids > 0) and (additional_special_tokens is None)):
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif ((extra_ids > 0) and (additional_special_tokens is not None)):
            extra_tokens = len(set(filter((lambda x: bool(('extra_id' in str(x)))), additional_special_tokens)))
            if (extra_tokens != extra_ids):
                raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens')
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Resolve the effective max model length, warning about the legacy default."""
        if (pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes):
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if ((init_max_model_length is not None) and (init_max_model_length != max_model_length)):
                # An explicit user-provided length always wins.
                return init_max_model_length
            elif (init_max_model_length is None):
                warnings.warn(f'''This tokenizer was incorrectly instantiated with a model max length of {deprecated_max_model_length} which will be corrected in Transformers v5.
For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.
- Be aware that you SHOULD NOT rely on {pretrained_model_name_or_path} automatically truncating your input to {deprecated_max_model_length} when padding/encoding.
- If you want to encode/pad to sequences longer than {deprecated_max_model_length} you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.
- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.''', FutureWarning)
        return max_model_length

    def vocab_size(self):
        # SentencePiece vocab plus the reserved sentinel ids appended at the end.
        return (self.sp_model.get_piece_size() + self._extra_ids)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Mark special-token positions with 1: T5 only appends EOS after each sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([0] * len(token_ids_0)) + [1])
        return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])

    def get_sentinel_tokens(self):
        """Return the sentinel tokens (`<extra_id_N>`) present among the special tokens."""
        return list(set(filter((lambda x: (bool(re.search('<extra_id_\\d+>', x)) is not None)), self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        """Return the vocabulary ids of the sentinel tokens."""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Append eos_token_id unless the sequence already ends with it (then warn)."""
        if ((len(token_ids) > 0) and (token_ids[(- 1)] == self.eos_token_id)):
            warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.')
            return token_ids
        else:
            return (token_ids + [self.eos_token_id])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        # T5 does not use token type ids: everything is segment 0.
        eos = [self.eos_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + eos)) * [0])
        return (len((((token_ids_0 + eos) + token_ids_1) + eos)) * [0])

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Format sequences for the model: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if (token_ids_1 is None):
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return (token_ids_0 + token_ids_1)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text into SentencePiece subword strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token string to its id; sentinels are indexed from the vocab end."""
        if token.startswith('<extra_id_'):
            match = re.match('<extra_id_(\\d+)>', token)
            num = int(match.group(1))
            # <extra_id_0> maps to the highest id, <extra_id_1> to the next, etc.
            return ((self.vocab_size - num) - 1)
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Map an id back to its token; ids past the SP vocab are sentinels."""
        if (index < self.sp_model.get_piece_size()):
            token = self.sp_model.IdToPiece(index)
        else:
            token = f'<extra_id_{((self.vocab_size - 1) - index)}>'
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode a token list back to text, keeping special tokens verbatim.

        Runs of ordinary subwords are decoded with SentencePiece; special tokens
        are inserted literally, with a single space separating them from a
        preceding decoded run.
        """
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the SentencePiece model file into save_directory; return its path."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Copy the original model file when available; otherwise serialize the
        # in-memory SentencePiece model.
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def to_tensor(value, device):
    """Recursively convert a nested structure of arrays/tensors to torch tensors on device.

    Supports RolloutBatch (rebuilt field-wise), list, tuple, dict, numpy arrays
    (bool arrays are promoted to float32) and existing tensors. Anything else
    raises Exception, as before.
    """
    if isinstance(value, RolloutBatch):
        return RolloutBatch(*to_tensor(list(value), device))
    elif isinstance(value, list):
        return [to_tensor(x, device) for x in value]
    elif isinstance(value, tuple):
        return tuple(to_tensor(list(value), device))
    elif isinstance(value, dict):
        return {key: to_tensor(val, device) for (key, val) in value.items()}
    elif isinstance(value, np.ndarray):
        # Bug fix: np.bool (alias of builtin bool) was removed in NumPy 1.24;
        # np.bool_ is the actual boolean dtype and compares the same way.
        if (value.dtype == np.bool_):
            value = value.astype(np.float32)
        return torch.from_numpy(value).to(device)
    elif torch.is_tensor(value):
        return value.to(device)
    else:
        raise Exception(('%s Not supported' % type(value)))
def adjust_lr(optimizer, init_lr, epoch, decay_rate=0.1, decay_epoch=30):
    """Step-decay schedule: lr = init_lr * decay_rate ** (epoch // decay_epoch).

    Bug fix: the original multiplied the *current* lr by the decay factor on
    every call, so calling it more than once per epoch compounded the decay and
    ``init_lr`` was never used. The lr is now assigned absolutely, making the
    function idempotent for a given epoch.
    """
    decay = decay_rate ** (epoch // decay_epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = init_lr * decay
class BurgersNode(PdeNode):
    """Symbolic PDE node for Burgers' equation: u_t + u*u_x - v*u_xx = 0 in (x, t)."""

    def __init__(self, u: str='u', v='v'):
        super().__init__()
        (x, t) = symbols('x t')
        input_variables = {'x': x, 't': t}
        assert (type(u) == str), 'u needs to be string'
        # Promote the names to symbolic functions of (x, t).
        u = symbolize(u, input_variables)
        # NOTE(review): v may also be numeric (a constant viscosity) depending on
        # what symbolize accepts — confirm; only u is validated as a string.
        v = symbolize(v, input_variables)
        self.equations = {f'burgers_{str(u)}': ((u.diff(t) + (u * u.diff(x))) - (v * u.diff(x).diff(x)))}
        self.make_nodes()
def condensenet74_c4_g4(**kwargs):
    """CondenseNet-74 with 4 condensation groups and 4 conv groups (factory)."""
    return get_condensenet(num_layers=74, groups=4, model_name='condensenet74_c4_g4', **kwargs)
def train():
    """Train a descriptor (energy-based) model to recover incomplete 3D voxel shapes.

    TF1-style session training: synthesizes completions via Langevin dynamics on
    masked voxels, updates the descriptor with the observed-vs-synthesized
    energy difference, and periodically saves samples/checkpoints/plots.
    """
    cube_len = FLAGS.cube_len
    output_dir = os.path.join(FLAGS.output_dir, FLAGS.category)
    checkpoint_dir = os.path.join(output_dir, 'checkpoints')
    synthesis_dir = os.path.join(output_dir, 'recovery')
    log_dir = os.path.join(output_dir, 'log')
    # Placeholders for observed (complete) and synthesized voxel batches.
    obs = tf.placeholder(tf.float32, [None, cube_len, cube_len, cube_len, 1], name='obs_data')
    syn = tf.placeholder(tf.float32, [None, cube_len, cube_len, cube_len, 1], name='syn_data')
    obs_res = descriptor(obs, reuse=False)
    syn_res = descriptor(syn, reuse=True)
    recon_err = tf.square((tf.reduce_mean(syn, axis=0) - tf.reduce_mean(obs, axis=0)))
    # Descriptor loss: mean energy of synthesized minus mean energy of observed.
    des_loss = tf.subtract(tf.reduce_mean(syn_res, axis=0), tf.reduce_mean(obs_res, axis=0))
    syn_langevin = langevin_dynamics(syn)
    train_data = data_io.getObj(FLAGS.data_path, FLAGS.category, train=True, cube_len=cube_len, num_voxels=FLAGS.train_size, low_bound=0, up_bound=1)
    num_voxels = len(train_data)
    # Build the incomplete observations and the masks marking known voxels.
    incomplete_data = np.zeros(train_data.shape)
    masks = np.zeros(train_data.shape)
    for i in range(len(incomplete_data)):
        (incomplete_data[i], masks[i]) = get_incomplete_data(train_data[i])
    train_data = train_data[(..., np.newaxis)]
    incomplete_data = incomplete_data[(..., np.newaxis)]
    masks = masks[(..., np.newaxis)]
    data_io.saveVoxelsToMat(train_data, ('%s/observed_data.mat' % output_dir), cmin=0, cmax=1)
    data_io.saveVoxelsToMat(incomplete_data, ('%s/incomplete_data.mat' % output_dir), cmin=0, cmax=1)
    # Center the data; the mean is added back when saving recoveries.
    voxel_mean = train_data.mean()
    train_data = (train_data - voxel_mean)
    incomplete_data = (incomplete_data - voxel_mean)
    num_batches = int(math.ceil((num_voxels / FLAGS.batch_size)))
    des_vars = [var for var in tf.trainable_variables() if var.name.startswith('des')]
    des_optim = tf.train.AdamOptimizer(FLAGS.d_lr, beta1=FLAGS.beta1)
    des_grads_vars = des_optim.compute_gradients(des_loss, var_list=des_vars)
    # Mean |grad| of the weight tensors, logged as a training diagnostic.
    des_grads = [tf.reduce_mean(tf.abs(grad)) for (grad, var) in des_grads_vars if ('/w' in var.name)]
    apply_d_grads = des_optim.apply_gradients(des_grads_vars)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=50)
        recover_voxels = np.random.randn(num_voxels, cube_len, cube_len, cube_len, 1)
        des_loss_epoch = []
        recon_err_epoch = []
        plt.ion()
        for epoch in range(FLAGS.num_epochs):
            d_grad_vec = []
            des_loss_vec = []
            recon_err_vec = []
            # Each epoch restarts synthesis from the incomplete observations.
            init_data = incomplete_data.copy()
            start_time = time.time()
            for i in range(num_batches):
                indices = slice((i * FLAGS.batch_size), min(num_voxels, ((i + 1) * FLAGS.batch_size)))
                obs_data = train_data[indices]
                syn_data = init_data[indices]
                data_mask = masks[indices]
                # Langevin sampling, then keep the known voxels from the input.
                sample = sess.run(syn_langevin, feed_dict={syn: syn_data})
                syn_data = ((sample * (1 - data_mask)) + (syn_data * data_mask))
                # Runs the optimizer as a side effect; only grads/loss are kept.
                (d_grad, d_loss) = sess.run([des_grads, des_loss, apply_d_grads], feed_dict={obs: obs_data, syn: syn_data})[:2]
                d_grad_vec.append(d_grad)
                des_loss_vec.append(d_loss)
                mse = sess.run(recon_err, feed_dict={obs: obs_data, syn: syn_data})
                recon_err_vec.append(mse)
                recover_voxels[indices] = syn_data
            end_time = time.time()
            (d_grad_mean, des_loss_mean, recon_err_mean) = (float(np.mean(d_grad_vec)), float(np.mean(des_loss_vec)), float(np.mean(recon_err_vec)))
            des_loss_epoch.append(des_loss_mean)
            recon_err_epoch.append(recon_err_mean)
            print(('Epoch #%d, descriptor loss: %.4f, descriptor SSD weight: %.4f, Avg MSE: %4.4f, time: %.2fs' % (epoch, des_loss_mean, d_grad_mean, recon_err_mean, (end_time - start_time))))
            # Periodic logging: recovered samples, checkpoint, and loss curves.
            if ((epoch % FLAGS.log_step) == 0):
                if (not os.path.exists(synthesis_dir)):
                    os.makedirs(synthesis_dir)
                data_io.saveVoxelsToMat((recover_voxels + voxel_mean), ('%s/sample%04d.mat' % (synthesis_dir, epoch)))
                if (not os.path.exists(checkpoint_dir)):
                    os.makedirs(checkpoint_dir)
                saver.save(sess, ('%s/%s' % (checkpoint_dir, 'model.ckpt')), global_step=epoch)
                if (not os.path.exists(log_dir)):
                    os.makedirs(log_dir)
                plt.figure(1)
                data_io.draw_graph(plt, des_loss_epoch, 'des_loss', log_dir, 'r')
                plt.figure(2)
                data_io.draw_graph(plt, recon_err_epoch, 'recon_error', log_dir, 'b')
def initialize(n_parallel):
    """Start the singleton worker pool with ``n_parallel`` workers.

    SIGINT is blocked while workers are spawned so they inherit the mask (only
    the parent handles Ctrl-C); the mask is always restored afterwards.
    """
    try:
        signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
        singleton_pool.initialize(n_parallel)
        worker_args = [(worker_id,) for worker_id in range(singleton_pool.n_parallel)]
        singleton_pool.run_each(_worker_init, worker_args)
    finally:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGINT])
class View(Module):
    """Reshape-as-a-module: stores a target torch.Size and applies Tensor.view.

    Accepts either a single torch.Size or the individual dimensions as
    positional arguments, e.g. View(torch.Size([2, 3])) or View(2, 3).
    """

    def __init__(self, *args):
        super(View, self).__init__()
        got_size_object = (len(args) == 1 and isinstance(args[0], torch.Size))
        self.size = args[0] if got_size_object else torch.Size(args)

    def forward(self, input):
        return input.view(self.size)
def load_ckpt(args, model, optimizer=None, scheduler=None, val_err=[]):
    """Load model weights from args.load_ckpt into ``model`` (if the file exists).

    A leading 'module.' (DataParallel) prefix is stripped from checkpoint keys,
    and the weights are loaded into model.module or model depending on how the
    live model's keys are prefixed.

    NOTE(review): optimizer/scheduler/val_err are accepted but never restored
    here — presumably handled elsewhere; confirm. The mutable default
    ``val_err=[]`` is shared across calls, harmless only while it stays unused.
    """
    if os.path.isfile(args.load_ckpt):
        logger.info('loading checkpoint %s', args.load_ckpt)
        # dill is used as the pickle module; map everything onto CPU.
        checkpoint = torch.load(args.load_ckpt, map_location=(lambda storage, loc: storage), pickle_module=dill)
        model_state_dict_keys = model.state_dict().keys()
        checkpoint_state_dict_noprefix = strip_prefix_if_present(checkpoint['model_state_dict'], 'module.')
        if all((key.startswith('module.') for key in model_state_dict_keys)):
            model.module.load_state_dict(checkpoint_state_dict_noprefix)
        else:
            model.load_state_dict(checkpoint_state_dict_noprefix)
        # Free the checkpoint tensors immediately to lower peak memory.
        del checkpoint
        torch.cuda.empty_cache()
def split_data_TM(x_TM, y_TM, seq_len_TM, split_indices):
    """Select the rows of the three parallel TM sequences at ``split_indices``.

    Returns a tuple of three numpy arrays in the order (x, y, seq_len).
    """
    x_selected = np.array([x_TM[i] for i in split_indices])
    y_selected = np.array([y_TM[i] for i in split_indices])
    seq_len_selected = np.array([seq_len_TM[i] for i in split_indices])
    return (x_selected, y_selected, seq_len_selected)
class TestPatientSampler(TestCase):
    """Integration tests for PatientSampler over the ACDC dataset.

    setUp/tearDown remove any previously extracted dataset folder and zip so
    each run starts (and ends) from a clean working directory.
    """

    def setUp(self) -> None:
        super().setUp()
        self.dataset_root = './'
        self.dataset_subfolders = ['img', 'gt']
        # Wipe leftovers from a previous run before the dataset is re-created.
        if Path(self.dataset_root, ACDCDataset.folder_name).exists():
            shutil.rmtree(Path(self.dataset_root, ACDCDataset.folder_name), ignore_errors=True)
        if Path(self.dataset_root, ACDCDataset.zip_name).exists():
            os.remove(Path(self.dataset_root, ACDCDataset.zip_name))

    def test_acdc_sampler(self):
        """Finite sampler: iterating the loader must terminate on its own."""
        dataset = ACDCDataset(root_dir=self.dataset_root, mode='train', subfolders=self.dataset_subfolders)
        patient_sampler = PatientSampler(dataset=dataset, grp_regex=dataset._pattern, shuffle=False, infinite_sampler=False)
        dataloader = DataLoader(dataset, batch_sampler=patient_sampler)
        for (i, (_, filename)) in enumerate(dataloader):
            print(filename)

    def test_infinit_sampler(self):
        """Infinite sampler: the loader never ends, so stop after 100 batches."""
        dataset = ACDCDataset(root_dir=self.dataset_root, mode='train', subfolders=self.dataset_subfolders)
        patient_sampler = PatientSampler(dataset=dataset, grp_regex=dataset._pattern, shuffle=False, infinite_sampler=True)
        dataloader = DataLoader(dataset, batch_sampler=patient_sampler)
        for (i, (_, filename)) in enumerate(dataloader):
            print(filename)
            if (i == 100):
                break

    def tearDown(self) -> None:
        super().tearDown()
        # Remove everything the tests downloaded/extracted.
        if Path(self.dataset_root, ACDCDataset.folder_name).exists():
            shutil.rmtree(Path(self.dataset_root, ACDCDataset.folder_name), ignore_errors=True)
        if Path(self.dataset_root, ACDCDataset.zip_name).exists():
            os.remove(Path(self.dataset_root, ACDCDataset.zip_name))
def make_k_circles(k=2, n_samples=100, shuffle=False, noise=None, random_state=None, factor=0.8, c=None, rot=None):
    """Generate k concentric circles of 2D points with labels 0..k-1.

    Radii shrink geometrically by ``factor`` (radius factor**i for circle i) or,
    when ``factor`` is falsy, grow linearly as ``c * i``. ``rot`` optionally
    gives a per-circle angular offset; ``noise`` adds Gaussian jitter.

    Returns (X, y): X of shape (n_samples, 2) and integer labels y.

    Bug fix: when ``n_samples`` is a sequence of the wrong length (or not a
    sequence at all), the original raised AssertionError/TypeError because its
    ``except ValueError`` never matched what the checks actually raise; both
    cases now raise the intended ValueError.
    """
    if (factor is not None) and ((factor >= 1) or (factor < 0)):
        raise ValueError("'factor' has to be between 0 and 1.")
    if (factor is None) and (c is None):
        raise ValueError("one of 'factor' or 'c' has to be between 0 and 1.")
    if isinstance(n_samples, numbers.Integral):
        # Split n_samples as evenly as possible; the last circle absorbs the remainder.
        n_samples_lists = [(n_samples // k) for i in range((k - 1))]
        n_samples_lists.append((n_samples - np.sum(n_samples_lists)))
    else:
        try:
            given = len(n_samples)
        except TypeError:
            raise ValueError('`n_samples_lists` can be either an int or a k-element tuple.')
        if given != k:
            raise ValueError('`n_samples_lists` can be either an int or a k-element tuple.')
        n_samples_lists = n_samples
    generator = check_random_state(random_state)
    linspaces = [np.linspace(0, (2 * np.pi), n_samples_list, endpoint=False) for n_samples_list in n_samples_lists]
    if (rot is not None):
        # Re-build with per-circle rotation offsets.
        linspaces = [np.linspace((0 + r), (r + (2 * np.pi)), n_samples_list, endpoint=False) for (n_samples_list, r) in zip(n_samples_lists, rot)]
    # NOTE(review): this is a truthiness test, so factor=0 (or 0.0) silently
    # falls through to the c-based branch — confirm that is intended.
    if factor:
        circs_x = [(np.cos(linspace) * (factor ** i)) for (i, linspace) in enumerate(linspaces)]
        circs_y = [(np.sin(linspace) * (factor ** i)) for (i, linspace) in enumerate(linspaces)]
    else:
        circs_x = [((np.cos(linspace) * c) * i) for (i, linspace) in enumerate(linspaces)]
        circs_y = [((np.sin(linspace) * c) * i) for (i, linspace) in enumerate(linspaces)]
    X = np.vstack([np.concatenate(circs_x, axis=0), np.concatenate(circs_y, axis=0)]).T
    y = np.hstack([(np.ones(n_samples_list, dtype=np.intp) * i) for (i, n_samples_list) in enumerate(n_samples_lists)])
    if shuffle:
        (X, y) = util_shuffle(X, y, random_state=generator)
    if (noise is not None):
        X += generator.normal(scale=noise, size=X.shape)
    return (X, y)
class TRPOIPOBuffer():
    """On-policy rollout buffer for TRPO-IPO: stores transitions, computes
    GAE-lambda advantages and discounted returns, and hands out torch tensors.

    Relies on module-level ``core`` (combined_shape / discount_cumsum),
    ``mpi_statistics_scalar`` and ``device``.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.cost_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        # Gaussian policy parameters at collection time (for KL/ratio computations).
        self.mu_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.logstd_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.epi_id_buf = np.zeros(size, dtype=np.float32)
        (self.gamma, self.lam) = (gamma, lam)
        # ptr: next write slot; path_start_idx: start of the current trajectory.
        (self.ptr, self.path_start_idx, self.max_size) = (0, 0, size)
        self.epi_id = 0

    def store(self, obs, act, rew, val, cost, logp, mu, logstd):
        """Append one timestep; the buffer must not be full."""
        assert (self.ptr < self.max_size)
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.cost_buf[self.ptr] = cost
        self.logp_buf[self.ptr] = logp
        self.mu_buf[self.ptr] = mu
        self.logstd_buf[self.ptr] = logstd
        self.ptr += 1

    def finish_path(self, last_val=0):
        """Close the current trajectory, computing GAE advantages and returns.

        ``last_val`` bootstraps the tail: 0 for terminal states, V(s_T) when
        the trajectory was cut off mid-episode.
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # GAE-lambda: discounted sum of one-step TD residuals.
        deltas = ((rews[:(- 1)] + (self.gamma * vals[1:])) - vals[:(- 1)])
        self.adv_buf[path_slice] = core.discount_cumsum(deltas, (self.gamma * self.lam))
        # Rewards-to-go targets for the value function.
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:(- 1)]
        self.epi_id_buf[path_slice] = self.epi_id
        self.path_start_idx = self.ptr
        self.epi_id += 1

    def get(self):
        """Return all stored data as float32 tensors and reset the buffer.

        Advantages are normalized with MPI-wide mean/std before conversion.
        """
        assert (self.ptr == self.max_size)
        (self.ptr, self.path_start_idx, self.epi_id) = (0, 0, 0)
        (adv_mean, adv_std) = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = ((self.adv_buf - adv_mean) / adv_std)
        data = dict(obs=torch.FloatTensor(self.obs_buf).to(device), act=torch.FloatTensor(self.act_buf).to(device), cost=torch.FloatTensor(self.cost_buf).to(device), ret=torch.FloatTensor(self.ret_buf).to(device), adv=torch.FloatTensor(self.adv_buf).to(device), logp=torch.FloatTensor(self.logp_buf).to(device), mu=torch.FloatTensor(self.mu_buf).to(device), logstd=torch.FloatTensor(self.logstd_buf).to(device), epi_id=self.epi_id_buf)
        return {k: torch.as_tensor(v, dtype=torch.float32) for (k, v) in data.items()}
def test_point_assigner_with_empty_boxes_and_gt():
    """PointAssigner must handle the degenerate case of no points and no GT
    boxes without raising, returning an empty assignment."""
    assigner = PointAssigner()
    no_points = torch.FloatTensor([])
    no_gt_bboxes = torch.FloatTensor([])
    result = assigner.assign(no_points, no_gt_bboxes)
    assert len(result.gt_inds) == 0
class DenoiseBlock(nn.Module):
    """Densely-connected four-layer denoising block.

    Operates on a *list* of feature tensors (presumably one per noise level —
    HyperConv/ModuleParallel apply the op to each list element; confirm against
    their definitions). Each conv sees the running concatenation of earlier
    features, and the block output is residual (added back to the input), which
    implies out_channels must equal in_channels — TODO confirm at call sites.
    """
    def __init__(self, in_channels, inner_channels, out_channels, levels):
        super().__init__()
        # Noise levels appear to be given on a [0, 255] scale; normalize to [0, 1].
        self.levels = [(l / 255) for l in levels]
        # Dense connectivity: conv_k's input width grows by inner_channels per layer.
        self.conv_0 = HyperConv(self.levels, in_channels, inner_channels, kernel_size=3, padding=1)
        self.conv_1 = HyperConv(self.levels, (in_channels + inner_channels), inner_channels, kernel_size=3, padding=1)
        self.conv_2 = HyperConv(self.levels, (in_channels + (2 * inner_channels)), inner_channels, kernel_size=3, padding=1)
        self.conv_3 = HyperConv(self.levels, (in_channels + (3 * inner_channels)), out_channels, kernel_size=3, padding=1)
        self.actv_0 = ModuleParallel(nn.PReLU(inner_channels))
        self.actv_1 = ModuleParallel(nn.PReLU(inner_channels))
        self.actv_2 = ModuleParallel(nn.PReLU(inner_channels))
        self.actv_3 = ModuleParallel(nn.PReLU(out_channels))
    def forward(self, x):
        # x: list of tensors. NOTE the channel concatenation order is not the
        # same at every stage (first [x, out_0], then [out_k, previous_cat]);
        # this exact ordering is what the conv weights were trained against.
        out_0 = self.actv_0(self.conv_0(x))
        # cat -> [x, out_0] per list element
        out_0 = [torch.cat([xx, oo], 1) for (xx, oo) in zip(x, out_0)]
        out_1 = self.actv_1(self.conv_1(out_0))
        # cat -> [out_1, x, out_0]
        out_1 = [torch.cat([xx, oo], 1) for (xx, oo) in zip(out_1, out_0)]
        out_2 = self.actv_2(self.conv_2(out_1))
        # cat -> [out_2, out_1, x, out_0]
        out_2 = [torch.cat([xx, oo], 1) for (xx, oo) in zip(out_2, out_1)]
        out_3 = self.actv_3(self.conv_3(out_2))
        # Residual connection: element-wise add with the block input.
        return [(xx + oo) for (xx, oo) in zip(out_3, x)]
class ConcatDataset(FairseqDataset):
    """Concatenation of several datasets, each optionally repeated by an
    integer ``sample_ratio`` (simple upsampling via modular indexing)."""

    @staticmethod
    def cumsum(sequence, sample_ratios):
        # BUG FIX: restored @staticmethod. __init__ calls self.cumsum(a, b);
        # without the decorator the bound call would pass `self` as `sequence`
        # and fail with a TypeError.
        r, s = [], 0
        for e, ratio in zip(sequence, sample_ratios):
            curr_len = int(ratio * len(e))
            r.append(curr_len + s)
            s += curr_len
        return r

    def __init__(self, datasets, sample_ratios=1):
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        if isinstance(sample_ratios, int):
            # A scalar ratio applies uniformly to every dataset.
            sample_ratios = [sample_ratios] * len(self.datasets)
        self.sample_ratios = sample_ratios
        # Cumulative (upsampled) sizes used for index -> dataset dispatch.
        self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
        self.real_sizes = [len(d) for d in self.datasets]

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx][sample_idx]

    def _get_dataset_and_sample_index(self, idx: int):
        """Map a global index to (dataset index, local sample index)."""
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        # Modulo wraps upsampled indices back into the real dataset.
        sample_idx = sample_idx % self.real_sizes[dataset_idx]
        return dataset_idx, sample_idx

    def collater(self, samples, **extra_args):
        # Defer to the first dataset's collater if it defines one.
        if hasattr(self.datasets[0], 'collater'):
            return self.datasets[0].collater(samples, **extra_args)
        else:
            return default_collate(samples, **extra_args)

    def size(self, idx: int):
        """Return an example's size as used when filtering by max-positions."""
        dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx].size(sample_idx)

    def num_tokens(self, index: int):
        return np.max(self.size(index))

    def attr(self, attr: str, index: int):
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
        return getattr(self.datasets[dataset_idx], attr, None)

    @property
    def sizes(self):
        # BUG FIX: restored @property — ordered_indices() below reads
        # `self.sizes` as an array, not as a method call.
        _dataset_sizes = []
        for ds, sr in zip(self.datasets, self.sample_ratios):
            if isinstance(ds.sizes, np.ndarray):
                _dataset_sizes.append(np.tile(ds.sizes, sr))
            else:
                # Only the first dimension of multi-dim sizes matters here.
                assert isinstance(ds.sizes, list)
                _dataset_sizes.append(np.tile(ds.sizes[0], sr))
        return np.concatenate(_dataset_sizes)

    @property
    def supports_prefetch(self):
        # BUG FIX: restored @property — framework code reads this as a flag.
        return all(d.supports_prefetch for d in self.datasets)

    def ordered_indices(self):
        """Return indices ordered by example size (ascending)."""
        return np.argsort(self.sizes)

    def prefetch(self, indices):
        frm = 0
        for to, ds in zip(self.cumulative_sizes, self.datasets):
            real_size = len(ds)
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
            frm = to

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.datasets:
            if hasattr(ds, 'set_epoch'):
                ds.set_epoch(epoch)
def masked_gaussian_log_density(mu, data, obsrv_std, mask, temporal_weights=None):
    """Masked Gaussian log-density of `data` under mean `mu` and std `obsrv_std`.

    `mu` must be 4-D: (n_traj_samples, n_traj, n_timepoints, n_dims); `data`
    must share the final feature dimension.
    """
    # Unpacking also enforces that mu is 4-dimensional.
    (_, _, _, n_dims) = mu.size()
    assert data.size()[-1] == n_dims

    def _log_lik(pred, target):
        return gaussian_log_likelihood(pred, target, obsrv_std=obsrv_std)

    return compute_masked_likelihood(mu, data, mask, _log_lik, temporal_weights)
def write_feature_info(out_path):
    """Write one JSON object per event id (JSON-lines) describing the event's
    text features and numeric feature indices.

    out_path: destination file path (overwritten).
    """
    event_des = EventDescription()
    # BUG FIX: the builtin `file()` was removed in Python 3; use open() inside
    # a context manager so the handle is closed even on error.
    with open(out_path, 'w') as outf:
        # Event ids start at 2 in this scheme.
        for event_id in range(2, (max(event_des.id2rtype.keys()) + 1)):
            rtype = event_des.id2rtype[event_id]
            names = event_des.get_name(rtype)
            obj = {'event_id': event_id, 'rtype': rtype, 'text_feature': [], 'feature': []}
            num_feature_idx = event_des.event_featureidx_map.get(rtype, [])
            pattern = event_des.event_des_pattern[rtype]
            # Re-align names with the feature pattern before iterating.
            names = event_des.reverse_text_feature_name(names, pattern['feature'])
            text_features = event_des.event_text_map[event_id]
            text_idx = 0
            num_idx = 0
            # Walk names and feature kinds in lockstep, consuming from the
            # text-feature list or the numeric-index list as appropriate.
            for (name, feature_type) in zip(names, pattern['feature']):
                if (feature_type == 'text'):
                    value = text_features[text_idx]
                    text_idx += 1
                    obj['text_feature'].append(('%s=%s' % (name, value)))
                else:
                    index = num_feature_idx[num_idx]
                    num_idx += 1
                    obj['feature'].append(('%s at %d' % (name, index)))
            outf.write(('%s\n' % json.dumps(obj)))
class DenSPIServer(object):
    """Front-end for the DenSPI QA system.

    Hosts three Flask apps served by Tornado — the query encoder, the phrase
    (MIPS) index, and the tf-idf document ranker — and provides HTTP client
    helpers so the services can call each other across ports.
    """

    def __init__(self, args):
        self.args = args
        self.base_ip = args.base_ip
        self.query_port = args.query_port
        self.doc_port = args.doc_port
        self.index_port = args.index_port
        self.mips = None  # phrase index, built lazily by load_phrase_index()

    def load_query_encoder(self, device, args):
        """Load the DenSPI question encoder and its tokenizer from disk."""
        vocab_path = os.path.join(args.metadata_dir, args.vocab_name)
        # Config filename is "<base>_<model option>.json".
        bert_config_path = os.path.join(args.metadata_dir, (((args.bert_config_name.replace('.json', '') + '_') + args.bert_model_option) + '.json'))
        bert_config = BertConfig.from_json_file(bert_config_path)
        model = DenSPI(bert_config)
        if args.parallel:
            model = torch.nn.DataParallel(model)
        state = torch.load(args.query_encoder_path, map_location='cpu')
        # strict=False tolerates missing/extra keys; check_diff reports them.
        model.load_state_dict(state['model'], strict=False)
        check_diff(model.state_dict(), state['model'])
        model.to(device)
        logger.info(('Model loaded from %s' % args.query_encoder_path))
        logger.info('Number of model parameters: {:,}'.format(sum((p.numel() for p in model.parameters()))))
        tokenizer = tokenization.FullTokenizer(vocab_file=vocab_path, do_lower_case=(not args.do_case))
        return (model, tokenizer)

    def get_question_dataloader(self, questions, tokenizer, batch_size):
        """Convert raw question strings into SQuAD-style features + dataloader."""
        question_examples = [SquadExample(qas_id='qs', question_text=q) for q in questions]
        query_features = convert_questions_to_features(examples=question_examples, tokenizer=tokenizer, max_query_length=64)
        question_dataloader = convert_question_features_to_dataloader(query_features, fp16=False, local_rank=(- 1), predict_batch_size=batch_size)
        return (question_dataloader, question_examples, query_features)

    def serve_query_encoder(self, query_port, args):
        """Serve the query encoder over HTTP (POST /batch_api). Blocks forever."""
        device = ('cuda' if args.cuda else 'cpu')
        (query_encoder, tokenizer) = self.load_query_encoder(device, args)

        def query2vec(queries):
            # Encode a batch of questions into (start, end, sparse, input_ids).
            (question_dataloader, question_examples, query_features) = self.get_question_dataloader(queries, tokenizer, batch_size=24)
            query_encoder.eval()
            question_results = get_question_results(question_examples, query_features, question_dataloader, device, query_encoder)
            outs = []
            for (qr_idx, question_result) in enumerate(question_results):
                # Sparse vectors must be plain lists to be JSON-serializable.
                for ngram in question_result.sparse.keys():
                    question_result.sparse[ngram] = question_result.sparse[ngram].tolist()
                out = (question_result.start.tolist(), question_result.end.tolist(), question_result.sparse, question_result.input_ids)
                outs.append(out)
            return outs

        app = Flask(__name__)
        app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
        CORS(app)

        # BUG FIX: the @app.route decorator was stripped, leaving a bare tuple
        # expression and never registering the endpoint.
        @app.route('/batch_api', methods=['POST'])
        def batch_api():
            batch_query = json.loads(request.form['query'])
            outs = query2vec(batch_query)
            return jsonify(outs)

        logger.info(f'Starting QueryEncoder server at {self.get_address(query_port)}')
        # BUG FIX: the assignment target and listen() call were missing, so the
        # server object was discarded and never bound to its port.
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(query_port)
        IOLoop.instance().start()

    def load_phrase_index(self, args, dump_only=False):
        """Construct the MIPS phrase index (or return the cached one)."""
        if (self.mips is not None):
            return self.mips
        phrase_dump_dir = os.path.join(args.dump_dir, args.phrase_dir)
        tfidf_dump_dir = os.path.join(args.dump_dir, args.tfidf_dir)
        index_dir = os.path.join(args.dump_dir, args.index_dir)
        index_path = os.path.join(index_dir, args.index_name)
        idx2id_path = os.path.join(index_dir, args.idx2id_name)
        max_norm_path = os.path.join(index_dir, 'max_norm.json')
        mips_init = MIPS
        # doc_rank_fn delegates document-level scoring to the doc-ranker service.
        mips = mips_init(phrase_dump_dir=phrase_dump_dir, tfidf_dump_dir=tfidf_dump_dir, start_index_path=index_path, idx2id_path=idx2id_path, max_norm_path=max_norm_path, doc_rank_fn={'index': self.get_doc_scores, 'top_docs': self.get_top_docs, 'spvec': self.get_q_spvecs}, cuda=args.cuda, dump_only=dump_only)
        return mips

    def serve_phrase_index(self, index_port, args):
        """Serve phrase-index search (/, /api, /batch_api, ...). Blocks forever."""
        args.examples_path = os.path.join('static', args.examples_path)
        self.mips = self.load_phrase_index(args)
        app = Flask(__name__, static_url_path='/static')
        app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
        CORS(app)

        def batch_search(batch_query, max_answer_length=20, start_top_k=1000, mid_top_k=100, top_k=10, doc_top_k=5, nprobe=64, sparse_weight=0.05, search_strategy='hybrid'):
            # Encode remotely via the query-encoder service, then search locally.
            t0 = time()
            (outs, _) = self.embed_query(batch_query)()
            start = np.concatenate([out[0] for out in outs], 0)
            end = np.concatenate([out[1] for out in outs], 0)
            # Slice to each query's token length (offset by one position).
            sparse_uni = [out[2]['1'][1:(len(out[3]) + 1)] for out in outs]
            sparse_bi = [out[2]['2'][1:(len(out[3]) + 1)] for out in outs]
            input_ids = [out[3] for out in outs]
            query_vec = np.concatenate([start, end, ([[1]] * len(outs))], 1)
            rets = self.mips.search(query_vec, (input_ids, sparse_uni, sparse_bi), q_texts=batch_query, nprobe=nprobe, doc_top_k=doc_top_k, start_top_k=start_top_k, mid_top_k=mid_top_k, top_k=top_k, search_strategy=search_strategy, filter_=args.filter, max_answer_length=max_answer_length, sparse_weight=sparse_weight)
            t1 = time()
            out = {'ret': rets, 'time': int((1000 * (t1 - t0)))}
            return out

        # BUG FIX (here and for every handler below): @app.route decorators
        # were stripped, leaving bare tuples/strings.
        @app.route('/')
        def index():
            return app.send_static_file('index.html')

        @app.route('/files/<path:path>')
        def static_files(path):
            return app.send_static_file(('files/' + path))

        @app.route('/api', methods=['GET'])
        def api():
            query = request.args['query']
            strat = request.args['strat']
            out = batch_search([query], max_answer_length=args.max_answer_length, top_k=args.top_k, nprobe=args.nprobe, search_strategy=strat, doc_top_k=args.doc_top_k)
            out['ret'] = out['ret'][0]
            return jsonify(out)

        @app.route('/batch_api', methods=['POST'])
        def batch_api():
            batch_query = json.loads(request.form['query'])
            max_answer_length = int(request.form['max_answer_length'])
            start_top_k = int(request.form['start_top_k'])
            mid_top_k = int(request.form['mid_top_k'])
            top_k = int(request.form['top_k'])
            doc_top_k = int(request.form['doc_top_k'])
            nprobe = int(request.form['nprobe'])
            sparse_weight = float(request.form['sparse_weight'])
            strat = request.form['strat']
            out = batch_search(batch_query, max_answer_length=max_answer_length, start_top_k=start_top_k, mid_top_k=mid_top_k, top_k=top_k, doc_top_k=doc_top_k, nprobe=nprobe, sparse_weight=sparse_weight, search_strategy=strat)
            return jsonify(out)

        @app.route('/get_examples', methods=['GET'])
        def get_examples():
            with open(args.examples_path, 'r') as fp:
                examples = [line.strip() for line in fp.readlines()]
            return jsonify(examples)

        if (self.query_port is None):
            logger.info('You must set self.query_port for querying. You can use self.update_query_port() later on.')
        logger.info(f'Starting Index server at {self.get_address(index_port)}')
        # BUG FIX: restore server binding (was a bare `= HTTPServer(...)`).
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(index_port)
        IOLoop.instance().start()

    def serve_doc_ranker(self, doc_port, args):
        """Serve tf-idf document ranking endpoints. Blocks forever."""
        doc_ranker_path = os.path.join(args.dump_dir, args.doc_ranker_name)
        doc_ranker = TfidfDocRanker(doc_ranker_path, strict=False)
        app = Flask(__name__)
        app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
        CORS(app)

        @app.route('/doc_index', methods=['POST'])
        def doc_index():
            batch_query = json.loads(request.form['query'])
            doc_idxs = json.loads(request.form['doc_idxs'])
            outs = doc_ranker.batch_doc_scores(batch_query, doc_idxs)
            logger.info(f'Returning {len(outs)} from batch_doc_scores')
            return jsonify(outs)

        @app.route('/top_docs', methods=['POST'])
        def top_docs():
            batch_query = json.loads(request.form['query'])
            top_k = int(request.form['top_k'])
            batch_results = doc_ranker.batch_closest_docs(batch_query, k=top_k)
            top_idxs = [b[0] for b in batch_results]
            top_scores = [b[1].tolist() for b in batch_results]
            logger.info(f'Returning from batch_doc_scores')
            return jsonify([top_idxs, top_scores])

        @app.route('/text2spvec', methods=['POST'])
        def text2spvec():
            batch_query = json.loads(request.form['query'])
            q_spvecs = [doc_ranker.text2spvec(q, val_idx=True) for q in batch_query]
            q_vals = [q_spvec[0].tolist() for q_spvec in q_spvecs]
            q_idxs = [q_spvec[1].tolist() for q_spvec in q_spvecs]
            logger.info(f'Returning {(len(q_vals), len(q_idxs))} q_spvecs')
            return jsonify([q_vals, q_idxs])

        logger.info(f'Starting DocRanker server at {self.get_address(doc_port)}')
        # BUG FIX: restore server binding (was a bare `= HTTPServer(...)`).
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(doc_port)
        IOLoop.instance().start()

    def get_address(self, port):
        """Return 'base_ip:port'. Ports are handled as strings here."""
        assert ((self.base_ip is not None) and (len(port) > 0))
        return ((self.base_ip + ':') + port)

    def embed_query(self, batch_query):
        """POST queries to the encoder asynchronously; return a resolver."""
        emb_session = FuturesSession()
        r = emb_session.post((self.get_address(self.query_port) + '/batch_api'), data={'query': json.dumps(batch_query)})

        def map_():
            # Blocks on the future; returns (embeddings, elapsed ms).
            result = r.result()
            emb = result.json()
            return (emb, (result.elapsed.total_seconds() * 1000))
        return map_

    def query(self, query, search_strategy='hybrid'):
        """Single-question search via the index server's GET /api."""
        params = {'query': query, 'strat': search_strategy}
        res = requests.get((self.get_address(self.index_port) + '/api'), params=params)
        if (res.status_code != 200):
            logger.info(('Wrong behavior %d' % res.status_code))
        # BUG FIX: pre-bind so a parse failure returns None instead of
        # raising UnboundLocalError at the return statement.
        outs = None
        try:
            outs = json.loads(res.text)
        except Exception as e:
            logger.info(f'no response or error for q {query}')
            logger.info(res.text)
        return outs

    def batch_query(self, batch_query, max_answer_length=20, start_top_k=1000, mid_top_k=100, top_k=10, doc_top_k=5, nprobe=64, sparse_weight=0.05, search_strategy='hybrid'):
        """Batched search via the index server's POST /batch_api."""
        post_data = {'query': json.dumps(batch_query), 'max_answer_length': max_answer_length, 'start_top_k': start_top_k, 'mid_top_k': mid_top_k, 'top_k': top_k, 'doc_top_k': doc_top_k, 'nprobe': nprobe, 'sparse_weight': sparse_weight, 'strat': search_strategy}
        res = requests.post((self.get_address(self.index_port) + '/batch_api'), data=post_data)
        if (res.status_code != 200):
            logger.info(('Wrong behavior %d' % res.status_code))
        outs = None  # BUG FIX: avoid UnboundLocalError on parse failure
        try:
            outs = json.loads(res.text)
        except Exception as e:
            logger.info(f'no response or error for q {batch_query}')
            logger.info(res.text)
        return outs

    def get_doc_scores(self, batch_query, doc_idxs):
        """Client for the doc-ranker's POST /doc_index."""
        post_data = {'query': json.dumps(batch_query), 'doc_idxs': json.dumps(doc_idxs)}
        res = requests.post((self.get_address(self.doc_port) + '/doc_index'), data=post_data)
        if (res.status_code != 200):
            logger.info(('Wrong behavior %d' % res.status_code))
        result = None  # BUG FIX: avoid UnboundLocalError on parse failure
        try:
            result = json.loads(res.text)
        except Exception as e:
            logger.info(f'no response or error for {doc_idxs}')
            logger.info(res.text)
        return result

    def get_top_docs(self, batch_query, top_k):
        """Client for the doc-ranker's POST /top_docs."""
        post_data = {'query': json.dumps(batch_query), 'top_k': top_k}
        res = requests.post((self.get_address(self.doc_port) + '/top_docs'), data=post_data)
        if (res.status_code != 200):
            logger.info(('Wrong behavior %d' % res.status_code))
        result = None  # BUG FIX: avoid UnboundLocalError on parse failure
        try:
            result = json.loads(res.text)
        except Exception as e:
            logger.info(f'no response or error for {top_k}')
            logger.info(res.text)
        return result

    def get_q_spvecs(self, batch_query):
        """Client for the doc-ranker's POST /text2spvec."""
        post_data = {'query': json.dumps(batch_query)}
        res = requests.post((self.get_address(self.doc_port) + '/text2spvec'), data=post_data)
        if (res.status_code != 200):
            logger.info(('Wrong behavior %d' % res.status_code))
        result = None  # BUG FIX: avoid UnboundLocalError on parse failure
        try:
            result = json.loads(res.text)
        except Exception as e:
            logger.info(f'no response or error for q {batch_query}')
            logger.info(res.text)
        return result
class TestMetrics(unittest.TestCase):
    """Behavioral tests for the hierarchical metrics aggregation API."""

    def test_nesting(self):
        # A value logged in a nested context also feeds the enclosing one.
        with metrics.aggregate() as outer:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as inner:
                metrics.log_scalar('loss', 2)
        self.assertEqual(outer.get_smoothed_values()['loss'], 1.5)  # avg of 1, 2
        self.assertEqual(inner.get_smoothed_values()['loss'], 2)  # saw only 2

    def test_new_root(self):
        # new_root=True detaches the context from any enclosing aggregators.
        with metrics.aggregate() as outer:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as detached:
                metrics.log_scalar('loss', 2)
        self.assertEqual(outer.get_smoothed_values()['loss'], 1)
        self.assertEqual(detached.get_smoothed_values()['loss'], 2)

    def test_nested_new_root(self):
        # Mixed nesting: each layer only averages values logged while it was
        # on the active (non-detached) aggregation path.
        with metrics.aggregate() as layer1:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as layer2:
                metrics.log_scalar('loss', 2)
                with metrics.aggregate() as layer3:
                    metrics.log_scalar('loss', 3)
                    with metrics.aggregate(new_root=True) as layer4:
                        metrics.log_scalar('loss', 4)
            metrics.log_scalar('loss', 1.5)
        self.assertEqual(layer4.get_smoothed_values()['loss'], 4)
        self.assertEqual(layer3.get_smoothed_values()['loss'], 3)  # 4 was detached
        self.assertEqual(layer2.get_smoothed_values()['loss'], 2.5)  # avg of 2, 3
        self.assertEqual(layer1.get_smoothed_values()['loss'], 1.25)  # avg of 1, 1.5

    def test_named(self):
        # A named aggregator accumulates across separate `with` blocks.
        name = str(uuid.uuid4())
        metrics.reset_meters(name)
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)
        metrics.log_scalar('loss', 3)  # outside: not recorded under `name`
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 2)
        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 1.5)  # avg of 1, 2

    def test_nested_duplicate_names(self):
        # Re-entering an already-active named aggregator must not double count.
        name = str(uuid.uuid4())
        metrics.reset_meters(name)
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as sibling:
                with metrics.aggregate(name):
                    metrics.log_scalar('loss', 2)
            metrics.log_scalar('loss', 6)
        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 3)  # avg of 1, 2, 6
        self.assertEqual(sibling.get_smoothed_values()['loss'], 2)
class Proper_Noun_Rate(object):
    """Fraction of tokens tagged as proper nouns across a set of sentences."""

    def __init__(self, sentence_objs):
        # Sentence objects are expected to expose pos_tag_counter and
        # num_words() — presumably project sentence wrappers; confirm upstream.
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return proper_noun_count / total_word_count.

        Returns 0.0 when there are no words at all (BUG FIX: previously this
        raised ZeroDivisionError for an empty sentence list).
        """
        tot_num_pron = 0
        tot_num_words = 0
        for so in self.sentence_objs:
            tot_num_pron += so.pos_tag_counter.get_pos_tag_count(PROPER_NOUN)
            tot_num_words += so.num_words()
        if tot_num_words == 0:
            return 0.0
        return tot_num_pron / tot_num_words
class ResizeVideo(object):
    """Transform that resizes a video clip to `target_size` via F.resize."""

    def __init__(self, target_size, interpolation_mode='bilinear'):
        self.target_size = target_size
        self.interpolation_mode = interpolation_mode

    def __call__(self, clip):
        # Delegate entirely to the functional resize implementation.
        return F.resize(clip, self.target_size, self.interpolation_mode)

    def __repr__(self):
        return '{0}(resize_size={1})'.format(self.__class__.__name__, self.target_size)
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._multiply_factor = 1.0
def has_flat_params(self):
return (torch.is_tensor(self.fp32_params) or (isinstance(self.fp32_params, dict) and all((torch.is_tensor(t) for t in self.fp32_params.values()))))
def build_fp32_params(cls, args, params, flatten=True):
if flatten:
is_pipeline_parallel = (getattr(args, 'pipeline_model_parallel', False) and getattr(args, 'distributed_no_spawn', False))
total_param_size = sum((p.data.numel() for p in params))
devices = [torch.cuda.current_device()]
if is_pipeline_parallel:
devices = list(set(args.pipeline_devices))
fp32_params = {}
for device in devices:
if is_pipeline_parallel:
device_param_size = sum((p.data.numel() for p in params if (p.device.index == device)))
device_params = [p for p in params if (p.device.index == device)]
else:
device_param_size = total_param_size
device_params = params
fp32_params[device] = device_params[0].new(0).float().new(device_param_size)
offset = 0
for p in device_params:
numel = p.data.numel()
fp32_params[device][offset:(offset + numel)].copy_(p.data.view((- 1)))
offset += numel
fp32_params[device] = torch.nn.Parameter(fp32_params[device])
fp32_params[device].grad = fp32_params[device].data.new(device_param_size)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
if hasattr(p, 'expert'):
p32.expert = True
elif hasattr(p, 'base_expert'):
p32.base_expert = True
p32.grad = torch.zeros_like(p32.data)
if hasattr(p, 'param_group'):
p32.param_group = p.param_group
if hasattr(p, 'optim_overrides'):
p32.optim_overrides = p.optim_overrides
fp32_params.append(p32)
return fp32_params
def state_dict(self):
state_dict = self.fp32_optimizer.state_dict()
if (self.scaler is not None):
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
if (('loss_scale' in state_dict) and (self.scaler is not None)):
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
if (self.scaler is not None):
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self):
if self._needs_sync:
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
if p.requires_grad:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
grad_data = (p.grad.data if (p.grad is not None) else p.data.new_zeros(p.data.shape))
numel = grad_data.numel()
self.fp32_params[device].grad.data[offset:(offset + numel)].copy_(grad_data.view((- 1)))
offset += numel
else:
for (p, p32) in zip(self.fp16_params, self.fp32_params):
if (not p.requires_grad):
continue
if (p.grad is not None):
if (p32.grad is None):
p32.grad = p.grad.data.float()
else:
p32.grad.data.copy_(p.grad.data)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_params_to_fp16(self):
if self.has_flat_params:
devices = list(self.fp32_params.keys())
device_params_dict = defaultdict(list)
for p in self.fp16_params:
device_params_dict[p.device.index].append(p)
for device in devices:
device_params = device_params_dict[device]
offset = 0
for p in device_params:
numel = p.data.numel()
p.data.copy_(self.fp32_params[device].data[offset:(offset + numel)].view_as(p.data))
offset += numel
else:
for (p, p32) in zip(self.fp16_params, self.fp32_params):
if (not p.requires_grad):
continue
p.data.copy_(p32.data)
def _unscale_grads(self):
self._sync_fp16_grads_to_fp32()
if (torch.is_tensor(self._multiply_factor) or (self._multiply_factor != 1.0)):
self.fp32_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.0
def multiply_grads(self, c):
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
self._sync_fp16_grads_to_fp32()
grad_norm = (self._multiply_factor * self.fp32_optimizer.clip_grad_norm(0, aggregate_norm_fn))
if torch.is_tensor(self._multiply_factor):
self._multiply_factor = self._multiply_factor.to(grad_norm.device)
if (self.scaler is not None):
if (grad_norm > max_norm > 0.0):
self._multiply_factor *= (max_norm / grad_norm)
self.scaler.check_overflow(grad_norm)
elif (max_norm > 0.0):
clip_coef = (max_norm / (grad_norm + 1e-06)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None, groups=None):
self._sync_fp16_grads_to_fp32()
if getattr(self, 'supports_step_with_scale', False):
self.fp32_optimizer.step(closure, scale=(1.0 / self._multiply_factor), groups=groups)
else:
self._unscale_grads()
self.fp32_optimizer.step(closure, groups=groups)
if (self.scaler is not None):
self.scaler.update()
self._sync_fp32_params_to_fp16()
def zero_grad(self):
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError('self.fp32_params must be a tensor or dict')
else:
for p32 in self.fp32_params:
if (p32.grad is not None):
p32.grad.zero_()
self._needs_sync = False
if (self.scaler is not None):
self._multiply_factor = (1.0 / float(self.scaler.loss_scale)) |
_module  # NOTE(review): stray extraction artifact — a bare name expression; this raises NameError at import unless `_module` is defined elsewhere. Presumably safe to delete; confirm and remove.
class DefaultFormatBundle(object):
    """Bundle common detection fields into DataContainers.

    Wraps the image (as a stacked CHW tensor), box/label annotations, masks
    and semantic segmentation maps in DC objects when present in `results`.
    """

    def __call__(self, results):
        if 'img' in results:
            img = results['img']
            # Guarantee an explicit channel axis, then go HWC -> CHW.
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
            if key in results:
                results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            # Masks are ragged; keep them on CPU instead of stacking.
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            seg = results['gt_semantic_seg'][None, ...]
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
def MSLE(y_true: 'ndarray', y_pred: 'ndarray', multioutput: str='raw_values') -> Union[(float64, 'ndarray')]:
    """Mean squared log error between targets and predictions.

    With multioutput='raw_values', per-output errors are reshaped back to the
    original output shape; otherwise sklearn's aggregated scalar is returned.
    """
    y_true, y_pred, original_shape = _standardize_input(y_true, y_pred, multioutput)
    score = mean_squared_log_error(y_true, y_pred, multioutput=multioutput)
    if multioutput != 'raw_values':
        return score
    return score.reshape(original_shape)
def test_cross_module_exceptions(msg):
    """Exceptions raised in the cross-module extension `cm` must translate
    into the matching Python exception types with the expected messages."""
    # (raiser, expected type, expected str(exc)) — checked in original order.
    str_checked = [
        (cm.raise_runtime_error, RuntimeError, 'My runtime error'),
        (cm.raise_value_error, ValueError, 'My value error'),
        (cm.throw_pybind_value_error, ValueError, 'pybind11 value error'),
        (cm.throw_pybind_type_error, TypeError, 'pybind11 type error'),
    ]
    for raiser, exc_type, expected in str_checked:
        with pytest.raises(exc_type) as excinfo:
            raiser()
        assert str(excinfo.value) == expected
    # StopIteration carries no message worth checking.
    with pytest.raises(StopIteration) as excinfo:
        cm.throw_stop_iteration()
    # The locally-registered exception's text comes via the msg() helper.
    with pytest.raises(cm.LocalSimpleException) as excinfo:
        cm.throw_local_simple_error()
    assert msg(excinfo.value) == 'external mod'
    # KeyError's str() wraps the message in quotes.
    with pytest.raises(KeyError) as excinfo:
        cm.throw_local_error()
    assert str(excinfo.value) == "'just local'"
class FeedForward(nn.Module):
    """Transformer feed-forward block with GEGLU gating.

    dim: model width; mult: inner expansion factor; dropout: applied after
    the gated activation.
    """

    def __init__(self, dim, dropout=0.0, mult=4.0):
        super().__init__()
        # BUG FIX: `dim * mult` is a float with the default mult=4.0, but
        # nn.Linear requires integer feature sizes — cast once.
        inner_dim = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),  # doubled for GEGLU's value/gate split
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim),
        )

    def forward(self, x):
        return self.net(x)
class Compose(object):
    """Compose transforms that jointly act on (img, raw_img, std_cam, mask).

    Geometric transforms are applied to all four inputs in lockstep; purely
    photometric/tensor transforms are applied to the image only.
    """

    def __init__(self, mytransforms: list):
        self.transforms = mytransforms
        allowed = (Resize, RandomCrop, RandomHorizontalFlip, RandomVerticalFlip,
                   transforms.ToTensor, transforms.Normalize, transforms.ColorJitter)
        # Reject any transform type this pipeline does not know how to route.
        for t in mytransforms:
            assert isinstance(t, allowed)

    def chec_if_random(self, transf):
        # NOTE: name kept as-is (public API); only RandomCrop counts here.
        return True if isinstance(transf, RandomCrop) else None

    def __call__(self, img, raw_img, std_cam, mask):
        joint_kinds = (RandomHorizontalFlip, RandomVerticalFlip, RandomCrop, Resize)
        for t in self.transforms:
            if isinstance(t, joint_kinds):
                # Geometric: keep all four tensors aligned.
                img, raw_img, std_cam, mask = t(img, raw_img, std_cam, mask)
            else:
                # Photometric / conversion: image only.
                img = t(img)
        return img, raw_img, std_cam, mask

    def __repr__(self):
        body = ''.join('\n    {0}'.format(t) for t in self.transforms)
        return self.__class__.__name__ + '(' + body + '\n)'
def singularize(word, pos=NOUN, custom=None):
    """Return the singular form of an Italian word.

    word: the (possibly plural) word; pos: part-of-speech tag ('DT' handles
    determiners specially); custom: optional {plural: singular} overrides.

    BUG FIX (idiom): the mutable default `custom={}` was replaced with a
    None sentinel; behavior for all existing callers is unchanged.
    """
    if custom is None:
        custom = {}
    if word in custom:
        return custom[word]
    w = word.lower()
    # Determiners have a tiny closed inventory.
    if pos == 'DT':
        if w in ('i', 'gli'):
            return 'il'
        if w == 'el':
            return 'la'
        return w
    # Very short words are left untouched.
    if len(w) < 3:
        return w
    if w in singular_irregular:
        return singular_irregular[w]
    # Suffix rules learned by majority vote take precedence.
    for a, b in singular_majority_vote:
        if w.endswith(a):
            return w[:-len(a)] + b
    # Fallback orthographic rules, most specific first.
    if w.endswith(('ali', 'ari', 'ili', 'esi', 'nti')):
        return w[:-1] + 'e'
    if w.endswith('isti'):
        return w[:-1] + 'a'
    if w.endswith(('che', 'ghe')):
        return w[:-2] + 'a'
    if w.endswith(('chi', 'ghi')):
        return w[:-2] + 'o'
    if w.endswith('emi'):
        return w[:-1] + 'a'
    if w.endswith('e'):
        return w[:-1] + 'a'
    if w.endswith('i'):
        return w[:-1] + 'o'
    return w
class LxmertEncoder(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiating it raises a clear backend-missing error via
    requires_backends instead of a confusing NameError/ImportError.
    """
    # Backends that must be available for the real class to be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_trainer(trainer, iterations=30, allow_gpu=False):
    """Smoke-test a compiled trainer by monkey-patching its environment
    factory with faked environments, redirecting model saves to a temp dir,
    and aborting the run after `iterations` global steps via TestFinished.

    trainer: a CompiledTrainer (asserted below); iterations: step budget;
    allow_gpu: forwarded to the trainer unless None.
    """
    create_env = trainer.unwrapped.create_env
    def wrap_env(env):
        # Replace the real environment with a fake; fake_env presumably
        # returns (fake, original) — confirm against its definition.
        (env, original) = fake_env(env)
        print(('Faked environment: %s' % original.__class__.__name__))
        return env
    def _create_env(*args, **kwargs):
        # Drop-in replacement for the trainer's env factory that fakes both
        # the training env and (if present) the validation env.
        env = create_env(*args, **kwargs)
        if hasattr(trainer.unwrapped, 'validation_env'):
            trainer.unwrapped.validation_env = wrap_env(trainer.unwrapped.validation_env)
        env = wrap_env(env)
        print(('Environment shape is %s' % str(get_space_shape(env.observation_space))))
        return env
    assert (trainer.__class__.__name__ == 'CompiledTrainer')
    with tempfile.TemporaryDirectory() as tmpdir:
        trainer.unwrapped.create_env = _create_env
        # Walk the wrapper chain; point any SaveWrapper at the temp dir so
        # the test never writes into the real model directory.
        t = trainer
        while hasattr(t, 'trainer'):
            if (t.__class__.__name__ == 'SaveWrapper'):
                t.model_root_directory = tmpdir
            t = t.trainer
        if (allow_gpu is not None):
            trainer.unwrapped.allow_gpu = allow_gpu
        # Shrink buffers so the smoke test runs quickly.
        trainer.unwrapped.replay_size = 50
        trainer.unwrapped.preprocess_steps = 10
        process_base = trainer.process
        def process(*args, **kwargs):
            # Wrap the step function to stop the run once the budget is spent.
            res = process_base(*args, **kwargs)
            if (trainer.unwrapped._global_t > iterations):
                raise TestFinished()
            return res
        trainer.process = process
        try:
            trainer.run()
            # Reached only if run() returns without hitting the budget.
            trainer.run = (lambda *args, **kwargs: print('ERROR: Cannot run tested trainer'))
        except TestFinished:
            pass
class DIAResUnit(nn.Module):
    """Residual unit with an optional shared (DIA) attention module.

    The attention module carries a recurrent state `hc` across units; when no
    attention module is supplied, the unit degrades to a plain residual block.
    """

    def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bottleneck=True, conv1_stride=False, attention=None):
        super(DIAResUnit, self).__init__()
        # A 1x1 projection is needed whenever shape or stride changes.
        self.resize_identity = ((in_channels != out_channels) or (stride != 1))
        if bottleneck:
            self.body = ResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(in_channels=in_channels, out_channels=out_channels, stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        self.activ = nn.ReLU(inplace=True)
        self.attention = attention

    def forward(self, x, hc=None):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # BUG FIX: attention defaults to None but was called unconditionally;
        # guard so the default-constructed unit works as a plain residual block.
        if self.attention is not None:
            (x, hc) = self.attention(x, hc)
        x = (x + identity)
        x = self.activ(x)
        return (x, hc)
def resolve_precision(precision: str):
    """Map a precision name to (use_amp, model_dtype, data_dtype).

    precision: one of 'amp', 'float16', 'bfloat16', 'float32'.
    Raises ValueError for anything else (BUG FIX: was an `assert`, which is
    silently stripped under `python -O`).
    """
    if precision not in ('amp', 'float16', 'bfloat16', 'float32'):
        raise ValueError(f'unknown precision: {precision!r}')
    use_amp = False
    model_dtype = torch.float32
    data_dtype = torch.float32
    if precision == 'amp':
        # AMP keeps fp32 weights/data; autocast handles mixed precision.
        use_amp = True
    elif precision == 'float16':
        model_dtype = torch.float16
        data_dtype = torch.float16
    elif precision == 'bfloat16':
        model_dtype = torch.bfloat16
        data_dtype = torch.bfloat16
    return (use_amp, model_dtype, data_dtype)
def process_document(doc_name, part_name, gold_doc, auto_doc, out, remove_singletons=True):
    """Analyse the coreference errors in one (document, part) pair.

    Compares the system output (``auto_doc``) against the gold annotation
    (``gold_doc``), writes long/short human-readable reports plus a family of
    CoNLL-style dumps (one per error category, each showing the system output
    with that category of error repaired), and returns the accumulated list
    of errors.

    Args:
        doc_name: document identifier, used in report headers.
        part_name: part identifier within the document.
        gold_doc: dict with 'text', 'parses', 'heads', 'mentions', 'clusters'.
        auto_doc: dict with at least 'mentions' and 'clusters'.
        out: mapping from stream name ('out', 'short out', 'properties',
            'gold', 'system output', 'error: ...') to a writable file object.
        remove_singletons: when True, system clusters of size one are dropped
            before analysis (singletons are conventionally excluded from
            CoNLL-style scoring).

    Returns:
        List of ``(error_name, change)`` tuples for every detected error.
    """
    # Report header on both the full and the short report.
    for ofile in [out['out'], out['short out']]:
        print('', file=ofile)
        print(('-' * 79), file=ofile)
        print(doc_name, part_name, file=ofile)
        print(('-' * 79), file=ofile)
        print('', file=ofile)
    text = gold_doc['text']
    gold_parses = gold_doc['parses']
    gold_heads = gold_doc['heads']
    gold_mentions = gold_doc['mentions']
    gold_clusters = gold_doc['clusters']
    # Work on copies so the caller's auto_doc is not mutated.
    auto_mentions = auto_doc['mentions'].copy()
    auto_clusters = auto_doc['clusters'].copy()
    if remove_singletons:
        to_remove = set()
        for cluster in auto_clusters:
            if (len(auto_clusters[cluster]) == 1):
                to_remove.add(cluster)
                for mention in auto_clusters[cluster]:
                    auto_mentions.pop(mention)
        for cluster in to_remove:
            auto_clusters.pop(cluster)
    gold_cluster_set = coreference.set_of_clusters(gold_clusters)
    auto_cluster_set = coreference.set_of_clusters(auto_clusters)
    gold_mention_set = coreference.set_of_mentions(gold_clusters)
    auto_mention_set = coreference.set_of_mentions(auto_clusters)
    coreference_rendering.print_conll_style_part(out['system output'], text, auto_mentions, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['gold'], text, gold_mentions, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: original'], text, auto_mentions, doc_name, part_name)
    errors = []
    # --- Span (boundary) errors -------------------------------------------
    # NOTE(review): match_boundaries presumably repairs boundary-only
    # mismatches in auto_mentions/auto_clusters in place — confirm, since the
    # repaired dicts are dumped below as 'error: span mismatch'.
    span_errors = match_boundaries(gold_mention_set, auto_mention_set, auto_mentions, auto_clusters, text, gold_parses, gold_heads)
    if (len(span_errors) == 0):
        # Prefix so the unconditional header reads "No Span Errors: ...".
        print('No', end=' ', file=out['out'])
        print('No', end=' ', file=out['short out'])
    print('Span Errors: (system, gold)', file=out['out'])
    print('Span Errors: (system, gold)', file=out['short out'])
    for error in span_errors:
        errors.append(('span mismatch', error))
        before = coreference_rendering.print_mention(None, False, gold_parses, gold_heads, text, error[0], return_str=True)
        after = coreference_rendering.print_mention(None, False, gold_parses, gold_heads, text, error[1], return_str=True)
        print('{:<50} {:<50}'.format(before, after), file=out['out'])
        print('{:<50} {:<50}'.format(before, after), file=out['short out'])
    print('', file=out['out'])
    print('', file=out['short out'])
    # Only span-mismatch errors are in `errors` at this point.
    for error in errors:
        print('span mismatch', error, file=out['out'])
        print((['span error'] + list(error[1])), file=out['properties'])
    print('', file=out['out'])
    print(('-' * 79), file=out['out'])
    print('', file=out['short out'])
    print(('-' * 79), file=out['short out'])
    coreference_rendering.print_conll_style_part(out['error: span mismatch'], text, auto_mentions, doc_name, part_name)
    # One copy of the (boundary-corrected) mention map per error category.
    # The plain copies get only that one category repaired; the *_prog copies
    # are repaired cumulatively, in a fixed category order.
    auto_mentions_split = auto_mentions.copy()
    auto_mentions_extra_mention = auto_mentions.copy()
    auto_mentions_extra_entity = auto_mentions.copy()
    auto_mentions_merge = auto_mentions.copy()
    auto_mentions_missing_mention = auto_mentions.copy()
    auto_mentions_missing_entity = auto_mentions.copy()
    auto_mentions_extra_mention_prog = auto_mentions.copy()
    auto_mentions_extra_entity_prog = auto_mentions.copy()
    auto_mentions_merge_prog = auto_mentions.copy()
    auto_mentions_missing_mention_prog = auto_mentions.copy()
    auto_mentions_missing_entity_prog = auto_mentions.copy()
    # Highest cluster id currently in use (auto_mentions maps mention -> id);
    # new clusters created by the repairs below get ids above this.
    max_cluster = 0
    if (len(auto_mentions) > 0):
        max_cluster = auto_mentions[max(auto_mentions, key=(lambda mention: auto_mentions[mention]))]
    # Each confusion group pairs up the system and gold clusters that share
    # mentions; perfectly matching groups are skipped.
    groups = coreference.confusion_groups(gold_mentions, auto_mentions, gold_clusters, auto_clusters)
    for (auto, gold) in groups:
        if nlp_eval.coreference_cluster_match(gold, auto):
            continue
        print('', file=out['out'])
        print('', file=out['short out'])
        # `colours` (per-gold-cluster colour map) is reused when rendering
        # individual mentions in the detailed listing below.
        colours = coreference_rendering.print_cluster_error_group([auto, gold], out['out'], text, gold_parses, gold_heads, gold_mentions)
        # NOTE(review): colours2 appears unused — kept for the side effect of
        # printing the group to the short report.
        colours2 = coreference_rendering.print_cluster_error_group([auto, gold], out['short out'], text, gold_parses, gold_heads, gold_mentions)
        # Minimal edit script turning the system grouping into the gold one.
        changes = repair(auto, gold, auto_mentions, gold_mention_set, text, gold_parses, gold_heads, gold_clusters, gold_mentions, gold_doc)
        print('\nRaw changes:', file=out['out'])
        for name in changes:
            print(name, len(changes[name]), file=out['out'])
            for change in changes[name]:
                errors.append((('raw ' + name), change))
        changes = categorise(auto, gold, changes, text, gold_parses, gold_heads, gold_mention_set, auto_mentions, gold_doc)
        # Apply each categorised change to the matching per-category copy and
        # to every *_prog copy at or after it in the cumulative order.
        if ('split' in changes):
            for change in changes['split']:
                # Move the mentions in change[0] into a fresh cluster.
                max_cluster += 1
                for mention in change[0]:
                    auto_mentions_split[mention] = max_cluster
                    auto_mentions_extra_mention_prog[mention] = max_cluster
                    auto_mentions_extra_entity_prog[mention] = max_cluster
                    auto_mentions_merge_prog[mention] = max_cluster
                    auto_mentions_missing_mention_prog[mention] = max_cluster
                    auto_mentions_missing_entity_prog[mention] = max_cluster
                rest = change[1].difference(change[0])
                # A leftover spurious singleton that is not a gold mention is
                # removed outright.
                if (len(rest) == 1):
                    rest = next(iter(rest))
                    if (rest not in gold_mentions):
                        auto_mentions_split.pop(rest)
                        auto_mentions_extra_mention_prog.pop(rest)
                        auto_mentions_extra_entity_prog.pop(rest)
                        auto_mentions_merge_prog.pop(rest)
                        auto_mentions_missing_mention_prog.pop(rest)
                        auto_mentions_missing_entity_prog.pop(rest)
        if ('extra mention' in changes):
            for change in changes['extra mention']:
                for mention in change[0]:
                    auto_mentions_extra_mention.pop(mention)
                    auto_mentions_extra_mention_prog.pop(mention)
                    auto_mentions_extra_entity_prog.pop(mention)
                    auto_mentions_merge_prog.pop(mention)
                    auto_mentions_missing_mention_prog.pop(mention)
                    auto_mentions_missing_entity_prog.pop(mention)
        if ('extra entity' in changes):
            for change in changes['extra entity']:
                for mention in change[0]:
                    auto_mentions_extra_entity.pop(mention)
                    auto_mentions_extra_entity_prog.pop(mention)
                    auto_mentions_merge_prog.pop(mention)
                    auto_mentions_missing_mention_prog.pop(mention)
                    auto_mentions_missing_entity_prog.pop(mention)
        if ('merge' in changes):
            for change in changes['merge']:
                for cauto_mentions in [auto_mentions_merge, auto_mentions_merge_prog, auto_mentions_missing_mention_prog, auto_mentions_missing_entity_prog]:
                    # Anchor the merge on the earliest non-pronoun mention of
                    # the gold cluster (falling back to the earliest mention).
                    non_pronoun = min_non_pronoun(change[1], text, gold_parses, gold_heads)
                    if (non_pronoun is None):
                        non_pronoun = min(change[1])
                    if (non_pronoun not in cauto_mentions):
                        max_cluster += 1
                        cauto_mentions[non_pronoun] = max_cluster
                    ncluster_id = cauto_mentions[non_pronoun]
                    done = set()
                    for mention in change[0]:
                        if (mention not in cauto_mentions):
                            cauto_mentions[mention] = ncluster_id
                        elif (cauto_mentions[mention] not in done):
                            # Relabel the mention's whole old cluster onto the
                            # anchor cluster (only once per old cluster id).
                            pcluster_id = cauto_mentions[mention]
                            done.add(pcluster_id)
                            for smention in cauto_mentions:
                                if (cauto_mentions[smention] == pcluster_id):
                                    cauto_mentions[smention] = ncluster_id
        if ('missing mention' in changes):
            for change in changes['missing mention']:
                for cauto_mentions in [auto_mentions_missing_mention, auto_mentions_missing_mention_prog, auto_mentions_missing_entity_prog]:
                    # Attach the missing mention to the earliest already-present
                    # mention of its gold cluster, else start a new cluster.
                    min_in_goal = None
                    for mention in change[1]:
                        if (mention in cauto_mentions):
                            if ((min_in_goal is None) or (min_in_goal > mention)):
                                min_in_goal = mention
                    mention = next(iter(change[0]))
                    if (min_in_goal is not None):
                        cauto_mentions[mention] = cauto_mentions[min_in_goal]
                    else:
                        min_mention = min(change[1])
                        max_cluster += 1
                        cauto_mentions[min_mention] = max_cluster
                        cauto_mentions[mention] = max_cluster
        if ('missing entity' in changes):
            for change in changes['missing entity']:
                # Introduce the entire absent gold entity as a new cluster.
                max_cluster += 1
                for mention in change[0]:
                    auto_mentions_missing_entity[mention] = max_cluster
                    auto_mentions_missing_entity_prog[mention] = max_cluster
        # Per-group summary with human-readable category names.
        print('\nCategorised:', file=out['out'])
        print('\nErrors:', file=out['short out'])
        rename = {'span mismatch': 'Span Error', 'split': 'Conflated Entities', 'extra mention': 'Extra Mention', 'extra entity': 'Extra Entity', 'merge': 'Divided Entity', 'missing mention': 'Missing Mention', 'missing entity': 'Missing Entity'}
        for name in changes:
            if (len(changes[name]) > 0):
                print(len(changes[name]), rename[name], file=out['out'])
                print(len(changes[name]), rename[name], file=out['short out'])
        print('\nDetailed error listing:', file=out['out'])
        for name in changes:
            for change in changes[name]:
                # Render the mention when the change involves exactly one,
                # coloured to match the group rendering above.
                mention = None
                if (len(change[0]) == 1):
                    mention = change[0].copy().pop()
                if (mention is not None):
                    print(name, file=out['out'])
                    if (mention in gold_mentions):
                        colour = 15
                        if (gold_mentions[mention] in colours):
                            colour = colours[gold_mentions[mention]]
                        coreference_rendering.print_mention(out['out'], False, gold_parses, gold_heads, text, mention, colour)
                    else:
                        coreference_rendering.print_mention(out['out'], False, gold_parses, gold_heads, text, mention, extra=True)
                print(name, change, file=out['out'])
                print('Properties included:', name, change[(- 1)], file=out['out'])
                print(([name] + change[(- 1)]), file=out['properties'])
                errors.append((name, change))
        print('', file=out['out'])
        print(('-' * 79), file=out['out'])
        print('', file=out['short out'])
        print(('-' * 79), file=out['short out'])
    # Dump each repaired variant in CoNLL style for downstream scoring.
    coreference_rendering.print_conll_style_part(out['error: split'], text, auto_mentions_split, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: extra mention'], text, auto_mentions_extra_mention, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: extra entity'], text, auto_mentions_extra_entity, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: merge'], text, auto_mentions_merge, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: missing mention'], text, auto_mentions_missing_mention, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: missing entity'], text, auto_mentions_missing_entity, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: extra mention prog'], text, auto_mentions_extra_mention_prog, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: extra entity prog'], text, auto_mentions_extra_entity_prog, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: merge prog'], text, auto_mentions_merge_prog, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: missing mention prog'], text, auto_mentions_missing_mention_prog, doc_name, part_name)
    coreference_rendering.print_conll_style_part(out['error: missing entity prog'], text, auto_mentions_missing_entity_prog, doc_name, part_name)
    return errors
def read_fed_dataset(cfg: DictConfig):
    """Select, load and normalise the federated dataset described by *cfg*.

    Combination datasets ('comb/...') are loaded via MDFedDataset; the known
    single-source datasets via FedExtDataset; any other name raises.  The
    'hierarchies' grouping of the train split is mirrored onto the test
    split (defaulting to an empty list), and train/test are checked to
    cover the same users.
    """
    ds_name = cfg.name
    known_single = (
        ds_name in ('Mnist', 'MnistM', 'SVHN', 'USPS')
        or ds_name.startswith(('ReviewBow', 'ReviewTok', 'Office31', 'OfficeHome65', 'DomainNet'))
    )
    if ds_name.startswith('comb/'):
        from .multi_domain import MDFedDataset as FedDataset
    elif known_single:
        from .federalize import FedExtDataset as FedDataset
    else:
        raise ValueError(f'''Unknown data: {cfg.name} with config:
{OmegaConf.to_yaml(cfg)}''')
    fed_dict = FedDataset(cfg).load(generate_if_not_exist=True)
    train_split = fed_dict['train']
    test_split = fed_dict['test']
    groups = train_split['hierarchies'] if 'hierarchies' in train_split else []
    train_split['hierarchies'] = groups
    test_split['hierarchies'] = groups
    assert train_split['users'] == test_split['users']
    return fed_dict
def all_reduce_dict(data: Mapping[(str, Any)], device, group=None) -> Dict[(str, Any)]:
    """All-reduce every value of *data* across the process group.

    Values are cast to float64 so heterogeneous inputs (Python scalars and
    tensors) reduce consistently.  Tensors already living on the same device
    *type* as ``device`` are reduced together in one stacked buffer; all other
    values are reduced in a second buffer.  Result preserves the key order of
    *data*.

    Args:
        data: mapping from name to a tensor or anything ``torch.tensor``
            accepts (ints, floats, lists, ...).
        device: device used for the reduction buffers.
        group: optional process group forwarded to ``all_reduce``.

    Returns:
        OrderedDict with the same keys, holding reduced float64 tensors.
    """
    data_keys = list(data.keys())
    cpu_data = OrderedDict()
    device_data = OrderedDict()
    for k in data_keys:
        t = data[k]
        if (not torch.is_tensor(t)):
            # Plain values: wrap as double tensors (on CPU).
            cpu_data[k] = torch.tensor(t, dtype=torch.double)
        elif (t.device.type != device.type):
            # NOTE(review): despite the bucket's name this only casts to
            # double and keeps the tensor on its current device; the move to
            # `device` happens inside _all_reduce_dict — confirm intended.
            cpu_data[k] = t.to(dtype=torch.double)
        else:
            device_data[k] = t.to(dtype=torch.double)
    def _all_reduce_dict(data: OrderedDict):
        # Stack all values into one buffer so a single collective call
        # covers every entry of this bucket.
        if (len(data) == 0):
            return data
        buf = torch.stack(list(data.values())).to(device=device)
        all_reduce(buf, group=group)
        return {k: buf[i] for (i, k) in enumerate(data)}
    cpu_data = _all_reduce_dict(cpu_data)
    device_data = _all_reduce_dict(device_data)
    def get_from_stack(key):
        # Look a key up in whichever bucket it was routed to.
        if (key in cpu_data):
            return cpu_data[key]
        elif (key in device_data):
            return device_data[key]
        raise KeyError
    return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
def copy_file(source_file: str, destination_file: str) -> str:
    """Copy *source_file* to *destination_file*, creating parent directories.

    Args:
        source_file: path of the existing file to copy.
        destination_file: target path; missing parent directories are created.

    Returns:
        The destination path, for convenient chaining.
    """
    dest_dir = os.path.dirname(destination_file)
    # Fix: dirname() is '' for a bare filename, and os.makedirs('') raises
    # FileNotFoundError — only create directories when there is a parent.
    if dest_dir:
        os.makedirs(dest_dir, exist_ok=True)
    shutil.copyfile(source_file, destination_file)
    return destination_file
def wrap_agent_env(thunk):
    """Wrap an environment factory with image-transpose and float-scaling.

    Given *thunk* (a zero-argument environment factory), returns a new
    factory whose environments are additionally wrapped in TransposeImage
    and then ScaledFloatFrame.
    """
    from ..common.env import ScaledFloatFrame, TransposeImage

    def _build():
        # Innermost first: transpose the image layout, then scale to floats.
        return ScaledFloatFrame(TransposeImage(thunk()))
    return _build
def cal_PMI(data_root_path, vocab_root_path, min_count, phase='train', window_size=6, min_cooccurence=2):
    """Compute PMI-based edge weights between vocabulary words.

    Counts word and co-occurring-pair frequencies within a sliding window
    over every (padded) sentence, converts counts to probabilities, computes
    PMI(i, j) = log(p(i, j) / (p(i) * p(j))), clips negative PMI to zero,
    and assigns a unique edge id to every cell with non-zero PMI.

    Args:
        data_root_path: path handed to get_vocab_list / get_content.
        vocab_root_path: path to the stored vocabulary.
        min_count: minimum frequency for a word to enter the vocabulary.
        phase: unused in this function — NOTE(review): confirm whether it
            was meant to select the train/test split in get_content.
        window_size: co-occurrence window radius in tokens.
        min_cooccurence: pair counts below this threshold are zeroed.

    Returns:
        Tuple (edges_weights, edges_mappings, count):
        - edges_weights: torch.Tensor of shape (num_edges + 1, 1); index 0
          is a 0.0 placeholder meaning "no edge".
        - edges_mappings: int matrix mapping a (i, j) vocab-index pair to
          its edge id (0 = no edge).
        - count: total number of edge ids assigned (placeholder included).
    """
    vocab = get_vocab_list(data_root_path, vocab_root_path, min_count)
    all_text = get_content(data_root_path)
    all_text = text_padding(all_text)
    # word -> vocabulary index
    d = dict(zip(vocab, range(len(vocab))))
    pair_count_matrix = np.zeros((len(vocab), len(vocab)), dtype=int)
    word_count = np.zeros(len(vocab), dtype=int)
    print('the shape of word_count: ', np.shape(word_count))
    # --- Raw counting pass -------------------------------------------------
    for sentence in all_text:
        for (i, word) in enumerate(sentence):
            if (word != 'PAD'):
                try:
                    word_count[d[word]] += 1
                except KeyError:
                    # Out-of-vocabulary centre word: skip its window too.
                    continue
                # NOTE(review): window is asymmetric — the right edge is
                # exclusive (range stops at i + window_size - 1) while the
                # left edge includes i - window_size; confirm intended.
                start_index = max(0, (i - window_size))
                end_index = min(len(sentence), (i + window_size))
                for j in range(start_index, end_index):
                    if (i == j):
                        continue
                    else:
                        target_word = sentence[j]
                        try:
                            pair_count_matrix[(d[word], d[target_word])] += 1
                        except KeyError:
                            # Out-of-vocabulary context word: skip the pair.
                            continue
    # --- Threshold rare co-occurrences ------------------------------------
    flag = 0
    for i in range(len(vocab)):
        for j in range(len(vocab)):
            if (pair_count_matrix[(i, j)] >= min_cooccurence):
                flag = (flag + 1)
            elif (pair_count_matrix[(i, j)] < min_cooccurence):
                pair_count_matrix[(i, j)] = 0
    # NOTE(review): `flag` counts pairs AT OR ABOVE min_cooccurence, but the
    # message below claims it is the count "less than 2" — one of the two
    # looks wrong; confirm against the intended statistic.
    print('the number count of co-occurence of two words less than 2: {}'.format(flag))
    # --- Counts -> probabilities -> PMI ------------------------------------
    total_count = np.sum(word_count)
    word_count = (word_count / total_count)
    pair_count_matrix = (pair_count_matrix / total_count)
    pmi_matrix = np.zeros((len(vocab), len(vocab)), dtype=float)
    for i in range(len(vocab)):
        for j in range(len(vocab)):
            if ((word_count[i] * word_count[j]) == 0):
                pmi_matrix[(i, j)] = 0
            elif (pair_count_matrix[(i, j)] == 0):
                pmi_matrix[(i, j)] = 0
            else:
                pmi_matrix[(i, j)] = np.log((pair_count_matrix[(i, j)] / (word_count[i] * word_count[j])))
    pmi_matrix = np.nan_to_num(pmi_matrix)
    # Positive PMI only: negative associations are clipped to zero.
    pmi_matrix = np.maximum(pmi_matrix, 0.0)
    # --- Assign edge ids to non-zero PMI cells -----------------------------
    # Index 0 of edges_weights is a dummy weight for "no edge".
    edges_weights = [0.0]
    count = 1
    edges_mappings = np.zeros((len(vocab), len(vocab)), dtype=int)
    for i in range(len(vocab)):
        for j in range(len(vocab)):
            if (pmi_matrix[(i, j)] != 0):
                edges_weights.append(pmi_matrix[(i, j)])
                edges_mappings[(i, j)] = count
                count += 1
    print('the shape of edges_mapping: ', np.shape(edges_mappings))
    edges_weights = np.array(edges_weights)
    edges_weights = edges_weights.reshape((- 1), 1)
    print('the shape of edges_weights: ', edges_weights.shape)
    edges_weights = torch.Tensor(edges_weights)
    return (edges_weights, edges_mappings, count)
def rollout_representation(representation_model, steps, obs_embed, action, prev_states, done):
    """Unroll a latent representation model (RSSM-style) over a trajectory.

    Args:
        representation_model: callable mapping
            ``(obs_embed[t], action[t], prev_states) ->
            (prior_states, posterior_states)``.
        steps: number of time steps to unroll.
        obs_embed: observation embeddings, indexed by time along the first
            axis.  # assumes time-major layout — TODO confirm
        action: actions, indexed by time along the first axis.
        prev_states: initial recurrent state object (supports ``.map``).
        done: per-step episode-termination flags; used to zero the carried
            state at episode boundaries.

    Returns:
        Tuple ``(priors, posteriors, deters)`` where the stacked prior and
        posterior sequences drop the final step and the deterministic part
        drops the first step — presumably to align targets with predictions
        for the model loss; verify against the caller.
    """
    priors = []
    posteriors = []
    for t in range(steps):
        (prior_states, posterior_states) = representation_model(obs_embed[t], action[t], prev_states)
        # Reset the carried state wherever an episode terminated at step t.
        prev_states = posterior_states.map((lambda x: (x * (1.0 - done[t]))))
        priors.append(prior_states)
        posteriors.append(posterior_states)
    prior = stack_states(priors, dim=0)
    post = stack_states(posteriors, dim=0)
    return (prior.map((lambda x: x[:(- 1)])), post.map((lambda x: x[:(- 1)])), post.deter[1:])
class GaussianDropout(ZooKerasLayer):
    """Keras-style Gaussian dropout layer exposed through the Zoo wrapper.

    Forwards the drop rate *p* (coerced to float) and the optional
    *input_shape* (coerced to a list, or None when falsy) to the underlying
    ZooKerasLayer constructor.
    """

    def __init__(self, p, input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(GaussianDropout, self).__init__(None, float(p), shape, **kwargs)
def check_env_flag(name: str, default: bool = False) -> bool:
    """Interpret the environment variable *name* as a boolean flag.

    With ``default=False`` the flag is on only when the variable is set to an
    explicit truthy token (ON/1/TRUE/YES/Y, case-insensitive).  With
    ``default=True`` the flag stays on unless the variable is set to an
    explicit falsy token (OFF/0/FALSE/NO/N).  An unset or unrecognised value
    therefore yields *default*.
    """
    value = os.getenv(name, '').upper()
    if default:
        # Opt-out semantics: anything but an explicit "off" keeps it enabled.
        return value not in ['OFF', '0', 'FALSE', 'NO', 'N']
    # Opt-in semantics: only an explicit "on" enables it.
    return value in ['ON', '1', 'TRUE', 'YES', 'Y']
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.