code stringlengths 101 5.91M |
|---|
class AddNewModelLikeCommand(BaseTransformersCLICommand):
    """CLI command that scaffolds a new model by copying an existing one.

    Configuration comes either from a JSON file (``--config_file``) or from
    an interactive prompt via ``get_user_input()``.
    """
    # NOTE(review): no `self` parameter — upstream this is presumably
    # decorated with @staticmethod; confirm the decorator was not lost.
    def register_subcommand(parser: ArgumentParser):
        """Register the `add-new-model-like` sub-command on *parser*."""
        add_new_model_like_parser = parser.add_parser('add-new-model-like')
        add_new_model_like_parser.add_argument('--config_file', type=str, help='A file with all the information for this model creation.')
        add_new_model_like_parser.add_argument('--path_to_repo', type=str, help='When not using an editable install, the path to the Transformers repo.')
        # Factory turns the parsed args into an AddNewModelLikeCommand.
        add_new_model_like_parser.set_defaults(func=add_new_model_like_command_factory)
    def __init__(self, config_file=None, path_to_repo=None, *args):
        """Load creation settings from *config_file* or prompt the user.

        Args:
            config_file: Optional JSON file describing the model to copy and
                the new model's patterns.
            path_to_repo: Optional path to a Transformers checkout (used when
                the package is not an editable install).
            *args: Ignored; absorbs extra positionals from the CLI factory.
        """
        if (config_file is not None):
            with open(config_file, 'r', encoding='utf-8') as f:
                config = json.load(f)
            self.old_model_type = config['old_model_type']
            self.model_patterns = ModelPatterns(**config['new_model_patterns'])
            # Optional keys fall back to defaults.
            self.add_copied_from = config.get('add_copied_from', True)
            self.frameworks = config.get('frameworks', ['pt', 'tf', 'flax'])
        else:
            # Interactive mode: gather everything from the user.
            (self.old_model_type, self.model_patterns, self.add_copied_from, self.frameworks) = get_user_input()
        self.path_to_repo = path_to_repo
    def run(self):
        """Create the new model files, optionally retargeting repo paths."""
        if (self.path_to_repo is not None):
            # Redirect the module-level path constants at the given checkout.
            global TRANSFORMERS_PATH
            global REPO_PATH
            REPO_PATH = Path(self.path_to_repo)
            TRANSFORMERS_PATH = ((REPO_PATH / 'src') / 'transformers')
        create_new_model_like(model_type=self.old_model_type, new_model_patterns=self.model_patterns, add_copied_from=self.add_copied_from, frameworks=self.frameworks)
class ComparableSampler(Sampler[ComparableT], Generic[ComparableT]):
    """Sampler mixin that forwards comparison operators to ``self.value``.

    Comparisons against another ComparableSampler defer to the superclass /
    object semantics; comparisons against raw values delegate to the stored
    ``self.value``.
    """
    def __lt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        # Sampler-vs-sampler ordering defers to the superclass.
        if isinstance(other, ComparableSampler):
            return super().__lt__(other)
        # NOTE(review): `self.value` is defined elsewhere in the class —
        # presumably the sampled value; it must be set before raw comparison.
        if (self.value is None):
            raise ValueError('`self.value` is None')
        return (self.value < other)
    def __eq__(self, other: object) -> bool:
        # Identity-style equality between two samplers; value equality otherwise.
        if isinstance(other, ComparableSampler):
            return object.__eq__(self, other)
        return (self.value == other)
    def __ne__(self, other: object) -> bool:
        if isinstance(other, ComparableSampler):
            return object.__ne__(self, other)
        return (self.value != other)
    def __le__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        # Derived from < and ==.
        return (self.__lt__(other) or self.__eq__(other))
    def __gt__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        # Defined as the negation of <=.
        return (not self.__le__(other))
    def __ge__(self, other: Union[(ComparableT, 'ComparableSampler')]) -> bool:
        return (self.__gt__(other) or self.__eq__(other))
def test_alias_delay_initialization1(capture):
    """pybind11 alias test: the Python trampoline (PyA) is constructed only
    for Python subclasses of m.A, not for direct m.A instances."""
    class B(m.A):
        def __init__(self):
            super().__init__()
        def f(self):
            print('In python f()')
    # Plain C++ instance: no trampoline output expected.
    with capture:
        a = m.A()
        m.call_f(a)
        del a
        pytest.gc_collect()
    assert (capture == 'A.f()')
    # Python subclass: trampoline ctor/dtor and overridden f() are visible.
    with capture:
        b = B()
        m.call_f(b)
        del b
        pytest.gc_collect()
    assert (capture == '\n        PyA.PyA()\n        PyA.f()\n        In python f()\n        PyA.~PyA()\n    ')
def read_issia_ground_truth(camera_id, dataset_path):
    """Load ISSIA-CNR ground-truth annotations for one camera.

    Args:
        camera_id: Camera index; must be in [1, 6].
        dataset_path: Root of the ISSIA dataset (may start with '~').

    Returns:
        Annotations produced by `_create_annotations` (project-defined shape).
    """
    assert ((camera_id >= 1) and (camera_id <= 6))
    dataset_path = os.path.expanduser(dataset_path)
    annotation_path = os.path.join(dataset_path, 'Annotation Files')
    # Annotation filenames follow the dataset's fixed naming scheme.
    annotation_file = (('Film Role-0 ID-' + str(camera_id)) + ' T-0 m00s00-026-m00s01-020.xgtf')
    annotation_filepath = os.path.join(annotation_path, annotation_file)
    gt = _load_groundtruth(annotation_filepath)
    # Read a single frame only to obtain the video frame shape.
    sequence = open_issia_sequence(camera_id, dataset_path=dataset_path)
    (ret, frame) = sequence.read()
    sequence.release()
    annotations = _create_annotations(gt, camera_id, frame.shape)
    return annotations
class IRNode(object):
    """Base node of the IR tree: carries a node type and a weak parent link."""

    def __init__(self, node_type=None, parent=None, parse_info=None, raw_text=None):
        super().__init__()
        self.node_type = node_type
        self.la_type = None
        # Parent is stored as a weakref (set via set_parent) or None.
        self.parent = None
        self.set_parent(parent)
        self.parse_info = parse_info
        self.raw_text = raw_text

    def is_node(self, node_type):
        """Return True when this node is exactly of *node_type*."""
        return self.node_type == node_type

    def set_parent(self, parent):
        """Store a weak reference to *parent*; falsy parents are ignored."""
        if parent:
            self.parent = weakref.ref(parent)

    def get_ancestor(self, node_type):
        """Walk parent links upward; return the nearest ancestor whose type
        matches *node_type*, or None if there is none."""
        if self.parent is None:
            return None
        ancestor = self.parent()
        while ancestor is not None:
            if ancestor.node_type == node_type:
                return ancestor
            # Follow the next weak link, stopping when it is absent.
            ancestor = ancestor.parent() if ancestor.parent else None
        return None

    def get_child(self, node_type):
        """Base nodes have no children; subclasses override this."""
        return None
class TestPPOPendulumLSTM(TfGraphTestCase):
    """Smoke-test PPO with a Gaussian LSTM policy on InvertedDoublePendulum."""

    # FIX: the original line read `.mujoco_long`, a bare attribute expression
    # that is a syntax error at class scope; restored the conventional pytest
    # mark. NOTE(review): confirm the mark name against the project's
    # pytest configuration.
    @pytest.mark.mujoco_long
    def test_ppo_pendulum_lstm(self):
        """Train for a few epochs and require a minimum average return."""
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            lstm_policy = GaussianLSTMPolicy(env_spec=env.spec)
            baseline = GaussianMLPBaseline(env_spec=env.spec, regressor_args=dict(hidden_sizes=(32, 32)))
            algo = PPO(env_spec=env.spec, policy=lstm_policy, baseline=baseline, max_path_length=100, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, optimizer_args=dict(batch_size=32, max_epochs=10), stop_entropy_gradient=True, entropy_method='max', policy_ent_coeff=0.02, center_adv=False)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Loose bound: just verify learning made meaningful progress.
            assert (last_avg_ret > 60)
class DataTrainingArguments():
    """Arguments pertaining to what data to use for summarization training
    and how to preprocess it.

    NOTE(review): the fields use `dataclasses.field(...)` but no `@dataclass`
    decorator is visible — it was presumably lost in extraction; confirm
    upstream, since without it the fields are plain class attributes.
    """
    lang: Optional[str] = field(default=None, metadata={'help': 'Language id for summarization.'})
    # Dataset selection: either a hub dataset name or explicit files below.
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    text_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the full texts (for summarization).'})
    summary_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the summaries (for summarization).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines or csv file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    # Preprocessing knobs.
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    # Debugging truncation limits.
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    # Generation settings.
    num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
    source_prefix: Optional[str] = field(default='', metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the decoder_start_token_id.Useful for multilingual models like mBART where the first generated tokenneeds to be the target language token (Usually it is the target language token)'})
    def __post_init__(self):
        """Validate the dataset/file combination and fill in defaults."""
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None) and (self.test_file is None)):
            raise ValueError('Need either a dataset name or a training, validation, or test file.')
        else:
            # Any explicitly provided file must be csv or json.
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
            if (self.test_file is not None):
                extension = self.test_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`test_file` should be a csv or a json file.'
        # Validation target length falls back to the training target length.
        if (self.val_max_target_length is None):
            self.val_max_target_length = self.max_target_length
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (possibly distributed) linear evaluation.

    Builds the backbone, freezes everything except the classifier head,
    optionally loads MoCo-style pretrained weights, then trains/validates.

    Args:
        gpu: GPU index for this process (None selects CPU/DataParallel paths).
        ngpus_per_node: GPUs per node; used to derive rank and batch split.
        args: Parsed CLI namespace (mutated: gpu, rank, batch_size, workers,
            start_epoch may be updated).
    """
    global best_acc1
    args.gpu = gpu
    # Silence printing on non-master processes.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # --- model creation ---
    print("=> creating model '{}'".format(args.arch))
    if (args.arch == 'mobilenetv3'):
        # NOTE(review): result bound to `base_model`, which is never used —
        # later code still references `model`; confirm this branch works.
        print("=> MobileV3: creating model '{}'".format(args.arch))
        base_model = mobilenetv3.mobilenetv3_large_100(num_classes=1000)
    elif (args.arch == 'efficientb0'):
        model = efficientnet_b0(pretrained=False, num_classes=1000)
    elif (args.arch == 'efficientb5'):
        model = efficientnet_b5(pretrained=False, num_classes=1000)
    else:
        model = models.__dict__[args.arch]()
    print(model)
    # Freeze all layers except the final classifier (linear evaluation).
    for (name, param) in model.named_parameters():
        if ((args.arch in ['resnet18', 'resnet34', 'resnet152']) and (name not in ['fc.weight', 'fc.bias'])):
            param.requires_grad = False
        if ((args.arch == 'efficientb0') and (name not in ['classifier.weight', 'classifier.bias'])):
            param.requires_grad = False
        if ((args.arch == 'efficientb5') and (name not in ['classifier.weight', 'classifier.bias'])):
            param.requires_grad = False
    # Re-initialize the classifier head.
    if (args.arch in ['efficientb0', 'efficientb5']):
        model.classifier.weight.data.normal_(mean=0.0, std=0.01)
        model.classifier.bias.data.zero_()
    else:
        model.fc.weight.data.normal_(mean=0.0, std=0.01)
        model.fc.bias.data.zero_()
    # --- load pretrained weights (query-encoder keys only) ---
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['state_dict']
            for k in list(state_dict.keys()):
                # Rename 'module.encoder_q.*' keys (except the head) to plain
                # backbone keys; every original key is deleted afterwards.
                if (args.arch in ['efficientb0', 'efficientb5']):
                    if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.classifier'))):
                        state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                elif (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
                    state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                del state_dict[k]
            args.start_epoch = 0
            msg = model.load_state_dict(state_dict, strict=False)
            print(msg)
            # Only the (re-initialized) head may be missing from the checkpoint.
            if (args.arch in ['efficientb0', 'efficientb5']):
                assert (set(msg.missing_keys) == {'classifier.weight', 'classifier.bias'})
            else:
                assert (set(msg.missing_keys) == {'fc.weight', 'fc.bias'})
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # --- device / parallel wrapping ---
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the total batch and workers across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG parallelize only the feature extractor.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # Optimize only the trainable tensors (head weight + bias).
    parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
    assert (len(parameters) == 2)
    optimizer = torch.optim.SGD(parameters, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # --- optionally resume from the last checkpoint ---
    if args.resume:
        checkpoint_path = get_last_checkpoint(args.resume)
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            if (args.gpu is None):
                checkpoint = torch.load(checkpoint_path)
            else:
                # Map tensors directly onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(checkpoint_path, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            out = model.load_state_dict(checkpoint['state_dict'], strict=False)
            print(out)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # --- data loading ---
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    # --- training loop ---
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only the (per-node) master process writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
            if (epoch == args.start_epoch):
                # Verify the frozen weights were untouched by the first epoch.
                sanity_check(model.state_dict(), args.pretrained, args)
def dump_file(*dps):
    """Serialize (object, path) pairs to disk with `cp.dump`.

    Each argument must be a 2-tuple ``(obj, path)``; malformed entries are
    reported and skipped rather than aborting the whole batch.
    """
    for dp in dps:
        if (len(dp) != 2):
            print(('issue:' + str(dp)))
            continue
        # Context manager guarantees the handle is closed even if cp.dump
        # raises (the original leaked the file object on error).
        with open(dp[1], 'wb') as dfile:
            cp.dump(dp[0], dfile)
    print('dump file done.')
class MetaMonkey(torch.nn.Module):
    """Functional-style wrapper: run ``net`` with externally supplied weights.

    Temporarily monkey-patches the ``forward`` of Conv2d/BatchNorm2d/Linear
    submodules to the functional API with parameters drawn from the supplied
    OrderedDict, runs one forward pass, then restores the original methods.
    """
    def __init__(self, net):
        super().__init__()
        self.net = net
        # NOTE(review): this shadows nn.Module.parameters() with an
        # OrderedDict attribute — apparently intentional, but surprising.
        self.parameters = OrderedDict(net.named_parameters())
    def forward(self, inputs, parameters=None):
        """Run ``net`` on *inputs*, substituting *parameters* if given.

        *parameters* must iterate in the same order that ``named_modules()``
        consumes them (conv/bn/linear weights and biases, in module order).
        """
        # Fast path: no substitution requested.
        if (parameters is None):
            return self.net(inputs)
        param_gen = iter(parameters.values())
        method_pile = []  # original bound forwards, restored after the pass
        counter = 0
        for (name, module) in self.net.named_modules():
            if isinstance(module, torch.nn.Conv2d):
                ext_weight = next(param_gen)
                if (module.bias is not None):
                    ext_bias = next(param_gen)
                else:
                    ext_bias = None
                method_pile.append(module.forward)
                module.forward = partial(F.conv2d, weight=ext_weight, bias=ext_bias, stride=module.stride, padding=module.padding, dilation=module.dilation, groups=module.groups)
            elif isinstance(module, torch.nn.BatchNorm2d):
                # Mirror BatchNorm2d's momentum / num_batches_tracked logic.
                if (module.momentum is None):
                    exponential_average_factor = 0.0
                else:
                    exponential_average_factor = module.momentum
                if (module.training and module.track_running_stats):
                    if (module.num_batches_tracked is not None):
                        module.num_batches_tracked += 1
                        if (module.momentum is None):
                            exponential_average_factor = (1.0 / float(module.num_batches_tracked))
                        else:
                            exponential_average_factor = module.momentum
                ext_weight = next(param_gen)
                ext_bias = next(param_gen)
                method_pile.append(module.forward)
                module.forward = partial(F.batch_norm, running_mean=module.running_mean, running_var=module.running_var, weight=ext_weight, bias=ext_bias, training=(module.training or (not module.track_running_stats)), momentum=exponential_average_factor, eps=module.eps)
            elif isinstance(module, torch.nn.Linear):
                lin_weights = next(param_gen)
                lin_bias = next(param_gen)
                method_pile.append(module.forward)
                module.forward = partial(F.linear, weight=lin_weights, bias=lin_bias)
            elif (next(module.parameters(), None) is None):
                # Parameter-free modules (activations, pooling, ...) pass through.
                pass
            elif isinstance(module, torch.nn.Sequential):
                # Containers are handled via their children.
                pass
            elif DEBUG:
                warnings.warn(f'Patching for module {module.__class__} is not implemented.')
        output = self.net(inputs)
        # Restore the original forwards in the same traversal order.
        for (name, module) in self.net.named_modules():
            if isinstance(module, torch.nn.modules.conv.Conv2d):
                module.forward = method_pile.pop(0)
            elif isinstance(module, torch.nn.BatchNorm2d):
                module.forward = method_pile.pop(0)
            elif isinstance(module, torch.nn.Linear):
                module.forward = method_pile.pop(0)
        return output
def xgboost_eval_metric_accuracy(preds, dtrain):
    """XGBoost custom eval metric: negated accuracy (so lower is better)."""
    target = dtrain.get_label()
    # An empty weight vector means the dataset is unweighted.
    sample_weight = dtrain.get_weight()
    if (len(sample_weight) == 0):
        sample_weight = None
    return ('accuracy', negative_accuracy(target, preds, sample_weight))
class convnext_large(nn.Module):
    """Truncated ConvNeXt-Large backbone: stem plus the first two stages.

    Relies on the third-party `timm` package for model construction.
    """
    def __init__(self, pretrained=True):
        super().__init__()
        self.convnext = timm.create_model('convnext_large', pretrained=pretrained)
    def forward(self, x, data=None, layer=2):
        # `data` and `layer` are accepted for interface compatibility but are
        # unused here — the forward always stops after stage 1.
        x = self.convnext.stem(x)
        x = self.convnext.stages[0](x)
        x = self.convnext.stages[1](x)
        return x
    def compute_params(self):
        """Count parameters of the layers actually used by forward()."""
        num = 0
        for param in self.convnext.stem.parameters():
            num += np.prod(param.size())
        for param in self.convnext.stages[0].parameters():
            num += np.prod(param.size())
        for param in self.convnext.stages[1].parameters():
            num += np.prod(param.size())
        return num
def create_data_info_service(port, pool_size=10):
    """Build (but do not start) a gRPC server hosting the DataInfoService.

    Args:
        port: TCP port to bind (insecure, all interfaces).
        pool_size: Worker-thread pool size for request handling.

    Returns:
        The configured `grpc.Server`; the caller must invoke `.start()`.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=pool_size))
    data_info_servicer = DataInfoServicer()
    coworker_pb2_grpc.add_DataInfoServiceServicer_to_server(data_info_servicer, server)
    server.add_insecure_port('[::]:{}'.format(port))
    logger.info('Data Info Service is created with port {}.'.format(port))
    return server
class EmptyModule(nn.Module):
    """Identity placeholder module: forwards its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Pass-through; useful for swapping out layers without rewiring.
        return x
class Mlp(nn.Module):
    """Two-layer MLP: fc1 -> activation -> dropout -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width when omitted.
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        # A single Dropout module is applied after both layers.
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the MLP to *x* and return the projected features."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def create_file_symlink(file1, file2):
    """Best-effort creation of a symlink at *file2* pointing to *file1*.

    Any existing *file2* is removed first. Failures are logged and swallowed
    deliberately — symlink creation is treated as non-critical.
    """
    try:
        if g_pathmgr.exists(file2):
            g_pathmgr.rm(file2)
        g_pathmgr.symlink(file1, file2)
    except Exception as e:
        logging.info(f'Could NOT create symlink. Error: {e}')
def update_models(opt, epoch, modelG, modelD, dataset_warp):
    """Per-epoch bookkeeping: LR decay, training-batch growth, unfreezing."""
    # Begin decaying learning rates once the constant-LR phase (niter) ends.
    if epoch > opt.niter:
        modelG.module.update_learning_rate(epoch, 'G')
        modelD.module.update_learning_rate(epoch, 'D')
    # Periodically grow the temporal training batch.
    if epoch % opt.niter_step == 0:
        dataset_warp.dataset.update_training_batch(epoch // opt.niter_step)
    # With multiple spatial scales, unfreeze the global generator exactly at
    # the configured epoch (0 disables unfreezing entirely).
    unfreeze_now = (opt.n_scales_spatial > 1) and (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global)
    if unfreeze_now:
        modelG.module.update_fixed_params()
class LZ09_F5(LZ09):
    """LZ09 benchmark problem F5 (dtype=1, ltype=26, ptype=21)."""

    def __init__(self, number_of_variables=30):
        # Zero-argument super() form; same base-class configuration.
        super().__init__(number_of_variables, dtype=1, ltype=26, ptype=21)
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)']

    def number_of_objectives(self) -> int:
        """Number of objectives (always 2 for this problem)."""
        return len(self.obj_directions)

    def name(self):
        """Human-readable problem identifier."""
        return 'LZ09_F5'
def delexicalise(utt, dictionary):
    """Replace whole-word occurrences of each dictionary key with its value.

    *dictionary* is an iterable of (surface, placeholder) pairs. Padding the
    utterance with spaces makes boundary words match as whole tokens; the
    padding is stripped again after each replacement.
    """
    for surface, placeholder in dictionary:
        padded = ' ' + utt + ' '
        utt = padded.replace(' ' + surface + ' ', ' ' + placeholder + ' ')[1:-1]
    return utt
def main():
    """Visualize synthetic images generated from a dataset, one per keypress.

    Runs forever in a cv2 window loop until the generator is exhausted or the
    process is interrupted.
    """
    import argparse
    import cv2
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_loader', type=str)
    parser.add_argument('dataset_path', type=str)
    parser.add_argument('--video_name', type=str)
    parser.add_argument('--random_seed', type=int, help='Optional random seed for deterministic results')
    # Project helpers append shared resizing / image-processing options.
    add_resizing_arguments(parser)
    add_image_processing_arguments(parser)
    args = parser.parse_args()
    # Seed both RNGs; passing None keeps them nondeterministic.
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    # All parsed args are forwarded as keyword arguments to the visualizer.
    synth_visualizer_gen = SyntheticSimpleVisualizer(**vars(args)).generate()
    while True:
        (synth_image, _) = next(synth_visualizer_gen)
        cv2.imshow('synth_image', synth_image)
        # Block until a key is pressed before showing the next image.
        cv2.waitKey()
def measure_model(model, x):
    """Measure op and parameter counts of *model* on a single input *x*.

    Temporarily wraps the forward of every measurable leaf module so that
    `measure_layer` accumulates into the module-level counters, runs one
    forward pass, then restores the original forwards.

    Returns:
        (output, count_ops, count_params): the forward result plus the
        accumulated global counters.
    """
    global count_ops, count_params
    count_ops = 0
    count_params = 0
    def should_measure(x):
        # Leaves and pruned modules are measured directly; containers recurse.
        return (is_leaf(x) or is_pruned(x))
    def modify_forward(model):
        for child in model.children():
            if should_measure(child):
                # Closure factory binds `m` per child (avoids late binding).
                def new_forward(m):
                    def lambda_forward(*args):
                        measure_layer(m, *args)
                        return m.old_forward(*args)
                    return lambda_forward
                child.old_forward = child.forward
                child.forward = new_forward(child)
            else:
                modify_forward(child)
    def restore_forward(model):
        for child in model.children():
            # Only children that were actually wrapped carry old_forward.
            if (is_leaf(child) and hasattr(child, 'old_forward')):
                child.forward = child.old_forward
                child.old_forward = None
            else:
                restore_forward(child)
    modify_forward(model)
    out = model.forward(x)
    restore_forward(model)
    return (out, count_ops, count_params)
def switch_pool(input, pooling_switches, name=None):
    """Gather elements of *input* selected by per-batch pooling switches.

    `pooling_switches` holds per-batch-element flat indices; this offsets
    them into global flat indices over the whole batch, gathers the values,
    and reshapes to the switches' shape.

    Args:
        input: Tensor whose first dimension is the batch.
        pooling_switches: Integer tensor of per-element indices; its shape
            determines the output shape.
        name: Optional base name for the created ops.

    Returns:
        Tensor shaped like `pooling_switches` containing the gathered values.
    """
    pooled_shape = get_shape(pooling_switches)
    batch_size = pooled_shape[0]
    # Elements per batch item in the switches tensor.
    # NOTE(review): offsetting assumes the flattened input also has
    # `element_num` values per batch item — confirm with callers.
    element_num = np.prod(pooled_shape[1:])
    assert (get_shape(input)[0] == batch_size), 'mismatched batch_size'
    # Per-batch offsets, broadcastable against the switches tensor.
    global_base = np.reshape((np.array(range(batch_size)) * element_num), ([batch_size] + ([1] * (len(pooled_shape) - 1))))
    pooling_switches += tf.constant(global_base, dtype=pooling_switches.dtype, name=((name + '/baseswitches') if (name is not None) else None))
    flatten_switches = tf.reshape(pooling_switches, [(- 1)], name=((name + '/flattenswitches') if (name is not None) else None))
    flatten_input = tf.reshape(input, [(- 1)], name=((name + '/flatteninput') if (name is not None) else None))
    flatten_output = tf.gather(flatten_input, flatten_switches, name=((name + '/flattenoutput') if (name is not None) else None))
    pooled_output = tf.reshape(flatten_output, pooled_shape, name=((name + '/pooledoutput') if (name is not None) else None))
    return pooled_output
class ERFNet(nn.Module):
    """ERFNet semantic-segmentation model (encoder + decoder).

    Optionally initializes the encoder from the official pretrained
    checkpoint when ``opt.weights_init == 'pretrained'``.
    """
    def __init__(self, num_classes, opt):
        super().__init__()
        self.encoder = Encoder(num_classes)
        self.decoder = Decoder(num_classes)
        if (opt.weights_init == 'pretrained'):
            path_getter = gp.GetPath()
            checkpoint_path = path_getter.get_checkpoint_path()
            encoder_path = os.path.join(checkpoint_path, 'erfnet', 'official_pretrained', 'erfnet_encoder_pretrained.pth.tar')
            cur_state_dict = self.encoder.state_dict()
            if opt.no_cuda:
                load_state_dict = torch.load(encoder_path, map_location='cpu')
            else:
                load_state_dict = torch.load(encoder_path)
            counter = 0
            load_keys = list(load_state_dict['state_dict'].keys())
            # Positionally align our keys with the checkpoint's keys.
            # NOTE(review): `key in load_keys[counter]` is a substring test
            # against a single checkpoint key and relies on both state dicts
            # sharing the same ordering — confirm this matching is intended.
            for key in list(cur_state_dict.keys()):
                if (key in load_keys[counter]):
                    cur_state_dict[key] = load_state_dict['state_dict'][load_keys[counter]]
                    counter += 1
            self.encoder.load_state_dict(cur_state_dict)
    def forward(self, inputs):
        """Segment the augmented color image at scale 0.

        Returns a dict with the raw logits and the argmax segmentation map.
        """
        features = self.encoder(inputs[('color_aug', 0, 0)])
        seg_logits = self.decoder.forward(features)
        seg_map = torch.argmax(seg_logits, dim=1)
        output = {'segmentation_logits': seg_logits, 'segmentation': seg_map}
        return output
def test_dtype(simple_dtype):
    """pybind11 numpy-dtype binding test: formatting, constructors, methods,
    and per-kind attributes must round-trip through the C++ module `m`."""
    from sys import byteorder
    # Expected byte-order char for native dtypes on this machine.
    e = ('<' if (byteorder == 'little') else '>')
    # String forms of all registered dtypes (whitespace-insensitive).
    assert ([x.replace(' ', '') for x in m.print_dtypes()] == [simple_dtype_fmt(), packed_dtype_fmt(), f"[('a',{simple_dtype_fmt()}),('b',{packed_dtype_fmt()})]", partial_dtype_fmt(), partial_nested_fmt(), "[('a','S3'),('b','S3')]", (((((("{{'names':['a','b','c','d']," + "'formats':[('S4',(3,)),('") + e) + "i4',(2,)),('u1',(3,)),('") + e) + "f4',(4,2))],") + "'offsets':[0,12,20,24],'itemsize':56}}").format(e=e), (("[('e1','" + e) + "i8'),('e2','u1')]"), (("[('x','i1'),('y','" + e) + "u8')]"), (((("[('cflt','" + e) + "c8'),('cdbl','") + e) + "c16')]")])
    d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'], 'offsets': [1, 10], 'itemsize': 20})
    d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
    # Dtype construction from the C++ side matches numpy's own constructors.
    assert (m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'), np.dtype('bool'), d1, d1, np.dtype('uint32'), d2, np.dtype('d')])
    assert (m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True, np.dtype('int32').itemsize, simple_dtype.itemsize])
    assert (m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype())))
    # Per-character dtype attributes exposed by the binding.
    expected_chars = 'bhilqBHILQefdgFDG?MmO'
    assert (m.test_dtype_kind() == list('iiiiiuuuuuffffcccbMmO'))
    assert (m.test_dtype_char_() == list(expected_chars))
    assert (m.test_dtype_num() == [np.dtype(ch).num for ch in expected_chars])
    assert (m.test_dtype_byteorder() == [np.dtype(ch).byteorder for ch in expected_chars])
    assert (m.test_dtype_alignment() == [np.dtype(ch).alignment for ch in expected_chars])
    assert (m.test_dtype_flags() == [chr(np.dtype(ch).flags) for ch in expected_chars])
def get_device(device=None):
    """Resolve a torch device string ('cuda' or 'cpu').

    A requested 'cuda' (or no request at all) falls back to 'cpu' when CUDA
    is unavailable; any other request resolves to 'cpu'. Selecting CUDA also
    switches the default tensor type to CUDA floats and logs the GPU name.
    """
    wants_cuda = device is None or device == 'cuda'
    if wants_cuda and torch.cuda.is_available():
        device = 'cuda'
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        device = 'cpu'
    if (device == 'cuda'):
        logger.info("CUDA device set to '{}'.".format(torch.cuda.get_device_name(0)))
    return device
class ToyModel(nn.Module):
    """Stack of Linear layers: one input projection followed by
    (num_linears - 1) constant-width layers applied in sequence."""

    def __init__(self, in_features=16, out_features=4, num_linears=8):
        super().__init__()
        self.first_linear = nn.Linear(in_features, out_features)
        # Remaining layers keep the feature width constant.
        self.linears = torch.nn.ModuleList(
            nn.Linear(out_features, out_features) for _ in range(num_linears - 1)
        )

    def forward(self, inputs):
        """Run the stack on inputs['input'] and return the final activations."""
        out = self.first_linear(inputs['input'])
        for layer in self.linears:
            out = layer(out)
        return out
def assign_pos_tag_for_bpe(src_path: str, bpe_path: str, trg_path: str) -> None:
    """Project word-level POS tags onto BPE subwords.

    Every subword inherits the tag of the word it came from; the source-word
    pointer advances only when a subword does NOT carry the '@@' continuation
    marker (i.e. it ends a word).

    Args:
        src_path: File with one line of whitespace-separated POS tags per sentence.
        bpe_path: File with the corresponding BPE-segmented sentences.
        trg_path: Output file receiving one line of subword-level tags per sentence.
    """
    # BUG FIX: the original tested `subword.endswith('')`, which is always
    # True, so the word pointer never advanced and the length assert failed
    # on any non-empty line. '@@' is the standard subword-nmt continuation
    # marker. Context managers also replace the leaked explicit close calls.
    with open(src_path, 'r', encoding='utf-8') as src_file, \
            open(bpe_path, 'r', encoding='utf-8') as bpe_file, \
            open(trg_path, 'w', encoding='utf-8') as trg_file:
        for (src_pos_line, bpe_line) in zip(src_file, bpe_file):
            src_pos_tags = src_pos_line.strip().split()
            subwords = bpe_line.strip().split()
            trg_pos_tags = []
            p = 0
            for subword in subwords:
                trg_pos_tags.append(src_pos_tags[p])
                if not subword.endswith('@@'):
                    # Word boundary reached: move to the next source tag.
                    p += 1
            # Sanity: every source tag consumed, one tag per subword.
            assert (p == len(src_pos_tags))
            assert (len(subwords) == len(trg_pos_tags))
            trg_file.write(' '.join(trg_pos_tags) + '\n')
def resnet50_fc512_efdmix12_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-50 + 512-d FC head with EFDMix on layer1/layer2 (alpha=0.1).

    Args:
        num_classes: Output classes for the classifier head.
        loss: Loss type forwarded to the ResNet constructor.
        pretrained: When True, load ImageNet weights into the backbone.
        **kwargs: Extra arguments forwarded to `ResNet`.
    """
    model = ResNet(num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None, efdmix_layers=['layer1', 'layer2'], efdmix_alpha=0.1, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
def browse_weights(weights_dir, model='generator'):
    """Interactively walk *weights_dir* until the user picks an .hdf5 file.

    Directory listings are shown sorted in reverse; selecting a directory
    descends into it, selecting an hdf5 file ends the loop.

    Returns:
        Path of the chosen weights file.
    """
    done = False
    while not done:
        # Reverse sort so the most recent checkpoints are listed first.
        entries = np.sort(os.listdir(weights_dir))[::-1]
        for idx, entry in enumerate(entries):
            logger.info('{item_n}: {item} \n'.format(item_n=idx, item=entry))
        sel = select_positive_integer('>>> Select folder or weights for {}\n'.format(model))
        if entries[sel].endswith('hdf5'):
            weights_path = os.path.join(weights_dir, entries[sel])
            done = True
        else:
            weights_dir = os.path.join(weights_dir, entries[sel])
    return weights_path
def lowecase_list_of_sentences(sentences):
    """Return a new list with every sentence lower-cased.

    (Function-name typo is preserved for caller compatibility.)
    """
    return [sentence.lower() for sentence in sentences]
class IIDIsotropicGaussianUVLoss(nn.Module):
    """Negative log-likelihood of an IID isotropic Gaussian over (u, v).

    Each prediction carries its own shared variance, derived from sigma_u via
    softplus and clamped below by `sigma_lower_bound`.
    """

    def __init__(self, sigma_lower_bound: float):
        super().__init__()
        self.sigma_lower_bound = sigma_lower_bound
        # Constant term of the 2D Gaussian log-density.
        self.log2pi = math.log(2 * math.pi)

    def forward(self, u: torch.Tensor, v: torch.Tensor, sigma_u: torch.Tensor, target_u: torch.Tensor, target_v: torch.Tensor):
        """Return the summed negative log-likelihood over all elements."""
        # softplus keeps the variance positive; the lower bound keeps it
        # away from zero for numerical stability.
        sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
        squared_err = (u - target_u) ** 2 + (v - target_v) ** 2
        nll = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + squared_err / sigma2)
        return nll.sum()
def compute_metrics(eval_preds):
    """Compute ROUGE scores (plus mean generated length) for seq2seq eval.

    Relies on module-level `tokenizer`, `metric`, and `postprocess_text`.
    """
    preds, labels = eval_preds
    # Some models return (logits, ...) tuples; keep only the predictions.
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # -100 marks ignored label positions; swap in the pad token so they decode.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
    scores = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    # Keep the mid f-measure of each ROUGE score, expressed as a percentage.
    result = {key: score.mid.fmeasure * 100 for key, score in scores.items()}
    # Average non-pad token count approximates generated sequence length.
    result['gen_len'] = np.mean([np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds])
    return {k: round(v, 4) for k, v in result.items()}
class statm_loss(nn.Module):
    """Mean squared gap between per-channel spatial means of x and y."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        """x, y: (N, C, *spatial) tensors; returns a scalar loss."""
        # Flatten all spatial dims, then average per channel.
        x_mean = x.view(x.size(0), x.size(1), -1).mean(dim=2)
        y_mean = y.view(y.size(0), y.size(1), -1).mean(dim=2)
        # Squared channel-mean gap, averaged over channels then batch.
        return (x_mean - y_mean).pow(2).mean(1).mean()
def warn(msg, *args):
    """Print a yellow 'WARN: ...' line when the log level permits.

    *args* are %-interpolated into *msg*, following the logging convention.
    """
    if MIN_LEVEL <= WARN:
        text = '%s: %s' % ('WARN', msg % args)
        print(colorize(text, 'yellow'))
def all_metrics(results, test):
    """Print global and per-slice metrics for one cross-validation fold.

    `results` holds a model tag ('model') and a single fold's predictions
    ('predictions' -> 'pred'/'y'/'fold'); `test` is the matching test
    dataframe (needs 'user_id' and the SENSITIVE_ATTR column, rows aligned
    with the predictions).  Computes overall AUC/NLL, per-user and
    per-sensitive-group NDCG/AUC/NLL, plots the worst subgroups and the
    protected-vs-unprotected ROC curves.  Exits when `test` length does not
    match the stored fold.
    """
    ndcg_ = defaultdict(list)
    # Buckets of (pred, y) pairs sliced per user and per sensitive group.
    predictions_per_user = defaultdict((lambda : defaultdict(list)))
    predictions_per_sensitive_attr = defaultdict((lambda : defaultdict(list)))
    metrics_per_user = defaultdict(list)
    metrics_per_sensitive_attr = defaultdict(list)
    # Report label; FM variants are tagged with the length of the model's
    # string form — presumably encodes a hyperparameter, TODO confirm.
    model = ('LR' if (results['model'] == 'LR') else ('FM' + str(len(str(results['model'])))))
    print(model)
    fold = results['predictions'][0]['fold']
    y_pred = np.array(np.array(results['predictions'][0]['pred']))
    y = np.array(results['predictions'][0]['y'])
    try:
        # Predictions must align row-for-row with the test dataframe.
        assert (len(y) == len(test))
    except AssertionError:
        print('This is not the right fold', len(y), len(test))
        sys.exit(0)
    for (user, pred, true) in zip(test['user_id'], y_pred, y):
        predictions_per_user[user]['pred'].append(pred)
        predictions_per_user[user]['y'].append(true)
    # Even attribute values are treated as the protected group, odd as
    # unprotected — assumption inferred from this parity split; TODO confirm.
    attribute = np.array(test[SENSITIVE_ATTR])
    protected = np.argwhere(((attribute % 2) == 0)).reshape((- 1))
    unprotected = np.argwhere(((attribute % 2) == 1)).reshape((- 1))
    print(type(y))
    print(len(y), len(y[protected]), len(y[unprotected]))
    for (attr, pred, true) in zip(test[SENSITIVE_ATTR], y_pred, y):
        predictions_per_sensitive_attr[attr]['pred'].append(pred)
        predictions_per_sensitive_attr[attr]['y'].append(true)
    users_ids = []
    attr_ids = []
    for user in predictions_per_user:
        this_pred = np.array(predictions_per_user[user]['pred'])
        this_true = np.array(predictions_per_user[user]['y'])
        if (len(this_pred) > 1):
            # Ranking metrics need at least two items per user.
            users_ids.append(user)
            metrics_per_user['nll'].append(log_loss(this_true, this_pred, labels=[0, 1]))
            metrics_per_user['ndcg'].append(ndcg_score([this_true], [this_pred]))
            # NOTE(review): the '' and '-' metric keys look like names that
            # were lost (probably 'ndcg@10' variants) — confirm upstream.
            metrics_per_user[''].append(ndcg_score([this_true], [this_pred], k=10))
            # 'ndcg-' ranks the negative class (1 - y) by inverted scores.
            metrics_per_user['ndcg-'].append(ndcg_score([(1 - this_true)], [(1 - this_pred)]))
            ndcg_[model].append(ndcg_score([(1 - this_true)], [(1 - this_pred)]))
            metrics_per_user['-'].append(ndcg_score([(1 - this_true)], [(1 - this_pred)], k=10))
            if (len(np.unique(this_true)) > 1):
                # AUC is undefined when a user has only one class present.
                metrics_per_user['auc'].append(roc_auc_score(this_true, this_pred))
    nb_samples = []
    for attr in predictions_per_sensitive_attr:
        this_pred = np.array(predictions_per_sensitive_attr[attr]['pred'])
        this_true = np.array(predictions_per_sensitive_attr[attr]['y'])
        if (len(this_pred) > 1):
            metrics_per_sensitive_attr['ndcg'].append(ndcg_score([this_true], [this_pred]))
            metrics_per_sensitive_attr[''].append(ndcg_score([this_true], [this_pred], k=10))
            metrics_per_sensitive_attr['ndcg-'].append(ndcg_score([(1 - this_true)], [(1 - this_pred)]))
            metrics_per_sensitive_attr['-'].append(ndcg_score([(1 - this_true)], [(1 - this_pred)], k=10))
            if (len(np.unique(this_true)) > 1):
                attr_ids.append(attr)
                nb_samples.append(len(this_true))
                metrics_per_sensitive_attr['auc'].append(roc_auc_score(this_true, this_pred))
    print('Test length', len(y))
    print(y[:10], test[:10])
    print('overall auc', np.round(roc_auc_score(y, y_pred), 3))
    print('overall nll', np.round(log_loss(y, y_pred), 3))
    print('sliced auc (per user)', avgstd(metrics_per_user['auc']))
    print('sliced auc (per group)', avgstd(metrics_per_sensitive_attr['auc']))
    print('sliced nll', avgstd(metrics_per_user['nll']))
    # Rank subgroups worst-AUC-first: Counter.most_common sorts the
    # (-auc, -nb) values descending, i.e. ascending AUC with sample-count
    # tie-break; the first 5 are printed, all are plotted.
    candidates = Counter()
    val = 0
    for (subgroup, auc, nb) in zip(attr_ids, metrics_per_sensitive_attr['auc'], nb_samples):
        candidates[subgroup] = ((- auc), (- nb))
    x = []
    nb = []
    for (k, (xi, yi)) in candidates.most_common():
        if (val < 5):
            print(k, ((- xi), (- yi)))
        x.append((- xi))
        nb.append((- yi))
        val += 1
    plt.stem(x, nb, use_line_collection=True)
    plt.show()
    print('Lowest AUC = {} on subgroup {}'.format(np.around(np.min(metrics_per_sensitive_attr['auc']), 5), attr_ids[np.argmin(metrics_per_sensitive_attr['auc'])]))
    print('Highest AUC = {} on subgroup {}'.format(np.around(np.max(metrics_per_sensitive_attr['auc']), 5), attr_ids[np.argmax(metrics_per_sensitive_attr['auc'])]))
    # ROC comparison between the two parity-defined groups.
    print('AUC of that group', roc_auc_score(y[protected], y_pred[protected]))
    (fpr_protec, tpr_protec, _) = roc_curve(y[protected], y_pred[protected])
    print('AUC of other group', roc_auc_score(y[unprotected], y_pred[unprotected]))
    (fpr_unprotec, tpr_unprotec, _) = roc_curve(y[unprotected], y_pred[unprotected])
    plt.plot(fpr_protec, tpr_protec, label='Protected group')
    plt.plot(fpr_unprotec, tpr_unprotec, label='Unprotected group')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curves comparison between protected and unprotected groups')
    plt.legend()
    plt.show()
# NOTE(review): the original decorator line was mangled to a bare
# `('connect', namespace='/tms')`, which is a syntax error.  Restored as a
# Flask-SocketIO event handler registration, inferred from the
# `socketio.emit(..., namespace='/tms')` call below — confirm the handler
# object name against the rest of the module.
@socketio.on('connect', namespace='/tms')
def ws_conn():
    """Handle a new websocket connection on the /tms namespace: increment
    the 'user_count' counter in the db and broadcast the new count."""
    c = db.incr('user_count')
    socketio.emit('msg', {'count': c}, namespace='/tms')
class ExperimentNfS(ExperimentOTB):
    """NfS (Need for Speed) tracking experiment.

    Reuses the OTB evaluation pipeline with an NfS dataset instance,
    fps-specific result/report folders, and NfS histogram bin counts.
    """

    def __init__(self, root_dir, fps=240, result_dir='results', report_dir='reports'):
        subdir = 'NfS/%d' % fps
        self.dataset = NfS(root_dir, fps)
        self.result_dir = os.path.join(result_dir, subdir)
        self.report_dir = os.path.join(report_dir, subdir)
        # Bin counts for the success (IoU) and precision (center error) plots.
        self.nbins_iou = 21
        self.nbins_ce = 51
def test_digits_approximate():
    """The approximate-lazy optimizer must reproduce the stored ranking and
    gains on the digits data while staying within the cost budget of 100."""
    selector = FeatureBasedSelection(100, 'sqrt', optimizer='approximate-lazy')
    selector.fit(X_digits, sample_cost=X_digits_costs)
    assert_array_equal(selector.ranking, digits_approx_ranking)
    assert_array_almost_equal(selector.gains, digits_approx_gains, 4)
    assert_less_equal(sum(X_digits_costs[selector.ranking]), 100)
# NOTE(review): this bare call looks like a fairseq '@register_task'
# decorator whose '@register' prefix was mangled away — confirm upstream.
_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(LegacyFairseqTask):
    """Masked language modeling over several languages at once.

    Each language lives in its own subdirectory of the data path; per-language
    datasets are token-blocked, masked, and (for the training split) resampled
    with a temperature controlled by --multilang-sampling-alpha.
    """

    # NOTE(review): takes no self/cls — presumably '@staticmethod' upstream.
    def add_args(parser):
        """Add task-specific command-line arguments to *parser*."""
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
        parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask')
        parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked')
        parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token')
        parser.add_argument('--freq-weighted-replacement', action='store_true', help='sample random replacement words based on word frequencies')
        parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe')
        parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0, help='smoothing alpha for sample rations across multiple datasets')

    def __init__(self, args, dictionary):
        """Store the shared dictionary, seed, and register the <mask> symbol."""
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # Index of the added <mask> token used by MaskTokensDataset.
        self.mask_idx = dictionary.add_symbol('<mask>')

    # NOTE(review): takes 'cls' — presumably '@classmethod' upstream.
    def setup_task(cls, args, **kwargs):
        """Load the shared dictionary from the first data path and build the task."""
        paths = utils.split_paths(args.data)
        assert (len(paths) > 0)
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def _get_whole_word_mask(self):
        """Return a ByteTensor marking dictionary entries that begin a word,
        or None when whole-word masking is disabled.

        NOTE(review): if --mask-whole-words is set but the BPE cannot be
        built (bpe is None), 'mask_whole_words' is never assigned and the
        return raises UnboundLocalError — confirm intended behavior.
        """
        if self.args.mask_whole_words:
            bpe = encoders.build_bpe(self.args)
            if (bpe is not None):

                def is_beginning_of_word(i):
                    # Special symbols and madeupword padding entries count as
                    # word starts; otherwise defer to the BPE.
                    if (i < self.source_dictionary.nspecial):
                        return True
                    tok = self.source_dictionary[i]
                    if tok.startswith('madeupword'):
                        return True
                    try:
                        return bpe.is_beginning_of_word(tok)
                    except ValueError:
                        return True
                mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary)))))
        else:
            mask_whole_words = None
        return mask_whole_words

    def _get_sample_prob(self, dataset_lens):
        """Temperature-smoothed sampling distribution over languages:
        p_i ~ (len_i / sum) ** alpha, renormalized."""
        prob = (dataset_lens / dataset_lens.sum())
        smoothed_prob = (prob ** self.args.multilang_sampling_alpha)
        smoothed_prob = (smoothed_prob / smoothed_prob.sum())
        return smoothed_prob

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load *split* for every language subdirectory, apply masking, and
        (for the train split) resample languages by the smoothed probabilities.
        Registers per-language splits and the concatenated, shuffled dataset.
        """
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # Round-robin over data paths across epochs.
        data_path = paths[((epoch - 1) % len(paths))]
        languages = sorted((name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name))))
        logger.info('Training on {0} languages: {1}'.format(len(languages), languages))
        # NOTE(review): the dict is passed as a %-style arg with no
        # placeholder in the format string, so it is not rendered (same on
        # the two logger.info calls below) — confirm and fix upstream.
        logger.info('Language to id mapping: ', {lang: id for (id, lang) in enumerate(languages)})
        mask_whole_words = self._get_whole_word_mask()
        lang_datasets = []
        for (lang_id, language) in enumerate(languages):
            split_path = os.path.join(data_path, language, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            if (dataset is None):
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
            # Reserve one position for the prepended BOS token below.
            dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode)
            logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
            dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
            (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words)
            lang_dataset = NestedDictionaryDataset({'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), 'lang_id': RawLabelDataset(([lang_id] * src_dataset.sizes.shape[0]))}, sizes=[src_dataset.sizes])
            lang_datasets.append(lang_dataset)
        dataset_lengths = np.array([len(d) for d in lang_datasets], dtype=float)
        logger.info('loaded total {} blocks for all languages'.format(dataset_lengths.sum()))
        if (split == self.args.train_subset):
            # Up/down-sample each language toward the smoothed distribution.
            sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info('Sample probability by language: ', {lang: '{0:.4f}'.format(sample_probs[id]) for (id, lang) in enumerate(languages)})
            size_ratio = ((sample_probs * dataset_lengths.sum()) / dataset_lengths)
            logger.info('Up/Down Sampling ratio by language: ', {lang: '{0:.2f}'.format(size_ratio[id]) for (id, lang) in enumerate(languages)})
            resampled_lang_datasets = [ResamplingDataset(lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=(size_ratio[i] >= 1.0)) for (i, d) in enumerate(lang_datasets)]
            dataset = ConcatDataset(resampled_lang_datasets)
        else:
            # Validation/test: keep natural sizes and also register one
            # split per language so each can be evaluated separately.
            dataset = ConcatDataset(lang_datasets)
            lang_splits = [split]
            for (lang_id, lang_dataset) in enumerate(lang_datasets):
                split_name = ((split + '_') + languages[lang_id])
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset
            # NOTE(review): substring membership test on the comma-separated
            # valid_subset string — confirm this is the intended check.
            if (split in self.args.valid_subset):
                self.args.valid_subset = self.args.valid_subset.replace(split, ','.join(lang_splits))
        with data_utils.numpy_seed((self.args.seed + epoch)):
            shuffle = np.random.permutation(len(dataset))
        self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, dataset.sizes])

    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token sequences for inference: token-block (eos break),
        prepend BOS, pad, and optionally sort by length."""
        src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False)
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset

    # NOTE(review): presumably '@property' upstream (callers elsewhere use
    # 'self.source_dictionary' without parentheses) — confirm.
    def source_dictionary(self):
        return self.dictionary

    # NOTE(review): presumably '@property' upstream — confirm.
    def target_dictionary(self):
        return self.dictionary
def heatmap_generation(image, kernel_size):
    """Smooth *image* with a Gaussian kernel and rescale the result.

    Convolves with a `gaussian_kernel(kernel_size)` using mode='same' (output
    keeps the input shape) and passes the smoothed map through
    ``rescale_values_array``.
    """
    kernel = gaussian_kernel(kernel_size)
    # Renamed local from `map` to avoid shadowing the builtin.
    heatmap = scipy.signal.convolve(image, kernel, mode='same')
    return rescale_values_array(heatmap)
class Normalization(nn.Module):
    """Channel-wise input normalization: (input - mean) / std.

    *mean* and *std* are reshaped to (C, 1, 1) so they broadcast over image
    tensors shaped (C, H, W) or (N, C, H, W).
    """

    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        self.mean = mean.view(-1, 1, 1)
        self.std = std.view(-1, 1, 1)

    def forward(self, input):
        centered = input - self.mean
        return centered / self.std
class NaiveAssassin(NaiveAgent):
    """Rule-based Assassin baseline agent (role id fixed at 7).

    Side convention inferred from usage in this class (confirm against the
    game engine): player_sides[i] == 0 appears to mean evil, == 1 good.
    """

    def __init__(self, id: int, name: str, config: AvalonBasicConfig, side: int=0, role_name: str='Assassin', role: int=7, sides: List[int]=None, **configs):
        # The Assassin role id must be 7; other role values are rejected.
        assert (role == 7)
        super().__init__(id=id, name=name, config=config, side=side, role=role, sides=sides)

    async def vote_on_mission(self, mission_id: int, team: frozenset, **kwargs):
        """Return 1 when the team has fewer side-0 members than the number of
        fails this quest requires, else 0 (presumably 1 = success vote,
        0 = fail vote — confirm against the engine)."""
        num_fails = self.config.num_fails_for_quest[mission_id]
        if (sum([(self.player_sides[i] == 0) for i in team]) < num_fails):
            return 1
        else:
            return 0

    async def vote_on_team(self, team: frozenset, mission_id: int, **kwargs):
        """Return 1 (approve) only when the proposed team contains at least
        enough side-0 members to fail the quest, else 0 (reject)."""
        num_fails = self.config.num_fails_for_quest[mission_id]
        if (sum([(self.player_sides[i] == 0) for i in team]) >= num_fails):
            return 1
        else:
            return 0

    async def propose_team(self, mission_id: int, **kwargs):
        """Propose (num_fails - 1) randomly chosen other side-0 players plus
        self, padded with random side-1 players up to the quest team size."""
        num_fails = self.config.num_fails_for_quest[mission_id]
        evil_team = (random.sample([i for i in range(self.config.num_players) if ((self.player_sides[i] == 0) and (i != self.id))], (num_fails - 1)) + [self.id])
        return frozenset((random.sample([i for i in range(self.config.num_players) if ((i not in evil_team) and (self.player_sides[i] == 1))], (self.config.num_players_for_quest[mission_id] - num_fails)) + evil_team))

    async def assassinate(self, **kwargs):
        """Pick a uniformly random side-1 player as the assassination target."""
        return random.choice([i for i in range(self.config.num_players) if (self.player_sides[i] == 1)])
def reduce_feat_size(feat_size, stride=2):
    """Integer-divide every entry of *feat_size* by *stride*.

    Returns a tuple; a None input passes straight through (unknown size).
    """
    if feat_size is None:
        return None
    return tuple(s // stride for s in feat_size)
def randn_tensor(shape: Union[Tuple, List], generator: Optional[Union[List['torch.Generator'], 'torch.Generator']]=None, device: Optional['torch.device']=None, dtype: Optional['torch.dtype']=None, layout: Optional['torch.layout']=None):
    """Draw a standard-normal tensor of *shape* on *device*.

    When *generator* is a list, one tensor per batch element is drawn (each
    with its own generator) and the results are concatenated on dim 0, so a
    batch can be reproduced sample-by-sample.  A CPU generator paired with a
    non-CPU *device* triggers CPU sampling followed by a move to *device*.

    Raises ValueError when a CUDA generator is paired with a non-CUDA device.
    """
    rand_device = device
    batch_size = shape[0]
    layout = (layout or torch.strided)
    device = (device or torch.device('cpu'))
    if (generator is not None):
        gen_device_type = (generator.device.type if (not isinstance(generator, list)) else generator[0].device.type)
        if ((gen_device_type != device.type) and (gen_device_type == 'cpu')):
            # Sample on CPU to honor the generator, then move to the target.
            rand_device = 'cpu'
            # FIX: `device` is a torch.device here, so the old `device != 'mps'`
            # string comparison was always True and the info message also
            # fired for mps targets; compare the device *type* instead.
            if (device.type != 'mps'):
                logger.info(f"The passed generator was created on 'cpu' even though a tensor on {device} was expected. Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably slightly speed up this function by passing a generator that was created on the {device} device.")
        elif ((gen_device_type != device.type) and (gen_device_type == 'cuda')):
            raise ValueError(f'Cannot generate a {device} tensor from a generator of type {gen_device_type}.')
    # A single-element generator list behaves like a plain generator.
    if (isinstance(generator, list) and (len(generator) == 1)):
        generator = generator[0]
    if isinstance(generator, list):
        # Per-sample draws: one (1, ...) tensor per batch element.
        shape = ((1,) + shape[1:])
        latents = [torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) for i in range(batch_size)]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)
    return latents
class TensorType(ExplicitEnum):
    """Canonical short names for tensor frameworks; the values are the
    strings accepted by `return_tensors`-style arguments."""
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
def main(args):
    """Benchmark per-cluster transfer and forward-pass time of GIN/GraphSAGE.

    Loads the graph named by ``args.dataset`` (DGL builtin, OGB node-property
    dataset, or a local QGTC .npz file), partitions it with ``ClusterIter``,
    then times host-to-device transfer vs. model forward per cluster over
    ``args.n_epochs`` epochs.  Only forward passes are run (no loss or
    backward) — this is a timing harness, not training.
    """
    # Fix all RNG seeds and disable cudnn autotuning for stable timings.
    torch.manual_seed(3)
    np.random.seed(2)
    random.seed(2)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if (args.dataset in ['ppi', 'reddit']):
        # DGL builtin datasets carry masks/labels as node data.
        data = load_data(args)
        g = data.g
        train_mask = g.ndata['train_mask']
        val_mask = g.ndata['val_mask']
        test_mask = g.ndata['test_mask']
        labels = g.ndata['label']
    elif (args.dataset in ['ogbn-arxiv', 'ogbn-products']):
        # OGB datasets expose index splits instead of boolean masks.
        data = DglNodePropPredDataset(name=args.dataset)
        split_idx = data.get_idx_split()
        (g, labels) = data[0]
        train_mask = split_idx['train']
        val_mask = split_idx['valid']
        test_mask = split_idx['test']
    else:
        # Fallback: local QGTC-format graph with synthetic dims/classes.
        path = osp.join('./qgtc_graphs', (args.dataset + '.npz'))
        data = QGTC_dataset(path, args.dim, args.n_classes)
        g = data.g
        train_mask = data.train_mask
        val_mask = data.val_mask
        test_mask = data.test_mask
    train_nid = np.nonzero(train_mask.data.numpy())[0].astype(np.int64)
    in_feats = g.ndata['feat'].shape[1]
    n_classes = data.num_classes
    g = g.long()
    # Partition the graph into args.psize clusters, batched for iteration.
    cluster_iterator = ClusterIter(args.dataset, g, args.psize, args.batch_size, train_nid, use_pp=False, regular=args.regular)
    torch.cuda.set_device(args.gpu)
    val_mask = val_mask.cuda()
    test_mask = test_mask.cuda()
    g = g.int().to(args.gpu)
    feat_size = g.ndata['feat'].shape[1]
    if args.run_GIN:
        model = GIN(in_feats, args.n_hidden, n_classes)
    else:
        model = GraphSAGE(in_feats, args.n_hidden, n_classes, args.n_layers)
    model.cuda()
    train_nid = torch.from_numpy(train_nid).cuda()
    start_time = time.time()
    transfering = 0
    running_time = 0
    cnt = 0
    for epoch in tqdm(range(args.n_epochs)):
        for (j, cluster) in enumerate(cluster_iterator):
            if args.regular:
                # Time the host-to-device copy (synchronize around it so the
                # wall-clock delta reflects only the transfer).
                torch.cuda.synchronize()
                t = time.perf_counter()
                cluster = cluster.to(torch.cuda.current_device())
                torch.cuda.synchronize()
                transfering += (time.perf_counter() - t)
            # Time the forward pass only; result is discarded.
            torch.cuda.synchronize()
            t = time.perf_counter()
            model(cluster)
            torch.cuda.synchronize()
            running_time += (time.perf_counter() - t)
            cnt += 1
            # Move the cluster back so GPU memory is not accumulated.
            cluster = cluster.cpu()
    # Drain outstanding kernels before reading the wall clock.
    torch.cuda.synchronize()
    end_time = time.time()
    print('Trans (ms): {:.3f}, Compute (ms): {:.3f}'.format(((transfering / cnt) * 1000.0), ((running_time / cnt) * 1000.0)))
    print('Avg. Epoch: {:.3f} ms'.format((((end_time - start_time) * 1000) / cnt)))
class Finetuner(object):
    """Fine-tunes a student model and periodically evaluates clean accuracy
    plus adversarial robustness (PGD attack against a pretrained surrogate).

    Results are appended to train.tsv / test.tsv / adv.tsv under
    args.output_dir and the latest weights checkpointed to ckpt.pth.
    """

    def __init__(self, args, model, teacher, train_loader, test_loader):
        self.args = args
        self.model = model
        # Teacher is only put in eval mode here; its use in the loss (if any)
        # lives in subclasses/compute_loss overrides — not visible here.
        self.teacher = teacher
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.init_models()

    def init_models(self):
        """Build the adversarial-evaluation closure: a Linf PGD attack on a
        pretrained feature-extractor surrogate over a dedicated loader."""
        args = self.args
        # Resolve 'fe{network}' constructor by name. NOTE(review): eval() on
        # a config string — safe only if args.network is trusted input.
        eval_pretrained_model = eval('fe{}'.format(args.network))(pretrained=True).eval().cuda()
        adversary = LinfPGDAttack(eval_pretrained_model, loss_fn=myloss, eps=args.B, nb_iter=args.pgd_iter, eps_iter=0.01, rand_init=True, clip_min=(- 2.2), clip_max=2.2, targeted=False)
        adveval_test_loader = torch.utils.data.DataLoader(self.test_loader.dataset, batch_size=8, shuffle=False, num_workers=8, pin_memory=False)
        self.adv_eval_fn = partial(advtest, loader=adveval_test_loader, adversary=adversary, args=args)

    def adv_eval(self):
        """Run the adversarial evaluation once and write a one-line summary
        to posttrain_eval.txt in the output directory."""
        model = self.model
        args = self.args
        (clean_top1, adv_top1, adv_sr) = self.adv_eval_fn(model)
        result_sum = f'Clean Top-1: {clean_top1:.2f} | Adv Top-1: {adv_top1:.2f} | Attack Success Rate: {adv_sr:.2f}'
        with open(osp.join(args.output_dir, 'posttrain_eval.txt'), 'w') as f:
            f.write(result_sum)

    def compute_loss(self, batch, label, ce):
        """Forward *batch* and return (ce loss, top-1 accuracy in percent)."""
        model = self.model
        out = model(batch)
        (_, pred) = out.max(dim=1)
        top1 = ((float(pred.eq(label).sum().item()) / label.shape[0]) * 100.0)
        loss = ce(out, label)
        return (loss, top1)

    def test(self, loader):
        """Evaluate the current model on *loader*.

        Returns (top-1 accuracy in percent, mean label-smoothed CE per batch).
        """
        model = self.model
        with torch.no_grad():
            model.eval()
            ce = CrossEntropyLabelSmooth(loader.dataset.num_classes)
            total_ce = 0
            total = 0
            top1 = 0
            for (i, (batch, label)) in enumerate(loader):
                (batch, label) = (batch.to('cuda'), label.to('cuda'))
                total += batch.size(0)
                out = model(batch)
                (_, pred) = out.max(dim=1)
                top1 += int(pred.eq(label).sum().item())
                total_ce += ce(out, label).item()
        return (((float(top1) / total) * 100), (total_ce / (i + 1)))

    def train(self):
        """Iteration-based fine-tuning loop.

        Runs args.iterations SGD steps (fc head at 10x the base lr), logging
        moving averages, periodically evaluating clean and adversarial
        accuracy, checkpointing, and optionally invoking iterative pruning.
        Returns the trained model.
        """
        model = self.model
        train_loader = self.train_loader
        iterations = self.args.iterations
        lr = self.args.lr
        output_dir = self.args.output_dir
        teacher = self.teacher
        args = self.args
        model = model.to('cuda')
        # Give the classifier head a 10x learning rate vs. the backbone.
        fc_module = model.fc
        fc_params = list(map(id, fc_module.parameters()))
        base_params = filter((lambda p: (id(p) not in fc_params)), self.model.parameters())
        optimizer = torch.optim.SGD([{'params': base_params}, {'params': fc_module.parameters(), 'lr': (lr * 10)}], lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)
        teacher.eval()
        ce = CrossEntropyLabelSmooth(train_loader.dataset.num_classes)
        batch_time = MovingAverageMeter('Time', ':6.3f')
        data_time = MovingAverageMeter('Data', ':6.3f')
        ce_loss_meter = MovingAverageMeter('CE Loss', ':6.3f')
        # NOTE(review): the meter name is an empty string — looks like a lost
        # label (e.g. 'Acc@1'); confirm upstream.
        top1_meter = MovingAverageMeter('', ':6.2f')
        train_path = osp.join(output_dir, 'train.tsv')
        with open(train_path, 'w') as wf:
            columns = ['time', 'iter', 'Acc', 'celoss']
            wf.write(('\t'.join(columns) + '\n'))
        test_path = osp.join(output_dir, 'test.tsv')
        with open(test_path, 'w') as wf:
            columns = ['time', 'iter', 'Acc', 'celoss']
            wf.write(('\t'.join(columns) + '\n'))
        adv_path = osp.join(output_dir, 'adv.tsv')
        with open(adv_path, 'w') as wf:
            columns = ['time', 'iter', 'Acc', 'AdvAcc', 'ASR']
            wf.write(('\t'.join(columns) + '\n'))
        dataloader_iterator = iter(train_loader)
        for i in range(iterations):
            model.train()
            optimizer.zero_grad()
            end = time.time()
            # Restart the loader when an epoch is exhausted.
            # NOTE(review): bare except also swallows non-StopIteration
            # errors from the loader — consider narrowing to StopIteration.
            try:
                (batch, label) = next(dataloader_iterator)
            except:
                dataloader_iterator = iter(train_loader)
                (batch, label) = next(dataloader_iterator)
            (batch, label) = (batch.to('cuda'), label.to('cuda'))
            data_time.update((time.time() - end))
            (loss, top1) = self.compute_loss(batch, label, ce)
            top1_meter.update(top1)
            # NOTE(review): passes the loss *tensor* (not loss.item()), so the
            # meter retains a reference into the autograd graph — confirm.
            ce_loss_meter.update(loss)
            loss.backward()
            optimizer.step()
            batch_time.update((time.time() - end))
            if (((i % args.print_freq) == 0) or (i == (iterations - 1))):
                progress = ProgressMeter(iterations, [batch_time, data_time, top1_meter, ce_loss_meter], prefix='PID {} '.format(self.args.pid), output_dir=output_dir)
                progress.display(i)
            if (((i % args.test_interval) == 0) or (i == (iterations - 1))):
                # NOTE(review): 'test_*' is computed from train_loader and
                # 'train_*' from test_loader — the names (and hence the tsv
                # rows and log lines) appear swapped; confirm intent.
                (test_top1, test_ce_loss) = self.test(self.train_loader)
                (train_top1, train_ce_loss) = self.test(self.test_loader)
                print('Eval Train | Iteration {}/{} | Top-1: {:.2f} | CE Loss: {:.3f} | PID {}'.format((i + 1), iterations, train_top1, train_ce_loss, self.args.pid))
                print('Eval Test | Iteration {}/{} | Top-1: {:.2f} | CE Loss: {:.3f} | PID {}'.format((i + 1), iterations, test_top1, test_ce_loss, self.args.pid))
                localtime = time.asctime(time.localtime(time.time()))[4:(- 6)]
                with open(train_path, 'a') as af:
                    train_cols = [localtime, i, round(train_top1, 2), round(train_ce_loss, 2)]
                    af.write(('\t'.join([str(c) for c in train_cols]) + '\n'))
                with open(test_path, 'a') as af:
                    test_cols = [localtime, i, round(test_top1, 2), round(test_ce_loss, 2)]
                    af.write(('\t'.join([str(c) for c in test_cols]) + '\n'))
                # Overwrite the single rolling checkpoint each evaluation.
                ckpt_path = osp.join(args.output_dir, 'ckpt.pth')
                torch.save({'state_dict': model.state_dict()}, ckpt_path)
            # Optional pruning hook provided by subclasses.
            if (hasattr(self, 'iterative_prune') and ((i % args.prune_interval) == 0)):
                self.iterative_prune(i)
            if ((args.adv_test_interval > 0) and (((i % args.adv_test_interval) == 0) or (i == (iterations - 1)))):
                (clean_top1, adv_top1, adv_sr) = self.adv_eval_fn(model)
                localtime = time.asctime(time.localtime(time.time()))[4:(- 6)]
                with open(adv_path, 'a') as af:
                    test_cols = [localtime, i, round(clean_top1, 2), round(adv_top1, 2), round(adv_sr, 2)]
                    af.write(('\t'.join([str(c) for c in test_cols]) + '\n'))
        return model
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
    """TransformerSentenceEncoder whose encoder layers use sparse (strided)
    self-attention.

    The parent constructor builds the standard encoder; ``self.layers`` is
    then replaced with SparseTransformerSentenceEncoderLayer instances
    (parameterized by *is_bidirectional*, *stride*, *expressivity*), and the
    freezing requested by *n_trans_layers_to_freeze* is re-applied because
    the freshly created layers are trainable by default.
    """

    def __init__(self, padding_idx: int, vocab_size: int, num_encoder_layers: int=6, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, max_seq_len: int=256, num_segments: int=2, use_position_embeddings: bool=True, offset_positions_by_padding: bool=True, encoder_normalize_before: bool=False, apply_bert_init: bool=False, activation_fn: str='relu', learned_pos_embedding: bool=True, embed_scale: float=None, freeze_embeddings: bool=False, n_trans_layers_to_freeze: int=0, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
        super().__init__(padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export)
        # Swap in sparse-attention layers in place of the dense ones.
        self.layers = nn.ModuleList([SparseTransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity) for _ in range(num_encoder_layers)])

        # Freeze the first n_trans_layers_to_freeze replacement layers.
        def freeze_module_params(m):
            if (m is not None):
                for p in m.parameters():
                    p.requires_grad = False
        for layer in range(n_trans_layers_to_freeze):
            freeze_module_params(self.layers[layer])
class XconfigFastLstmLayer(XconfigLayerBase):
    """Xconfig layer for Kaldi's fast LSTM ('fast-lstm-layer', optionally
    with a trailing batchnorm as 'fast-lstm-batchnorm-layer').

    Generates nnet3 config lines for a single-component LSTM nonlinearity
    with backprop truncation on the recurrent (c, m) state.
    """

    def __init__(self, first_token, key_to_value, prev_names=None):
        assert (first_token in ['fast-lstm-layer', 'fast-lstm-batchnorm-layer'])
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        """Defaults for all supported xconfig keys; cell-dim=-1 means
        'inherit the input dimension' (resolved in set_derived_configs)."""
        self.config = {'input': '[-1]', 'cell-dim': (- 1), 'clipping-threshold': 30.0, 'zeroing-interval': 20, 'zeroing-threshold': 15.0, 'delay': (- 1), 'lstm-nonlinearity-options': ' max-change=0.75', 'ng-affine-options': ' max-change=1.5', 'l2-regularize': 0.0, 'decay-time': (- 1.0)}
        # Tracks whether any consumer asked for the auxiliary 'c' output.
        self.c_needed = False

    def set_derived_configs(self):
        """Fall back to the input dimension when cell-dim was not given."""
        if (self.config['cell-dim'] <= 0):
            self.config['cell-dim'] = self.descriptors['input']['dim']

    def check_configs(self):
        """Validate cell-dim and delay (delay=0 would remove the recurrence)."""
        key = 'cell-dim'
        if (self.config['cell-dim'] <= 0):
            raise RuntimeError('cell-dim has invalid value {0}.'.format(self.config[key]))
        if (self.config['delay'] == 0):
            raise RuntimeError('delay cannot be zero')

    def auxiliary_outputs(self):
        """The cell state 'c' is the only auxiliary output."""
        return ['c']

    def output_name(self, auxiliary_output=None):
        """Node name of the layer output: the (optionally batchnormed)
        'm' output by default, or the cell state when 'c' is requested."""
        node_name = ('m_batchnorm' if (self.layer_type == 'fast-lstm-batchnorm-layer') else 'm')
        if (auxiliary_output is not None):
            if (auxiliary_output == 'c'):
                node_name = 'c'
                self.c_needed = True
            else:
                raise RuntimeError('Unknown auxiliary output name {0}'.format(auxiliary_output))
        return '{0}.{1}'.format(self.name, node_name)

    def output_dim(self, auxiliary_output=None):
        """Both 'm' and the auxiliary 'c' outputs have dimension cell-dim."""
        if (auxiliary_output is not None):
            if (auxiliary_output == 'c'):
                self.c_needed = True
                return self.config['cell-dim']
            else:
                raise RuntimeError('Unknown auxiliary output name {0}'.format(auxiliary_output))
        return self.config['cell-dim']

    def get_full_config(self):
        """Emit every generated line for both 'ref' and 'final' configs."""
        ans = []
        config_lines = self._generate_lstm_config()
        for line in config_lines:
            for config_name in ['ref', 'final']:
                ans.append((config_name, line))
        return ans

    def _generate_lstm_config(self):
        """Build the nnet3 config lines for this LSTM layer.

        The recurrence is scaled by (1 - |delay|/decay-time) when decay-time
        is positive, which bounds the effective memory of the cell.
        """
        name = self.name
        input_dim = self.descriptors['input']['dim']
        input_descriptor = self.descriptors['input']['final-string']
        cell_dim = self.config['cell-dim']
        delay = self.config['delay']
        affine_str = self.config['ng-affine-options']
        l2_regularize = self.config['l2-regularize']
        l2_regularize_option = ('l2-regularize={0} '.format(l2_regularize) if (l2_regularize != 0.0) else '')
        decay_time = self.config['decay-time']
        # decay-time < 0 disables decay (scale 1.0).
        recurrence_scale = (1.0 if (decay_time < 0) else (1.0 - (abs(delay) / decay_time)))
        assert (recurrence_scale > 0)
        bptrunc_str = 'clipping-threshold={0} zeroing-threshold={1} zeroing-interval={2} recurrence-interval={3} scale={4}'.format(self.config['clipping-threshold'], self.config['zeroing-threshold'], self.config['zeroing-interval'], abs(delay), recurrence_scale)
        lstm_str = self.config['lstm-nonlinearity-options']
        configs = []
        # NOTE(review): 'LTSM' in the emitted comments is a typo preserved
        # from the original generated output (runtime strings left unchanged).
        configs.append("### Begin LTSM layer '{0}'".format(name))
        configs.append('# Gate control: contains W_i, W_f, W_c and W_o matrices as blocks.')
        configs.append('component name={0}.W_all type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}'.format(name, (input_dim + cell_dim), (cell_dim * 4), affine_str, l2_regularize_option))
        configs.append('# The core LSTM nonlinearity, implemented as a single component.')
        configs.append('# Input = (i_part, f_part, c_part, o_part, c_{t-1}), output = (c_t, m_t)')
        configs.append('# See cu-math.h:ComputeLstmNonlinearity() for details.')
        configs.append('component name={0}.lstm_nonlin type=LstmNonlinearityComponent cell-dim={1} {2} {3}'.format(name, cell_dim, lstm_str, l2_regularize_option))
        configs.append('# Component for backprop truncation, to avoid gradient blowup in long training examples.')
        configs.append('component name={0}.cm_trunc type=BackpropTruncationComponent dim={1} {2}'.format(name, (2 * cell_dim), bptrunc_str))
        configs.append('### Nodes for the components above.')
        configs.append('component-node name={0}.W_all component={0}.W_all input=Append({1}, IfDefined(Offset({0}.m_trunc, {2})))'.format(name, input_descriptor, delay))
        configs.append('component-node name={0}.lstm_nonlin component={0}.lstm_nonlin input=Append({0}.W_all, IfDefined(Offset({0}.c_trunc, {1})))'.format(name, delay))
        configs.append('dim-range-node name={0}.m input-node={0}.lstm_nonlin dim-offset={1} dim={1}'.format(name, cell_dim))
        configs.append('component-node name={0}.cm_trunc component={0}.cm_trunc input={0}.lstm_nonlin'.format(name))
        configs.append('dim-range-node name={0}.c_trunc input-node={0}.cm_trunc dim-offset=0 dim={1}'.format(name, cell_dim))
        configs.append('dim-range-node name={0}.m_trunc input-node={0}.cm_trunc dim-offset={1} dim={1}'.format(name, cell_dim))
        if (self.layer_type == 'fast-lstm-batchnorm-layer'):
            # Optional batchnorm applied to the m output.
            configs.append('component name={0}.m_batchnorm type=BatchNormComponent dim={1} '.format(name, cell_dim))
            configs.append('component-node name={0}.m_batchnorm component={0}.m_batchnorm input={0}.m'.format(name))
        configs.append("### End LTSM layer '{0}'".format(name))
        return configs
def iresgroup50(pretrained=False, **kwargs):
    """Construct an iResGroup-50 model ([3, 4, 6, 3] ResGroup blocks).

    When *pretrained* is True, weights are downloaded into the default cache
    directory (created if needed) and loaded into the model.
    """
    net = iResGroup(ResGroupBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    os.makedirs(default_cache_path, exist_ok=True)
    weights_path = download_from_url(model_urls['iresgroup50'], root=default_cache_path)
    net.load_state_dict(torch.load(weights_path))
    return net
def run_test(cfg, model, distributed):
    """Run inference on every dataset listed in cfg.DATASETS.TEST.

    Unwraps the DDP module when *distributed*, selects IoU types from the
    model config (bbox, plus segm/keypoints when enabled), writes results
    under <OUTPUT_DIR>/inference/<dataset>, and synchronizes at the end.
    """
    if distributed:
        model = model.module
    torch.cuda.empty_cache()
    # Evaluation types depend on which heads the model has enabled.
    iou_types = ('bbox',)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ('segm',)
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ('keypoints',)
    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(cfg.DATASETS.TEST)
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            mkdir(folder)
            output_folders[idx] = folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
    synchronize()
def find_lora_modules(model: peft.LoraModel) -> Dict[str, peft.tuners.lora.LoraLayer]:
    """Map module path -> LoraLayer for every LoRA layer inside *model*.

    Candidate keys are module names that do not themselves contain 'lora'
    (the wrapped parents, not the injected sub-modules); keys that no longer
    resolve are skipped.
    """
    found: Dict[str, peft.tuners.lora.LoraLayer] = {}
    candidate_keys = [k for k, _ in model.model.named_modules() if 'lora' not in k]
    for key in candidate_keys:
        try:
            _parent, target, _target_name = peft.utils._get_submodules(model.model, key)
        except AttributeError:
            # Key does not resolve to a submodule chain anymore; skip it.
            continue
        if isinstance(target, peft.tuners.lora.LoraLayer):
            found[key] = target
    return found
class VGG19(torch.nn.Module):
    """Frozen VGG-19 feature extractor that returns every ReLU activation.

    Torchvision's pretrained VGG-19 `features` stack is split into named
    stages (relu1_1 ... relu5_4); `forward` runs them in order and returns a
    dict mapping each stage name to its activation.  All parameters are
    frozen (feature extraction only).
    """

    # (attribute name, start, end) index ranges into models.vgg19().features;
    # replaces the original 16 copy-pasted slice-building loops.
    _SLICES = (
        ('relu1_1', 0, 2), ('relu1_2', 2, 4),
        ('relu2_1', 4, 7), ('relu2_2', 7, 9),
        ('relu3_1', 9, 12), ('relu3_2', 12, 14),
        ('relu3_3', 14, 16), ('relu3_4', 16, 18),
        ('relu4_1', 18, 21), ('relu4_2', 21, 23),
        ('relu4_3', 23, 25), ('relu4_4', 25, 27),
        ('relu5_1', 27, 30), ('relu5_2', 30, 32),
        ('relu5_3', 32, 34), ('relu5_4', 34, 36),
    )

    def __init__(self):
        super(VGG19, self).__init__()
        features = models.vgg19(pretrained=True).features
        # Build each named stage from its slice of the feature stack, keeping
        # the original sub-module names (the stringified global index).
        for stage_name, start, end in self._SLICES:
            stage = torch.nn.Sequential()
            for x in range(start, end):
                stage.add_module(str(x), features[x])
            setattr(self, stage_name, stage)
        # Feature extractor only: freeze every parameter.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Run all stages sequentially; return {stage_name: activation}."""
        out = {}
        feat = x
        for stage_name, _, _ in self._SLICES:
            feat = getattr(self, stage_name)(feat)
            out[stage_name] = feat
        return out
def can_convert_to_int(string):
    """Return True when *string* parses as a base-10 integer, else False."""
    try:
        int(string)
    except ValueError:
        return False
    return True
def require_wandb(test_case):
    """Test decorator: skip *test_case* unless wandb is installed."""
    if is_wandb_available():
        return test_case
    return unittest.skip('test requires wandb')(test_case)
def pm_uniform_withCP(local_sub_patch_radius):
    """Build a Noise2Void pixel-manipulator that replaces each masked pixel
    with a uniformly sampled pixel from its local sub-patch (the center
    pixel itself may be drawn, hence "withCP")."""
    def random_neighbor_withCP_uniform(patch, coords, dims, structN2Vmask=None):
        sampled = []
        for coord in zip(*coords):
            sub_patch, _, _ = get_subpatch(patch, coord, local_sub_patch_radius)
            # One uniform index per spatial dimension of the sub-patch.
            rand_idx = tuple(np.random.randint(0, size) for size in sub_patch.shape[0:dims])
            sampled.append(sub_patch[rand_idx])
        return sampled
    return random_neighbor_withCP_uniform
def _get_sparsity(tsr):
total = tsr.numel()
nnz = tsr.nonzero().size(0)
return (nnz / total) |
@given(sample=sampled_from([((4, 5, 10), (1, 5, 10), (4, 5, 10)), ((4, 1, 10), (1, 5, 10), (4, 5, 10)), ((4, 5, 10), (4, 2, 10), RuntimeError), ((4, 5, 10), (10,), (4, 5, 10)), ((4, 5, 10), (5,), RuntimeError)]))
def test_logsumexp2_manual_broadcasting(sample):
    """Check logsumexp2 broadcasting: each sample is (shape_a, shape_b,
    expected_result_shape) or (shape_a, shape_b, RuntimeError) when the
    shapes must fail to broadcast.

    Fix: the hypothesis ``@given(...)`` decorator line was garbled into a
    bare parenthesized expression (a syntax error); restored it.
    """
    (t1_shape, t2_shape, res_shape) = sample
    if (res_shape == RuntimeError):
        with pytest.raises(res_shape):
            res = logsumexp2(torch.rand(*t1_shape), torch.rand(*t2_shape))
    else:
        res = logsumexp2(torch.rand(*t1_shape), torch.rand(*t2_shape))
        assert (res.shape == res_shape)
def _split_list(a: list, n: int):
(k, m) = divmod(len(a), n)
return [a[((i * k) + min(i, m)):(((i + 1) * k) + min((i + 1), m))] for i in range(n)] |
class Statistics():
    """Running retrieval-metric tracker (R@1/5/10, MRR, mean rank, NDCG)
    with periodic email reporting and best-checkpoint saving."""

    def __init__(self, args):
        # Best values seen so far across epochs.
        self.best_mrr = 0
        self.best_ndcg = 0
        self.best_metrics = None
        self.best_epoch = 0
        # Running averages; defaultdict(int) so unseen keys start at 0.
        self.metrics = defaultdict(int)
        self.metrics['num_samples'] = 0
        self.mail_message = ''
        self.step = 0
        # Send mail every `mod_flag` calls to report().
        self.mod_flag = 3
        self.num_ndcg = 0
        self.mydir = dir_path(args)
        self.mail_server = args.mail_server
        self.mail_to = args.mail_to
        self.mail_from = args.mail_from

    def reset(self):
        """Clear the running metrics (best_* values are kept)."""
        self.metrics = defaultdict(int)
        self.metrics['num_samples'] = 0

    def update_ndcg(self, ndcg_score):
        """Overwrite the NDCG value (not averaged like the other metrics)."""
        self.metrics['ndcg'] = ndcg_score

    def update_metrics(self, groundtruth_rank, num_batch_samples, ndcg=0.0):
        """Fold a batch of ground-truth ranks into the running averages.

        Each metric is re-weighted by the old sample count so the stored
        value stays a mean over all samples seen since the last reset().
        """
        new_total = (num_batch_samples + self.metrics['num_samples'])
        self.metrics['r1'] = ((float((groundtruth_rank == 1).sum()) + (self.metrics['r1'] * self.metrics['num_samples'])) / new_total)
        self.metrics['r5'] = ((float((groundtruth_rank <= 5).sum()) + (self.metrics['r5'] * self.metrics['num_samples'])) / new_total)
        self.metrics['r10'] = ((float((groundtruth_rank <= 10).sum()) + (self.metrics['r10'] * self.metrics['num_samples'])) / new_total)
        self.metrics['mrr'] = ((groundtruth_rank.float().reciprocal().sum() + (self.metrics['mrr'] * self.metrics['num_samples'])) / new_total)
        self.metrics['mrank'] = ((float(groundtruth_rank.sum()) + (self.metrics['mrank'] * self.metrics['num_samples'])) / new_total)
        self.metrics['num_samples'] = new_total

    def report(self, test_loss, correct, total_samples, output_file=None, epoch=(- 1)):
        """Print (and optionally write) the current metrics; every
        `mod_flag` calls, also email the accumulated report."""
        if (output_file is not None):
            output_file.write('Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(test_loss, correct, total_samples, ((100.0 * correct) / total_samples)))
            output_file.write('R1: {:.4f}, R5: {:.4f}, R10: {:.4f}, MRR: {:.4f}, MRank: {:.4f}, NDCG: {:.4f}\n'.format(self.metrics['r1'], self.metrics['r5'], self.metrics['r10'], self.metrics['mrr'], self.metrics['mrank'], self.metrics['ndcg']))
        print('Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(test_loss, correct, total_samples, ((100.0 * correct) / total_samples)))
        print('R1: {:.4f}, R5: {:.4f}, R10: {:.4f}, MRR: {:.4f}, MRank: {:.4f}, NDCG: {:.4f}\n'.format(self.metrics['r1'], self.metrics['r5'], self.metrics['r10'], self.metrics['mrr'], self.metrics['mrank'], self.metrics['ndcg']))
        self.mail_message += 'R1: {:.4f}, R5: {:.4f}, R10: {:.4f}, MRR: {:.4f}, MRank: {:.4f}, NDCG: {:.4f}\n'.format(self.metrics['r1'], self.metrics['r5'], self.metrics['r10'], self.metrics['mrr'], self.metrics['mrank'], self.metrics['ndcg'])
        if ((self.step % self.mod_flag) == 0):
            self.send_mail(epoch)
        self.step += 1

    def send_mail(self, epoch):
        """Email the accumulated report; no-op when no mail server is set."""
        if (self.mail_server == ''):
            return
        msg = EmailMessage()
        msg['Subject'] = self.mydir
        family = self.mail_to.split(',')
        msg['From'] = self.mail_from
        msg['To'] = ', '.join(family)
        msg.preamble = 'VisDial'
        msg.set_content(('This is automatic mail, step %d (part of epoch %d), experiement details:\n %s \n %s' % (self.step, epoch, self.mydir, self.mail_message)))
        with smtplib.SMTP(self.mail_server) as s:
            s.send_message(msg)

    def save_best_model(self, model, optimizer, args, epoch, mydir, output_file, scores):
        """Checkpoint the model when NDCG and/or MRR improve.

        Returns (best_mrr, best_ndcg) flags; skipped entirely in fast mode.
        """
        (best_mrr, best_ndcg) = (False, False)
        if args.fast:
            return (best_mrr, best_ndcg)
        if (self.metrics['ndcg'] > self.best_ndcg):
            self.best_ndcg = self.metrics['ndcg']
            self.best_epoch = epoch
            best_ndcg = True
            torch.save({'model': model.state_dict(), 'optimizer': optimizer, 'metrics': self.metrics, 'args': args, 'epoch': epoch}, os.path.join(mydir, ('best_model_ndcg' + '.pth.tar')))
            temp_save_path = os.path.join(mydir, 'ndcg_output_scores')
            print(('Saving NDCG output score %s' % temp_save_path))
            np.save(temp_save_path, scores)
        if (self.metrics['mrr'] > self.best_mrr):
            best_mrr = True
            self.best_mrr = self.metrics['mrr']
            self.best_epoch = epoch
            torch.save({'model': model.state_dict(), 'optimizer': optimizer, 'metrics': self.metrics, 'args': args, 'epoch': epoch}, os.path.join(mydir, ('best_model_mrr' + '.pth.tar')))
            temp_save_path = os.path.join(mydir, 'mrr_output_scores')
            print(('Saving MRR output score %s' % temp_save_path))
            np.save(temp_save_path, scores)
        print('Best epoch till now: {}/{}, with MRR {}'.format(self.best_epoch, epoch, self.best_mrr))
        output_file.write('Best epoch till now: {}/{}, with MRR {}\n'.format(self.best_epoch, epoch, self.best_mrr))
        return (best_mrr, best_ndcg)
def xavier_uniform_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
    """Initialize *module* with Xavier-uniform weights (gain=init_gain),
    delegating the per-parameter-type logic to generic_param_init_fn_."""
    # Extra config-driven kwargs are accepted but ignored.
    del kwargs
    init_fn_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
    if verbose > 1:
        warnings.warn(f'Using torch.nn.init.xavier_uniform_ init fn with parameters: gain={init_gain}')
    generic_param_init_fn_(
        module=module,
        init_fn_=init_fn_,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
class EfficientDetResizeCrop(Augmentation):
    """EfficientDet-style preprocessing: jitter the target size by a random
    scale, resize the image to fit while keeping aspect ratio, then pick a
    random crop offset inside any overflow."""

    def __init__(self, size, scale, interp=Image.BILINEAR):
        super().__init__()
        self.target_size = (size, size)
        self.scale = scale
        self.interp = interp

    def get_transform(self, img):
        """Sample a transform for *img* (array of shape (H, W, ...))."""
        target_h, target_w = self.target_size
        # Jitter the nominal target size.
        scale_factor = np.random.uniform(*self.scale)
        height, width = img.shape[0], img.shape[1]
        # Single scale that fits the jittered target on both axes.
        img_scale = min((scale_factor * target_h) / height,
                        (scale_factor * target_w) / width)
        scaled_h = int(height * img_scale)
        scaled_w = int(width * img_scale)
        # Random crop offsets; zero when the scaled image already fits.
        offset_y = int(max(0.0, float(scaled_h - target_h)) * np.random.uniform(0, 1))
        offset_x = int(max(0.0, float(scaled_w - target_w)) * np.random.uniform(0, 1))
        return EfficientDetResizeCropTransform(scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp)
def crossdomain_mixhm(m):
    """Switch *m* to cross-domain mixing, but only for exact MixHistogram
    instances (subclasses are deliberately left untouched)."""
    if type(m) != MixHistogram:
        return
    m.update_mix_method('crossdomain')
class GPStructRunner():
    """Trains and evaluates a TT-GP structured-prediction model (TTGPstruct)
    using a TF1 queue-based input pipeline.

    Fixes:
    - `_init_inputs` and `_get_data` were plain functions called through
      `self.` (e.g. `self._get_data(self.data_dir)`), which passed `self`
      as the first positional argument and raised TypeError at runtime;
      they are now `@staticmethod`s.
    - The test-set sequence-length tensor was named 'seq_lens_tr'
      (copy-paste), shadowing the training tensor's name; renamed to
      'seq_lens_te'.
    """

    def __init__(self, data_dir, n_inputs, mu_ranks, covs, bin_cov, lr=0.01, n_epoch=15, decay=None, batch_size=None, preprocess_op=None, te_preprocess_op=None, log_dir=None, save_dir=None, model_dir=None, load_model=False, print_freq=None, num_threads=1):
        self.data_dir = data_dir
        self.n_inputs = n_inputs
        self.mu_ranks = mu_ranks
        self.covs = covs
        self.bin_cov = bin_cov
        self.lr = lr
        self.n_epoch = n_epoch
        # decay = (epochs_per_step, decay_rate) for staircase lr decay.
        self.decay = decay
        self.batch_size = batch_size
        self.preprocess_op = preprocess_op
        self.te_preprocess_op = te_preprocess_op
        self.log_dir = log_dir
        self.save_dir = save_dir
        self.model_dir = model_dir
        self.load_model = load_model
        self.print_freq = print_freq
        self.frequent_print = (not (print_freq is None))
        self.num_threads = num_threads

    @staticmethod
    def _init_inputs(d, n_inputs):
        """Build the d-dimensional interpolation grid on [-1, 1]."""
        inputs = grid.InputsGrid(d, npoints=n_inputs, left=(- 1.0))
        return inputs

    @staticmethod
    def _get_data(data_dir):
        """Load the structured dataset and wrap arrays as TF tensors.

        y_te is returned as a plain numpy array (used for eval only).
        """
        (x_tr, y_tr, seq_lens_tr, x_te, y_te, seq_lens_te) = prepare_struct_data(data_dir)
        x_tr = make_tensor(x_tr, 'x_tr')
        y_tr = make_tensor(y_tr.astype(int), 'y_tr', dtype=tf.int64)
        x_te = make_tensor(x_te, 'x_te')
        seq_lens_tr = make_tensor(seq_lens_tr.astype(int), 'seq_lens_tr', dtype=tf.int64)
        # Was mislabeled 'seq_lens_tr', colliding with the training tensor.
        seq_lens_te = make_tensor(seq_lens_te.astype(int), 'seq_lens_te', dtype=tf.int64)
        return (x_tr, y_tr, seq_lens_tr, x_te, y_te, seq_lens_te)

    def _make_batches(self, x, y, seq_lens, batch_size, test=False):
        """Build a shuffled queue-based batch pipeline over (x, y, seq_lens),
        applying the train- or test-time preprocess op if configured."""
        (sample_x, sample_y, sample_lens) = tf.train.slice_input_producer([x, y, seq_lens], shuffle=True)
        if ((self.preprocess_op is not None) and (not test)):
            sample_x = self.preprocess_op(sample_x)
        if ((self.te_preprocess_op is not None) and test):
            sample_x = self.te_preprocess_op(sample_x)
        sample = [sample_x, sample_y, sample_lens]
        (x_batch, y_batch, seq_len_batch) = tf.train.batch(sample, batch_size, num_threads=self.num_threads, capacity=(256 + (3 * batch_size)))
        return (x_batch, y_batch, seq_len_batch)

    def run_experiment(self):
        """Run the full training loop: build the graph, fit for n_epoch
        epochs, report test accuracy per epoch, and optionally save."""
        start_compilation = time.time()
        d = self.covs.feature_dim()
        (x_tr, y_tr, seq_lens_tr, x_te, y_te_np, seq_lens_te) = self._get_data(self.data_dir)
        (x_batch, y_batch, seq_batch) = self._make_batches(x_tr, y_tr, seq_lens_tr, self.batch_size)
        inputs = self._init_inputs(d, self.n_inputs)
        N = y_tr.get_shape()[0].value
        iter_per_epoch = int((N / self.batch_size))
        maxiter = (iter_per_epoch * self.n_epoch)
        gp = TTGPstruct(self.covs, self.bin_cov, inputs, self.mu_ranks)
        global_step = tf.Variable(0, trainable=False)
        if (self.decay is not None):
            steps = (iter_per_epoch * self.decay[0])
            lr = tf.train.exponential_decay(self.lr, global_step, steps, self.decay[1], staircase=True)
        else:
            lr = tf.Variable(self.lr, trainable=False)
        (elbo, train_op) = gp.fit(x_batch, y_batch, seq_batch, N, lr, global_step)
        model_params = gp.get_params()
        saver = tf.train.Saver(model_params)
        coord = tf.train.Coordinator()
        init = tf.global_variables_initializer()
        data_initializer = tf.variables_initializer([x_tr, y_tr, x_te, seq_lens_tr, seq_lens_te])
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.Session() as sess:
            sess.run(data_initializer)
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            gp.initialize(sess)
            sess.run(init)
            if self.load_model:
                print('Restoring the model...')
                saver.restore(sess, self.model_dir)
                print('restored.')
            batch_elbo = 0
            start_epoch = time.time()
            for i in range(maxiter):
                # Report at every epoch boundary and, optionally, every
                # print_freq iterations.
                if ((not (i % iter_per_epoch)) or (self.frequent_print and (not (i % self.print_freq)))):
                    if (i == 0):
                        print('Compilation took', (time.time() - start_compilation))
                    print('Epoch', (i / iter_per_epoch), ', lr=', lr.eval(), ':')
                    if (i != 0):
                        print('\tEpoch took:', (time.time() - start_epoch))
                        pred = gp.predict(x_te, seq_lens_te, sess)
                        accuracy_te = accuracy_struct(pred, y_te_np)
                        print('\taccuracy on test set:', accuracy_te)
                        print('\taverage elbo:', (batch_elbo / iter_per_epoch))
                        batch_elbo = 0
                        start_epoch = time.time()
                (elbo_val, _, _) = sess.run([elbo, train_op, update_ops])
                batch_elbo += elbo_val
            # NOTE(review): accuracy_te is a Python value from
            # accuracy_struct, and is unbound when maxiter <= iter_per_epoch;
            # sess.run(accuracy_te) looks suspect — confirm upstream intent.
            accuracy_val = sess.run(accuracy_te)
            print('Final accuracy:', accuracy_val)
            if (not (self.save_dir is None)):
                model_path = saver.save(sess, self.save_dir)
                print(('Model saved in file: %s' % model_path))
            gp.cov.projector.save_weights(sess)
@pytest.mark.parametrize('loader_parameters', [{'path_data': [str(Path(__data_testing_dir__, 'microscopy_png'))], 'target_suffix': ['_seg-myelin-manual'], 'extensions': ['.png'], 'roi_params': {'suffix': None, 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': [], 'balance': {}}, 'slice_axis': 'axial', 'slice_filter_params': {'filter_empty_mask': False, 'filter_empty_input': True}, 'patch_filter_params': {'filter_empty_mask': False, 'filter_empty_input': False}, 'multichannel': False}])
@pytest.mark.parametrize('model_parameters', [{'name': 'Unet', 'dropout_rate': 0.3, 'bn_momentum': 0.1, 'final_activation': 'sigmoid', 'depth': 3}])
def test_microscopy_pixelsize(download_data_testing_test_files, loader_parameters, model_parameters):
    """Check that resampling microscopy PNGs to a target pixel size yields
    the expected loaded input shapes.

    Fix: both parametrize decorator lines were garbled to bare
    ``.parametrize(...)`` (a syntax error); restored the
    ``@pytest.mark.parametrize`` form.
    """
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat2_sample-data5_SEM.png']
    transform_parameters = {TransformationKW.RESAMPLE: {'wspace': 9.3e-05, 'hspace': 9.3e-05}}
    ds = imed_loader.load_dataset(bids_df, **{**loader_parameters, **{'data_list': data_lst, 'transforms_params': transform_parameters, 'dataset_type': 'training'}})
    assert (ds[0]['input'].shape == (1, 725, 725))
    data_lst = ['sub-rat3_ses-02_sample-data11_run-1_SEM.png']
    transform_parameters = {TransformationKW.RESAMPLE: {'wspace': 0.0001, 'hspace': 0.0001}}
    ds = imed_loader.load_dataset(bids_df, **{**loader_parameters, **{'data_list': data_lst, 'transforms_params': transform_parameters, 'dataset_type': 'training'}})
    assert (ds[0]['input'].shape == (1, 839, 769))
    data_lst = ['sub-rat3_ses-02_sample-data10_SEM.png']
    transform_parameters = {TransformationKW.RESAMPLE: {'wspace': 0.0001, 'hspace': 0.0001}}
    ds = imed_loader.load_dataset(bids_df, **{**loader_parameters, **{'data_list': data_lst, 'transforms_params': transform_parameters, 'dataset_type': 'training'}})
    assert (ds[0]['input'].shape == (1, 758, 737))
def make_beitl16_384(pretrained, use_readout='ignore', hooks=(5, 11, 17, 23)):
    """Build a BEiT-Large/16 @ 384 backbone wrapped for dense prediction,
    hooking the transformer blocks listed in *hooks*."""
    backbone = timm.create_model('beit_large_patch16_384', pretrained=pretrained)
    return _make_beit_backbone(
        backbone,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
class Robot(xmlr.Object):
    """URDF robot model: aggregates links, joints, materials, gazebos and
    transmissions, and maintains lookup maps for kinematic-chain queries.

    Fix: ``from_parameter_server`` took ``cls`` but lacked ``@classmethod``,
    so ``Robot.from_parameter_server()`` raised a TypeError; the decorator
    is now applied (matching upstream urdf_parser_py).
    """

    def __init__(self, name=None):
        self.aggregate_init()
        self.name = name
        self.joints = []
        self.links = []
        self.materials = []
        self.gazebos = []
        self.transmissions = []
        # Lookup maps kept in sync by add_aggregate().
        self.joint_map = {}
        self.link_map = {}
        # child link name -> (joint name, parent link name)
        self.parent_map = {}
        # parent link name -> [(joint name, child link name), ...]
        self.child_map = {}

    def add_aggregate(self, typeName, elem):
        """Register *elem* and update the joint/link lookup maps."""
        xmlr.Object.add_aggregate(self, typeName, elem)
        if (typeName == 'joint'):
            joint = elem
            self.joint_map[joint.name] = joint
            self.parent_map[joint.child] = (joint.name, joint.parent)
            if (joint.parent in self.child_map):
                self.child_map[joint.parent].append((joint.name, joint.child))
            else:
                self.child_map[joint.parent] = [(joint.name, joint.child)]
        elif (typeName == 'link'):
            link = elem
            self.link_map[link.name] = link

    def add_link(self, link):
        """Add a link to the model."""
        self.add_aggregate('link', link)

    def add_joint(self, joint):
        """Add a joint to the model."""
        self.add_aggregate('joint', joint)

    def get_chain(self, root, tip, joints=True, links=True, fixed=True):
        """Return the chain of joint/link names from *root* to *tip*.

        joints/links toggle which element kinds are included; fixed=False
        omits fixed joints. Walks parent_map upward, then reverses.
        """
        chain = []
        if links:
            chain.append(tip)
        link = tip
        while (link != root):
            (joint, parent) = self.parent_map[link]
            if joints:
                if (fixed or (self.joint_map[joint].joint_type != 'fixed')):
                    chain.append(joint)
            if links:
                chain.append(parent)
            link = parent
        chain.reverse()
        return chain

    def get_root(self):
        """Return the unique link that has no parent; assert on 0 or >1 roots."""
        root = None
        for link in self.link_map:
            if (link not in self.parent_map):
                assert (root is None), 'Multiple roots detected, invalid URDF.'
                root = link
        assert (root is not None), 'No roots detected, invalid URDF.'
        return root

    @classmethod
    def from_parameter_server(cls, key='robot_description'):
        """Construct a Robot from the ROS parameter server entry *key*."""
        # Imported here so the module does not hard-depend on ROS.
        import rospy
        return cls.from_xml_string(rospy.get_param(key))
class Network(Assembly):
    """Top-level container of a spiking neural network.

    Extends Assembly with ownership of Monitors and Learners (auto-registered
    through __setattr__), a simulation Backend, and the build/run lifecycle
    of all contained neuron groups and connections.
    """
    _class_label = '<net>'

    def __init__(self, name=None):
        super(Network, self).__init__(name=name)
        # Monitors/learners are captured automatically in __setattr__.
        self._monitors = OrderedDict()
        self._learners = OrderedDict()
        self._pipline = None
        self._backend: Backend = None
        self._forward_build = False
        pass

    def set_backend(self, backend=None, device='cpu', partition=False):
        """Attach a simulation backend.

        *backend* may be None or 'torch'/'pytorch' (creates a Torch_Backend
        on *device*) or an existing Backend instance.
        """
        if isinstance(device, str):
            device = [device]
        if (backend is None):
            self._backend = Torch_Backend(device)
            self._backend.partition = partition
        elif isinstance(backend, Backend):
            self._backend = backend
        elif isinstance(backend, str):
            if ((backend == 'torch') or (backend == 'pytorch')):
                self._backend = Torch_Backend(device)
                self._backend.partition = partition

    def set_backend_dt(self, dt=0.1, partition=False):
        """Set the simulation time step, creating a default CPU backend
        first if none exists yet."""
        if (self._backend is None):
            warn('have not set backend, default pytorch backend is set automatically')
            self._backend = Torch_Backend('cpu')
            self._backend.dt = dt
        else:
            self._backend.dt = dt
            self._backend.partition = partition

    def set_random_seed(self, seed):
        """Seed torch RNGs (and CUDA RNG when the backend device is 'cuda')."""
        if isinstance(self._backend, Torch_Backend):
            import torch
            torch.random.manual_seed(int(seed))
            if (self._backend.device == 'cuda'):
                torch.cuda.manual_seed(int(seed))

    def get_testparams(self):
        """Return all backend parameter tensors as a flat list."""
        self.all_Wparams = list()
        for (key, value) in self._backend._parameters_dict.items():
            self.all_Wparams.append(value)
        return self.all_Wparams

    def add_learner(self, name, learner):
        """Attach *learner* under attribute *name* (registered in __setattr__)."""
        from ..Learning.Learner import Learner
        assert isinstance(learner, Learner)
        self.__setattr__(name, learner)

    def __setattr__(self, name, value):
        """Intercept attribute assignment to auto-register Monitors/Learners."""
        from ..Monitor.Monitor import Monitor
        from ..Learning.Learner import Learner
        super(Network, self).__setattr__(name, value)
        if isinstance(value, Monitor):
            self._monitors[name] = value
        elif isinstance(value, Learner):
            self._learners[name] = value

    def build(self, backend=None, strategy=0, full_enable_grad=None, device=None):
        """Build every group, connection, learner and monitor onto the backend.

        strategy == 1 uses an ordered depth-first "forward" build starting
        from encoder/generator nodes; otherwise modules are built with small
        thread pools (connections first, then groups).
        """
        if (full_enable_grad is not None):
            self.enable_full_grad(full_enable_grad)
        if (self._backend is None):
            if (backend is not None):
                if (device is not None):
                    self.set_backend(backend, device)
                else:
                    self.set_backend(backend)
            elif (device is not None):
                self.set_backend(device=device)
            else:
                self.set_backend()
        self._backend.clear_step()
        if (self._backend.runtime is None):
            # Default runtime: ten simulation steps.
            self._backend.runtime = (10 * self._backend.dt)
        all_groups = self.get_groups()
        for asb in all_groups:
            asb.set_id()
        self.build_projections(self._backend)
        all_connections = self.get_connections()
        con_debug = False
        con_syn_count = 0
        for con in all_connections:
            con.set_id()
            con.pre.register_connection(con, True)
            con.post.register_connection(con, False)
        if (strategy == 1):
            self.forward_build(all_groups, all_connections)
            self._backend.forward_build = True
        else:
            from multiprocessing.pool import ThreadPool as Pool
            self._backend.forward_build = False

            def build_fn(module):
                module.build(self._backend)
            # Build connections first, then groups, each with 4 workers.
            pool = Pool(4)
            pool.map(build_fn, all_connections)
            pool.close()
            pool.join()
            pool = Pool(4)
            pool.map(build_fn, all_groups)
            pool.close()
            pool.join()
        for learner in self._learners.values():
            learner.set_id()
            learner.build(self._backend)
        for monitor in self._monitors.values():
            monitor.build(self._backend)
        self._backend.build_graph()
        self._backend.builded = True
        pass

    def forward_build(self, all_groups=None, all_connections=None):
        """Build modules in dataflow order: encoders/generators first, then
        everything reachable from them, with plain '<nod>' nodes last."""
        builded_groups = []
        builded_connections = []
        nod_groups = []
        for group in all_groups.copy():
            if (group._class_label == '<nod>'):
                if ((group._node_sub_class == '<encoder>') or (group._node_sub_class == '<generator>')):
                    group.build(self._backend)
                    builded_groups.append(group)
                    all_groups.remove(group)
                    for conn in group._output_connections:
                        self.deep_forward_build(conn, all_groups, all_connections, builded_groups, builded_connections)
                    for module in group._output_modules:
                        self.deep_forward_build(module, all_groups, all_connections, builded_groups, builded_connections)
                else:
                    all_groups.remove(group)
                    nod_groups.append(group)
        # Sweep any modules not reachable from an encoder/generator.
        while (all_groups or all_connections):
            for group in all_groups:
                self.deep_forward_build(group, all_groups, all_connections, builded_groups, builded_connections)
            for conn in all_connections:
                self.deep_forward_build(conn, all_groups, all_connections, builded_groups, builded_connections)
        for group in nod_groups:
            group.build(self._backend)
            builded_groups.append(group)

    def deep_forward_build(self, target, all_groups, all_connections, builded_groups, builded_connections):
        """Recursively build *target* once all its predecessors are built,
        then recurse into its successors."""
        if ((target in builded_groups) or (target in builded_connections)):
            return
        if (target._class_label == '<con>'):
            pre = [target.pre]
            post = [target.post]
        elif (target._class_label == '<neg>'):
            pre = (target._input_connections + target._input_modules)
            post = (target._output_connections + target._output_modules)
        elif (target._class_label == '<mod>'):
            pre = target.input_targets.copy()
            post = target.output_targets.copy()
        else:
            raise ValueError('Deep forward build Error, unsupported class label.')
        # Defer until every predecessor has been built.
        for pr in pre:
            if ((pr in all_groups) or (pr in all_connections)):
                return
        target.build(self._backend)
        if (target._class_label == '<con>'):
            builded_connections.append(target)
            all_connections.remove(target)
        elif ((target._class_label == '<neg>') or (target._class_label == '<mod>')):
            builded_groups.append(target)
            all_groups.remove(target)
        for po in post:
            self.deep_forward_build(po, all_groups, all_connections, builded_groups, builded_connections)
        return

    def run(self, backend_time):
        """Simulate for *backend_time*, resetting state first."""
        self._backend.set_runtime(backend_time)
        if (self._backend.builded is False):
            self.build()
        self._backend.initial_step()
        self._backend.update_time_steps()

    def run_continue(self, backend_time):
        """Simulate for *backend_time*, continuing from the previous state."""
        self._backend.set_runtime(backend_time)
        if (self._backend.builded is False):
            self.build()
            self._backend.initial_step()
        self._backend.initial_continue_step()
        self._backend.update_time_steps()

    def reset(self):
        """Reset backend state (only if already built)."""
        if (self._backend.builded is True):
            self._backend.initial_step()

    def enable_full_grad(self, requires_grad=True):
        """Enable/disable gradient tracking for the whole backend."""
        self._backend.full_enable_grad = requires_grad

    def init_run(self):
        """Run the backend's initial step only."""
        self._backend.initial_step()

    def add_monitor(self, name, monitor):
        """Attach *monitor* under attribute *name*; rejects duplicates."""
        from ..Monitor.Monitor import Monitor
        assert isinstance(monitor, Monitor), 'Type Error, it is not monitor'
        assert (monitor not in self._monitors.values()), ('monitor %s is already added' % name)
        assert (name not in self._monitors.keys()), ('monitor with name: %s have the same name with an already exists monitor' % name)
        self.__setattr__(name, monitor)

    def get_elements(self):
        """Return {group_id: group} for all contained groups."""
        element_dict = dict()
        for element in self.get_groups():
            element_dict[element.id] = element
        return element_dict

    def save_state(self, filename=None, direct=None, save=True, hdf5=False):
        """Save backend parameters (and Module state dicts) to
        <direct>/<filename>/parameters/, as .pt files or a single HDF5 file.

        With save=False, just returns the live parameter dict.
        """
        from ..Neuron.Module import Module
        state = self._backend._parameters_dict
        if (not save):
            return state
        if (not filename):
            filename = (self.name if self.name else 'autoname')
        if (not direct):
            direct = './'
        file = filename.split('.')[0]
        path = ((direct + file) + '/parameters/')
        import os
        import torch
        # chdir into the target dir; restored at the end.
        origin_path = os.getcwd()
        os.chdir(direct)
        if (file not in os.listdir()):
            os.mkdir(file)
        if ('parameters' not in os.listdir(('./' + file))):
            os.mkdir((('./' + file) + '/parameters'))
        os.chdir((('./' + file) + '/parameters'))
        if hdf5:
            import h5py
            # NOTE(review): `filename` is derived but h5py.File is opened on
            # `direct` — looks like a bug in the hdf5 path; confirm upstream.
            filename = (filename if direct.endswith('.hdf5') else (direct + '.hdf5'))
            with h5py.File(direct, 'w') as f:
                for (i, item) in enumerate(state):
                    f.create_dataset(item, data=self._backend._parameters_dict[item].cpu().detach().numpy())
                    print(i, item, ': saved')
        else:
            torch.save(self._backend._parameters_dict, './_parameters_dict.pt')
            module_dict = {}
            module_exist = False
            for group in self.get_groups():
                if isinstance(group, Module):
                    module_dict[group.id] = group.state_dict
                    module_exist = True
            if module_exist:
                torch.save(module_dict, './module_dict.pt')
        os.chdir(origin_path)
        return

    def state_from_dict(self, state=False, filename=None, direct=None, device=None):
        """Load backend parameters either from an in-memory *state* dict or
        from files under <direct>/<filename>/parameters/ (.pt, else .hdf5).

        Each loaded tensor is moved onto the device of the parameter it
        replaces.
        """
        from ..Neuron.Module import Module
        if (not self._backend):
            if device:
                self.set_backend('torch', device=device)
            else:
                self.set_backend('torch')
        if (self._backend.builded is False):
            self.build()
        if (self._backend.device != device):
            import warnings
            warnings.warn((('Backend device setting is ' + str(self._backend.device)) + '. Backend device selection is priority.'))
        if state:
            import torch
            if (isinstance(state, dict) or isinstance(state, torch.Tensor)):
                for (key, para) in state.items():
                    backend_key = self._backend.check_key(key, self._backend._parameters_dict)
                    if backend_key:
                        target_device = self._backend._parameters_dict[backend_key].device
                        self._backend._parameters_dict[backend_key] = para.to(target_device)
                return
            else:
                raise ValueError('Given state has wrong type')
        if direct:
            if filename:
                path = (((direct + '/') + filename) + '/parameters/')
            else:
                path = (direct + '/parameters/')
        elif filename:
            path = (('./' + filename) + '/parameters/')
        else:
            path = './parameters/'
        import os
        import torch
        origin_path = os.getcwd()
        try:
            os.chdir(path)
        except:
            raise ValueError('Wrong Path.')
        if ('_parameters_dict.pt' in os.listdir('./')):
            data = torch.load('./_parameters_dict.pt', map_location=self._backend.device0)
            for (key, para) in data.items():
                backend_key = self._backend.check_key(key, self._backend._parameters_dict)
                if backend_key:
                    target_device = self._backend._parameters_dict[backend_key].device
                    self._backend._parameters_dict[backend_key] = para.to(target_device)
            if ('module_dict.pt' in os.listdir('./')):
                module_data = torch.load('./module_dict.pt', map_location=self._backend.device0)
                for group in self.get_groups():
                    if isinstance(group, Module):
                        target_key = self._backend.check_key(group.id, module_data)
                        group.load_state_dict(module_data[target_key])
        else:
            # Fallback: scan for an HDF5 dump.
            for file in os.listdir('./'):
                if file.endswith('.hdf5'):
                    import h5py
                    with h5py.File(direct, 'r') as f:
                        for (key, para) in f.items():
                            backend_key = self._backend.check_key(key, self._backend._parameters_dict)
                            # NOTE(review): guards on `key` not `backend_key`
                            # (unlike the .pt branch) — confirm intent.
                            if key:
                                target_device = self._backend._parameters_dict[backend_key].device
                                self._backend._parameters_dict[backend_key] = para.to(target_device)
        os.chdir(origin_path)
        return
def load_state_dict(checkpoint_path, use_ema=False):
    """Load a model state dict from *checkpoint_path*.

    Handles raw state dicts as well as {'state_dict': ...} checkpoints,
    prefers 'state_dict_ema' when use_ema is True and present, and strips a
    leading 'module.' (DataParallel) prefix from every key.

    Raises FileNotFoundError (now carrying the offending path, instead of a
    bare exception) when the file is missing.
    """
    if (checkpoint_path and os.path.isfile(checkpoint_path)):
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        state_dict_key = 'state_dict'
        if isinstance(checkpoint, dict):
            if (use_ema and ('state_dict_ema' in checkpoint)):
                state_dict_key = 'state_dict_ema'
        if (state_dict_key and (state_dict_key in checkpoint)):
            new_state_dict = OrderedDict()
            for (k, v) in checkpoint[state_dict_key].items():
                # Strip the DataParallel 'module.' prefix.
                name = (k[7:] if k.startswith('module') else k)
                new_state_dict[name] = v
            state_dict = new_state_dict
        else:
            state_dict = checkpoint
        logging.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
        return state_dict
    else:
        logging.error("No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError("No checkpoint found at '{}'".format(checkpoint_path))
def create_all_memmaps(dataset_nums, proportions, num_train, num_val, num_test):
    """Pre-allocate train/val/test memmap files under ../data/ for the four
    modalities (image history, trajectory history, intent pose, trajectory
    future) and delegate the shuffled split/copy to create_memmap().

    One permutation per source dataset (keyed by the intent file's row
    count) is shared across all four modalities so their rows stay aligned.

    NOTE(review): the train/val/test np.memmap handles created here are
    never used afterwards — their apparent purpose is the side effect of
    creating and zero-filling the output files before create_memmap()
    writes into them; confirm create_memmap expects pre-existing files.
    """
    intent_file_paths = [f'{file_path}_intent_pose.npy' for file_path in dataset_nums]
    # One shuffle per dataset, reused for every modality below.
    shufflers = [np.random.permutation(np.load(path).shape[0]) for path in intent_file_paths]
    # --- image history ---
    image_history_file_paths = [f'{file_path}_image_history.npy' for file_path in dataset_nums]
    train_image_output_path = f'../data/train_image_history.npy'
    val_image_output_path = f'../data/val_image_history.npy'
    test_image_output_path = f'../data/test_image_history.npy'
    # mmap_mode='c' reads the header/shape without loading data into memory.
    first_memmap = np.load(image_history_file_paths[0], mmap_mode='c')
    extra_shape = first_memmap.shape[1:]
    train_memmap = np.memmap(train_image_output_path, dtype='float32', mode='w+', shape=(num_train, *extra_shape))
    val_memmap = np.memmap(val_image_output_path, dtype='float32', mode='w+', shape=(num_val, *extra_shape))
    test_memmap = np.memmap(test_image_output_path, dtype='float32', mode='w+', shape=(num_test, *extra_shape))
    create_memmap(image_history_file_paths, train_image_output_path, val_image_output_path, test_image_output_path, proportions, extra_shape, shufflers)
    # --- trajectory history ---
    trajectory_history_file_paths = [f'{file_path}_trajectory_history.npy' for file_path in dataset_nums]
    train_trajectory_history_output_path = f'../data/train_trajectory_history.npy'
    val_trajectory_history_output_path = f'../data/val_trajectory_history.npy'
    test_trajectory_history_output_path = f'../data/test_trajectory_history.npy'
    first_memmap = np.load(trajectory_history_file_paths[0], mmap_mode='c')
    extra_shape = first_memmap.shape[1:]
    train_memmap = np.memmap(train_trajectory_history_output_path, dtype='float32', mode='w+', shape=(num_train, *extra_shape))
    val_memmap = np.memmap(val_trajectory_history_output_path, dtype='float32', mode='w+', shape=(num_val, *extra_shape))
    test_memmap = np.memmap(test_trajectory_history_output_path, dtype='float32', mode='w+', shape=(num_test, *extra_shape))
    create_memmap(trajectory_history_file_paths, train_trajectory_history_output_path, val_trajectory_history_output_path, test_trajectory_history_output_path, proportions, extra_shape, shufflers)
    # --- intent pose ---
    intent_file_paths = [f'{file_path}_intent_pose.npy' for file_path in dataset_nums]
    train_intent_output_path = f'../data/train_intent.npy'
    val_intent_output_path = f'../data/val_intent.npy'
    test_intent_output_path = f'../data/test_intent.npy'
    first_memmap = np.load(intent_file_paths[0], mmap_mode='c')
    extra_shape = first_memmap.shape[1:]
    train_memmap = np.memmap(train_intent_output_path, dtype='float32', mode='w+', shape=(num_train, *extra_shape))
    val_memmap = np.memmap(val_intent_output_path, dtype='float32', mode='w+', shape=(num_val, *extra_shape))
    test_memmap = np.memmap(test_intent_output_path, dtype='float32', mode='w+', shape=(num_test, *extra_shape))
    create_memmap(intent_file_paths, train_intent_output_path, val_intent_output_path, test_intent_output_path, proportions, extra_shape, shufflers)
    # --- trajectory future ---
    trajectory_future_file_paths = [f'{file_path}_trajectory_future.npy' for file_path in dataset_nums]
    train_trajectory_future_output_path = f'../data/train_trajectory_future.npy'
    val_trajectory_future_output_path = f'../data/val_trajectory_future.npy'
    test_trajectory_future_output_path = f'../data/test_trajectory_future.npy'
    first_memmap = np.load(trajectory_future_file_paths[0], mmap_mode='c')
    extra_shape = first_memmap.shape[1:]
    train_memmap = np.memmap(train_trajectory_future_output_path, dtype='float32', mode='w+', shape=(num_train, *extra_shape))
    val_memmap = np.memmap(val_trajectory_future_output_path, dtype='float32', mode='w+', shape=(num_val, *extra_shape))
    test_memmap = np.memmap(test_trajectory_future_output_path, dtype='float32', mode='w+', shape=(num_test, *extra_shape))
    create_memmap(trajectory_future_file_paths, train_trajectory_future_output_path, val_trajectory_future_output_path, test_trajectory_future_output_path, proportions, extra_shape, shufflers)
def cast_lora_weight(model, dtype=torch.bfloat16):
    """Cast LoRA-related submodules of *model* for mixed-precision training.

    LoRA/BF16 linear layers go to *dtype*, norm layers are kept in float32
    for numerical stability, and lm_head/embed_tokens are upcast from
    float32 to *dtype* to match.

    Note: rebinding the local ``module = module.to(...)`` still mutates the
    model, because nn.Module.to() casts parameters/buffers in place.
    """
    for (name, module) in model.named_modules():
        if isinstance(module, LowBitLinear):
            module.compute_dtype = dtype
        if isinstance(module, LoraLayer):
            module = module.to(dtype)
        if isinstance(module, BF16Linear):
            module = module.to(dtype)
            module.compute_dtype = dtype
        if ('norm' in name):
            # Norm layers stay in fp32 for stability.
            module = module.to(torch.float32)
        if (('lm_head' in name) or ('embed_tokens' in name)):
            if hasattr(module, 'weight'):
                # Only upcast heads/embeddings that are still fp32.
                if (module.weight.dtype == torch.float32):
                    module = module.to(dtype)
def test_dispatch_issue(msg):
    """Regression test for pybind11 dispatch through Python trampolines.

    PyClass1 overrides the virtual dispatch(); PyClass2's override first
    checks that calling the pure-virtual base raises, then dispatches a
    fresh PyClass1 through the C++ side.
    """
    class PyClass1(m.DispatchIssue):
        def dispatch(self):
            return 'Yay..'

    class PyClass2(m.DispatchIssue):
        def dispatch(self):
            # The base method is pure virtual: calling it must raise.
            with pytest.raises(RuntimeError) as excinfo:
                super(PyClass2, self).dispatch()
            assert (msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"')
            # Re-enter C++ dispatch with a different Python subclass.
            p = PyClass1()
            return m.dispatch_issue_go(p)
    b = PyClass2()
    assert (m.dispatch_issue_go(b) == 'Yay..')
def train_net(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device='cpu'):
    """Train `net` with the requested optimizer and report pre/post accuracy.

    Args:
        net_id: identifier used only for logging.
        net: model to train (moved back to CPU when training completes).
        train_dataloader: a DataLoader or a list of DataLoaders.
        test_dataloader: DataLoader used for evaluation.
        epochs: number of passes over the training data.
        lr: learning rate.
        args_optimizer: one of 'adam', 'amsgrad', 'sgd'.
        device: torch device string.

    Returns:
        (train_acc, test_acc) computed after training.

    Raises:
        ValueError: if `args_optimizer` is not a supported optimizer name
            (previously an unknown name crashed later with NameError).
    """
    logger.info('Training network %s' % str(net_id))
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    (test_acc, conf_matrix) = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
    logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))
    # Only parameters with requires_grad are optimized; regularization and
    # momentum hyper-params come from the module-level `args` namespace.
    trainable_params = filter(lambda p: p.requires_grad, net.parameters())
    if args_optimizer == 'adam':
        optimizer = optim.Adam(trainable_params, lr=lr, weight_decay=args.reg)
    elif args_optimizer == 'amsgrad':
        optimizer = optim.Adam(trainable_params, lr=lr, weight_decay=args.reg, amsgrad=True)
    elif args_optimizer == 'sgd':
        optimizer = optim.SGD(trainable_params, lr=lr, momentum=args.rho, weight_decay=args.reg)
    else:
        raise ValueError('Unsupported optimizer: {}'.format(args_optimizer))
    criterion = nn.CrossEntropyLoss().to(device)
    cnt = 0
    # Normalize to a list of dataloaders (idiomatic isinstance instead of
    # the original `type(x) == type([1])` comparison).
    if not isinstance(train_dataloader, list):
        train_dataloader = [train_dataloader]
    for epoch in range(epochs):
        epoch_loss_collector = []
        for tmp in train_dataloader:
            for (batch_idx, (x, target)) in enumerate(tmp):
                (x, target) = (x.to(device), target.to(device))
                optimizer.zero_grad()
                x.requires_grad = True
                target.requires_grad = False
                target = target.long()
                out = net(x)
                loss = criterion(out, target)
                loss.backward()
                optimizer.step()
                cnt += 1
                epoch_loss_collector.append(loss.item())
        epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)
        logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    (test_acc, conf_matrix) = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Training accuracy: %f' % train_acc)
    logger.info('>> Test accuracy: %f' % test_acc)
    net.to('cpu')
    logger.info(' ** Training complete **')
    return (train_acc, test_acc)
def get_power_spectral_density_matrix(xs: ComplexTensor, mask: torch.Tensor, normalization=True, eps: float=1e-15) -> ComplexTensor:
    """Compute a mask-weighted spatial covariance (PSD) matrix.

    Per-frame outer products over channels are averaged over time using
    `mask` (mean over its channel axis) as frame weights.
    """
    # Outer product per frame: (..., C, T) x conj(..., E, T) -> (..., T, C, E).
    outer = FC.einsum('...ct,...et->...tce', [xs, xs.conj()])
    # Collapse the mask's channel axis.
    weights = mask.mean(dim=-2)
    if normalization:
        # Normalize the weights to sum to one over time (eps avoids /0).
        weights = weights / (weights.sum(dim=-1, keepdim=True) + eps)
    # Weight each frame's outer product, then sum over the time axis.
    return (outer * weights[..., None, None]).sum(dim=-3)
def GetDotNodeName(name_string, is_component=False):
    """Return a dict with a display `label` and a DOT-safe `node` name.

    Hyphens and dots are not valid in DOT identifiers, so they are replaced.
    When `is_component` is set, a '_component' suffix distinguishes the
    component node from the like-named layer node.
    """
    node_name_string = re.sub('-', 'hyphen', name_string)
    node_name_string = re.sub('\\.', '_dot_', node_name_string)
    if is_component:
        # Bug fix: the original did `node_name_string += node_name_string.strip()
        # + '_component'`, which duplicated the entire name. Only the suffix
        # should be appended.
        node_name_string = node_name_string.strip() + '_component'
    return {'label': name_string, 'node': node_name_string}
class MNISTDataModule(LightningDataModule):
    """LightningDataModule for MNIST with a reproducible train/val/test split.

    The official train and test sets are concatenated and re-split with a
    fixed generator seed, so the split is stable across runs.
    """
    def __init__(self, data_dir: str='data/', train_val_test_split: Tuple[(int, int, int)]=(55000, 5000, 10000), batch_size: int=64, num_workers: int=0, pin_memory: bool=False):
        super().__init__()
        self.data_dir = data_dir
        self.train_val_test_split = train_val_test_split
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        # Standard MNIST normalization constants.
        self.transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        self.dims = (1, 28, 28)
        # Split datasets are populated lazily in setup().
        self.data_train: Optional[Dataset] = None
        self.data_val: Optional[Dataset] = None
        self.data_test: Optional[Dataset] = None
    def num_classes(self) -> int:
        # NOTE(review): upstream templates declare this as a @property; the
        # decorator may have been stripped in this copy — check call sites.
        return 10
    def prepare_data(self):
        """Download MNIST if needed. No state may be assigned here."""
        MNIST(self.data_dir, train=True, download=True)
        MNIST(self.data_dir, train=False, download=True)
    def setup(self, stage: Optional[str]=None):
        """Concatenate train+test and split with a fixed-seed generator."""
        if ((not self.data_train) or (not self.data_val) or (not self.data_test)):
            trainset = MNIST(self.data_dir, train=True, transform=self.transforms)
            testset = MNIST(self.data_dir, train=False, transform=self.transforms)
            dataset = ConcatDataset(datasets=[trainset, testset])
            (self.data_train, self.data_val, self.data_test) = random_split(dataset, self.train_val_test_split, generator=torch.Generator().manual_seed(42))
    def train_dataloader(self):
        return DataLoader(dataset=self.data_train, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=True)
    def val_dataloader(self):
        return DataLoader(dataset=self.data_val, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=False)
    def test_dataloader(self):
        return DataLoader(dataset=self.data_test, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, shuffle=False)
def calc_2dsplinecoeffs_c(array2d):
    """Compute 2D interpolation spline coefficients via the compiled C library.

    Returns a new C-contiguous float64 array holding the coefficients; the
    input array is left unmodified.
    """
    out = copy.copy(array2d)
    # Ensure a writable, C-contiguous float64 buffer for the C routine.
    out = numpy.require(out, dtype=numpy.float64, requirements=['C', 'W'])
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    interppotential_calc_2dsplinecoeffs = _lib.samples_to_coefficients
    # C signature: (double* data, int nx, int ny); note shape[1] (columns)
    # is passed first.
    interppotential_calc_2dsplinecoeffs.argtypes = [ndpointer(dtype=numpy.float64, flags=ndarrayFlags), ctypes.c_int, ctypes.c_int]
    # The C routine overwrites `out` in place with the coefficients.
    interppotential_calc_2dsplinecoeffs(out, out.shape[1], out.shape[0])
    return out
def multicolorline(x, y, cvals, ax, vmin=(- 90), vmax=90):
    """Draw a line on `ax` whose color varies along its length.

    Segment colors come from `cvals`, normalized to [vmin, vmax] and mapped
    through a two-sided colormap built from cmasher's 'redshift'. Returns
    the LineCollection added to the axes.
    """
    import cmasher as cmr
    import matplotlib.colors as mcolors
    from matplotlib.collections import LineCollection
    # Turn the polyline into consecutive (start, end) segment pairs.
    pts = np.array([x, y]).T.reshape(-1, 1, 2)
    segs = np.concatenate([pts[:-1], pts[1:]], axis=1)
    # Build a diverging map from the two outer thirds of cmr.redshift.
    base = cmr.redshift
    stacked = np.vstack((base(np.linspace(0.0, 0.3, 45)), base(np.linspace(0.7, 1.0, 45))))
    custom_cmap = mcolors.LinearSegmentedColormap.from_list('my_colormap', stacked)
    collection = LineCollection(segs, cmap=custom_cmap, norm=plt.Normalize(vmin, vmax))
    collection.set_array(cvals)
    collection.set_linewidth(3)
    return ax.add_collection(collection)
class TemplateArg(object):
    """A template parameter reference `{{{name|default}}}` in wiki markup."""
    def __init__(self, parameter):
        # Raw text is either 'name' or 'name|default'.
        pieces = splitParts(parameter)
        self.name = Template.parse(pieces[0])
        self.default = Template.parse(pieces[1]) if len(pieces) > 1 else None
    def __str__(self):
        """Render back to wiki syntax, including the default if present."""
        if self.default:
            return '{{{%s|%s}}}' % (self.name, self.default)
        return '{{{%s}}}' % self.name
    def subst(self, params, extractor, depth):
        """Resolve this parameter from `params`, falling back to the default.

        Returns '' when the parameter is absent and no default exists.
        """
        key = extractor.transform(self.name.subst(params, extractor, depth + 1))
        if key in params:
            return params[key]
        if self.default:
            resolved_default = self.default.subst(params, extractor, depth + 1)
            return extractor.transform(resolved_default)
        return ''
class PaperClassifier1(nn.Module):
    """Two-branch gated classifier; logits are the sum of two projected branches."""
    def __init__(self, in_dim, hid_dim_1, hid_dim_2, out_dim, norm, act, dropout=0.5):
        super(PaperClassifier1, self).__init__()
        # Map the `norm` keyword onto the wrapper applied to the output linears.
        identity_norm = (lambda x, dim: x)
        norm_wrappers = {'weight': weight_norm, 'batch': nn.BatchNorm1d, 'layer': nn.LayerNorm, 'none': identity_norm}
        if norm not in norm_wrappers:
            print('Invalid Normalization')
            raise Exception('Invalid Normalization')
        norm_layer = norm_wrappers[norm]
        self.gated_tanh_1 = FCNet([in_dim, hid_dim_1], dropout=dropout, norm=norm, act=act)
        self.gated_tanh_2 = FCNet([in_dim, hid_dim_2], dropout=dropout, norm=norm, act=act)
        self.linear_1 = norm_layer(nn.Linear(hid_dim_1, out_dim), dim=None)
        self.linear_2 = norm_layer(nn.Linear(hid_dim_2, out_dim), dim=None)
    def forward(self, x):
        """Return the element-wise sum of the two branch logits."""
        branch_a = self.linear_1(self.gated_tanh_1(x))
        branch_b = self.linear_2(self.gated_tanh_2(x))
        return branch_a + branch_b
class TFPreTrainedModel():
    """Import-time placeholder used when TensorFlow is not installed.

    Any attempt to construct or load this class calls `requires_tf`, which
    raises an informative error with install instructions instead of a raw
    ImportError.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream declares this as a classmethod; the decorator
        # may have been stripped in this copy — confirm before relying on it.
        requires_tf(self)
class MEKF_MA(Optimizer):
    """Modified Extended Kalman Filter optimizer with moving-average variants.

    Maintains per-parameter covariance matrices (P: state, R: measurement
    noise, Q: process noise) and applies Kalman-gain weight updates. Supports
    EMA smoothing of the update direction (miu_v), a delayed EMA of P every
    k_p steps (miu_p), and optional online decay/estimation of R (R_decay).
    """
    def __init__(self, params, dim_out, p0=0.01, lbd=1, sigma_r=None, sigma_q=0, lr=1, miu_v=0, miu_p=0, k_p=1, R_decay=False, R_decay_step=1000000):
        # Default measurement-noise scale falls back to the forgetting factor.
        if (sigma_r is None):
            sigma_r = max(lbd, 0)
        self._check_format(dim_out, p0, lbd, sigma_r, sigma_q, lr, miu_v, miu_p, k_p, R_decay, R_decay_step)
        defaults = dict(p0=p0, lbd=lbd, sigma_r=sigma_r, sigma_q=sigma_q, lr=lr, miu_v=miu_v, miu_p=miu_p, k_p=k_p, R_decay=R_decay, R_decay_step=R_decay_step)
        super(MEKF_MA, self).__init__(params, defaults)
        self.state['dim_out'] = dim_out
        with torch.no_grad():
            self._init_mekf_matrix()
    def _check_format(self, dim_out, p0, lbd, sigma_r, sigma_q, lr, miu_v, miu_p, k_p, R_decay, R_decay_step):
        """Validate hyper-parameters; raises ValueError on invalid values."""
        # NOTE(review): with `and`, this condition can never fire for int
        # inputs (the first term is False); it was probably meant to be
        # `not (isinstance(dim_out, int) and dim_out > 0)`.
        if ((not isinstance(dim_out, int)) and (dim_out > 0)):
            raise ValueError('Invalid output dimension: {}'.format(dim_out))
        if (not (0.0 < p0)):
            raise ValueError('Invalid initial P value: {}'.format(p0))
        if (not (0.0 < lbd)):
            raise ValueError('Invalid forgetting factor: {}'.format(lbd))
        if (not (0.0 < sigma_r)):
            raise ValueError('Invalid covariance matrix value for R: {}'.format(sigma_r))
        if (not (0.0 <= sigma_q)):
            raise ValueError('Invalid covariance matrix value for Q: {}'.format(sigma_q))
        if (not (0.0 < lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= miu_v < 1.0)):
            raise ValueError('Invalid EMA decaying factor for V matrix: {}'.format(miu_v))
        if (not (0.0 <= miu_p < 1.0)):
            raise ValueError('Invalid EMA decaying factor for P matrix: {}'.format(miu_p))
        # NOTE(review): same `and` issue as dim_out above — never fires for ints.
        if ((not isinstance(k_p, int)) and (k_p >= 0)):
            raise ValueError('Invalid delayed step size of Lookahead P: {}'.format(k_p))
        if ((not isinstance(R_decay, int)) and (not isinstance(R_decay, bool))):
            raise ValueError('Invalid R decay flag: {}'.format(R_decay))
        if (not isinstance(R_decay_step, int)):
            raise ValueError('Invalid max step for R decaying: {}'.format(R_decay_step))
    def _init_mekf_matrix(self):
        """Allocate identity-scaled P/R/Q and an empty Jacobian H per parameter."""
        self.state['step'] = 0
        self.state['mekf_groups'] = []
        dim_out = self.state['dim_out']
        for group in self.param_groups:
            mekf_mat = []
            for p in group['params']:
                matrix = {}
                size = p.size()
                # dim_w: total number of scalar weights in this parameter.
                dim_w = 1
                for dim in size:
                    dim_w *= dim
                device = p.device
                matrix['P'] = (group['p0'] * torch.eye(dim_w, dtype=torch.float, device=device))
                matrix['R'] = (group['sigma_r'] * torch.eye(dim_out, dtype=torch.float, device=device))
                matrix['Q'] = (group['sigma_q'] * torch.eye(dim_w, dtype=torch.float, device=device))
                matrix['H'] = None
                mekf_mat.append(matrix)
            self.state['mekf_groups'].append(mekf_mat)
    def step(self, closure=None, H_groups=None, err=None):
        """Perform one EKF update.

        Either `closure(y_ind)` is called once per output dimension (each
        call backprops one output component; the flattened gradients become
        the rows of the Jacobian H), or precomputed Jacobians are supplied
        via `H_groups`. `err` is the error/innovation column vector; with a
        closure, the last closure call's return value is used.

        Returns the error tensor.
        """
        self.state['step'] += 1
        if (closure is not None):
            # Build H row by row: one backward pass per output component.
            for y_ind in range(self.state['dim_out']):
                err = closure(y_ind)
                for group_ind in range(len(self.param_groups)):
                    group = self.param_groups[group_ind]
                    mekf_mat = self.state['mekf_groups'][group_ind]
                    for (ii, w) in enumerate(group['params']):
                        if (w.grad is None):
                            continue
                        H_n = mekf_mat[ii]['H']
                        grad = w.grad.data.detach()
                        # Flatten via transpose-first so the layout matches the
                        # un-flattening performed on V_n below.
                        if (len(w.size()) > 1):
                            grad = grad.transpose(1, 0)
                        grad = grad.contiguous().view((1, (- 1)))
                        if (y_ind == 0):
                            H_n = grad
                        else:
                            H_n = torch.cat([H_n, grad], dim=0)
                        self.state['mekf_groups'][group_ind][ii]['H'] = H_n
        else:
            # Use caller-provided Jacobians instead.
            for group_ind in range(len(self.param_groups)):
                H_mats = H_groups[group_ind]
                for (ii, H_n) in enumerate(H_mats):
                    self.state['mekf_groups'][group_ind][ii]['H'] = H_n
        err_T = err.transpose(0, 1)
        for group_ind in range(len(self.param_groups)):
            group = self.param_groups[group_ind]
            mekf_mat = self.state['mekf_groups'][group_ind]
            miu_v = group['miu_v']
            miu_p = group['miu_p']
            k_p = group['k_p']
            lr = group['lr']
            lbd = group['lbd']
            for (ii, w) in enumerate(group['params']):
                if (w.grad is None):
                    continue
                P_n_1 = mekf_mat[ii]['P']
                R_n = mekf_mat[ii]['R']
                Q_n = mekf_mat[ii]['Q']
                H_n = mekf_mat[ii]['H']
                H_n_T = H_n.transpose(0, 1)
                if group['R_decay']:
                    # Online estimate of R with a 1/step moving average,
                    # capped at R_decay_step so updates never vanish entirely.
                    miu = (1.0 / min(self.state['step'], group['R_decay_step']))
                    R_n = (R_n + (miu * ((err.mm(err_T) + H_n.mm(P_n_1).mm(H_n_T)) - R_n)))
                    self.state['mekf_groups'][group_ind][ii]['R'] = R_n
                # Innovation covariance S = H P H^T + R; Kalman gain K = P H^T S^-1.
                g_n = (H_n.mm(P_n_1).mm(H_n_T) + R_n)
                g_n = g_n.inverse()
                K_n = P_n_1.mm(H_n_T).mm(g_n)
                V_n = (lr * K_n.mm(err))
                # Un-flatten the update back to the parameter's shape
                # (undoing the transpose-first flattening used for H).
                if (len(w.size()) > 1):
                    V_n = V_n.view((w.size(1), w.size(0))).transpose(1, 0)
                else:
                    V_n = V_n.view(w.size())
                if (miu_v > 0):
                    # EMA-smooth the update direction.
                    param_state = self.state[w]
                    if ('buffer_V' not in param_state):
                        V_ema = param_state['buffer_V'] = torch.clone(V_n).detach()
                    else:
                        V_ema = param_state['buffer_V']
                        V_ema.mul_(miu_v).add_(V_n.mul((1 - miu_v)).detach())
                    V_n = V_ema
                w.data.add_(V_n)
                # Covariance update with forgetting factor lbd and process noise Q.
                P_n = ((1 / lbd) * ((P_n_1 - K_n.mm(H_n).mm(P_n_1)) + Q_n))
                if ((miu_p > 0) and (k_p > 0)):
                    # Lookahead-style delayed EMA of P, applied every k_p steps.
                    if ((self.state['step'] % k_p) == 0):
                        param_state = self.state[w]
                        if ('buffer_P' not in param_state):
                            P_ema = param_state['buffer_P'] = torch.clone(P_n).detach()
                        else:
                            P_ema = param_state['buffer_P']
                            P_ema.mul_(miu_p).add_(P_n.mul((1 - miu_p)).detach())
                        P_n = P_ema
                self.state['mekf_groups'][group_ind][ii]['P'] = P_n
        return err
def resdropresnet20_cifar10(classes=10, **kwargs):
    """ResDrop-ResNet-20 for CIFAR-10 (20 blocks, basic — not bottleneck).

    Extra keyword arguments are forwarded to `get_resdropresnet_cifar`.
    """
    return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name='resdropresnet20_cifar10', **kwargs)
def compute_skip_echo_len(model_name, conv, prompt):
    """Return how many characters of generated output echo the prompt.

    Streaming model output usually begins by repeating the prompt; callers
    slice off the first `skip_echo_len` characters. Template control tokens
    are not echoed, so their lengths are subtracted per model family.

    Args:
        model_name: model identifier (matched case-insensitively).
        conv: conversation object; only consulted for chatglm models.
        prompt: the full prompt string that was sent to the model.
    """
    def _len_without(text, special_toks):
        # Length of `text` minus all occurrences of the given control tokens.
        total = len(text)
        for tok in special_toks:
            total -= text.count(tok) * len(tok)
        return total

    model_name = model_name.lower()
    if 'chatglm' in model_name:
        # chatglm echoes only the last user message (plus one char).
        skip_echo_len = len(conv.messages[-2][1]) + 1
    elif 'dolly-v2' in model_name:
        skip_echo_len = _len_without(prompt, ['### Instruction:', '### Response:', '### End'])
    elif ('oasst' in model_name) and ('pythia' in model_name):
        skip_echo_len = _len_without(prompt, ['<|prompter|>', '<|assistant|>', '<|endoftext|>'])
    elif 'stablelm' in model_name:
        skip_echo_len = _len_without(prompt, ['<|SYSTEM|>', '<|USER|>', '<|ASSISTANT|>'])
    elif 'baize' in model_name:
        skip_echo_len = len(prompt)
    elif 'mpt' in model_name:
        skip_echo_len = _len_without(prompt, ['<|im_start|>', '<|im_end|>'])
    elif 'llama-2' in model_name:
        # Each '</s><s>' occurrence contributes 6 fewer echoed characters.
        skip_echo_len = (len(prompt) + 1) - (prompt.count('</s><s>') * 6)
    elif 'neural-chat' in model_name:
        skip_echo_len = len(prompt) + 1
    else:
        # Default: each '</s>' occurrence contributes 3 fewer echoed characters.
        skip_echo_len = (len(prompt) + 1) - (prompt.count('</s>') * 3)
    return skip_echo_len
def auto_wrap(module: nn.Module, auto_wrap_policy: Optional[Callable]=None, **kwargs: Any) -> nn.Module:
    """Recursively wrap `module`'s children according to `auto_wrap_policy`.

    Only active inside a ConfigAutoWrap context; otherwise `module` is
    returned unchanged. Extra kwargs are forwarded to the wrapper.
    """
    if ConfigAutoWrap.in_autowrap_context:
        # `remainder` (count of params left unwrapped) is intentionally discarded.
        (wrapped_module, remainder) = ConfigAutoWrap.recursive_wrap(module, auto_wrap_policy=auto_wrap_policy, **kwargs)
        return wrapped_module
    return module
def random_odd(key: chex.PRNGKey, max_val: int) -> chex.Array:
    """Sample a uniformly random odd integer from [1, max_val)."""
    # k ~ Uniform{0, ..., max_val // 2 - 1}; 2k + 1 enumerates the odd values.
    half = jax.random.randint(key, (), 0, max_val // 2)
    return 2 * half + 1
def register_annotations_from_source(source: str, filename: str) -> Set[str]:
    """Parse `source` and register class/function specs under target modules.

    Target modules come from decorators when present, otherwise default to
    the module named after `filename`. Returns the set of module names that
    received registrations.
    """
    registered: Set[str] = set()
    fallback_modules = [os.path.splitext(os.path.basename(filename))[0]]
    for node in ast.parse(source).body:
        if not isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.Expr, ast.Str)):
            continue
        decorators = getattr(node, 'decorator_list', [])
        for module in (get_modules_from_decorators(decorators) or fallback_modules):
            registered.add(module)
            if isinstance(node, ast.ClassDef):
                REGISTERED_CLASS_SPECS.setdefault(module, []).append(node)
            elif isinstance(node, ast.FunctionDef):
                REGISTERED_FUNCTION_SPECS.setdefault(module, []).append(node)
            elif isinstance(node, (ast.Expr, ast.Str)):
                # String annotations may register additional modules themselves.
                registered |= (handle_string_annotation(node, filename) or set())
    return registered
class FPNSegmentationHead2(nn.Module):
    """FPN-style segmentation decoder over 16x/8x/4x backbone shortcuts.

    Progressively upsamples a (possibly concatenated) input feature map,
    fusing it with channel-adapted backbone features at each scale, and
    projects to `out_dim` channels at 4x resolution.
    """
    def __init__(self, in_dim, out_dim, decode_intermediate_input=True, hidden_dim=256, shortcut_dims=[24, 32, 96, 1280], align_corners=True):
        # NOTE(review): `shortcut_dims` is a mutable default list; harmless
        # here since it is never mutated, but worth cleaning up.
        super().__init__()
        self.align_corners = align_corners
        self.decode_intermediate_input = decode_intermediate_input
        self.conv_in = ConvGN(in_dim, hidden_dim, 1)
        # One conv per scale; channel width halves from the 8x stage onward.
        self.conv_16x = ConvGN(hidden_dim, hidden_dim, 3)
        self.conv_8x = ConvGN(hidden_dim, (hidden_dim // 2), 3)
        self.conv_4x = ConvGN((hidden_dim // 2), (hidden_dim // 2), 3)
        # 1x1 adapters matching backbone shortcut channels to decoder widths.
        self.adapter_16x = nn.Conv2d(shortcut_dims[(- 2)], hidden_dim, 1)
        self.adapter_8x = nn.Conv2d(shortcut_dims[(- 3)], hidden_dim, 1)
        self.adapter_4x = nn.Conv2d(shortcut_dims[(- 4)], (hidden_dim // 2), 1)
        self.conv_out = nn.Conv2d((hidden_dim // 2), out_dim, 1)
        self._init_weight()
    def forward(self, inputs, shortcuts):
        """Decode `inputs` using multi-scale `shortcuts`; returns 4x logits."""
        if self.decode_intermediate_input:
            # Concatenate all intermediate inputs along channels.
            x = torch.cat(inputs, dim=1)
        else:
            x = inputs[(- 2)]
        x = F.relu_(self.conv_in(x))
        x = F.relu_(self.conv_16x((self.adapter_16x(shortcuts[(- 2)]) + x)))
        x = F.interpolate(x, size=shortcuts[(- 3)].size()[(- 2):], mode='bilinear', align_corners=self.align_corners)
        x = F.relu_(self.conv_8x((self.adapter_8x(shortcuts[(- 3)]) + x)))
        x = F.interpolate(x, size=shortcuts[(- 4)].size()[(- 2):], mode='bilinear', align_corners=self.align_corners)
        # The last element of `inputs` is added as a residual at 4x scale.
        x = F.relu_(self.conv_4x(((self.adapter_4x(shortcuts[(- 4)]) + x) + inputs[(- 1)])))
        x = self.conv_out(x)
        return x
    def _init_weight(self):
        """Xavier-initialize all weight tensors (1-D params/biases untouched)."""
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)
def ResidualTabularWPrior(num_classes, dim_in, coupling_layers, k, means_r=1.0, cov_std=1.0, nperlayer=1, acc=0.9):
    """Build a tabular residual flow with a Gaussian-mixture SSL prior.

    Args:
        num_classes: number of mixture components (one mean per class).
        dim_in: dimensionality of the tabular input.
        coupling_layers: residual blocks per flow.
        k: hidden width of each block.
        means_r: multiplier on the mean separation radius.
        cov_std: standard deviation scale of the mixture components.
        nperlayer: accepted for interface compatibility; unused.
        acc: target class-separation accuracy used to derive mean spacing.

    Returns:
        The flow model with its `prior` attribute set. Requires CUDA.
    """
    device = torch.device('cuda')
    inv_cov_std = torch.ones((num_classes,), device=device) / cov_std
    model = TabularResidualFlow(in_dim=dim_in, hidden_dim=k, num_per_block=coupling_layers)
    # Mean separation required for a Gaussian classifier to reach `acc`.
    dist_scaling = np.sqrt((-8) * np.log(1 - acc))
    means = utils.get_means('random', r=(means_r * dist_scaling), num_means=num_classes, trainloader=None, shape=dim_in, device=device)
    # Place the first two means symmetrically at +/- dist_scaling / 2.
    means[0] /= means[0].norm()
    means[0] *= (dist_scaling / 2)
    means[1] = -means[0]
    model.prior = SSLGaussMixture(means, inv_cov_std, device=device)
    # (Removed `means_np = means.cpu().numpy()`: an unused local that only
    # forced a needless GPU->CPU copy.)
    return model
def clf_video():
    """Run the video classifier on a sample clip and print its outputs."""
    classifier = Classifier(cls_model_path)
    result = classifier.classify_video('./data/video/123.mp4')
    # Print both result sections with the same header/item format.
    sections = (('# metadata -------', result['metadata']), ('# preds -------', result['preds']))
    for (header, section) in sections:
        print(header)
        for (key, value) in section.items():
            print(key, value)
def init_pipe_distributed(rank, world_size):
    """Initialize atorch distributed state for pipeline parallelism.

    Exports the launcher-style env vars for this process, initializes the
    NCCL backend when CUDA is available (gloo otherwise), and creates a
    'pipe' parallel group spanning all ranks if one does not exist yet.
    """
    seed_everything()
    if (not torch.distributed.is_initialized()):
        # Emulate what a torchrun-style launcher would export.
        os.environ['LOCAL_RANK'] = str(rank)
        os.environ['RANK'] = str(rank)
        os.environ['WORLD_SIZE'] = str(world_size)
        os.environ['NPROC_PER_NODE'] = str(world_size)
        if torch.cuda.is_available():
            atorch.init_distributed('nccl')
            torch.cuda.set_device(rank)
        else:
            atorch.init_distributed('gloo')
    if (parallel_group('pipe') is None):
        # All ranks belong to a single pipeline-parallel group.
        gpu_partition = ([('pipe', world_size)], None)
        create_parallel_group(gpu_partition)
def get_variant_spec_image(universe, domain, task, policy, algorithm, *args, **kwargs):
    """Extend the base variant spec with a convnet preprocessor for image tasks."""
    variant_spec = get_variant_spec_base(universe, domain, task, policy, algorithm, *args, **kwargs)
    is_image_task = ('image' in task.lower()) or ('image' in domain.lower())
    if is_image_task:
        conv_kwargs = {
            'image_shape': variant_spec['training']['environment_params']['image_shape'],
            'output_size': M,
            'conv_filters': (4, 4),
            'conv_kernel_sizes': ((3, 3), (3, 3)),
            'pool_type': 'MaxPool2D',
            'pool_sizes': ((2, 2), (2, 2)),
            'pool_strides': (2, 2),
            'dense_hidden_layer_sizes': (),
        }
        preprocessor_params = {'type': 'convnet_preprocessor', 'kwargs': conv_kwargs}
        # Policy and Q nets each get their own shallow copy, as before.
        variant_spec['policy_params']['kwargs']['preprocessor_params'] = preprocessor_params.copy()
        variant_spec['Q_params']['kwargs']['preprocessor_params'] = preprocessor_params.copy()
    return variant_spec
def get_data(name: str, data_type, transform=None, target_transform=None, user_list=None):
    """Build the dataset object for config `name` and split `data_type`.

    Per-dataset default transforms are applied when `transform` /
    `target_transform` are not supplied. `user_list` is only honored by the
    federated datasets (FEMNIST, CelebA).

    Raises:
        ValueError: if `name` does not resolve to a supported dataset config.
    """
    dataset = get_config_by_name(name)
    if (dataset == femnist):
        assert (data_type in ['train', 'test'])
        if (transform is None):
            transform = transforms.Compose([transforms.ToTensor()])
        if (target_transform is None):
            # Labels -> long tensor -> one-hot float vector.
            target_transform = transforms.Compose([DataToTensor(dtype=torch.long), OneHot(dataset.NUM_CLASSES, to_float=True)])
        return FEMNIST(root=join('datasets', 'FEMNIST'), train=(data_type == 'train'), download=True, transform=transform, target_transform=target_transform, user_list=user_list)
    elif (dataset == celeba):
        assert (data_type in ['train', 'test'])
        if (transform is None):
            transform = transforms.Compose([transforms.Resize((84, 84)), transforms.ToTensor()])
        if (target_transform is None):
            target_transform = transforms.Compose([DataToTensor(dtype=torch.long)])
        return CelebA(root=join('datasets', 'CelebA'), train=(data_type == 'train'), download=True, transform=transform, target_transform=target_transform, user_list=user_list)
    elif (dataset == cifar10):
        assert (data_type in ['train', 'test'])
        if (transform is None):
            # Standard CIFAR-10 channel statistics; train split also gets
            # flip/crop augmentation.
            mean = [0.4914, 0.4822, 0.4465]
            std = [0.2023, 0.1994, 0.201]
            if (data_type == 'train'):
                transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), transforms.Normalize(mean, std)])
            else:
                transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if (target_transform is None):
            target_transform = transforms.Compose([DataToTensor(dtype=torch.long), OneHot(dataset.NUM_CLASSES, to_float=True)])
        return torchvision.datasets.CIFAR10(root=join('datasets', 'CIFAR10'), train=(data_type == 'train'), download=True, transform=transform, target_transform=target_transform)
    elif (dataset == imagenet100):
        # ImageNet-100 uses 'val' instead of 'test' for its eval split.
        assert (data_type in ['train', 'val'])
        if (transform is None):
            # Standard ImageNet channel statistics.
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
            if (data_type == 'train'):
                transform = transforms.Compose([transforms.RandomResizedCrop(imagenet100.IMAGE_SIZE), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean, std)])
            else:
                transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(imagenet100.IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize(mean, std)])
        return ImageNet100(root=join('datasets', 'ImageNet100'), train=(data_type == 'train'), transform=transform, target_transform=None)
    else:
        raise ValueError('{} dataset is not supported.'.format(name))
class JumanjiToGymWrapper(gym.Env):
    """Adapt a functional Jumanji `Environment` to the Gym API.

    reset/step are jit-compiled closures over the wrapped env; the Jumanji
    state is threaded through `self._state` between calls.
    """
    _gym_disable_underscore_compat: ClassVar[bool] = True
    def __init__(self, env: Environment, seed: int=0, backend: Optional[str]=None):
        self._env = env
        self.metadata: Dict[(str, str)] = {}
        self._key = jax.random.PRNGKey(seed)
        self.backend = backend
        self._state = None
        self.observation_space = specs.jumanji_specs_to_gym_spaces(self._env.observation_spec())
        self.action_space = specs.jumanji_specs_to_gym_spaces(self._env.action_spec())
        def reset(key: chex.PRNGKey) -> Tuple[(State, Observation, Optional[Dict])]:
            (state, timestep) = self._env.reset(key)
            return (state, timestep.observation, timestep.extras)
        # Jit once so each env reset reuses the compiled function.
        self._reset = jax.jit(reset, backend=self.backend)
        def step(state: State, action: chex.Array) -> Tuple[(State, Observation, chex.Array, bool, Optional[Any])]:
            (state, timestep) = self._env.step(state, action)
            done = jnp.bool_(timestep.last())
            return (state, timestep.observation, timestep.reward, done, timestep.extras)
        self._step = jax.jit(step, backend=self.backend)
    def reset(self, *, seed: Optional[int]=None, return_info: bool=False, options: Optional[dict]=None) -> Union[(GymObservation, Tuple[(GymObservation, Optional[Any])])]:
        """Reset the env; optionally reseed and also return an info dict."""
        if (seed is not None):
            self.seed(seed)
        (key, self._key) = jax.random.split(self._key)
        (self._state, obs, extras) = self._reset(key)
        # Convert the jax observation into numpy-based gym types.
        obs = jumanji_to_gym_obs(obs)
        if return_info:
            info = jax.tree_util.tree_map(np.asarray, extras)
            return (obs, info)
        else:
            return obs
    def step(self, action: chex.ArrayNumpy) -> Tuple[(GymObservation, float, bool, Optional[Any])]:
        """Advance one step; returns (obs, reward, terminated, info)."""
        action = jnp.array(action)
        (self._state, obs, reward, done, extras) = self._step(self._state, action)
        obs = jumanji_to_gym_obs(obs)
        reward = float(reward)
        terminated = bool(done)
        info = jax.tree_util.tree_map(np.asarray, extras)
        return (obs, reward, terminated, info)
    def seed(self, seed: int=0) -> None:
        """Re-seed the internal PRNG key."""
        self._key = jax.random.PRNGKey(seed)
    def render(self, mode: str='human') -> Any:
        # `mode` is ignored; rendering is delegated to the wrapped env.
        del mode
        return self._env.render(self._state)
    def close(self) -> None:
        self._env.close()
    def unwrapped(self) -> Environment:
        # NOTE(review): gym exposes `unwrapped` as a property; the decorator
        # may have been stripped in this copy — confirm before calling.
        return self._env
def get_codegen(parser_type):
    """Return the (memoized) code generator for `parser_type`.

    Generators are created lazily and cached in the module-level
    `_codegen_dict` so each backend is instantiated at most once.

    Raises:
        ValueError: if `parser_type` has no registered generator (the
            original fell through and crashed with NameError instead).
    """
    if parser_type not in _codegen_dict:
        generator_classes = {
            ParserTypeEnum.LATEX: CodeGenLatex,
            ParserTypeEnum.NUMPY: CodeGenNumpy,
            ParserTypeEnum.EIGEN: CodeGenEigen,
            ParserTypeEnum.MATHJAX: CodeGenMathjax,
            ParserTypeEnum.MACROMATHJAX: CodeGenMacroMathjax,
            ParserTypeEnum.MATHML: CodeGenMathML,
            ParserTypeEnum.MATLAB: CodeGenMatlab,
        }
        if parser_type not in generator_classes:
            raise ValueError('Unsupported parser type: {}'.format(parser_type))
        _codegen_dict[parser_type] = generator_classes[parser_type]()
    return _codegen_dict[parser_type]
class ActorWatcher(NodeWatcher):
    """Watch Ray actors of a job and translate queue events into Node updates."""
    def __init__(self, job_name, namespace):
        self._job_name = job_name
        self._namespace = namespace
        self._ray_client = RayClient.singleton_instance(job_name, namespace)
        self.event_queue = RayEventQueue.singleton_instance()
    def watch(self):
        """Generator: block on the event queue and yield parsed events forever."""
        while True:
            i = self.event_queue.get()
            event = parse_event(i)
            logger.info(i)
            (yield event)
    def list(self) -> List[Node]:
        """List the job's Ray actors as Node objects.

        NOTE(review): the constructed `nodes` list is discarded and an empty
        list is returned — looks like a bug (or a deliberate stub); confirm
        whether this should `return nodes`.
        """
        nodes: List[Node] = []
        for (name, status) in self._ray_client.list_actor():
            (actor_type, actor_index) = parse_type_id_from_actor_name(name)
            node = Node(node_type=actor_type, node_id=actor_index, name=actor_index, rank_index=actor_index, status=status, start_time=None)
            nodes.append(node)
        return []
def plot_attentions(attention: np.ndarray, src_seq, word: str, effective_doc_len):
    """Render a (layers x heads) HTML table of per-head attention plots.

    `attention` has shape (num_layers, num_heads, len_att); `src_seq` is
    trimmed to len_att before rendering each head.
    """
    (num_layer, num_heads, len_att) = attention.shape
    trimmed_seq = src_seq[:len_att]
    layer_rows = []
    for layer_idx in range(num_layer):
        layer_attn = attention[layer_idx]
        head_cells = []
        for head_idx in range(num_heads):
            # Only the rendered HTML is used; the entropy value is discarded.
            (head_html, _head_entropy) = plot_single_head_attention(layer_attn[head_idx], trimmed_seq, effective_doc_len)
            head_cells.append(head_html)
        layer_rows.append(f"<tr><th>{layer_idx}</th>{''.join(head_cells)}</tr>")
    head_header = ''.join(f'<th>{i}</th>' for i in range(num_heads))
    header_row = f'<tr><th>{word}</th>{head_header}</tr>'
    return f"<table rules='all'>{header_row}{''.join(layer_rows)}</table>"
def generate(text_list, attention_list, latex_file, color='red', rescale_value=False):
    """Write a standalone LaTeX document highlighting each word by attention.

    Each word in `text_list` is rendered in a colorbox whose intensity is
    the corresponding value in `attention_list`.

    Args:
        text_list: words to render (sanitized for LaTeX by `clean_word`).
        attention_list: one numeric weight per word (same length).
        latex_file: output .tex path.
        color: LaTeX color name used for the highlight.
        rescale_value: rescale the attention weights before rendering.
    """
    assert len(text_list) == len(attention_list)
    if rescale_value:
        attention_list = rescale(attention_list)
    text_list = clean_word(text_list)
    # Explicit utf-8: the platform default encoding can break the CJK text
    # this template is designed to typeset.
    with open(latex_file, 'w', encoding='utf-8') as f:
        f.write('\\documentclass[varwidth]{standalone}\n\\special{papersize=210mm,297mm}\n\\usepackage{color}\n\\usepackage{tcolorbox}\n\\usepackage{CJK}\n\\usepackage{adjustbox}\n\\tcbset{width=0.9\\textwidth,boxrule=0pt,colback=red,arc=0pt,auto outer arc,left=0pt,right=0pt,boxsep=5pt}\n\\begin{document}\n\\begin{CJK*}{UTF8}{gbsn}' + '\n')
        # Build the highlighted-word body with a join instead of quadratic +=.
        boxes = [('\\colorbox{%s!%s}{' % (color, att)) + '\\strut ' + word + '} ' for (word, att) in zip(text_list, attention_list)]
        string = '{\\setlength{\\fboxsep}{0pt}\\colorbox{white!0}{\\parbox{0.9\\textwidth}{' + '\n' + ''.join(boxes) + '\n}}}'
        f.write(string + '\n')
        f.write('\\end{CJK*}\n\\end{document}')
class LDMTextToImagePipeline(metaclass=DummyObject):
    """Import-time placeholder for the LDM text-to-image pipeline.

    Used when the `torch`/`transformers` backends are missing: every entry
    point raises an informative install-instructions error via
    `requires_backends` instead of an obscure ImportError.
    """
    _backends = ['torch', 'transformers']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])
    def from_config(cls, *args, **kwargs):
        # NOTE(review): upstream marks this @classmethod; the decorator may
        # have been stripped in this copy.
        requires_backends(cls, ['torch', 'transformers'])
    def from_pretrained(cls, *args, **kwargs):
        # NOTE(review): same classmethod-decorator caveat as from_config.
        requires_backends(cls, ['torch', 'transformers'])
class DynamicMemory(torch.nn.Module):
    """Dynamic-memory RNN wrapper, optionally bidirectional.

    Runs a DynamicMemoryCell over variable-length sequences: the batch is
    sorted by length for the dynamic RNN and restored to input order after.
    (The original docstring was truncated to "'Based on" in this copy; the
    upstream attribution could not be recovered.)
    """
    def __init__(self, nb_slots=4, memory_size=300, bidirectional=False, **kwargs):
        super(DynamicMemory, self).__init__()
        self.cell = DynamicMemoryCell(nb_slots, memory_size)
        self.bidirectional = bidirectional
    def forward(self, inputs, lengths):
        """Return per-timestep hidden states (feature dim doubled if bidirectional)."""
        # Sort by length (descending), as dynamic_rnn expects.
        (lengths_sorted, inputs_sorted_idx) = lengths.sort(descending=True)
        inputs_sorted = inputs[inputs_sorted_idx]
        inputs_lenghts = list(lengths_sorted.data)
        (hiddens, outputs) = dynamic_rnn(self.cell, inputs_sorted, inputs_lenghts)
        hiddens = torch.stack(hiddens, dim=1)
        if self.bidirectional:
            # Second pass over reversed sequences, concatenated on features.
            (hiddens_rev, outputs_rev) = dynamic_rnn_reversed(self.cell, inputs_sorted, inputs_lenghts)
            hiddens_rev = torch.stack(hiddens_rev, dim=1)
            hiddens = torch.cat([hiddens, hiddens_rev], dim=(- 1))
        # Undo the length sort to restore the caller's batch order.
        (_, inputs_unsorted_idx) = inputs_sorted_idx.sort(descending=False)
        hiddens = hiddens[inputs_unsorted_idx]
        return hiddens
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.