code stringlengths 17 6.64M |
|---|
def get_dataloaders(data_path, tasks, num_frames, batch_size=64, batch_size_val=4, transform=None, num_workers=0, load_to_mem=False, pin_memory=False, remove_last_step_in_traj=True, removed_actions=None):
    """Build train/val/test DataLoaders over ExpertData trajectories.

    :param tasks: list of task keys; each must appear in the module-level KEYS.
    :param transform: dict of per-task transforms (a copy is taken; the
        caller's dict is no longer mutated). None means empty.
    :param removed_actions: actions filtered out of the trajectories.
    :returns: dict with 'train'/'val'/'test' DataLoaders.
    """
    # BUG FIX: `transform={}` and `removed_actions=[]` were mutable defaults
    # shared across calls; use None sentinels instead.
    transform = {} if transform is None else dict(transform)
    removed_actions = [] if removed_actions is None else removed_actions
    if 'rgb_filled' in tasks:
        transform['rgb_filled'] = transforms.Compose([transforms.CenterCrop([256, 256]), transforms.Resize(256), transforms.ToTensor(), MAKE_RESCALE_0_1_NEG1_POS1(3)])
    keys = [t for t in tasks if t in KEYS]
    assert len(keys) == len(tasks), f'unrecognized task in {tasks} not in {KEYS}! cannot be added to Dataset'
    dataloaders = {}
    # Same dataset construction for all splits; only split name, batch size
    # and shuffling differ.
    for split, bs, shuffle in (('train', batch_size, True), ('val', batch_size_val, False), ('test', batch_size_val, False)):
        dataset = ExpertData(data_path, keys=keys, num_frames=num_frames, split=split, transform=transform, load_to_mem=load_to_mem, remove_last_step_in_traj=remove_last_step_in_traj, removed_actions=removed_actions)
        dataloaders[split] = DataLoader(dataset, batch_size=bs, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
    return dataloaders
|
def get_dataloaders(data_path, inputs_and_outputs, batch_size=64, batch_size_val=4, transform=None, num_workers=0, load_to_mem=False, pin_memory=False):
    """Build train/val/test DataLoaders for FashionMNIST rooted at `data_path`.

    BUG FIX: the dataset constructors previously referenced the undefined name
    `root`; they now use the `data_path` parameter.
    """
    dataloaders = {}
    train_set = torchvision.datasets.FashionMNIST(data_path, train=True, transform=transform, target_transform=None, download=True)
    dataloaders['train'] = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
    # NOTE(review): 'val' and 'test' both use the official test split — confirm intended.
    val_set = torchvision.datasets.FashionMNIST(data_path, train=False, transform=transform, target_transform=None, download=False)
    dataloaders['val'] = DataLoader(val_set, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
    test_set = torchvision.datasets.FashionMNIST(data_path, train=False, transform=transform, target_transform=None, download=False)
    dataloaders['test'] = DataLoader(test_set, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
    return dataloaders
|
class iCIFAR100(torchvision.datasets.CIFAR100):
    """CIFAR-100 restricted to a subset of classes, with labels remapped to a
    contiguous 0..K-1 range (incremental-learning setup)."""

    def __init__(self, root, class_idxs, train=True, transform=None, target_transform=None, download=False):
        super().__init__(root, train, transform, target_transform, download)
        self.class_idxs = list(class_idxs)
        self.old_targets = self.targets
        # Keep only samples whose original label is in the requested subset.
        keep = np.isin(self.targets, self.class_idxs)
        self.data = self.data[keep]
        self.targets = np.int32(self.targets)[keep]
        # Build the old<->new label index maps.
        self.new_to_old_class_idx = np.sort(np.unique(self.targets))
        self.old_to_new_class_idx = np.full((np.max(self.targets) + 1,), -1, dtype=np.int32)
        self.old_to_new_class_idx[self.new_to_old_class_idx] = np.arange(len(self.new_to_old_class_idx))
        # Remap labels and hand them to torch.
        self.targets = torch.LongTensor(self.old_to_new_class_idx[self.targets])
        self.classes = [c for c in self.classes if self.class_to_idx[c] in self.class_idxs]
|
def get_dataloaders(data_path, targets, sources=None, masks=None, tasks=None, epochlength=20000, epochs_until_cycle=1, batch_size=64, batch_size_val=4, transform=None, num_workers=0, load_to_mem=False, pin_memory=False, imsize=256):
    """
    Targets can either be of the form [iterable1, iterable2]
    or of the form 'cifarXX-YY'
    """
    if transform is None:
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    # Resolve each target spec into an explicit iterable of class indices.
    classes = []
    for target in targets:
        if isinstance(target[0], str):
            lo, hi = (int(v) for v in target[0].lower().replace('cifar', '').split('-'))
            classes.append(np.arange(lo, hi + 1))
        else:
            classes.append(target)
    train_dls = []
    for i, task in enumerate(tqdm(classes, 'Loading training data')):
        # Only the first dataset triggers the download; later ones reuse it.
        ds = iCIFAR100(data_path, task, train=True, transform=transform, target_transform=None, download=(i == 0))
        train_dls.append(DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory))
    val_dls = []
    for task in tqdm(classes, 'Loading validation data'):
        ds = iCIFAR100(data_path, task, train=False, transform=transform, target_transform=None, download=False)
        val_dls.append(DataLoader(ds, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory))
    return {
        'train': CyclingDataLoader(train_dls, epochlength, epochs_until_cycle=epochs_until_cycle),
        'val': ConcatenatedDataLoader(val_dls),
        'test': [],
    }
|
def get_limited_dataloaders(data_path, sources, targets, masks, tasks=None, epochlength=20000, batch_size=64, batch_size_val=4, transform=None, num_workers=0, load_to_mem=False, pin_memory=False, imsize=256):
    """
    Targets can either be of the form [iterable1, iterable2]
    or of the form 'cifarXX-YY'
    """
    if transform is None:
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    # Resolve each target spec into an explicit iterable of class indices.
    classes = []
    for target in targets:
        if isinstance(target[0], str):
            lo, hi = (int(v) for v in target[0].lower().replace('cifar', '').split('-'))
            classes.append(np.arange(lo, hi + 1))
        else:
            classes.append(target)
    train_dls = []
    for i, task in enumerate(tqdm(classes, 'Loading training data')):
        # Only the first dataset triggers the download; note shuffle is off here.
        ds = iCIFAR100(data_path, task, train=True, transform=transform, target_transform=None, download=(i == 0))
        train_dls.append(DataLoader(ds, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory))
    val_dls = []
    for task in tqdm(classes, 'Loading validation data'):
        ds = iCIFAR100(data_path, task, train=False, transform=transform, target_transform=None, download=False)
        val_dls.append(DataLoader(ds, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory))
    return {
        # 'Limited' setting: only the first task's loader is ever exposed.
        'train': KthDataLoader(train_dls, k=0, epochlength=1000),
        'val': KthDataLoader(val_dls, k=0),
        'test': [],
    }
|
def get_cifar_dataloaders(data_path, sources, targets, masks, tasks=None, epochlength=20000, batch_size=64, batch_size_val=4, transform=None, num_workers=0, load_to_mem=False, pin_memory=False, imsize=256):
    """
    Targets can either be of the form [iterable1, iterable2]
    or of the form 'cifarXX-YY'
    """
    if transform is None:
        # Default: augmentation (flip + padded crop) for training only.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        transform_train = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ])
        transform_val = transforms.Compose([transforms.ToTensor(), normalize])
    else:
        transform_train = transform_val = transform
    train_set = torchvision.datasets.CIFAR10(data_path, train=True, transform=transform_train, target_transform=None, download=True)
    val_set = torchvision.datasets.CIFAR10(data_path, train=False, transform=transform_val, target_transform=None, download=False)
    return {
        'train': DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory),
        'val': DataLoader(val_set, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory),
        'test': [],
    }
|
def cycle_dl(dl):
    """Yield items from `dl` forever, restarting iteration whenever it is exhausted."""
    while True:
        yield from dl
|
class CyclingDataLoader(object):
    """Round-robin over several task dataloaders: each epoch draws only from a
    single task's loader, advancing to the next task on the following epoch."""

    def __init__(self, dls, epoch_length_per_dl=None, start_dl=0, epochs_until_cycle=0, zip_idx=True):
        """
        :param dls: list of dataloaders, one for each task
        :param epoch_length_per_dl: number of items to cycle thru dataset
        :param start_dl:
        :param epochs_until_cycle: num epochs to train on the first task exclusively before training on others
        :param zip_idx: when calling __next__, return the task_idx as well
        """
        self.epochlength = epoch_length_per_dl
        self.dls = dls
        self.start_dl = start_dl
        # One less than the first index; __iter__ advances it before use.
        self.curr_iter_idx = start_dl - 1
        self.epochs_until_cycle = epochs_until_cycle + 1
        self.zip_idx = zip_idx

    def __iter__(self):
        self.curr_iter_idx = (self.curr_iter_idx + 1) % len(self.dls)
        if self.epochs_until_cycle > 0:
            # Warm-up phase: stay on the starting dataloader.
            self.curr_iter_idx = self.start_dl
            self.epochs_until_cycle -= 1
        self.curr_iter = cycle_dl(self.dls[self.curr_iter_idx])
        self.count = 0
        return self

    def __next__(self):
        if self.count == len(self):
            raise StopIteration
        self.count += 1
        item = next(self.curr_iter)
        return (self.curr_iter_idx, item) if self.zip_idx else item

    @property
    def batch_size(self):
        return self.dls[self.curr_iter_idx].batch_size

    def __len__(self):
        # Without an explicit epoch length, defer to the active dataloader.
        if self.epochlength is None:
            return len(self.dls[self.curr_iter_idx])
        return self.epochlength // self.dls[self.curr_iter_idx].batch_size + 1

    def get_last_dl(self):
        return self.curr_iter_idx, self.dls[self.curr_iter_idx]
|
class ErrorPassingCyclingDataLoader(CyclingDataLoader):
    """CyclingDataLoader that, when a datapoint fails to load, warns and
    resamples instead of crashing; StopIteration still ends the epoch."""

    def __next__(self):
        try:
            return super().__next__()
        except StopIteration:
            # End of epoch — propagate untouched.
            raise
        except Exception as e:
            warnings.warn('problem with this datapoint, resampling')
            print(e)
            return self.__next__()
|
class ConcatenatedDataLoader(object):
    """Chains several dataloaders into one epoch, optionally tagging every
    batch with the index of the loader it came from."""

    def __init__(self, dls, zip_idx=True):
        self.dls = dls
        self.curr_iter_idx = 0
        self.zip_idx = zip_idx

    def __iter__(self):
        self.curr_iter_idx = 0
        self.curr_iter = iter(self.dls[self.curr_iter_idx])
        return self

    def __next__(self):
        while True:
            try:
                batch = next(self.curr_iter)
            except StopIteration:
                self.curr_iter_idx += 1
                if self.curr_iter_idx >= len(self.dls):
                    # All loaders exhausted: reset for a potential next epoch.
                    self.curr_iter_idx = 0
                    raise
                self.curr_iter = iter(self.dls[self.curr_iter_idx])
                continue
            return (self.curr_iter_idx, batch) if self.zip_idx else batch

    def __len__(self):
        return sum(len(d) for d in self.dls)
|
class ErrorPassingConcatenatedDataLoader(ConcatenatedDataLoader):
    """ConcatenatedDataLoader that, when a datapoint fails to load, warns and
    resamples instead of crashing; StopIteration still ends the epoch."""

    def __next__(self):
        try:
            return super().__next__()
        except StopIteration:
            # End of epoch — propagate untouched.
            raise
        except Exception as e:
            warnings.warn('problem with this datapoint, resampling')
            print(e)
            return self.__next__()
|
class KthDataLoader(object):
    """Exposes only the k-th of several dataloaders; with `epochlength` set,
    the loader is cycled and the epoch is cut off after that many steps."""

    def __init__(self, dls, k=0, epochlength=None):
        self.dls = dls
        self.dl = dls[k]
        self.k = k
        self.epochlength = epochlength

    def __iter__(self):
        self.count = 0
        # With an explicit epoch length we cycle the underlying loader forever
        # and stop ourselves in __next__; otherwise iterate it once.
        self.curr_iter = cycle_dl(self.dl) if self.epochlength else iter(self.dl)
        return self

    def __next__(self):
        if self.epochlength and self.count == self.epochlength:
            raise StopIteration
        self.count += 1
        return next(self.curr_iter)

    def __len__(self):
        return len(self.dl)
|
def get_splits(split_path):
    """Parse a split csv of rows (name, is_train, is_val, is_test) into sorted
    per-split name lists, skipping entries in `forbidden_buildings`."""
    splits = {'train': [], 'val': [], 'test': []}
    with open(split_path) as csvfile:
        for name, is_train, is_val, is_test in csv.reader(csvfile, delimiter=','):
            if name in forbidden_buildings:
                continue
            # A building may belong to several splits at once.
            if is_train == '1':
                splits['train'].append(name)
            if is_val == '1':
                splits['val'].append(name)
            if is_test == '1':
                splits['test'].append(name)
    return {split: sorted(names) for split, names in splits.items()}
|
class IdentityFn(nn.Module):
    """Module that returns its input unchanged; accepts and ignores any
    constructor/forward arguments, and gradient toggling is a no-op."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x, **kwargs):
        return x

    def requires_grad_(self, *args, **kwargs):
        # Intentionally a no-op: there are no parameters to toggle.
        pass
|
def identity_fn(x):
    """Return `x` unchanged (function-form identity)."""
    return x
|
class ZeroFn(nn.Module):
    """Module that ignores all inputs and returns the scalar 0.0; gradient
    toggling is a no-op (there are no parameters)."""

    def forward(self, *args, **kwargs):
        return 0.0

    def requires_grad_(self, *args, **kwargs):
        # Intentionally a no-op.
        pass
|
def zero_fn(x):
    """Ignore `x` and return the scalar 0.0 (function-form zero)."""
    return 0.0
|
class ScaleLayer(nn.Module):
    """Multiplies its input by a single learnable scalar."""

    def __init__(self, init_value=0.001):
        super().__init__()
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input):
        return input * self.scale
|
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an nn.Module."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
|
class ResidualLayer(nn.Module):
    """Residual wrapper: forward(x) = x + net(x)."""

    def __init__(self, net: nn.Module):
        super().__init__()
        self.net = net

    def forward(self, x):
        return x + self.net(x)
|
class EvalOnlyModel(nn.Module):
    """Base class for models that may be pinned to eval mode.

    When `eval_only` is True, `train()` keeps the module in eval mode and
    freezes all parameters, warning if training mode was requested.
    """

    def __init__(self, eval_only=None, train=False, **kwargs):
        """
        :param eval_only: if True the model can never enter training mode;
            None defaults to True with a warning.
        :param train: deprecated, ignored (a warning is emitted).
        """
        super().__init__()
        if eval_only is None:
            warnings.warn(f'Model eval_only flag is not set for {type(self)}. Defaulting to True')
            eval_only = True
        if train:
            warnings.warn('Model train flag is deprecated')
        self.eval_only = eval_only

    def forward(self, x, cache={}, time_idx: int = -1):
        # Abstract: subclasses implement the actual computation.
        pass

    def train(self, train=True):
        """Set training mode, honoring `eval_only`.

        BUG FIX: added the standard `nn.Module.train` default (mode=True) and
        made the eval_only branch return `self` (it previously returned None,
        breaking the `model.train(...)` chaining contract).
        """
        if self.eval_only:
            super().train(False)
            # Freeze everything so optimizers see no trainable parameters.
            for p in self.parameters():
                p.requires_grad = False
            if train:
                warnings.warn("Ignoring 'train()' in TaskonomyEncoder since 'eval_only' was set during initialization.", RuntimeWarning)
            return self
        return super().train(train)
|
class EWC():
    """Elastic Weight Consolidation loss wrapper.

    Wraps a task loss and adds a quadratic penalty pulling the shared
    `model.base` parameters toward anchor weights snapshotted after each task,
    scaled by a diagonal Fisher-information estimate.
    """

    def __init__(self, loss_fn, model, coef=0.001, avg_tasks=False, n_samples_fisher=1000, **kwargs):
        # loss_fn must return a dict of losses containing the key 'total'.
        self.loss_fn = loss_fn
        self.model = model
        # Strength of the EWC penalty added to the total loss.
        self.coef = coef
        # If True, keep a single running-average anchor/Fisher instead of one per task.
        self.avg_tasks = avg_tasks
        # Parallel lists: per-task parameter snapshots and their Fisher diagonals.
        self.weights_anchor_list = []
        self.precision_matrices_list = []
        # Upper bound on samples consumed when estimating the Fisher diagonal.
        self.n_samples_fisher = n_samples_fisher
        self.n_tasks = 0

    def __call__(self, *args, **kwargs):
        # Returns the wrapped loss dict with the EWC penalty folded into
        # 'total' and exposed separately under 'weight_tying'.
        orig_losses = self.loss_fn(*args, **kwargs)
        regularization_loss = self.compute_penalty(cur_model=self.model)
        orig_losses.update({'total': (orig_losses['total'] + (self.coef * regularization_loss)), 'weight_tying': regularization_loss})
        return orig_losses

    def compute_penalty(self, cur_model):
        # Sum of Fisher-weighted squared distances to every stored anchor.
        loss = torch.tensor(0.0).to(next(cur_model.parameters()).device)
        hits = 0
        for (weights_anchor, precision_matrices) in zip(self.weights_anchor_list, self.precision_matrices_list):
            for (name, param) in cur_model.base.named_parameters():
                if (name in precision_matrices):
                    hits += 1
                    _loss = (precision_matrices[name] * ((weights_anchor[name] - param) ** 2))
                    loss += _loss.sum()
        # If anchors exist but no parameter name matched, the penalty would be
        # silently zero — fail loudly instead.
        assert ((hits != 0) or (len(self.weights_anchor_list) == 0)), 'No parameters for computing ewc penalty, are you sure the names in model and precision_matrix match?'
        return loss

    def post_training_epoch(self, model, dataloader, post_training_cache, **kwargs):
        # After a task finishes: snapshot anchor weights and estimate the
        # Fisher. Results are memoized in `post_training_cache` so repeated
        # calls for the same task do not recompute the (expensive) Fisher.
        if ('weights_anchor' in post_training_cache):
            weights_anchor = post_training_cache['weights_anchor']
            precision_matrices = post_training_cache['precision_matrices']
        else:
            weights_anchor = copy.deepcopy({n: p.detach() for (n, p) in model.base.named_parameters() if p.requires_grad})
            precision_matrices = self._diag_fisher(model, dataloader, copy.deepcopy(weights_anchor), kwargs['cfg'])
            post_training_cache['weights_anchor'] = weights_anchor
            post_training_cache['precision_matrices'] = precision_matrices
        if self.avg_tasks:
            self.n_tasks += 1
            if (len(self.weights_anchor_list) == 0):
                self.weights_anchor_list.append(weights_anchor)
                self.precision_matrices_list.append(precision_matrices)
            else:
                # Fold the new task into the single running-average slot.
                self.weights_anchor_list[0] = self._compute_running_avg(self.weights_anchor_list[0], weights_anchor, self.n_tasks)
                self.precision_matrices_list[0] = self._compute_running_avg(self.precision_matrices_list[0], precision_matrices, self.n_tasks)
        else:
            self.weights_anchor_list.append(weights_anchor)
            self.precision_matrices_list.append(precision_matrices)

    def _compute_running_avg(self, running_avg, sample, n):
        # In-place running mean over a dict of tensors after n observations.
        for name in running_avg.keys():
            running_avg[name] = (((running_avg[name] * (n - 1)) / n) + (sample[name] / n))
        return running_avg

    def _diag_fisher(self, model, dataloader, precision_matrices, cfg):
        # Estimate the diagonal Fisher of `model.base` parameters by averaging
        # squared gradients of the loss over (up to n_samples_fisher) batches.
        # `precision_matrices` arrives as a deep-copied dict shaped like the
        # anchors; it is zeroed here and accumulated into.
        model.eval()
        for (name, param) in precision_matrices.items():
            param.data.zero_()
        n_samples = 0
        (task_idx, dataloader) = dataloader.get_last_dl()
        for batch_tuple in tqdm(dataloader, f'Computing fisher matrix for task {task_idx}'):
            # NOTE(review): len(batch_tuple) counts the tuple's fields, not the
            # batch items — confirm this is the intended sample count.
            n_samples += len(batch_tuple)
            if (n_samples > self.n_samples_fisher):
                break
            (x, label, masks) = process_batch_tuple(batch_tuple, task_idx, cfg)
            model.zero_grad()
            predictions = model(x, task_idx=task_idx)
            log_p = self.loss_fn(predictions, label, masks)
            log_p['total'].backward()
            for (name, param) in model.base.named_parameters():
                if (name in precision_matrices):
                    precision_matrices[name].data += ((param.grad.data.detach() ** 2) / len(dataloader))
        return precision_matrices
|
class FCN5MidFeedback(FCN5):
    """FCN5 variant that injects the cached previous representation after
    conv2 and adds it back residually to the output."""

    def __init__(self, kernel_size=3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        conv_settings = {
            3: {'kernel_size': 3, 'stride': 1, 'padding': 1},
            1: {'kernel_size': 1, 'stride': 1, 'padding': 0},
        }
        assert kernel_size in conv_settings, f'kernel size not recognized ({kernel_size})'
        net_kwargs = conv_settings[kernel_size]
        # Small bottleneck (8 -> 64 -> 8) applied to the fed-back representation.
        self.fb_conv1 = _make_layer(8, 64, **net_kwargs)
        self.fb_conv2 = _make_layer(64, 8, **net_kwargs)
        self.feedback_net = nn.Sequential(self.fb_conv1, self.fb_conv2)

    def forward(self, x, task_idx: int = -1, cache={}):
        prev = cache['last_repr']
        feedback = upsampler(self.feedback_net(prev))
        # Tile 8 feedback channels up to the 256 channels produced by conv2.
        feedback = feedback.repeat(1, 256 // 8, 1, 1)
        out = self.conv2(self.conv1(x))
        out = out + feedback
        skip_src = out
        out = self.conv5(self.conv4(self.conv3(out)))
        out = out + self.skip(skip_src)
        if self.normalize_outputs:
            out = self.groupnorm(out)
        return prev + out
|
class FCN5LateFeedback(FCN5):
    """FCN5 variant that processes the cached previous representation through
    a separate feedback net and sums it with the standard FCN5 output."""

    def __init__(self, kernel_size=3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        conv_settings = {
            3: {'kernel_size': 3, 'stride': 1, 'padding': 1},
            1: {'kernel_size': 1, 'stride': 1, 'padding': 0},
        }
        assert kernel_size in conv_settings, f'kernel size not recognized ({kernel_size})'
        net_kwargs = conv_settings[kernel_size]
        # Small bottleneck (8 -> 64 -> 8) applied to the fed-back representation.
        self.fb_conv1 = _make_layer(8, 64, **net_kwargs)
        self.fb_conv2 = _make_layer(64, 8, **net_kwargs)
        self.feedback_net = nn.Sequential(self.fb_conv1, self.fb_conv2)

    def forward(self, x, task_idx: int = -1, cache={}):
        prev = cache['last_repr']
        from_input = super().forward(x, task_idx)
        from_feedback = self.feedback_net(prev)
        return prev + from_input + from_feedback
|
class LifelongNetwork(nn.Module):
    """Interface for lifelong-learning networks: a task-conditioned forward
    plus lifecycle hooks, all no-ops here for subclasses to override."""

    def forward(self, x, task_idx=None):
        pass

    def start_training(self):
        # Hook called once before training begins.
        pass

    def start_task(self, task_idx, train):
        # Hook called when switching to a new task.
        pass
|
class LifelongSidetuneNetwork(LifelongNetwork):
    """Sidetuning network for lifelong learning.

    A shared base network is combined, per task, with a task-specific side
    network via a merge operator, then passed through a task-specific
    transfer network. Supports dense (all previous side outputs) and
    PNN-style (lateral caches) variants.
    """

    def __init__(self, dataset='taskonomy', use_baked_encoding=False, normalize_pre_transfer=True, base_class=None, base_weights_path=None, base_kwargs={}, transfer_class=None, transfer_weights_path=None, transfer_kwargs={}, side_class=None, side_weights_path=None, side_kwargs={}, tasks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], task_specific_transfer_kwargs=None, task_specific_side_kwargs=None, dense=False, pnn=False, merge_method='merge_operators.Alpha', base_uses_other_sensors=False):
        # NOTE(review): the dict/list defaults above are mutable and shared
        # across instances — confirm no caller mutates them.
        super().__init__()
        # Degenerate configurations: with no side (resp. base) network, force
        # the merge to pass through the other branch only.
        if (side_class is None):
            self.merge_method = 'merge_operators.BaseOnly'
        elif (base_class is None):
            self.merge_method = 'merge_operators.SideOnly'
        else:
            self.merge_method = merge_method
        self.dataset = dataset
        self.tasks = tasks
        self.dense = dense
        self.pnn = pnn
        # Class names arrive as strings and are resolved with eval().
        self.base = load_submodule(eval(str(base_class)), base_weights_path, base_kwargs, zero_fn)
        self.sides = nn.ModuleDict()
        self.transfers = nn.ModuleDict()
        self.merge_operators = nn.ModuleDict()
        task_specific_transfer_kwargs = ([{} for _ in self.tasks] if (task_specific_transfer_kwargs is None) else task_specific_transfer_kwargs)
        task_specific_side_kwargs = ([{} for _ in self.tasks] if (task_specific_side_kwargs is None) else task_specific_side_kwargs)
        if (self.dense and self.pnn):
            # PNN columns need to know their position and that they are dense.
            # NOTE(review): this loop rebinds `side_kwargs`, shadowing the
            # constructor argument of the same name used in the merge below —
            # confirm that is intended.
            for (side_kwargs, task_idx) in zip(task_specific_side_kwargs, tasks):
                side_kwargs['task_idx'] = task_idx
                side_kwargs['dense'] = True
        for task_idx in self.tasks:
            # nn.ModuleDict keys must be strings.
            task_id = str(task_idx)
            if (task_id in self.sides):
                continue
            # Global kwargs overridden by per-task kwargs.
            merged_side_kwargs = {**side_kwargs, **task_specific_side_kwargs[task_idx]}
            merged_transfer_kwargs = {**transfer_kwargs, **task_specific_transfer_kwargs[task_idx]}
            self.sides[task_id] = load_submodule(eval(str(side_class)), side_weights_path, merged_side_kwargs, ZeroFn())
            self.transfers[task_id] = load_submodule(eval(str(transfer_class)), transfer_weights_path, merged_transfer_kwargs, identity_fn)
            self.merge_operators[task_id] = eval(self.merge_method)(dense=self.dense, task_idx=task_idx, dataset=self.dataset)
        self.use_baked_encoding = use_baked_encoding
        self.base_uses_other_sensors = base_uses_other_sensors
        self.normalize_pre_transfer = normalize_pre_transfer
        if self.normalize_pre_transfer:
            self.groupnorm = nn.GroupNorm(8, 8, affine=False)
        self.cache = {}
        self.eval()

    def forward(self, x, task_idx=None, pass_i=0):
        if (task_idx is None):
            warnings.warn('No task_idx is passed, are you sure? (only should do this for torchsummary)')
            task_idx = 0
        task_id = str(task_idx)
        if (pass_i == 0):
            # First pass: compute the base encoding once and seed the cache.
            self.base_encoding = self.forward_base(x, task_idx)
            self.cache = {'last_repr': self.base_encoding}
        # Select the task-specific components.
        self.side = self.sides[task_id]
        self.transfer = self.transfers[task_id]
        self.merge_operator = self.merge_operators[task_id]
        if self.pnn:
            assert (isinstance(self.base, TaskonomyEncoderWithCache) or isinstance(self.base, ResnetiCifar44NoLinearWithCache)), 'PNN needs to have cache!'
            # PNN bases return (encoding, per-layer cache) tuples.
            (self.base_encoding, pnn_base_cache) = self.base_encoding
            pnn_side_caches = []
            prev_side_encodings = []
            pnn_full_cache = [pnn_base_cache, pnn_side_caches]
            if self.dense:
                # Run all earlier columns (detached) so the current column can
                # laterally connect to their caches.
                for t in range(task_idx):
                    (this_side_encoding, this_side_cache) = self.sides[str(t)](x, pnn_full_cache)
                    pnn_side_caches.append(this_side_cache)
                    prev_side_encodings.append(this_side_encoding.detach())
        if self.pnn:
            (self.side_encoding, _) = self.side(x, pnn_full_cache)
        else:
            self.side_encoding = self.side(x, cache=self.cache)
        self.cache['last_repr'] = self.side_encoding
        additional_encodings = []
        if (self.dense and self.pnn):
            additional_encodings = prev_side_encodings
        elif self.dense:
            # Dense (non-PNN): earlier side networks contribute detached outputs.
            additional_encodings = [self.sides[str(t)](x).detach() for t in range(task_idx)]
        self.merged_encoding = self.merge_operator(self.base_encoding, self.side_encoding, additional_encodings)
        if self.normalize_pre_transfer:
            self.normalized_merged_encoding = self.groupnorm(self.merged_encoding)
            self.transfered_encoding = self.transfer(self.normalized_merged_encoding)
        else:
            self.transfered_encoding = self.transfer(self.merged_encoding)
        return self.transfered_encoding

    def forward_base(self, x, task_idx):
        # Resolve the base encoding from the several supported input formats:
        # a dict of sensors, a (input, baked encoding) pair, or a raw input.
        if isinstance(x, dict):
            x_dict = x
            assert ('rgb_filled' in x_dict.keys()), 'need input images to work with'
            x = x_dict['rgb_filled']
            if ('taskonomy' in x_dict):
                # Precomputed ("baked") encoding shipped with the batch.
                base_encoding = x_dict['taskonomy']
            else:
                try:
                    base_encoding = self.base(x, task_idx)
                except TypeError:
                    # Base does not accept a task index.
                    base_encoding = self.base(x)
        elif self.use_baked_encoding:
            (x, base_encoding) = x
        else:
            if self.base_uses_other_sensors:
                assert (isinstance(x, list) and (len(x) > 1)), 'Must have additional sensors for base!'
                (x, other_sensors) = (x[0], x[1:])
                assert (len(other_sensors) == 1), 'Our system can only take ONE other_sensors'
                other_sensors = other_sensors[0]
                base_input = other_sensors
            else:
                base_input = x
            try:
                base_encoding = self.base(base_input, task_idx)
            except TypeError as e:
                # Base does not accept a task index.
                base_encoding = self.base(base_input)
        return base_encoding

    def forward_transfer(self, x):
        return self.transfer(x)

    def start_task(self, task_idx, train, print_alpha=False) -> list:
        # Move the active task's components (and, if dense, all earlier tasks')
        # to GPU and enable grads; park everything else on CPU, frozen.
        for task in self.tasks:
            task_id = str(task)
            if ((task == task_idx) or ((task < task_idx) and self.dense)):
                self.sides[task_id].cuda()
                self.transfers[task_id].cuda()
                self.merge_operators[task_id].cuda()
                self.sides[task_id].requires_grad_(train)
                self.transfers[task_id].requires_grad_(train)
                self.merge_operators[task_id].requires_grad_(train)
            else:
                self.sides[task_id].cpu()
                self.transfers[task_id].cpu()
                self.merge_operators[task_id].cpu()
                self.sides[task_id].requires_grad_(False)
                self.transfers[task_id].requires_grad_(False)
                self.merge_operators[task_id].requires_grad_(False)
                # Drop stale gradients so the optimizer cannot apply them.
                self._set_grad_to_none(self.sides[task_id])
                self._set_grad_to_none(self.transfers[task_id])
                self._set_grad_to_none(self.merge_operators[task_id])
        if print_alpha:
            alphas = [self.merge_operators[str(task_idx)].param for task_idx in self.tasks]
            print(f'''Setting grad True for task {task_idx} and others False.
                  Alphas: {alphas}''')
        # Return the currently trainable parameters (for optimizer setup).
        return [p for p in self.parameters() if p.requires_grad]

    def _set_grad_to_none(self, vars):
        # Accepts either a single Parameter or a Module.
        if isinstance(vars, nn.parameter.Parameter):
            vars.grad = None
        elif isinstance(vars, nn.Module):
            for p in vars.parameters():
                p.grad = None

    def start_training(self):
        # Forward the hook to the base model if it supports it.
        if hasattr(self.base, 'start_training'):
            self.base.start_training()
|
class MergeOperator(nn.Module):
    """Base class for operators that combine base/side (and optional extra)
    encodings into one representation."""

    def __init__(self, dense, task_idx, dataset):
        super().__init__()
        self.dense = dense
        self.task_idx = task_idx
        self.dataset = dataset

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]) -> torch.Tensor:
        # Subclasses override __call__ directly (bypassing nn.Module hooks).
        pass

    @property
    def weights(self):
        # Merge weights, if the operator has any.
        return []

    @property
    def param(self):
        # Scalar summary used for logging; -1 means "not applicable".
        return -1
|
class BaseOnly(MergeOperator):
    """Ignore the side network; return the base encoding unchanged."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        return base_encoding
|
class SideOnly(MergeOperator):
    """Ignore the base network; return the side encoding unchanged."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        return side_encoding
|
class Summation(MergeOperator):
    """Element-wise sum of base, side, and any additional encodings."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        return base_encoding + side_encoding + sum(additional_encodings)
|
class Product(MergeOperator):
    """Element-wise product of base, side, and any additional encodings."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        merged = base_encoding * side_encoding
        for extra in additional_encodings:
            merged = merged * extra
        return merged
|
class Alpha(MergeOperator):
    """Learnable convex combination of encodings.

    Non-dense: sigmoid-weighted blend of base and side.
    Dense: softmax over [base, previous sides..., current side].
    """

    def __init__(self, dense, task_idx, **kwargs):
        super().__init__(dense, task_idx, **kwargs)
        if dense:
            # One logit per encoding: base + task_idx previous sides + current side.
            self.alphas = nn.Parameter(torch.tensor(0.0).repeat(task_idx + 2))
        else:
            self.alphas = nn.Parameter(torch.tensor(0.0))

    @property
    def weights(self):
        """Normalized merge weights derived from the logits."""
        if self.dense:
            return torch.softmax(self.alphas, dim=0)
        alpha_squashed = torch.sigmoid(self.alphas)
        return [alpha_squashed, 1 - alpha_squashed]

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        outputs_to_merge = [base_encoding] + additional_encodings + [side_encoding]
        merged_encoding = torch.zeros_like(base_encoding) if isinstance(base_encoding, torch.Tensor) else torch.zeros_like(side_encoding)
        # Hoist the property so softmax/sigmoid is computed once per call.
        weights = self.weights
        # BUG FIX: the failure message previously referenced an undefined name
        # `weights` (f-string evaluated eagerly), raising NameError instead of
        # AssertionError when the lengths mismatched.
        assert len(weights) == len(outputs_to_merge), f'# of outputs ({len(outputs_to_merge)}) != # of alphas ({len(weights)})'
        for a, out in zip(weights, outputs_to_merge):
            merged_encoding += a * out
        return merged_encoding

    @property
    def param(self):
        # Scalar summary for logging: the weight assigned to the base encoding.
        return self.weights[0].item()
|
class FiLMNet(nn.Module):
    """Predicts FiLM modulation factors: a shared layer feeds two heads
    producing multiplicative and additive factors, each with a residual add
    of the input."""

    def __init__(self, n_in, n_out, kernel_size=1):
        super().__init__()
        conv_settings = {
            3: {'kernel_size': 3, 'stride': 1, 'padding': 1},
            1: {'kernel_size': 1, 'stride': 1, 'padding': 0},
        }
        assert kernel_size in conv_settings, f'kernel size not recognized ({kernel_size})'
        net_kwargs = conv_settings[kernel_size]
        self.base_layer = _make_layer(n_in, 64, **net_kwargs)
        self.mult_head = nn.Conv2d(64, n_out, bias=True, **net_kwargs)
        self.add_head = nn.Conv2d(64, n_out, bias=True, **net_kwargs)

    def forward(self, x):
        hidden = self.base_layer(x)
        mult_factor = self.mult_head(hidden) + x
        add_factor = self.add_head(hidden) + x
        return mult_factor, add_factor
|
class FiLM(MergeOperator):
    """FiLM-style merge: the side encoding predicts a scale and shift that
    modulate the base encoding. Dense mode is not supported."""

    def __init__(self, dense, **kwargs):
        super().__init__(dense, **kwargs)
        assert (not dense)
        self.film = FiLMNet(n_in=8, n_out=8, kernel_size=1)

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        mult_factor, add_factor = self.film(side_encoding)
        return (base_encoding * mult_factor) + add_factor
|
class MLP(MergeOperator):
    """Merge by passing the base (and, when dense, earlier side) encodings
    through learned layers; the current side encoding passes through as-is."""

    def __init__(self, dense, task_idx, dataset):
        super().__init__(dense, task_idx, dataset)
        if dataset == 'icifar':
            self.make_layer = make_linear_layer
        elif dataset == 'taskonomy':
            self.make_layer = make_conv_layer
        else:
            # Fail fast: previously an unknown dataset left `make_layer` unset
            # and surfaced later as a confusing AttributeError.
            raise ValueError(f'dataset not recognized ({dataset})')
        self.base_net = self.make_layer()
        self.side_net = IdentityFn()
        if dense:
            # One learned layer per previous task's side encoding.
            self.dense_side_nets = nn.ModuleList([self.make_layer() for _ in range(task_idx)])

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        merged_encoding = self.base_net(base_encoding) + self.side_net(side_encoding)
        if self.dense:
            merged_encoding += sum(net(enc) for net, enc in zip(self.dense_side_nets, additional_encodings))
        return merged_encoding
|
class MLP2(MLP):
    """MLP merge where the side encoding also goes through a learned layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.side_net = self.make_layer()
|
class ResMLP2(MLP):
    """MLP merge where the side encoding goes through a residual learned layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.side_net = ResidualLayer(self.make_layer())
|
class MLPHidden(MLP):
    """MLP merge followed by a ReLU, for use as a hidden (non-final) merge."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=[]):
        merged_encoding = self.base_net(base_encoding) + self.side_net(side_encoding)
        if self.dense:
            merged_encoding += sum([net(add_encoding) for (net, add_encoding) in zip(self.dense_side_nets, additional_encodings)])
        # BUG FIX: torch.nn.functional has no `ReLU` attribute; the functional
        # form is `relu`, so the original raised AttributeError at runtime.
        return F.relu(merged_encoding)
|
def load_submodule(model_class, model_weights_path, model_kwargs, backup_fn=zero_fn):
    """Instantiate `model_class(**model_kwargs)` and optionally load weights
    from `model_weights_path`; with no class, fall back to `backup_fn`
    (in which case a weights path is an error)."""
    if model_class is None:
        assert model_weights_path is None, 'cannot have weights without model'
        return backup_fn
    model = model_class(**model_kwargs)
    if model_weights_path is not None:
        model, _ = load_state_dict_from_path(model, model_weights_path)
    return model
|
def _make_layer(in_channels, out_channels, num_groups=2, kernel_size=3, stride=1, padding=0, dilation=1, normalize=True, bsp=False, period=None, debug=False, projected=False, scaling=False, postlinear=False, linear=False):
assert (not (bsp and projected)), 'cannot do bsp and projectedconv'
if linear:
conv = nn.Linear(in_channels, out_channels, bias=False)
elif bsp:
assert (dilation == 1), 'Dilation is not implemented for binary superposition'
assert (period is not None), 'Need to specify period'
conv = HashConv2d(in_channels, out_channels, kernel_size=kernel_size, period=period, stride=stride, padding=padding, bias=False, debug=debug)
elif projected:
assert (dilation == 1), 'Dilation is not implemented for projected conv'
conv = ProjectedConv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
else:
conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, dilation=dilation)
gn = nn.GroupNorm(num_groups, out_channels)
relu = nn.ReLU()
layers = [conv, relu]
if normalize:
layers = [conv, gn, relu]
if scaling:
layers = ([ScaleLayer(0.9)] + layers)
if postlinear:
if linear:
layers = (layers + [nn.Linear(in_channels, out_channels)])
else:
layers = (layers + [nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False)])
return nn.Sequential(*layers)
|
class SampleGroupStackModule(nn.Module):
    """nn.Module wrapper around `downsample_group_stack`; holds no parameters,
    so gradient toggling is a no-op."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, *args, **kwargs):
        return downsample_group_stack(*args, **kwargs)

    def requires_grad_(self, *args, **kwargs):
        # Intentionally a no-op: no parameters to toggle.
        pass
|
class ConstantModel():
    """Callable that always returns a fixed constant, which can be given
    directly, loaded from a .png image (rescaled to [-1, 1]), or loaded via
    torch.load from any other path string."""

    def __init__(self, data):
        if isinstance(data, str):
            if '.png' in data:
                img = Image.open(data)
                self.const = RESCALE_0_1_NEG1_POS1(transforms.ToTensor()(img))
            else:
                self.const = torch.load(data)
        else:
            self.const = data

    def forward(self, x):
        return self.const

    def to(self, device):
        self.const = self.const.to(device)

    def train(self, x):
        # No-op: mimics the nn.Module interface without any parameters.
        pass

    def __call__(self, x):
        return self.const
|
class EnsembleNet(nn.Module):
    """Sum-ensemble of `n_models` independently constructed copies of the
    same submodule class."""

    def __init__(self, n_models, model_class, model_weights_path, **kwargs):
        super().__init__()
        self.nets = nn.ModuleList([load_submodule(eval(model_class), model_weights_path, kwargs) for _ in range(n_models)])

    def forward(self, x):
        return sum(net(x) for net in self.nets)
|
class BoostedNetwork(nn.Module):
    """Encoder that blends a frozen base encoder with one side network per
    task via softmax-normalized learned alphas, then applies a shared
    transfer network (boosting-style incremental task learning).

    Several behaviors below look suspect and are flagged with NOTE(review).
    """

    def __init__(self, use_baked_encoding=False, normalize_pre_transfer=True, encoder_class=None, encoder_weights_path=None, encoder_kwargs={}, transfer_network_class=None, transfer_network_weights_path=None, transfer_network_kwargs={}, sidetuner_network_class=None, sidetuner_network_weights_path=None, sidetuner_kwargs={}, decoder_class=None, decoder_weights_path=None, decoder_kwargs={}, tasks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]):
        super().__init__()
        # Class names arrive as strings and are eval()'d — trusted config only.
        self.encoder = load_submodule(eval(encoder_class), encoder_weights_path, encoder_kwargs, zero_fn)
        # Template args: a fresh side network is instantiated per task below.
        self.side_network_args = (eval(sidetuner_network_class), sidetuner_network_weights_path, sidetuner_kwargs, ZeroFn())
        self.side_networks = nn.ModuleDict()
        self.transfer_network = load_submodule(eval(transfer_network_class), transfer_network_weights_path, transfer_network_kwargs, identity_fn)
        # Plain dict when no sidetuner is configured, else learnable params.
        self.alphas = ({} if (sidetuner_network_class is None) else nn.ParameterDict())
        for task_idx in tasks:
            task_id = str(task_idx)
            if (task_id not in self.side_networks):
                self.side_networks[task_id] = load_submodule(*self.side_network_args)
            self.alphas[task_id] = nn.Parameter(torch.tensor(0.0))
        self.alphas['base'] = nn.Parameter(torch.tensor(0.0))
        assert (decoder_class is None), 'we do not use decoder yet'
        # NOTE(review): decoder_class was just asserted to be None, yet
        # eval(None) on the next line raises TypeError — this looks broken /
        # unreachable-as-intended; confirm before relying on __init__.
        self.decoder = load_submodule(eval(decoder_class), decoder_weights_path, decoder_kwargs, identity_fn)
        self.use_baked_encoding = use_baked_encoding
        self.normalize_pre_transfer = normalize_pre_transfer
        if self.normalize_pre_transfer:
            # Non-affine GroupNorm over the 8-channel merged encoding.
            self.groupnorm = nn.GroupNorm(8, 8, affine=False)

    def forward(self, x, task_idx):
        """Blend base + side encodings for ``task_idx`` and run the transfer net.

        ``x`` may be a raw tensor, a (tensor, baked_encoding) pair when
        use_baked_encoding is set, or a dict containing 'rgb_filled' (and
        optionally a precomputed 'taskonomy' encoding).
        """
        task_id = str(task_idx)
        if isinstance(x, dict):
            x_dict = x
            assert ('rgb_filled' in x_dict.keys()), 'need input images to work with'
            x = x_dict['rgb_filled']
            self.base_encoding = (x_dict['taskonomy'] if ('taskonomy' in x_dict.keys()) else self.encoder(x))
        elif self.use_baked_encoding:
            (x, self.base_encoding) = x
        else:
            self.base_encoding = self.encoder(x)
        # Earlier tasks' side outputs/alphas are detached so only the current
        # task's side network and alpha receive gradients.
        self.side_output = (([self.base_encoding] + [self.side_networks[str(t)](x).detach() for t in range(task_idx)]) + [self.side_networks[task_id](x)])
        self.alpha = (([self.alphas['base'].detach()] + [self.alphas[str(t)].detach() for t in range(task_idx)]) + [self.alphas[task_id]])
        # NOTE(review): torch.tensor(list_of_tensors) copies values and severs
        # the autograd path into the alphas — torch.stack would preserve
        # gradients; confirm whether alpha is actually meant to be trained.
        alpha_squashed = torch.softmax(torch.tensor(self.alpha), dim=0)
        self.alpha = alpha_squashed[(- 1)]
        self.merged_encoding = torch.zeros_like(self.side_output[(- 1)])
        for (a, out) in zip(alpha_squashed, self.side_output):
            self.merged_encoding += (a * out)
        if self.normalize_pre_transfer:
            self.normalized_merged_encoding = self.groupnorm(self.merged_encoding)
            self.transfered_encoding = self.transfer_network(self.normalized_merged_encoding)
        else:
            self.transfered_encoding = self.transfer_network(self.merged_encoding)
        return self.transfered_encoding

    def transfer(self, x):
        # Apply only the shared transfer network.
        return self.transfer_network(x)

    def decode(self, x):
        # Apply the (currently unused — see __init__) decoder.
        return self.decoder(x)

    def start_task(self, task_idx):
        # Hook for per-task setup; intentionally a no-op.
        pass
|
class FCN5(EvalOnlyModel):
    """Five-layer fully convolutional encoder producing an 8-channel map.

    Input is downsampled 16x overall (stride 4, then two stride-2 convs);
    a strided skip connection from the conv2 output is added to the final
    features.

    Args:
        num_groups: groups for each GroupNorm (where a layer normalizes).
        img_channels: channel count of the input image.
        use_residual: add a 2-conv strided residual path from the raw input.
        normalize_outputs: apply a final GroupNorm to the output features.
        bsp: use binary-superposition convs; forward then consumes task_idx.
        period: superposition period (required when bsp=True).
        projected: use projected convolutions instead of nn.Conv2d.
        final_act: if False (non-bsp path only), apply just conv5's
            convolution, skipping its norm/activation.
    """

    def __init__(self, num_groups=2, img_channels=3, use_residual=False, normalize_outputs=False, bsp=False, period=None, projected=False, final_act=True, **kwargs):
        super(FCN5, self).__init__(**kwargs)
        self.conv1 = _make_layer(img_channels, 64, num_groups=num_groups, kernel_size=8, stride=4, padding=2, bsp=bsp, period=period, projected=projected)
        self.conv2 = _make_layer(64, 256, num_groups=num_groups, kernel_size=3, stride=2, padding=1, bsp=bsp, period=period, projected=projected)
        self.conv3 = _make_layer(256, 256, num_groups=num_groups, kernel_size=3, stride=2, padding=1, bsp=bsp, period=period, projected=projected)
        self.conv4 = _make_layer(256, 64, num_groups=num_groups, kernel_size=3, stride=1, padding=1, bsp=bsp, period=period, projected=projected)
        self.conv5 = _make_layer(64, 8, num_groups=num_groups, kernel_size=3, stride=1, padding=1, bsp=bsp, period=period, projected=projected)
        self.skip = _make_layer(256, 8, num_groups=num_groups, kernel_size=3, stride=2, padding=1, bsp=bsp, period=period, projected=projected)
        self.normalize_outputs = normalize_outputs
        self.final_act = final_act
        if normalize_outputs:
            self.groupnorm = nn.GroupNorm(num_groups, 8)
        self.use_residual = use_residual
        if use_residual:
            # Residual path mirrors conv1/conv2 strides, mapping the raw image
            # to the 256-channel feature space.
            res1 = nn.Conv2d(img_channels, 64, kernel_size=8, stride=4, padding=2, bias=False, dilation=1)
            res2 = nn.Conv2d(64, 256, kernel_size=3, stride=2, padding=1, bias=False, dilation=1)
            self.residual = nn.Sequential(res1, res2)
        self.bsp = bsp

    def forward(self, x, task_idx: int=(- 1), cache=None):
        """Encode ``x``; ``task_idx`` selects the bsp task.

        ``cache`` is accepted for interface compatibility and never read
        (default changed from a mutable ``{}`` to ``None``; behavior is
        unchanged).
        """
        x0 = x  # raw input, kept for the residual branch
        if self.bsp:
            x = forward_sequential(x, self.conv1, task_idx)
            x = forward_sequential(x, self.conv2, task_idx)
        else:
            x = self.conv1(x)
            x = self.conv2(x)
        if self.use_residual:
            x = (x + self.residual(x0))
        x2 = x  # conv2-level features feed the skip connection
        if self.bsp:
            x = forward_sequential(x, self.conv3, task_idx)
            x = forward_sequential(x, self.conv4, task_idx)
            # NOTE(review): final_act is not honored on the bsp path — confirm
            # whether that asymmetry with the non-bsp branch is intended.
            x = forward_sequential(x, self.conv5, task_idx)
            x = (x + forward_sequential(x2, self.skip, task_idx))
        else:
            x = self.conv3(x)
            x = self.conv4(x)
            if self.final_act:
                x = self.conv5(x)
            else:
                # Only conv5's convolution, skipping norm/activation.
                x = self.conv5[0](x)
            x = (x + self.skip(x2))
        if self.normalize_outputs:
            x = self.groupnorm(x)
        return x
|
class FCN8(EvalOnlyModel):
    """Eight-layer fully convolutional encoder with three strided skip
    connections, producing an 8-channel feature map.

    Args:
        img_channels: channel count of the input image.
        normalize_outputs: apply a final 2-group GroupNorm to the output.
    """

    def __init__(self, img_channels=3, normalize_outputs=False, **kwargs):
        super(FCN8, self).__init__(**kwargs)
        self.conv1 = _make_layer(img_channels, 64, kernel_size=8, stride=4, padding=2)
        self.conv2 = _make_layer(64, 128, kernel_size=3, stride=2, padding=1)
        self.conv3 = _make_layer(128, 256, kernel_size=3, stride=2, padding=1)
        self.conv4 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv5 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv6 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv7 = _make_layer(256, 128, kernel_size=3, stride=1, padding=1)
        self.conv8 = _make_layer(128, 8, kernel_size=3, stride=1, padding=1)
        # One skip per two-conv stage, matched to that stage's output shape.
        self.skip1 = _make_layer(128, 256, kernel_size=3, stride=2, padding=1)
        self.skip2 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.skip3 = _make_layer(256, 8, kernel_size=3, stride=1, padding=1)
        self.normalize_outputs = normalize_outputs
        if self.normalize_outputs:
            self.groupnorm = nn.GroupNorm(2, 8)

    def forward(self, x, task_idx: int=(- 1), cache=None):
        """Encode ``x``. ``task_idx`` and ``cache`` are accepted only for
        interface parity with the other FCNs and are unused (``cache``'s
        default changed from mutable ``{}`` to ``None``; behavior unchanged).
        """
        x = self.conv1(x)
        x = self.conv2(x)
        x2 = x
        x = self.conv3(x)
        x = self.conv4(x)
        x = (x + self.skip1(x2))
        x4 = x
        x = self.conv5(x)
        x = self.conv6(x)
        x = (x + self.skip2(x4))
        x6 = x
        x = self.conv7(x)
        x = self.conv8(x)
        x = (x + self.skip3(x6))
        if self.normalize_outputs:
            x = self.groupnorm(x)
        return x
|
class FCN4(EvalOnlyModel):
    """Four-layer fully convolutional encoder producing a 64-channel map.

    Args mirror FCN5; ``final_act=False`` (non-bsp path) applies only
    conv5's convolution, skipping its activation/normalization.
    """

    def __init__(self, num_groups=2, img_channels=3, use_residual=False, normalize_outputs=False, bsp=False, period=None, debug=False, projected=False, final_act=True, **kwargs):
        super(FCN4, self).__init__(**kwargs)
        self.conv1 = _make_layer(img_channels, 16, num_groups=num_groups, kernel_size=3, stride=1, padding=1, bsp=bsp, period=period, debug=debug, projected=projected)
        self.conv2 = _make_layer(16, 16, num_groups=num_groups, kernel_size=3, stride=2, padding=0, bsp=bsp, period=period, debug=debug, projected=projected)
        self.conv3 = _make_layer(16, 32, num_groups=num_groups, kernel_size=3, stride=2, bsp=bsp, period=period, debug=debug, projected=projected)
        self.conv5 = _make_layer(32, 64, num_groups=num_groups, kernel_size=3, stride=1, normalize=normalize_outputs, bsp=bsp, period=period, debug=debug, projected=projected)
        self.bsp = bsp
        self.use_residual = use_residual
        self.final_act = final_act
        if use_residual:
            # Residual path maps the RAW IMAGE (img_channels in) to 64 channels.
            res1 = nn.Conv2d(img_channels, 8, kernel_size=3, stride=1, padding=0, bias=False, dilation=2)
            res2 = nn.Conv2d(8, 8, kernel_size=3, stride=2, padding=0, bias=False, dilation=2)
            res3 = nn.Conv2d(8, 64, kernel_size=3, stride=2, bias=False, dilation=1)
            self.residual = nn.Sequential(res1, res2, res3)

    def forward(self, x, task_idx: int=(- 1)):
        x0 = x  # keep the raw input for the residual branch
        if self.bsp:
            x = forward_sequential(x, self.conv1, task_idx)
            x = forward_sequential(x, self.conv2, task_idx)
            x = forward_sequential(x, self.conv3, task_idx)
            x = forward_sequential(x, self.conv5, task_idx)
        else:
            x = self.conv1(x)
            x = self.conv2(x)
            x = self.conv3(x)
            if self.final_act:
                x = self.conv5(x)
            else:
                # Only conv5's convolution, skipping norm/activation.
                x = self.conv5[0](x)
        if self.use_residual:
            # BUG FIX: the residual stack expects img_channels input, so it
            # must be fed the original image x0 (as FCN5 does with its input),
            # not the 64-channel features — self.residual(x) previously raised
            # a channel-mismatch error whenever use_residual was enabled.
            res = self.residual(x0)
            x = (x + res)
        return x
|
class FCN4Reshaped(FCN4):
    """FCN4 followed by global average pooling to a flat (N, 64) vector."""

    def forward(self, x, cache=None, time_idx: int=(- 1)):
        """Encode and pool.

        ``time_idx`` is forwarded as FCN4's ``task_idx`` (bsp task selector).
        ``cache`` is never read; its default changed from a mutable ``{}`` to
        ``None`` (behavior unchanged).
        """
        x = super().forward(x, time_idx)
        # Pool over the full spatial extent, then flatten to (N, 64).
        x = F.avg_pool2d(x, x.size()[3]).view(x.shape[0], 64)
        return x
|
class FCN3(EvalOnlyModel):
    """Three-layer fully convolutional encoder producing an 8-channel map."""

    def __init__(self, num_groups=2, img_channels=3, normalize_outputs=False, **kwargs):
        super(FCN3, self).__init__(**kwargs)
        self.conv1 = _make_layer(img_channels, 64, num_groups=num_groups, kernel_size=8, stride=4, padding=1)
        self.conv2 = _make_layer(64, 256, num_groups=num_groups, kernel_size=3, stride=2, padding=2)
        self.conv3 = _make_layer(256, 8, num_groups=num_groups, kernel_size=3, stride=2, normalize=normalize_outputs)

    def forward(self, x, task_idx: int=(- 1), cache=None):
        """Encode ``x``. ``task_idx`` and ``cache`` are accepted only for
        interface parity with the other FCNs and are unused (``cache``'s
        default changed from mutable ``{}`` to ``None``; behavior unchanged).
        """
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
|
def get_output_sizes(base_path='/root/tlkit/tlkit/taskonomy_data/'):
    """Print and return [(task, decoder_output_size), ...] for every task in
    LIST_OF_TASKS, read from '<base_path>/<task>_decoder.dat' checkpoints.

    Args:
        base_path: directory containing the decoder checkpoints. Previously
            hard-coded; the default preserves the old behavior.

    Returns:
        List of (task_name, output_size) pairs (previously only printed).
    """
    decoder_paths = [os.path.join(base_path, f'{task}_decoder.dat') for task in LIST_OF_TASKS]
    decoder_state_dicts = [torch.load(path) for path in decoder_paths]
    output_sizes = [decoder['state_dict']['decoder_output.0.bias'].numpy().size for decoder in decoder_state_dicts]
    pairs = list(zip(LIST_OF_TASKS, output_sizes))
    print(pairs)
    return pairs
|
class HiddenPrints():
    """Context manager that silences stdout for the duration of the ``with``
    block by redirecting it to os.devnull, restoring the prior stream on exit."""

    def __enter__(self):
        # Remember the live stream, then swap in a sink.
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the devnull handle we opened, then restore the original.
        sys.stdout.close()
        sys.stdout = self._original_stdout
|
def update(d, u):
    """Recursively merge mapping ``u`` into ``d`` (in place) and return ``d``.

    Nested mappings are merged key-by-key; any non-mapping value overwrites.
    """
    # collections.Mapping was removed in Python 3.10; use collections.abc.
    from collections.abc import Mapping
    for (k, v) in u.items():
        if isinstance(v, Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
|
def flatten(d, parent_key='', sep='.'):
    """Flatten a nested mapping into a flat dict with ``sep``-joined keys.

    E.g. {'a': {'b': 1}} -> {'a.b': 1}.
    """
    # collections.MutableMapping was removed in Python 3.10; use collections.abc.
    from collections.abc import MutableMapping
    items = []
    for (k, v) in d.items():
        new_key = (((parent_key + sep) + k) if parent_key else k)
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
|
def var_to_numpy(encoding):
    """Detach a tensor from the autograd graph and return it as a CPU numpy array."""
    return encoding.detach().cpu().numpy()
|
def checkpoint_name(checkpoint_dir, epoch='latest'):
    """Build the checkpoint file path, e.g. ``<dir>/ckpt-7.dat``."""
    filename = f'ckpt-{epoch}.dat'
    return os.path.join(checkpoint_dir, filename)
|
def save_checkpoint(obj, directory, step_num):
    """Serialize ``obj`` to <directory>/ckpt-latest.dat, then asynchronously
    copy it to the step-numbered checkpoint file.

    Creates ``directory`` if needed; raises whatever torch.save raises on
    serialization/IO failure.
    """
    os.makedirs(directory, exist_ok=True)
    latest = checkpoint_name(directory)
    torch.save(obj, latest)
    # Copy in the background, as before ('cp ... &'), but pass an argument
    # list instead of a shell string so paths with spaces/metacharacters
    # cannot be interpolated by the shell.
    subprocess.Popen(['cp', latest, checkpoint_name(directory, step_num)])
|
def get_parent_dirname(path):
    """Return the name of the directory directly containing ``path``."""
    parent = os.path.dirname(path)
    return os.path.basename(parent)
|
def get_subdir(training_directory, subdir_name):
    """Find entries of ``training_directory`` whose basename contains
    ``subdir_name``.

    Returns None if there is no match, the single path if exactly one entry
    matches, and a list of paths otherwise. Matches may be files or
    directories (e.g. a 'checkpoints' dir, or 'rewards*' files).
    """
    training_directory = training_directory.strip()
    matches = [os.path.join(training_directory, entry)
               for entry in os.listdir(training_directory)
               if subdir_name in entry]
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]
    return matches
|
def read_pkl(pkl_name):
    """Load and return the pickled object stored at ``pkl_name``.

    NOTE: pickle is unsafe on untrusted files — only use with trusted data.
    """
    with open(pkl_name, 'rb') as handle:
        return pickle.load(handle)
|
def get_number(name):
    """Return the first run of digits in ``name`` as an int; -1 if absent.

    Previously used a bare ``except`` which could hide unrelated errors;
    now catches only the expected failures: no digits found (IndexError)
    or ``name`` not being string-like (TypeError).
    """
    try:
        num = int(re.findall('[0-9]+', name)[0])
    except (IndexError, TypeError):
        num = (- 1)
    return num
|
def unused_dir_name(output_dir):
    "\n Returns a unique (not taken) output_directory name with similar structure to existing one\n Specifically,\n if dir is not taken, return itself\n if dir is taken, return a new name where\n if dir = base + number, then newdir = base + {number+1}\n ow: newdir = base1\n e.g. if output_dir = '/eval/'\n if empty: return '/eval/'\n if '/eval/' exists: return '/eval1/'\n if '/eval/' and '/eval1/' exists, return '/eval2/'\n\n "
    existing_output_paths = []
    if os.path.exists(output_dir):
        # Normalize away a trailing separator so basename/dirname behave.
        if (os.path.basename(output_dir) == ''):
            output_dir = os.path.dirname(output_dir)
        dirname = os.path.dirname(output_dir)
        # Strip any trailing run number to recover the shared name prefix.
        base_name_prefix = re.sub('\\d+$', '', os.path.basename(output_dir))
        existing_output_paths = get_subdir(dirname, base_name_prefix)
        assert (existing_output_paths is not None), f'Bug, cannot find output_dir {output_dir}'
        if (not isinstance(existing_output_paths, list)):
            existing_output_paths = [existing_output_paths]
        # Only the last 5 chars are scanned so digits embedded earlier in the
        # prefix do not count as the run number; get_number returns -1 if none.
        numbers = [get_number(os.path.basename(path)[(- 5):]) for path in existing_output_paths]
        eval_num = (max(max(numbers), 0) + 1)
        # Trailing '' appends a path separator, matching the '/eval1/' style.
        output_dir = os.path.join(dirname, f'{base_name_prefix}{eval_num}', '')
        print('New output dir', output_dir)
    return (output_dir, existing_output_paths)
|
def index_to_image(idxs: torch.Tensor, dictionary: np.ndarray, img_size):
    """Render each instance's top-5 prediction strings as red text on a white
    RGB canvas and return the batch as a float tensor scaled to [-1, 1].

    Returns a tensor of shape (batch, 3, img_size, img_size).
    """
    rendered = []
    for inst_top5 in dictionary[idxs]:
        # Drop the leading token (presumably an id) from each "id label" entry.
        labels = [entry.split(' ', 1)[1] for entry in inst_top5]
        text = ('Top 5 predictions: \n ' + ' '.join((label + '\n') for label in labels))
        canvas = Image.new('RGB', (img_size, img_size), (255, 255, 255))
        ImageDraw.Draw(canvas).text((20, 5), text, fill=(255, 0, 0))
        rendered.append(np.array(canvas))
    # HWC -> CHW, then rescale uint8 [0, 255] to [-1, 1].
    batch = np.stack(rendered).transpose(0, 3, 1, 2).astype(np.float32)
    batch -= 127.5
    batch /= 127.5
    return torch.Tensor(batch)
|
def pil_to_np(img):
    """Convert an RGB PIL image (or any object exposing ``tobytes`` and a
    (width, height) ``size``) to an HxWx3 uint8 numpy array."""
    (width, height) = img.size
    flat = np.frombuffer(img.tobytes(), dtype=np.uint8)
    return flat.reshape((height, width, 3))
|
def np_to_pil(img_arr):
    """Convert a numpy image array back to a PIL Image, casting to uint8."""
    as_uint8 = img_arr.astype(np.uint8)
    return Image.fromarray(as_uint8)
|
def count_open():
tensor_count = {}
var_count = {}
np_count = {}
for obj in gc.get_objects():
try:
if isinstance(obj, np.ndarray):
if (obj.shape in np_count):
np_count[obj.shape] += 1
else:
np_count[obj.shape] = 1
if torch.is_tensor(obj):
if (obj.size() in tensor_count):
tensor_count[obj.size()] += 1
else:
tensor_count[obj.size()] = 1
if (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
if (obj.size() in tensor_count):
var_count[obj.size()] += 1
else:
var_count[obj.size()] = 1
except:
pass
biggest_hitters = sorted(list(tensor_count.items()), key=(lambda x: x[1]))[(- 3):]
biggest_hitters = biggest_hitters[::(- 1)]
print('Most frequent tensor shape:', biggest_hitters)
biggest_hitters = sorted(list(np_count.items()), key=(lambda x: x[1]))[(- 3):]
biggest_hitters = biggest_hitters[::(- 1)]
print('Most frequent numpy array shape:', biggest_hitters)
return biggest_hitters
|
def process_batch_tuple(batch_tuple, task_idx, cfg):
    """Move a dataloader batch to ``device`` and split it into (x, label, masks).

    The batch is laid out as: sources..., targets..., [masks]. Source/target
    task lists come from cfg['training']; when ``task_idx`` is None the global
    lists are used, otherwise the per-task entries.

    NOTE(review): ``device`` is read from module scope — confirm it is defined
    before this is called.
    """
    batch_tuple = [x.to(device, non_blocking=True) for x in batch_tuple]
    if (task_idx is None):
        sources = cfg['training']['sources']
        targets = cfg['training']['targets']
    else:
        sources = cfg['training']['sources'][task_idx]
        targets = cfg['training']['targets'][task_idx]
    x = batch_tuple[:len(sources)]
    if (len(sources) == 1):
        # Single source: unwrap so downstream receives the bare tensor.
        x = x[0]
    if cfg['training']['sources_as_dict']:
        x = dict(zip(sources, x))
    if cfg['training']['suppress_target_and_use_annotator']:
        # Labels generated on the fly from the inputs instead of the dataset.
        labels = [cfg['training']['annotator'](x)]
    else:
        labels = batch_tuple[len(sources):(len(sources) + len(targets))]
    # use_masks may be a global bool or a per-task list of bools.
    if ((isinstance(cfg['training']['use_masks'], list) and cfg['training']['use_masks'][task_idx]) or (isinstance(cfg['training']['use_masks'], bool) and cfg['training']['use_masks'])):
        masks = batch_tuple[(- 1)]
    else:
        masks = None
    assert (len(targets) == 1), 'Transferring is only supported for one target task'
    label = labels[0]
    return (x, label, masks)
|
def forward_sequential(x, layers, task_idx):
    """Run ``x`` through ``layers``, passing ``task_idx`` to any layer that
    accepts it.

    Layers whose signature rejects the extra argument (TypeError) are invoked
    with ``x`` alone. ``layers`` may be a single callable or a container
    (nn.Sequential / list / nn.ModuleList).

    NOTE(review): a TypeError raised *inside* a layer's own body is
    indistinguishable here from a signature mismatch.
    """
    is_container = isinstance(layers, (nn.Sequential, list, nn.ModuleList))
    stages = layers if is_container else [layers]
    for stage in stages:
        try:
            x = stage(x, task_idx)
        except TypeError:
            x = stage(x)
    return x
|
def load_state_dict_from_path(model, path):
    """Load a checkpoint from ``path`` into ``model``.

    Handles both raw state dicts and {'state_dict': ...} checkpoints, strips
    DataParallel 'module.' prefixes, and falls back to strict=False loading
    (printing the key mismatch counts) when keys do not line up exactly.

    Returns:
        Tuple of (model, checkpoint).
    """
    checkpoint = torch.load(path)
    if ('state_dict' not in checkpoint.keys()):
        # Plain state dict serialized directly.
        model.load_state_dict(checkpoint)
        return (model, checkpoint)
    state_dict = checkpoint['state_dict']
    if any(('module' in key) for key in state_dict):
        # Checkpoint was saved from nn.DataParallel; drop the wrapper prefix.
        state_dict = {key.replace('module.', ''): value for (key, value) in state_dict.items()}
    try:
        model.load_state_dict(state_dict, strict=True)
    except RuntimeError as e:
        print(f'{e}, reloaded with strict=False\n')
        incompatible = model.load_state_dict(state_dict, strict=False)
        if (incompatible is not None):
            matched = len([key for key in model.state_dict() if (key in state_dict)])
            print(f'Num matches: {matched}\nNum missing: {len(incompatible.missing_keys)}\nNum unexpected: {len(incompatible.unexpected_keys)}')
    return (model, checkpoint)
|
class Mock(MagicMock):
    """Import-mocking stub (Sphinx docs-build recipe): any attribute access
    yields a fresh MagicMock, so unimportable modules never break autodoc."""

    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
|
def setup(app):
app.add_stylesheet('css/pytorch_theme.css')
|
def get_iterator(mode):
    """Build a torchnet parallel iterator over MNIST (mode=True → train split)."""
    split = MNIST(root='./', download=True, train=mode)
    # NOTE(review): train_data/train_labels are legacy torchvision attributes.
    images = getattr(split, 'train_data' if mode else 'test_data')
    targets = getattr(split, 'train_labels' if mode else 'test_labels')
    wrapped = tnt.dataset.TensorDataset([images, targets])
    return wrapped.parallel(batch_size=128, num_workers=4, shuffle=mode)
|
def conv_init(ni, no, k):
    """Kaiming-normal initialized conv weight of shape (no, ni, k, k)."""
    weight = torch.Tensor(no, ni, k, k)
    return kaiming_normal(weight)
|
def linear_init(ni, no):
    """Kaiming-normal initialized linear weight of shape (no, ni)."""
    weight = torch.Tensor(no, ni)
    return kaiming_normal(weight)
|
def f(params, inputs, mode):
    """Functional forward pass: two strided convs + two linear layers.

    ``params`` maps names ('conv0.weight', ...) to tensors; ``mode`` is
    accepted but unused. Expects flattened 28x28 single-channel inputs and
    returns (N, 10) logits.
    """
    h = inputs.view(inputs.size(0), 1, 28, 28)
    h = F.relu(F.conv2d(h, params['conv0.weight'], params['conv0.bias'], stride=2))
    h = F.relu(F.conv2d(h, params['conv1.weight'], params['conv1.bias'], stride=2))
    h = h.view(h.size(0), (- 1))
    h = F.relu(F.linear(h, params['linear2.weight'], params['linear2.bias']))
    return F.linear(h, params['linear3.weight'], params['linear3.bias'])
|
def main():
    """Train and evaluate the functional MNIST conv net with torchnet,
    tracking average loss and classification accuracy via tnt meters.

    NOTE(review): this is legacy torch<=0.3-era example code (Variable,
    .data[0]); it will not run unmodified on modern PyTorch.
    """
    params = {'conv0.weight': conv_init(1, 50, 5), 'conv0.bias': torch.zeros(50), 'conv1.weight': conv_init(50, 50, 5), 'conv1.bias': torch.zeros(50), 'linear2.weight': linear_init(800, 512), 'linear2.bias': torch.zeros(512), 'linear3.weight': linear_init(512, 10), 'linear3.bias': torch.zeros(10)}
    # Wrap in (legacy) Variables so the optimizer can update them.
    params = {k: Variable(v, requires_grad=True) for (k, v) in params.items()}
    optimizer = torch.optim.SGD(params.values(), lr=0.01, momentum=0.9, weight_decay=0.0005)
    engine = Engine()
    meter_loss = tnt.meter.AverageValueMeter()
    classerr = tnt.meter.ClassErrorMeter(accuracy=True)
    def h(sample):
        # sample = (inputs, targets, train_flag) — see on_sample below.
        inputs = Variable((sample[0].float() / 255.0))
        targets = Variable(torch.LongTensor(sample[1]))
        o = f(params, inputs, sample[2])
        return (F.cross_entropy(o, targets), o)
    def reset_meters():
        classerr.reset()
        meter_loss.reset()
    def on_sample(state):
        # Append the train/test flag so h() can see the current mode.
        state['sample'].append(state['train'])
    def on_forward(state):
        classerr.add(state['output'].data, torch.LongTensor(state['sample'][1]))
        # NOTE(review): .data[0] is pre-0.4 indexing; modern torch needs .item().
        meter_loss.add(state['loss'].data[0])
    def on_start_epoch(state):
        reset_meters()
        state['iterator'] = tqdm(state['iterator'])
    def on_end_epoch(state):
        print(('Training loss: %.4f, accuracy: %.2f%%' % (meter_loss.value()[0], classerr.value()[0])))
        # Reset, then run the held-out set so the meters hold test stats.
        reset_meters()
        engine.test(h, get_iterator(False))
        print(('Testing loss: %.4f, accuracy: %.2f%%' % (meter_loss.value()[0], classerr.value()[0])))
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.train(h, get_iterator(True), maxepoch=10, optimizer=optimizer)
|
def get_iterator(mode):
    """Return a torchnet parallel MNIST iterator; ``mode`` picks train/test
    and also controls shuffling."""
    split = MNIST(root='./', download=True, train=mode)
    # NOTE(review): train_data/train_labels are legacy torchvision attributes.
    images = getattr(split, 'train_data' if mode else 'test_data')
    targets = getattr(split, 'train_labels' if mode else 'test_labels')
    paired = tnt.dataset.TensorDataset([images, targets])
    return paired.parallel(batch_size=128, num_workers=4, shuffle=mode)
|
def conv_init(ni, no, k):
    """Return a (no, ni, k, k) conv weight tensor, Kaiming-normal initialized."""
    return kaiming_normal(torch.Tensor(no, ni, k, k))
|
def linear_init(ni, no):
    """Return a (no, ni) linear weight tensor, Kaiming-normal initialized."""
    return kaiming_normal(torch.Tensor(no, ni))
|
def f(params, inputs, mode):
    """Functional LeNet-style forward: conv/relu x2, flatten, linear/relu,
    linear. ``params`` holds the named weights; ``mode`` is unused.
    """
    out = inputs.view(inputs.size(0), 1, 28, 28)
    out = F.conv2d(out, params['conv0.weight'], params['conv0.bias'], stride=2)
    out = F.relu(out)
    out = F.conv2d(out, params['conv1.weight'], params['conv1.bias'], stride=2)
    out = F.relu(out)
    # Flatten spatial features for the fully-connected head.
    out = out.view(out.size(0), (- 1))
    out = F.relu(F.linear(out, params['linear2.weight'], params['linear2.bias']))
    out = F.linear(out, params['linear3.weight'], params['linear3.bias'])
    return out
|
def main():
    """Train/evaluate the functional MNIST conv net, streaming loss, accuracy,
    mAP and confusion meters to a remote MeterLogger (visdom) server.

    NOTE(review): legacy torch<=0.3-era example code (Variable wrappers).
    """
    params = {'conv0.weight': conv_init(1, 50, 5), 'conv0.bias': torch.zeros(50), 'conv1.weight': conv_init(50, 50, 5), 'conv1.bias': torch.zeros(50), 'linear2.weight': linear_init(800, 512), 'linear2.bias': torch.zeros(512), 'linear3.weight': linear_init(512, 10), 'linear3.bias': torch.zeros(10)}
    # Wrap in (legacy) Variables so the optimizer can update them.
    params = {k: Variable(v, requires_grad=True) for (k, v) in params.items()}
    optimizer = torch.optim.SGD(params.values(), lr=0.01, momentum=0.9, weight_decay=0.0005)
    engine = Engine()
    # NOTE(review): hard-coded logging server address/port.
    mlog = MeterLogger(server='10.10.30.91', port=9917, nclass=10, title='mnist_meterlogger')
    def h(sample):
        # sample = (inputs, targets, train_flag) — see on_sample below.
        inputs = Variable((sample[0].float() / 255.0))
        targets = Variable(torch.LongTensor(sample[1]))
        o = f(params, inputs, sample[2])
        return (F.cross_entropy(o, targets), o)
    def on_sample(state):
        # Append the train/test flag so h() can see the current mode.
        state['sample'].append(state['train'])
    def on_forward(state):
        loss = state['loss']
        output = state['output']
        target = state['sample'][1]
        mlog.update_loss(loss, meter='loss')
        mlog.update_meter(output, target, meters={'accuracy', 'map', 'confusion'})
    def on_start_epoch(state):
        mlog.timer.reset()
        state['iterator'] = tqdm(state['iterator'])
    def on_end_epoch(state):
        mlog.print_meter(mode='Train', iepoch=state['epoch'])
        mlog.reset_meter(mode='Train', iepoch=state['epoch'])
        # Run the held-out set, then report under the 'Test' banner.
        engine.test(h, get_iterator(False))
        mlog.print_meter(mode='Test', iepoch=state['epoch'])
        mlog.reset_meter(mode='Test', iepoch=state['epoch'])
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.train(h, get_iterator(True), maxepoch=10, optimizer=optimizer)
|
def get_iterator(mode):
    """torchnet parallel iterator over the MNIST split selected by ``mode``
    (True → train, shuffled; False → test)."""
    mnist = MNIST(root='./', download=True, train=mode)
    # NOTE(review): train_data/train_labels are legacy torchvision attributes.
    tensors = [getattr(mnist, 'train_data' if mode else 'test_data'),
               getattr(mnist, 'train_labels' if mode else 'test_labels')]
    return tnt.dataset.TensorDataset(tensors).parallel(batch_size=128, num_workers=4, shuffle=mode)
|
def conv_init(ni, no, k):
    """Kaiming-normal initialized (no, ni, k, k) convolution weight."""
    uninitialized = torch.Tensor(no, ni, k, k)
    return kaiming_normal(uninitialized)
|
def linear_init(ni, no):
    """Kaiming-normal initialized (no, ni) linear weight."""
    uninitialized = torch.Tensor(no, ni)
    return kaiming_normal(uninitialized)
|
def f(params, inputs, mode):
    """Two strided conv+relu stages followed by a two-layer linear head.

    ``params`` maps 'conv0.weight' etc. to tensors; ``mode`` is unused.
    Input is a flattened 28x28 single-channel batch; output is (N, 10).
    """
    act = inputs.view(inputs.size(0), 1, 28, 28)
    for stage in ('conv0', 'conv1'):
        act = F.relu(F.conv2d(act, params[f'{stage}.weight'], params[f'{stage}.bias'], stride=2))
    act = act.view(act.size(0), (- 1))
    act = F.relu(F.linear(act, params['linear2.weight'], params['linear2.bias']))
    return F.linear(act, params['linear3.weight'], params['linear3.bias'])
|
def main():
    """Train/evaluate the functional MNIST conv net, plotting loss, class
    error and the confusion matrix to a local visdom server via tnt loggers.

    NOTE(review): legacy torch<=0.3-era example code (Variable, .data[0]);
    requires a visdom server listening on the hard-coded port.
    """
    params = {'conv0.weight': conv_init(1, 50, 5), 'conv0.bias': torch.zeros(50), 'conv1.weight': conv_init(50, 50, 5), 'conv1.bias': torch.zeros(50), 'linear2.weight': linear_init(800, 512), 'linear2.bias': torch.zeros(512), 'linear3.weight': linear_init(512, 10), 'linear3.bias': torch.zeros(10)}
    # Wrap in (legacy) Variables so the optimizer can update them.
    params = {k: Variable(v, requires_grad=True) for (k, v) in params.items()}
    optimizer = torch.optim.SGD(params.values(), lr=0.01, momentum=0.9, weight_decay=0.0005)
    engine = Engine()
    meter_loss = tnt.meter.AverageValueMeter()
    classerr = tnt.meter.ClassErrorMeter(accuracy=True)
    confusion_meter = tnt.meter.ConfusionMeter(10, normalized=True)
    # Default visdom port; one line plot per tracked quantity.
    port = 8097
    train_loss_logger = VisdomPlotLogger('line', port=port, opts={'title': 'Train Loss'})
    train_err_logger = VisdomPlotLogger('line', port=port, opts={'title': 'Train Class Error'})
    test_loss_logger = VisdomPlotLogger('line', port=port, opts={'title': 'Test Loss'})
    test_err_logger = VisdomPlotLogger('line', port=port, opts={'title': 'Test Class Error'})
    confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': 'Confusion matrix', 'columnnames': list(range(10)), 'rownames': list(range(10))})
    def h(sample):
        # sample = (inputs, targets, train_flag) — see on_sample below.
        inputs = Variable((sample[0].float() / 255.0))
        targets = Variable(torch.LongTensor(sample[1]))
        o = f(params, inputs, sample[2])
        return (F.cross_entropy(o, targets), o)
    def reset_meters():
        classerr.reset()
        meter_loss.reset()
        confusion_meter.reset()
    def on_sample(state):
        # Append the train/test flag so h() can see the current mode.
        state['sample'].append(state['train'])
    def on_forward(state):
        classerr.add(state['output'].data, torch.LongTensor(state['sample'][1]))
        confusion_meter.add(state['output'].data, torch.LongTensor(state['sample'][1]))
        # NOTE(review): .data[0] is pre-0.4 indexing; modern torch needs .item().
        meter_loss.add(state['loss'].data[0])
    def on_start_epoch(state):
        reset_meters()
        state['iterator'] = tqdm(state['iterator'])
    def on_end_epoch(state):
        print(('Training loss: %.4f, accuracy: %.2f%%' % (meter_loss.value()[0], classerr.value()[0])))
        train_loss_logger.log(state['epoch'], meter_loss.value()[0])
        train_err_logger.log(state['epoch'], classerr.value()[0])
        # Reset, then run the held-out set so the meters hold test stats.
        reset_meters()
        engine.test(h, get_iterator(False))
        test_loss_logger.log(state['epoch'], meter_loss.value()[0])
        test_err_logger.log(state['epoch'], classerr.value()[0])
        confusion_logger.log(confusion_meter.value())
        print(('Testing loss: %.4f, accuracy: %.2f%%' % (meter_loss.value()[0], classerr.value()[0])))
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    engine.hooks['on_end_epoch'] = on_end_epoch
    engine.train(h, get_iterator(True), maxepoch=10, optimizer=optimizer)
|
class TestDatasets(unittest.TestCase):
    """Unit tests for the torchnet ``dataset`` wrappers (ListDataset,
    TensorDataset, BatchDataset, Resample/Shuffle/Split/ConcatDataset)."""

    def testListDataset(self):
        # ListDataset should accept plain lists, torch tensors and numpy arrays.
        h = [0, 1, 2]
        d = dataset.ListDataset(elem_list=h, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        t = torch.LongTensor([0, 1, 2])
        d = dataset.ListDataset(elem_list=t, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        a = np.asarray([0, 1, 2])
        d = dataset.ListDataset(elem_list=a, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)

    def testListDataset_path(self):
        # load may be a format callable combined with a path prefix.
        tbl = [0, 1, 2]
        d = dataset.ListDataset(tbl, 'bar/{}'.format, 'foo')
        self.assertEqual(len(d), 3)
        self.assertEqual(d[2], 'bar/foo/2')

    def testListDataset_file(self):
        # Element list may also come from a newline-delimited file on disk.
        (_, filename) = tempfile.mkstemp()
        with open(filename, 'w') as f:
            for i in range(0, 50):
                f.write((str(i) + '\n'))
        d = dataset.ListDataset(filename, (lambda x: x), 'foo')
        self.assertEqual(len(d), 50)
        self.assertEqual(d[15], 'foo/15')
        os.remove(filename)

    def testTensorDataset(self):
        # Dict-of-arrays, single tensor, and list-of-tensors layouts.
        data = {'input': np.arange(0, 8), 'target': np.arange(0, 8)}
        d = dataset.TensorDataset(data)
        self.assertEqual(len(d), 8)
        self.assertEqual(d[2], {'input': 2, 'target': 2})
        a = torch.randn(8)
        d = dataset.TensorDataset(a)
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1])
        d = dataset.TensorDataset([a])
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1][0])

    def testBatchDataset(self):
        # torch.arange appeared later; fall back to the older torch.range.
        if hasattr(torch, 'arange'):
            t = torch.arange(0, 16).long()
        else:
            t = torch.range(0, 15).long()
        batchsize = 8
        d = dataset.ListDataset(t, (lambda x: {'input': x}))
        d = dataset.BatchDataset(d, batchsize)
        ex = d[0]['input']
        self.assertEqual(len(ex), batchsize)
        self.assertEqual(ex[(- 1)], (batchsize - 1))

    def testResampleDataset(self):
        # Resampling with i % 2 maps indices 0,1,2 onto elements 0,1,0.
        tbl = dataset.TensorDataset(np.asarray([0, 1, 2]))
        d = dataset.ResampleDataset(tbl, (lambda dataset, i: (i % 2)))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        self.assertEqual(d[2], 0)

    def testShuffleDataset(self):
        # Only the length is deterministic under shuffling.
        tbl = dataset.TensorDataset(np.asarray([0, 1, 2, 3, 4]))
        d = dataset.ShuffleDataset(tbl)
        self.assertEqual(len(d), 5)

    def testSplitDataset(self):
        # Integer partition sizes; both the class and the .split() helper.
        h = [0, 1, 2, 3]
        listdataset = dataset.ListDataset(elem_list=h)
        splitdataset = dataset.SplitDataset(listdataset, {'train': 3, 'val': 1})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)
        splitdataset.select('val')
        self.assertEqual(len(splitdataset), 1)
        self.assertEqual(splitdataset[0], 3)
        splitdataset = listdataset.split({'train': 3, 'val': 1})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)

    def testSplitDataset_fractions(self):
        # Fractional partition sizes should behave like the integer ones.
        h = [0, 1, 2, 3]
        listdataset = dataset.ListDataset(elem_list=h)
        splitdataset = dataset.SplitDataset(listdataset, {'train': 0.75, 'val': 0.25})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)
        splitdataset.select('val')
        self.assertEqual(len(splitdataset), 1)
        self.assertEqual(splitdataset[0], 3)

    def testConcatDataset(self):
        # Indices past the first dataset continue into the second.
        l1 = dataset.ListDataset(elem_list=[0, 1, 2, 3])
        l2 = dataset.ListDataset(elem_list=[10, 11, 13])
        concatdataset = dataset.ConcatDataset([l1, l2])
        self.assertEqual(len(concatdataset), 7)
        self.assertEqual(concatdataset[0], 0)
        self.assertEqual(concatdataset[3], 3)
        self.assertEqual(concatdataset[4], 10)
        self.assertEqual(concatdataset[6], 13)
|
class TestMeters(unittest.TestCase):
def testAverageValueMeter(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(i)
(mean, std) = m.value()
self.assertEqual(mean, 5.0)
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_np_2d(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(np.float32([[i, (i + 1)]]))
(mean, std) = m.value()
self.assertTrue(np.allclose(mean, [[5.0, 6.0]]))
self.assertTrue(np.allclose(std, [[2.738613, 2.738613]]))
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_torch_2d(self):
m = meter.AverageValueMeter()
for i in range(1, 10):
m.add(torch.Tensor([[i, (i + 1)]]))
(mean, std) = m.value()
self.assertTrue(np.allclose(mean, [[5.0, 6.0]]))
self.assertTrue(np.allclose(std, [[2.738613, 2.738613]]))
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_n(self):
'Test the case of adding more than 1 value.\n '
m = meter.AverageValueMeter()
for i in range(1, 11):
m.add((i * i), n=i)
(mean, std) = m.value()
self.assertEqual(mean, 7.0)
m.reset()
(mean, std) = m.value()
self.assertTrue(np.isnan(mean))
def testAverageValueMeter_stable(self):
'Test the case of near-zero variance.\n\n The test compares the results to numpy, and uses\n isclose() to allow for some small differences in\n the results, which are due to slightly different arithmetic\n operations and order.\n '
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return (abs((a - b)) <= max((rel_tol * max(abs(a), abs(b))), abs_tol))
m = meter.AverageValueMeter()
samples = ([0.7] * 10)
truth = np.array([])
for sample in samples:
truth = np.append(truth, sample)
m.add(sample)
(mean, std) = m.value()
self.assertTrue(isclose(truth.mean(), mean))
self.assertTrue(((math.isnan(std) and math.isnan(truth.std(ddof=1))) or (math.isinf(std) and math.isnan(truth.std(ddof=1))) or isclose(std, truth.std(ddof=1), abs_tol=1e-07)))
def testClassErrorMeter(self):
mtr = meter.ClassErrorMeter(topk=[1])
output = torch.eye(3)
if hasattr(torch, 'arange'):
target = torch.arange(0, 3)
else:
target = torch.range(0, 2)
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [0], 'All should be correct')
target[0] = 1
target[1] = 0
target[2] = 0
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [50.0], 'Half should be correct')
def testClassErrorMeteri_batch1(self):
mtr = meter.ClassErrorMeter(topk=[1])
output = torch.tensor([1, 0, 0])
if hasattr(torch, 'arange'):
target = torch.arange(0, 1)
else:
target = torch.range(0, 0)
mtr.add(output, target)
err = mtr.value()
self.assertEqual(err, [0], 'All should be correct')
def testConfusionMeter(self):
mtr = meter.ConfusionMeter(k=3)
output = torch.Tensor([[0.8, 0.1, 0.1], [10, 11, 10], [0.2, 0.2, 0.3]])
if hasattr(torch, 'arange'):
target = torch.arange(0, 3)
else:
target = torch.range(0, 2)
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), 3, 'All should be correct')
self.assertEqual(conf_mtrx.diagonal().sum(), 3, 'All should be correct')
target = torch.Tensor([1, 0, 0])
mtr.add(output, target)
self.assertEqual(conf_mtrx.sum(), 6, 'Six tests should give six values')
self.assertEqual(conf_mtrx.diagonal().sum(), 3, "Shouldn't have changed since all new values were false")
self.assertEqual(conf_mtrx[0].sum(), 3, 'All top have gotten one guess')
self.assertEqual(conf_mtrx[1].sum(), 2, 'Two first at the 2nd row have a guess')
self.assertEqual(conf_mtrx[1][2], 0, 'The last one should be empty')
self.assertEqual(conf_mtrx[2].sum(), 1, 'Bottom row has only the first test correct')
self.assertEqual(conf_mtrx[2][2], 1, 'Bottom row has only the first test correct')
mtr = meter.ConfusionMeter(k=4, normalized=True)
output = torch.Tensor([[0.8, 0.1, 0.1, 0], [10, 11, 10, 0], [0.2, 0.2, 0.3, 0], [0, 0, 0, 1]])
target = torch.Tensor([0, 1, 2, 3])
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), output.size(1), 'All should be correct')
self.assertEqual(conf_mtrx.diagonal().sum(), output.size(1), 'All should be correct')
target[0] = 1
target[1] = 0
target[2] = 0
mtr.add(output, target)
conf_mtrx = mtr.value()
self.assertEqual(conf_mtrx.sum(), output.size(1), 'The normalization should sum all values to 1')
for (i, row) in enumerate(conf_mtrx):
self.assertEqual(row.sum(), 1, (('Row no ' + str(i)) + ' fails to sum to one in normalized mode'))
def testMSEMeter(self):
    """The MSE between all-ones predictions and all-zeros targets is exactly 1."""
    predictions = torch.ones(7)
    targets = torch.zeros(7)
    mse = meter.MSEMeter()
    mse.add(predictions, targets)
    self.assertEqual(1.0, mse.value())
def testMovingAverageValueMeter(self):
    """Feed values one by one and check the windowed (size 3) mean/std pairs."""
    windowed = meter.MovingAverageValueMeter(3)
    # (value added, expected mean, expected std) after each add, in order.
    expectations = [
        (1, 1.0, 0.0),
        (3, 2.0, math.sqrt(2)),
        (5, 3.0, 2.0),
        (4, 4.0, 1.0),
        (0, 3.0, math.sqrt(7)),
    ]
    for value, want_mean, want_std in expectations:
        windowed.add(value)
        mean, std = windowed.value()
        self.assertEqual(mean, want_mean)
        self.assertEqual(std, want_std)
def testAUCMeter(self):
    """AUC should be ~0.5 for random scores and exactly 1.0 for separable ones."""
    auc_meter = meter.AUCMeter()
    test_size = 1000
    # Uniformly random scores against half-0 / half-1 labels: near chance.
    auc_meter.add(torch.rand(test_size), torch.zeros(test_size))
    auc_meter.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))
    auc, tpr, fpr = auc_meter.value()
    self.assertTrue(math.fabs(auc - 0.5) < 0.1, msg='AUC Meter fails')
    # Perfectly separable: every positive outscores every negative.
    auc_meter.reset()
    for negative_score in (0, 0.1, 0.2, 0.3, 0.4):
        auc_meter.add(torch.Tensor(test_size).fill_(negative_score), torch.zeros(test_size))
    auc_meter.add(torch.Tensor(test_size).fill_(1), torch.Tensor(test_size).fill_(1))
    auc, tpr, fpr = auc_meter.value()
    self.assertEqual(auc, 1.0, msg='AUC Meter fails')
def testAPMeter(self):
    # Purpose: check APMeter's average precision against hand-computed values,
    # with and without per-sample weights, for 1-D and multi-column inputs.
    mtr = meter.APMeter()
    # Case 1/2: ascending-ish scores, two positives, weighted then unweighted.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    mtr.add(output, target, weight)
    ap = mtr.value()
    # Hand-computed weighted AP (precision at each positive, averaged).
    val = ((((((1 * 0.1) / 0.1) + ((0 * 2.0) / 2.1)) + ((1.1 * 1) / 3.1)) + ((0 * 1) / 4)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test1 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((((1 * 1.0) / 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test2 failed')
    # Case 3/4: strictly descending scores.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([4, 3, 2, 1])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((0 * 1.0) / 1.0) + ((1.0 * 2.0) / 3.0)) + ((2.0 * 0) / 6.0)) + ((6.0 * 1.0) / 10.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test3 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test4 failed')
    # Case 5/6: positives ranked first.
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((4 * 1.0) / 4.0) + ((6 * 1.0) / 6.0)) + ((0 * 6.0) / 9.0)) + ((0 * 6.0) / 10.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test5 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (((((1 * 1.0) + ((2 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test6 failed')
    # Degenerate case: no positive targets at all -> AP must be 0.
    target = torch.Tensor([0, 0, 0, 0])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.0)
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.0)
    # Case 7/8: three samples, mixed ranking.
    target = torch.Tensor([1, 1, 0])
    output = torch.Tensor([3, 1, 2])
    weight = torch.Tensor([1, 0.1, 3])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (((((1 * 1.0) / 1.0) + ((1 * 0.0) / 4.0)) + (1.1 / 4.1)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test7 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) / 2.0)
    self.assertTrue((math.fabs((ap[0] - val)) < 0.01), msg='ap test8 failed')
    # Case 9/10: two-column (multi-class) inputs; AP computed per column.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap.sum() - torch.Tensor([((((((1 * 3.0) / 3.0) + ((0 * 3.0) / 5.0)) + ((3.5 * 1) / 5.5)) + ((0 * 3.5) / 6.5)) / 2.0), ((((((0 * 1.0) / 1.0) + ((1 * 0.5) / 1.5)) + ((0 * 0.5) / 3.5)) + ((1 * 3.5) / 6.5)) / 2.0)]).sum())) < 0.01), msg='ap test9 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap.sum() - torch.Tensor([(((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3)) + ((0 * 1.0) / 4.0)) / 2.0), (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2.0 * 1.0) / 4.0)) / 2.0)]).sum())) < 0.01), msg='ap test10 failed')
    # Case 11: incremental adds with different batch sizes keep 4 classes.
    mtr.reset()
    output = torch.Tensor(5, 4).fill_(0.25)
    target = torch.ones(5, 4)
    mtr.add(output, target)
    output = torch.Tensor(1, 4).fill_(0.25)
    target = torch.ones(1, 4)
    mtr.add(output, target)
    self.assertEqual(mtr.value().size(0), 4, msg='ap test11 failed')
def testmAPMeter(self):
    # Purpose: mAPMeter should return the mean of the per-class APMeter
    # values; expectations below mirror the per-class values in testAPMeter.
    mtr = meter.mAPMeter()
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    # Single class, unweighted: mAP equals that class's AP.
    mtr.add(output, target)
    ap = mtr.value()
    val = ((((((1 * 1.0) / 1.0) + ((0 * 1.0) / 2.0)) + ((2.0 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap - val)) < 0.01), msg='mAP test1 failed')
    # Single class, weighted.
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = ((((((1 * 0.1) / 0.1) + ((0 * 2.0) / 2.1)) + ((1.1 * 1) / 3.1)) + ((0 * 1.0) / 4.0)) / 2.0)
    self.assertTrue((math.fabs((ap - val)) < 0.01), msg='mAP test2 failed')
    # Two classes: mAP is the mean of the two per-class APs.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap - torch.Tensor([((((((1 * 3.0) / 3.0) + ((0 * 3.0) / 5.0)) + ((3.5 * 1) / 5.5)) + ((0 * 3.5) / 6.5)) / 2.0), ((((((0 * 1.0) / 1.0) + ((1 * 0.5) / 1.5)) + ((0 * 0.5) / 3.5)) + ((1 * 3.5) / 6.5)) / 2.0)]).mean())) < 0.01), msg='mAP test3 failed')
    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertTrue((math.fabs((ap - torch.Tensor([(((((1 * 1.0) + ((0 * 1.0) / 2.0)) + ((2 * 1.0) / 3.0)) + ((0 * 1.0) / 4.0)) / 2.0), (((((0 * 1.0) + ((1 * 1.0) / 2.0)) + ((0 * 1.0) / 3.0)) + ((2 * 1.0) / 4.0)) / 2.0)]).mean())) < 0.01), msg='mAP test4 failed')
|
class TestTransforms(unittest.TestCase):
    """Unit tests for the functional helpers in torchnet.transform."""

    def testCompose(self):
        # Functions are applied left to right: ((1 + 1) + 2) / 2 == 2.
        pipeline = transform.compose([(lambda x: (x + 1)), (lambda x: (x + 2)), (lambda x: (x / 2))])
        self.assertEqual(pipeline(1), 2)

    def testTableMergeKeys(self):
        samples = {'sample1': {'input': 1, 'target': 'a'},
                   'sample2': {'input': 2, 'target': 'b', 'flag': 'hard'}}
        merged = transform.tablemergekeys()(samples)
        # Inner keys become outer keys; missing keys ('flag') keep only the
        # samples that actually carried them.
        self.assertEqual(merged['input'], {'sample1': 1, 'sample2': 2})
        self.assertEqual(merged['target'], {'sample1': 'a', 'sample2': 'b'})
        self.assertEqual(merged['flag'], {'sample2': 'hard'})

    def testTableApply(self):
        incremented = transform.tableapply((lambda x: (x + 1)))({1: 1, 2: 2})
        self.assertEqual(incremented, {1: 2, 2: 3})

    def testMakeBatch(self):
        samples = [{'input': torch.randn(4), 'target': 'a'},
                   {'input': torch.randn(4), 'target': 'b'}]
        batch = transform.makebatch()(samples)
        # Tensors stack along a new leading dim; non-tensors collect in a list.
        self.assertEqual(batch['input'].size(), torch.Size([2, 4]))
        self.assertEqual(batch['target'], ['a', 'b'])
|
class BatchDataset(Dataset):
    """Dataset which batches samples from an underlying dataset.

    Merges consecutive samples of `dataset` into batches of size `batchsize`.
    `perm(idx, size)` maps a linear position to a (possibly shuffled) index in
    the underlying dataset; by default it is the identity.  `merge` is passed
    to `transform.makebatch` to control how samples are aggregated (default:
    stack tensors along the first dimension).

    Args:
        dataset (Dataset): Dataset to be batched.
        batchsize (int): Size of the batch.
        perm (function, optional): `perm(idx, size)` returns the shuffled
            index of the `idx`-th sample; identity by default.
        merge (function, optional): Batching behaviour forwarded to
            `transform.makebatch(merge)`. Default: None.
        policy (str, optional): How to handle a dataset size that is not
            divisible by `batchsize`:
            - 'include-last': keep a final, possibly smaller batch;
            - 'skip-last': drop the trailing remainder;
            - 'divisible-only': assert exact divisibility.
        filter (function, optional): Samples for which `filter(sample)` is
            False are excluded from the batch. Default keeps everything.
    """

    def __init__(self, dataset, batchsize, perm=(lambda idx, size: idx), merge=None, policy='include-last', filter=(lambda sample: True)):
        super(BatchDataset, self).__init__()
        self.dataset = dataset
        self.perm = perm
        self.batchsize = batchsize
        self.policy = policy
        self.filter = filter
        self.makebatch = transform.makebatch(merge)
        # Fail fast on an invalid policy or a non-divisible size.
        len(self)

    def __len__(self):
        # BUG FIX: the previous implementation computed sizes through float
        # division (e.g. int(math.ceil(float(n / b)))).  Under Python 3 true
        # division, the 'divisible-only' branch returned a float, which makes
        # len() raise TypeError, and float rounding could miscount for very
        # large datasets.  Exact integer arithmetic avoids both problems.
        n = len(self.dataset)
        if self.policy == 'include-last':
            # Ceiling division without floats.
            return (n + self.batchsize - 1) // self.batchsize
        elif self.policy == 'skip-last':
            return n // self.batchsize
        elif self.policy == 'divisible-only':
            assert n % self.batchsize == 0, 'dataset size is not divisible by batch size'
            return n // self.batchsize
        else:
            assert False, 'invalid policy (include-last | skip-last | divisible-only expected)'

    def __getitem__(self, idx):
        super(BatchDataset, self).__getitem__(idx)  # shared bounds check
        maxidx = len(self.dataset)
        samples = []
        for i in range(0, self.batchsize):
            j = (idx * self.batchsize) + i
            if j >= maxidx:
                # Last (possibly partial) batch under 'include-last'.
                break
            j = self.perm(j, maxidx)
            sample = self.dataset[j]
            if self.filter(sample):
                samples.append(sample)
        samples = self.makebatch(samples)
        return samples
|
class ConcatDataset(Dataset):
    """Dataset that concatenates multiple datasets on the fly.

    Indexing is delegated to the underlying dataset that owns the index,
    located via a cumulative-size table, so no data is copied up front.

    Args:
        datasets (iterable): Non-empty iterable of datasets to concatenate.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        self.datasets = list(datasets)
        # BUG FIX: check the materialized list, not the original iterable.
        # `list(datasets)` may have consumed a one-shot iterator, and
        # len() on a generator raises TypeError regardless of its contents.
        assert len(self.datasets) > 0, 'datasets should not be an empty iterable'
        # cum_sizes[i] == total number of samples in datasets[0..i].
        self.cum_sizes = np.cumsum([len(x) for x in self.datasets])

    def __len__(self):
        return self.cum_sizes[-1]

    def __getitem__(self, idx):
        super(ConcatDataset, self).__getitem__(idx)  # shared bounds check
        # 'right' ensures an index equal to a cumulative boundary falls into
        # the next dataset.
        dataset_index = self.cum_sizes.searchsorted(idx, 'right')
        if dataset_index == 0:
            dataset_idx = idx
        else:
            dataset_idx = idx - self.cum_sizes[dataset_index - 1]
        return self.datasets[dataset_index][dataset_idx]
|
class Dataset(object):
    """Abstract base class for torchnet datasets.

    Subclasses implement `__len__` and `__getitem__`; the fluent helper
    methods below wrap `self` in the corresponding dataset adapters so
    pipelines can be built by chaining (e.g. `ds.shuffle().batch(32)`).
    """

    def __init__(self):
        pass

    def __len__(self):
        # Abstract: subclasses must return the dataset size.
        pass

    def __getitem__(self, idx):
        # Shared upper-bound check; subclasses call this via super() before
        # returning their data.  (Negative indices are not checked here.)
        if (idx >= len(self)):
            raise IndexError('CustomRange index out of range')
        pass

    def batch(self, *args, **kwargs):
        # Wrap in a BatchDataset; see that class for argument details.
        return torchnet.dataset.BatchDataset(self, *args, **kwargs)

    def transform(self, *args, **kwargs):
        return torchnet.dataset.TransformDataset(self, *args, **kwargs)

    def shuffle(self, *args, **kwargs):
        return torchnet.dataset.ShuffleDataset(self, *args, **kwargs)

    def parallel(self, *args, **kwargs):
        # Hand off to torch's DataLoader for multi-worker loading.
        return DataLoader(self, *args, **kwargs)

    def split(self, *args, **kwargs):
        return torchnet.dataset.SplitDataset(self, *args, **kwargs)
|
class ListDataset(Dataset):
    """Dataset backed by a list of arguments fed to a user-supplied loader.

    The i-th sample is `load(elem_list[i])`.  `elem_list` may instead be the
    path of a text file, in which case each line (newline stripped) becomes
    one element.  When `path` is given, elements are treated as strings and
    prefixed with `path/` before being handed to `load`.

    Args:
        elem_list (iterable/str): Arguments for `load`, or a file path with
            one argument per line.
        load (function, optional): Loader applied to each element; identity
            by default.
        path (str, optional): Prefix directory for string elements.
    """

    def __init__(self, elem_list, load=(lambda x: x), path=None):
        super(ListDataset, self).__init__()
        if isinstance(elem_list, str):
            # The string names a file: one list entry per line.
            with open(elem_list) as f:
                self.list = [line.replace('\n', '') for line in f]
        else:
            self.list = elem_list
        self.path = path
        self.load = load

    def __len__(self):
        return len(self.list)

    def __getitem__(self, idx):
        super(ListDataset, self).__getitem__(idx)
        elem = self.list[idx]
        if self.path is not None:
            elem = ('%s/%s' % (self.path, elem))
        return self.load(elem)
|
class ResampleDataset(Dataset):
    """Dataset which re-samples another dataset through a sampler function.

    Sample `idx` of the new dataset is `dataset[sampler(dataset, idx)]`.
    If `size` is given (and positive) the new dataset reports that size,
    which may differ from the underlying one; otherwise the sizes match.
    Useful for shuffling, re-weighting, or sub-setting data (see
    `ShuffleDataset` for the common shuffling case).

    Args:
        dataset (Dataset): Dataset to be resampled.
        sampler (function, optional): `sampler(dataset, idx)` must return an
            index valid for `dataset`; identity by default.
        size (int, optional): Size of the resampled dataset; defaults to the
            underlying dataset's size.
    """

    def __init__(self, dataset, sampler=(lambda ds, idx: idx), size=None):
        super(ResampleDataset, self).__init__()
        self.dataset = dataset
        self.sampler = sampler
        self.size = size

    def __len__(self):
        # A missing (None) or non-positive size falls back to the
        # underlying dataset's length.
        if self.size and self.size > 0:
            return self.size
        return len(self.dataset)

    def __getitem__(self, idx):
        super(ResampleDataset, self).__getitem__(idx)
        resampled = self.sampler(self.dataset, idx)
        if not (0 <= resampled < len(self.dataset)):
            raise IndexError('out of range')
        return self.dataset[resampled]
|
class ShuffleDataset(ResampleDataset):
    """Dataset which uniformly shuffles another dataset.

    Convenience subclass of `ResampleDataset` that samples uniformly from
    `dataset`, with or without `replacement`.  The chosen permutation can be
    redrawn at any time via `resample()`.  With `replacement=True` the
    requested `size` may exceed the underlying dataset's size; without it,
    `size` defaults to (and may not exceed) `len(dataset)`.

    Args:
        dataset (Dataset): Dataset to be shuffled.
        size (int, optional): Size of the shuffled dataset.
        replacement (bool, optional): Sample with replacement. Default False.

    Raises:
        ValueError: If `size` exceeds `len(dataset)` and `replacement` is
            False.
    """

    def __init__(self, dataset, size=None, replacement=False):
        if (size and (not replacement) and (size > len(dataset))):
            raise ValueError('size cannot be larger than underlying dataset size when sampling without replacement')
        # The sampler closure reads self.perm lazily at lookup time, so it is
        # safe to hand it to the parent before resample() first sets
        # self.perm below.
        super(ShuffleDataset, self).__init__(dataset, (lambda dataset, idx: self.perm[idx]), size)
        self.replacement = replacement
        self.resample()

    def resample(self, seed=None):
        'Resample the dataset.\n\n        Args:\n            seed (int, optional): Seed for resampling. By default no seed is\n                used.\n        '
        if (seed is not None):
            # torch.manual_seed returns the (seeded) default generator.
            gen = torch.manual_seed(seed)
        else:
            gen = torch.default_generator
        if self.replacement:
            # len(self) indices drawn uniformly (with replacement) from
            # [0, len(dataset)).
            self.perm = torch.LongTensor(len(self)).random_(len(self.dataset), generator=gen)
        else:
            # A full permutation truncated to the first len(self) entries.
            self.perm = torch.randperm(len(self.dataset), generator=gen).narrow(0, 0, len(self))
|
class SplitDataset(Dataset):
    """Dataset that partitions another dataset into named splits.

    `partitions` maps a user-chosen name to either a fractional weight
    (values summing to <= 1) or an absolute sample count.  Splitting is
    linear (no shuffling) — combine with `ShuffleDataset` first if needed.
    Call `select(name)` to choose which partition `__len__`/`__getitem__`
    operate on.

    Args:
        dataset (Dataset): Dataset to be split.
        partitions (dict): Name -> weight (0..1) or size (sample count).
        initial_partition (str, optional): Partition to select up front.
    """

    def __init__(self, dataset, partitions, initial_partition=None):
        super(SplitDataset, self).__init__()
        self.dataset = dataset
        self.partitions = partitions
        assert isinstance(partitions, dict), 'partitions must be a dict'
        assert len(partitions) >= 2, 'SplitDataset should have at least two partitions'
        assert min(partitions.values()) >= 0, 'partition sizes cannot be negative'
        assert max(partitions.values()) > 0, 'all partitions cannot be empty'
        # Sort names for a deterministic partition order.
        self.partition_names = sorted(self.partitions.keys())
        self.partition_index = {name: i for i, name in enumerate(self.partition_names)}
        self.partition_sizes = [self.partitions[name] for name in self.partition_names]
        if sum(self.partition_sizes) <= 1:
            # Fractional weights: convert to absolute sample counts.
            self.partition_sizes = [round(fraction * len(dataset)) for fraction in self.partition_sizes]
        else:
            for size in self.partition_sizes:
                assert size == int(size), 'partition sizes should be integer numbers, or sum up to <= 1 '
        # partition_cum_sizes[i] is the offset where partition i+1 begins.
        self.partition_cum_sizes = np.cumsum(self.partition_sizes)
        if initial_partition is not None:
            self.select(initial_partition)

    def select(self, partition):
        """Select the active partition.

        Args:
            partition (str): Name of the partition to select.
        """
        self.current_partition_idx = self.partition_index[partition]

    def __len__(self):
        try:
            return self.partition_sizes[self.current_partition_idx]
        except AttributeError:
            raise ValueError('Select a partition before accessing data.')

    def __getitem__(self, idx):
        super(SplitDataset, self).__getitem__(idx)
        try:
            if self.current_partition_idx == 0:
                return self.dataset[idx]
            offset = self.partition_cum_sizes[self.current_partition_idx - 1]
            return self.dataset[int(offset) + idx]
        except AttributeError:
            raise ValueError('Select a partition before accessing data.')
|
class TensorDataset(Dataset):
    """Dataset over data already resident in memory.

    Accepted forms of `data` and the resulting i-th sample:
    - tensor or numpy array:        `data[i]`
    - dict of tensors/arrays:       `{k: v[i] for k, v in data.items()}`
    - list of tensors/arrays:       `[v[i] for v in data]`

    Args:
        data (dict/list/tensor/ndarray): Data for the dataset.  For dicts
            and lists, all contained values must share the same length.
    """

    def __init__(self, data):
        super(TensorDataset, self).__init__()
        if isinstance(data, dict):
            assert len(data) > 0, 'Should have at least one element'
            expected = len(list(data.values())[0])
            for value in data.values():
                assert len(value) == expected, 'All values must have the same size'
        elif isinstance(data, list):
            assert len(data) > 0, 'Should have at least one element'
            expected = len(data[0])
            for value in data:
                assert len(value) == expected, 'All elements must have the same size'
        self.data = data

    def __len__(self):
        if isinstance(self.data, dict):
            return len(list(self.data.values())[0])
        if isinstance(self.data, list):
            return len(self.data[0])
        if torch.is_tensor(self.data) or isinstance(self.data, np.ndarray):
            return len(self.data)

    def __getitem__(self, idx):
        super(TensorDataset, self).__getitem__(idx)
        data = self.data
        if isinstance(data, dict):
            return {key: value[idx] for key, value in data.items()}
        if isinstance(data, list):
            return [value[idx] for value in data]
        if torch.is_tensor(data) or isinstance(data, np.ndarray):
            return data[idx]
|
class TransformDataset(Dataset):
    """Apply a transform (or a dict of per-key transforms) on the fly.

    With a callable `transforms`, sample `idx` is `transforms(dataset[idx])`.
    With a dict of callables, `dataset[idx]` must itself be a dict containing
    every key of `transforms`, and `transforms[k]` is applied to
    `dataset[idx][k]` for each such key.  The dataset size is unchanged.

    Args:
        dataset (Dataset): Dataset to be transformed.
        transforms (function/dict): Function, or dict of functions, applied
            to each sample when it is fetched.
    """

    def __init__(self, dataset, transforms):
        super(TransformDataset, self).__init__()
        assert isinstance(transforms, dict) or callable(transforms), 'expected a dict of transforms or a function'
        if isinstance(transforms, dict):
            for k, v in transforms.items():
                assert callable(v), (str(k) + ' is not a function')
        self.dataset = dataset
        self.transforms = transforms

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        super(TransformDataset, self).__getitem__(idx)
        sample = self.dataset[idx]
        if not isinstance(self.transforms, dict):
            return self.transforms(sample)
        # Dict case: rewrite each transformed key in place on the sample.
        for key, fn in self.transforms.items():
            sample[key] = fn(sample[key])
        return sample
|
class Engine(object):
    """Minimal train/test loop driver that fires user-registered hooks
    (stored in `self.hooks` by name) at fixed points of the loop."""

    def __init__(self):
        # Maps hook name -> callable taking the engine `state` dict.
        self.hooks = {}

    def hook(self, name, state):
        """Invoke the hook registered under `name`, passing it `state`.

        Does nothing when no hook with that name is registered.
        NOTE(review): the previous docstring here was an unrelated
        copy-paste of ``Tensor.register_hook`` documentation.
        """
        if (name in self.hooks):
            self.hooks[name](state)

    def train(self, network, iterator, maxepoch, optimizer):
        """Run the optimization loop for `maxepoch` epochs over `iterator`.

        `network(sample)` must return a `(loss, output)` pair.  Hooks fired,
        in order: on_start, on_start_epoch, on_sample, on_forward, on_update,
        on_end_epoch, on_end.  Returns the final state dict.
        """
        state = {'network': network, 'iterator': iterator, 'maxepoch': maxepoch, 'optimizer': optimizer, 'epoch': 0, 't': 0, 'train': True}
        self.hook('on_start', state)
        while (state['epoch'] < state['maxepoch']):
            self.hook('on_start_epoch', state)
            for sample in state['iterator']:
                state['sample'] = sample
                self.hook('on_sample', state)

                def closure():
                    # Forward + backward; 'on_forward' observes loss/output,
                    # which are then cleared before returning.
                    (loss, output) = state['network'](state['sample'])
                    state['output'] = output
                    state['loss'] = loss
                    loss.backward()
                    self.hook('on_forward', state)
                    state['output'] = None
                    state['loss'] = None
                    return loss
                state['optimizer'].zero_grad()
                # step(closure) lets optimizers re-evaluate the loss (e.g. LBFGS).
                state['optimizer'].step(closure)
                self.hook('on_update', state)
                state['t'] += 1
            state['epoch'] += 1
            self.hook('on_end_epoch', state)
        self.hook('on_end', state)
        return state

    def test(self, network, iterator):
        """Run a forward-only pass over `iterator` (no backward, no optimizer).

        Hooks fired, in order: on_start, on_sample, on_forward, on_end.
        Returns the final state dict.
        """
        state = {'network': network, 'iterator': iterator, 't': 0, 'train': False}
        self.hook('on_start', state)
        for sample in state['iterator']:
            state['sample'] = sample
            self.hook('on_sample', state)

            def closure():
                # Forward only; 'on_forward' observes loss/output before
                # they are cleared.
                (loss, output) = state['network'](state['sample'])
                state['output'] = output
                state['loss'] = loss
                self.hook('on_forward', state)
                state['output'] = None
                state['loss'] = None
            closure()
            state['t'] += 1
        self.hook('on_end', state)
        return state
|
class FileLogger(object):
    """Periodically append experiment results to a pickle file.

    The file holds ``{'tasks': [...], 'results': [...]}`` where the two
    top-level lists are parallel: ``results[i]`` is the list of results
    logged so far for ``tasks[i]``.  Lists (rather than a dict) preserve the
    temporal order in which tasks were first seen.

    Args:
        filepath (str): Path to write results to.
        overwrite (bool): Whether to clobber the file if it exists.

    Example:
        >>> logger = FileLogger(path)
        >>> for task in ['CIFAR-10', 'SVHN']:
        >>>     train_results = train_model()
        >>>     test_results = test_model()
        >>>     logger.log(task, {'Train': train_results, 'Test': test_results})
    """

    def __init__(self, filepath, overwrite=False):
        if not overwrite:
            assert (not os.path.exists(filepath)), 'Cannot write results to "{}". Already exists!'.format(filepath)
        # Always start from an empty results structure on disk.
        with open(filepath, 'wb') as f:
            pickle.dump({'tasks': [], 'results': []}, f)
        self.filepath = filepath
        self.tasks = set()

    def _add_task(self, task_name):
        # Guard against double registration; `log` checks membership first.
        assert (task_name not in self.tasks), 'Task already added! Use a different name.'
        self.tasks.add(task_name)

    def log(self, task_name, result):
        """Append `result` under `task_name`, creating the task entry on first use.

        Args:
            task_name (str): Name of the currently running task.  A
                previously unseen name creates new parallel entries in both
                'tasks' and 'results'.
            result: Appended to the result list for `task_name`.
        """
        # Read-modify-write the whole file so the on-disk copy is always
        # complete after each call.
        with open(self.filepath, 'rb') as f:
            existing = pickle.load(f)
        if task_name not in self.tasks:
            self._add_task(task_name)
            existing['tasks'].append(task_name)
            existing['results'].append([])
        existing['results'][existing['tasks'].index(task_name)].append(result)
        with open(self.filepath, 'wb') as f:
            pickle.dump(existing, f)
|
class Logger(object):
    """Base logger that tracks which variables (fields) should be logged.

    Subclasses override `log` / `log_state`; this base only stores the
    `fields` list and asserts it was set before use.
    """

    # Backing storage for the `fields` property; None means "not set yet".
    _fields = None

    @property
    def fields(self):
        assert (self._fields is not None), 'self.fields is not set!'
        return self._fields

    @fields.setter
    def fields(self, value):
        # BUG FIX: the setter previously read `self._fields` without
        # assigning, so `fields` could never actually be set and the
        # property's assertion always fired once accessed.
        self._fields = value

    def __init__(self, fields=None):
        """Automatically logs the variables in `fields`.

        Args:
            fields (list, optional): Names of the variables to log.
        """
        self.fields = fields

    def log(self, *args, **kwargs):
        # To be overridden by concrete loggers.
        pass

    def log_state(self, state_dict):
        # To be overridden by concrete loggers.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.