code stringlengths 17 6.64M |
|---|
def mkdirs(*paths: Path, exist_ok: bool=True, parents: bool=True, **kwargs) -> None:
    """Create every given directory, defaulting to the laxer `exist_ok`/`parents` behaviour."""
    for path in paths:
        path.mkdir(exist_ok=exist_ok, parents=parents, **kwargs)
|
def pil2np(img: Image, /) -> ty.A:
    """Convert a PIL image in [0, 255] into a float32 numpy array in [0, 1]."""
    arr = np.array(img, dtype=np.float32)
    return arr / 255.0
|
def np2pil(arr: ty.A, /) -> Image:
    """Convert a numpy image in [0, 1] (or an already-uint8 array) into a PIL [0, 255] image."""
    if arr.dtype == np.uint8:
        # Already byte-valued: no rescaling required.
        return Image.fromarray(arr)
    assert arr.max() <= 1
    scaled = (arr * 255).astype(np.uint8)
    return Image.fromarray(scaled)
|
def write_yaml(file: Path, data: dict, mkdir: bool=False, sort_keys: bool=False) -> None:
    """Serialise `data` to `file` (suffix forced to `.yaml`), optionally creating parent dirs."""
    target = Path(file).with_suffix('.yaml')
    if mkdir:
        mkdirs(target.parent)
    with open(target, 'w') as fh:
        yaml.dump(data, fh, sort_keys=sort_keys)
|
def load_yaml(file: Path, loader: ty.N[yaml.Loader]=yaml.FullLoader) -> dict:
    """Parse a single YAML file into a dict using the given loader."""
    with open(file) as fh:
        return yaml.load(fh, Loader=loader)
|
def load_merge_yaml(*files: Path) -> dict:
    """Load a list of YAML configs and recursively merge into a single config.

    The first file acts as the "base" config; each subsequent file recursively
    updates it, i.e. ((((1 <- 2) <- 3) <- 4) ... <- n).

    :param files: (Sequence[PathLike]) YAML config files to load, from "oldest" to "newest".
    :return: (dict) The merged config from all given files.
    """
    configs = [load_yaml(f) for f in files]
    merged = configs[0]
    for cfg in configs[1:]:
        merged = _merge_yaml(merged, cfg)
    return merged
|
def _merge_yaml(old: dict, new: dict) -> dict:
'Recursively merge two YAML cfg.\n Dictionaries are recursively merged. All other types simply update the current value.\n\n NOTE: This means that a "list of dicts" will simply be updated to whatever the new value is,\n not appended to or recursively checked!\n\n :param old: (dict) Base dictionary containing default keys.\n :param new: (dict) New dictionary containing keys to overwrite in `old`.\n :return: (dict) The merge config.\n '
d = old.copy()
for (k, v) in new.items():
d[k] = (_merge_yaml(d[k], v) if ((k in d) and isinstance(v, dict)) else v)
return d
|
class ConcatDataLoader():
    """Round-robin concatenation of multiple DataLoaders.

    Items are drawn one at a time from each loader in turn; once a loader is
    exhausted, the remaining loaders keep alternating until all are drained.

    Example:
        dl1 = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        dl2 = ['a', 'b', 'c']
        dl3 = [0.1, 0.2, 0.3, 0.4, 0.5]

        [0, 'a', 0.1, 1, 'b', 0.2, 3, 'c', 0.3, 4, 0.4, 5, 0.5, 6, 7, 8]

    :param dls: (Sequence[DataLoader]) List of dataloaders to combine.
    """

    def __init__(self, dls: ty.S[DataLoader]):
        self.dls = dls
        print(f'-> Created Concat DataLoader with lengths: {[len(dl) for dl in self.dls]}')

    def __len__(self) -> int:
        """Total number of items across all dataloaders."""
        return sum(len(dl) for dl in self.dls)

    def __iter__(self) -> ty.BatchData:
        """Yield items from each loader in turn, skipping exhausted loaders.

        NOTE: `zip_longest` pads exhausted loaders with None, which is filtered
        out here — a loader legitimately yielding None would also be dropped.
        """
        for item in chain.from_iterable(zip_longest(*self.dls)):
            if item is not None:
                yield item

    def set_epoch(self, epoch: int) -> None:
        """Forward the epoch number to any DistributedSampler so shuffling differs across epochs/GPUs."""
        for dl in self.dls:
            if isinstance(dl.sampler, DistributedSampler):
                dl.sampler.set_epoch(epoch)
|
class BaseMetric(Metric):
    """Base class for depth estimation metrics.

    Subclasses implement `_compute`, which scores a single batch; this class
    handles depth-representation conversion (raw/log/inverse), scaling and
    distributed state accumulation.
    """
    higher_is_better = False
    full_state_update = False

    def __init__(self, mode: str='raw', **kwargs):
        super().__init__(**kwargs)
        if mode not in _MODES:
            raise ValueError(f'Invalid mode! ({mode} vs. {_MODES})')
        self.mode: str = mode
        # Scale factor applied to the accumulated metric, chosen per mode.
        self.sf: int = {'raw': 1, 'log': 100, 'inv': 1000}[self.mode]
        self.add_state('metric', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('total', default=torch.tensor(0), dist_reduce_fx='sum')

    def _preprocess(self, input: ty.T, /):
        """Convert the input depth into the representation selected by `mode`."""
        if self.mode == 'log':
            return input.log()
        if self.mode == 'inv':
            # Clip to avoid division blow-up near zero depth.
            return 1 / input.clip(min=0.001)
        return input

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        """Compute an error metric for a single pair.

        :param pred: (Tensor) (b, n) Predicted depth.
        :param target: (Tensor) (b, n) Target depth.
        :return: (Tensor) (b,) Computed metric.
        """
        raise NotImplementedError

    def update(self, pred: ty.T, target: ty.T) -> None:
        """Accumulate the error metric over a whole batch of predictions.

        :param pred: (Tensor) (b, n) Predicted depths masked with NaNs.
        :param target: (Tensor) (b, n) Target depths masked with NaNs.
        """
        batch_metric = self._compute(self._preprocess(pred), self._preprocess(target))
        self.metric += self.sf * batch_metric.sum()
        self.total += pred.shape[0]

    def compute(self) -> ty.T:
        """Average metric over all samples seen so far."""
        return self.metric / self.total
|
class MAE(BaseMetric):
    """Mean absolute error between prediction and target."""

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        diff = pred - target
        return diff.abs().nanmean(dim=1)
|
class RMSE(BaseMetric):
    """Root mean squared error between prediction and target."""

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        sq_err = (pred - target).pow(2)
        return sq_err.nanmean(dim=1).sqrt()
|
class ScaleInvariant(BaseMetric):
    """Scale-invariant error: standard deviation of the per-pixel error."""

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        err = pred - target
        mean_of_sq = err.pow(2).nanmean(dim=1)
        sq_of_mean = err.nanmean(dim=1).pow(2)
        return (mean_of_sq - sq_of_mean).sqrt()
|
class AbsRel(BaseMetric):
    """Absolute error relative to the target depth (reported as a percentage)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # report as a percentage regardless of mode

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        rel_err = (pred - target).abs() / target
        return rel_err.nanmean(dim=1)
|
class SqRel(BaseMetric):
    """Squared error relative to the squared target depth (reported as a percentage)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.sf = 100  # report as a percentage regardless of mode

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        rel_sq_err = (pred - target).pow(2) / target.pow(2)
        return rel_sq_err.nanmean(dim=1)
|
class DeltaAcc(BaseMetric):
    """Compute the accuracy for a given error threshold.

    A pixel counts as accurate when max(target/pred, pred/target) < delta.
    Reported as a percentage of valid (non-NaN) pixels.
    """
    higher_is_better = True

    def __init__(self, delta: float, **kwargs):
        """:param delta: (float) Ratio threshold below which a pixel counts as accurate."""
        super().__init__(**kwargs)
        if self.mode != 'raw':
            raise ValueError('DeltaAcc should only be computed using raw depths.')
        self.delta: float = delta
        self.sf = 100  # report as a percentage

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        thresh = torch.max(target / pred, pred / target)
        # NaN comparisons are False, so the numerator only counts valid pixels
        # below the threshold. FIX: the denominator must be the COUNT of valid
        # pixels — previously it summed the ratio values (`thresh.nansum`),
        # which does not yield an accuracy fraction.
        valid = (~thresh.isnan()).sum(dim=1)
        return (thresh < self.delta).sum(dim=1) / valid
|
class Timer():
    """Context manager for timing a block of code.

    Attributes:
    :param name: (str) Timer label when printing.
    :param as_ms: (bool) If `True`, store time as `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, ensure that GPU is synced on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.

    Example:
    ```
    with Timer('MyTimer') as t:
        time.sleep(1)
    elapsed = t.elapsed
    print(t)

    ===>
    MyTimer: 1.003 s
    ```
    """

    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        self._sf: int = (1000 if self.as_ms else 1)       # seconds -> display-unit factor
        self._units: str = ('ms' if self.as_ms else 's')
        self._sync_fn: ty.N[ty.Callable] = None
        self._start: ty.N[float] = None
        self._end: ty.N[float] = None
        if self.sync_gpu:
            # Imported lazily so CPU-only use never requires torch.
            import torch
            self._sync_fn = torch.cuda.synchronize

    def __repr__(self) -> str:
        """Convert class constructor into string representation."""
        sig = inspect.signature(self.__init__)
        kwargs = {key: getattr(self, key) for key in sig.parameters if hasattr(self, key)}
        s = ', '.join((f'{k}={v}' for (k, v) in kwargs.items()))
        return f'{self.__class__.__qualname__}({s})'

    def __str__(self) -> str:
        """Convert into string representation."""
        return f'{self.name}: {self.elapsed} {self._units}'

    def __enter__(self) -> 'Timer':
        """Start timer and sync GPU."""
        if self.sync_gpu:
            self._sync_fn()
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """End timer and sync GPU."""
        if self.sync_gpu:
            self._sync_fn()
        self._end = time.perf_counter()

    @property
    def elapsed(self) -> float:
        """Time taken between enter and exit.

        :raises AssertionError: If the timer has not been entered and exited.
        """
        # FIX: compare against None — a perf_counter value of exactly 0.0 is
        # falsy and previously tripped these assertions spuriously.
        assert self._start is not None, '`Timer` has not begun'
        assert self._end is not None, '`Timer` has not finished'
        time_taken = self._sf * (self._end - self._start)
        return round(time_taken, self.precision)
|
class MultiLevelTimer():
    """Context manager Timer capable of being nested across multiple levels.

    NOTE: We use the *instance* of this class as a context manager, not the class itself (see examples).

    Timers are stored as a dict, mapping labels to (depth, start, end, elapsed).
    In order to allow for the nesting of these timers, we keep track of what timers are active
    (effectively, a stack). On __exit__ we pop the most recent label and end that timer.

    Attributes:
    :param name: (str) Global Timer name.
    :param as_ms: (bool) If `True`, store time as `milliseconds`, otherwise `seconds`.
    :param sync_gpu: (bool) If `True`, ensure that GPU is synced on Timer enter and exit.
    :param precision: (int) Number of decimal places to print.

    Examples:
    ```
    timer = MultiLevelTimer(name='MyTimer', as_ms=True, precision=4)

    with timer('OuterLevel'):
        time.sleep(2)
        with timer('InnerLevel'):
            time.sleep(1)

    print(timer)

    ==>
    MyTimer
        OuterLevel: 3002.3414 ms
            InnerLevel: 1000.7601 ms
    ```

    Levels can also be named automatically
    ```
    timer = MultiLevelTimer(name='MyTimer')

    with timer:
        time.sleep(2)

    print(timer)

    ==>
    MyTimer
        Level1: 2.002093 s
    ```
    """
    def __init__(self, name: str='Timer', as_ms: bool=False, sync_gpu: bool=False, precision: int=6) -> None:
        self.name: str = name
        self.as_ms: bool = as_ms
        self.sync_gpu: bool = sync_gpu
        self.precision: int = precision
        # Current nesting depth; incremented on __enter__, decremented on __exit__.
        self.depth: int = 0
        # Multiplier converting seconds (perf_counter) to the display unit.
        self._sf: int = (1000 if self.as_ms else 1)
        self._units: str = ('ms' if self.as_ms else 's')
        self._sync_fn: ty.N[ty.Callable] = None
        # Label staged by __call__ for the next __enter__ (None -> auto-named).
        self._label: ty.N[str] = None
        # Stack of currently-open timer labels.
        self._active: list[str] = []
        # label -> {'depth', 'start', 'end', 'elapsed'} for every finished/open timer.
        self._data: dict[(str, ty.TimerData)] = {}
        if self.sync_gpu:
            # Imported lazily so CPU-only use never requires torch.
            import torch
            self._sync_fn = torch.cuda.synchronize
    def __repr__(self) -> str:
        """Convert class constructor into string representation."""
        sig = inspect.signature(self.__init__)
        kwargs = {key: getattr(self, key) for key in sig.parameters if hasattr(self, key)}
        s = ', '.join((f'{k}={v}' for (k, v) in kwargs.items()))
        return f'{self.__class__.__qualname__}({s})'
    def __str__(self) -> str:
        """Convert into string representation: one indented line per timer level."""
        s = [self.name]
        s += [(('\t' * v['depth']) + f"{k}: {v['elapsed']} {self._units}") for (k, v) in self]
        return '\n'.join(s)
    def __getitem__(self, label: str) -> ty.TimerData:
        """Return timer info for the given label."""
        return self._data[label]
    def __iter__(self) -> ty.Generator[(tuple[(str, ty.TimerData)], None, None)]:
        """Iterate through all timers as (`label`, `timer`)."""
        for k in self._data:
            (yield (k, self[k]))
    def __call__(self, label: str) -> 'MultiLevelTimer':
        """Stage a label for the next `with` block; required to name a nesting level."""
        self._label = label
        return self
    def __enter__(self) -> 'MultiLevelTimer':
        """Context manager entry point: open a (possibly auto-named) timer level.

        :raises KeyError: If the label has already been used on this instance.
        """
        self.depth += 1
        # Consume the staged label (if any); fall back to an auto-generated one.
        (label, self._label) = (self._label, None)
        label = (label or f'Level{self.depth}')
        if (label in self._data):
            raise KeyError(f'Duplicate Timer key: {label}')
        if self.sync_gpu:
            self._sync_fn()
        self._active.append(label)
        self._data[label] = {'depth': self.depth, 'start': time.perf_counter(), 'end': None, 'elapsed': None}
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Context manager exit point: close the most recently opened timer."""
        assert self._active, 'What are you doing here??'
        label = self._active.pop()
        timer = self._data[label]
        if self.sync_gpu:
            self._sync_fn()
        timer['end'] = time.perf_counter()
        timer['elapsed'] = round((self._sf * (timer['end'] - timer['start'])), self.precision)
        self.depth -= 1
    def reset(self) -> None:
        """Delete all existing `Timer` data.

        :raises RuntimeError: If any timer level is still open.
        """
        if self._active:
            raise RuntimeError(f'Attempt to reset Timer while active: {self._active}')
        self._data = {}
    def copy(self) -> 'MultiLevelTimer':
        """Return a deep copy of the timer."""
        return copy.deepcopy(self)
    def to_dict(self, key: str='elapsed') -> dict:
        """Return a dict mapping each label to only the data for the specified key."""
        return {label: data[key] for (label, data) in self}
    @staticmethod
    def mean_elapsed(timers: ty.S['MultiLevelTimer']) -> ty.U[(ty.S, ty.FloatDict)]:
        """Return the average elapsed time for each label in a list of timers.

        NOTE: an empty input sequence is returned unchanged (not converted to a dict).
        """
        if (not timers):
            return timers
        data = {}
        for t in timers:
            for (k, v) in t:
                if (k in data):
                    data[k].append(v['elapsed'])
                else:
                    data[k] = [v['elapsed']]
        data = {k: (sum(v) / len(v)) for (k, v) in data.items()}
        return data
|
def iter_graph(root, callback):
    """Stack-based walk over an autograd graph, invoking `callback` once per node.

    :param root: Node exposing a `next_functions` sequence of (node, idx) pairs.
    :param callback: (Callable) Invoked with each reachable node exactly once.
    """
    pending = [root]
    visited = set()
    while pending:
        node = pending.pop()
        if node in visited:
            continue
        visited.add(node)
        for parent, _ in node.next_functions:
            if parent is not None:
                pending.append(parent)
        callback(node)
|
def register_hooks(var):
    """Attach backward hooks to every node of `var`'s autograd graph and return a
    graph-rendering function.

    The returned `make_dot` should be called AFTER `backward()` has run, so that
    the hooks have recorded each node's gradients; nodes whose gradients contain
    NaNs or very large values are drawn in red.

    :param var: Tensor whose `.grad_fn` roots the autograd graph to inspect.
    :return: (Callable) `make_dot` — builds and returns a graphviz Digraph.
    """
    # Gradients captured per autograd node by the registered hooks.
    fn_dict = {}
    def hook_cb(fn):
        # Record each node's grad_input as backward passes through it.
        def register_grad(grad_input, grad_output):
            fn_dict[fn] = grad_input
        fn.register_hook(register_grad)
    iter_graph(var.grad_fn, hook_cb)
    def is_bad_grad(grad_output):
        # "Bad" = contains NaN (x != x) or any magnitude above 1e6.
        grad_output = grad_output.data
        return (grad_output.ne(grad_output).any() or grad_output.gt(1000000.0).any())
    def make_dot():
        # Render the autograd graph: leaf variables in light blue, op nodes in
        # white, and op nodes with bad recorded gradients in red.
        node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
        dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
        def size_to_str(size):
            return (('(' + ', '.join(map(str, size))) + ')')
        def build_graph(fn):
            if hasattr(fn, 'variable'):
                # Leaf node (e.g. AccumulateGrad) holding a concrete tensor.
                u = fn.variable
                node_name = ('Variable\n ' + size_to_str(u.size()))
                dot.node(str(id(u)), node_name, fillcolor='lightblue')
            else:
                # Every op node must have been seen by the hooks during backward.
                assert (fn in fn_dict), fn
                fillcolor = 'white'
                if any((is_bad_grad(gi) for gi in fn_dict[fn])):
                    fillcolor = 'red'
                dot.node(str(id(fn)), str(type(fn).__name__), fillcolor=fillcolor)
            for (next_fn, _) in fn.next_functions:
                if (next_fn is not None):
                    # Leaf parents are identified by their tensor, ops by the node itself.
                    next_id = id(getattr(next_fn, 'variable', next_fn))
                    dot.edge(str(next_id), str(id(fn)))
        iter_graph(var.grad_fn, build_graph)
        return dot
    return make_dot
|
class Checkpoints():
    """Minimal checkpoint manager: saves model state dicts and resolves resume paths.

    :param args: Namespace with `save` (output directory) and `resume`
        (checkpoint path to resume from, or None).
    """

    def __init__(self, args):
        self.dir_save = args.save
        self.dir_load = args.resume
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.dir_save, exist_ok=True)

    def latest(self, name):
        """Return the checkpoint path to resume from, or None when not resuming.

        Only the 'resume' name is recognised; any other name yields None.
        """
        if name == 'resume':
            return self.dir_load
        return None

    def save(self, epoch, model, best):
        """Persist the model's state dict for `epoch`, but only when `best` is truthy."""
        if best:
            torch.save(model.state_dict(), ('%s/model_epoch_%d.pth' % (self.dir_save, epoch)))
        return None

    def load(self, filename):
        """Load and return a checkpoint from `filename`.

        :return: The loaded object, or None when the file does not exist.
            (FIX: previously returned an unbound local -> UnboundLocalError.)
        """
        if os.path.isfile(filename):
            print("=> loading checkpoint '{}'".format(filename))
            return torch.load(filename)
        print("=> no checkpoint found at '{}'".format(filename))
        return None
|
class Dataloader():
    """Factory that builds train/test datasets and DataLoaders from `args`.

    The dataset class is chosen by name (`args.dataset_train` / `args.dataset_test`)
    from torchvision-style `datasets`, each with its own hard-coded transform
    pipeline. `create()` then wraps the chosen dataset(s) in DataLoaders.

    NOTE(review): `transforms.Scale` and `transforms.RandomSizedCrop` are the
    deprecated pre-0.2 torchvision names — confirm the pinned torchvision version.
    """
    def __init__(self, args):
        self.args = args
        self.loader_input = args.loader_input
        self.loader_label = args.loader_label
        self.split_test = args.split_test
        self.split_train = args.split_train
        self.dataset_test_name = args.dataset_test
        self.dataset_train_name = args.dataset_train
        # (width, height) target resolution used by the resize/crop transforms below.
        self.resolution = (args.resolution_wide, args.resolution_high)
        self.input_filename_test = args.input_filename_test
        self.label_filename_test = args.label_filename_test
        self.input_filename_train = args.input_filename_train
        self.label_filename_train = args.label_filename_train
        # ---- Training dataset: one branch per supported dataset name. ----
        if (self.dataset_train_name == 'LSUN'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(db_path=args.dataroot, classes=['bedroom_train'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'CIFAR10') or (self.dataset_train_name == 'CIFAR100')):
            # CIFAR uses its dataset-specific channel statistics for normalization.
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(self.resolution, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_train_name == 'CocoCaption') or (self.dataset_train_name == 'CocoDetection')):
            # NOTE(review): Coco datasets normally require annotation files, not
            # `train=`/`download=` kwargs — confirm this branch is exercised.
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'STL10') or (self.dataset_train_name == 'SVHN')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, split='train', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'MNIST'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_train_name == 'ImageNet'):
            # Standard ImageNet augmentation with the canonical ImageNet statistics.
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_train = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_train), transform=transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        elif (self.dataset_train_name == 'FRGC'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'Folder'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'FileList'):
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_train_name == 'FolderList'):
            # NOTE(review): intentionally(?) constructs datasets.FileList, not FolderList — confirm.
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
        # ---- Test dataset: mirrors the branches above with eval-time transforms. ----
        if (self.dataset_test_name == 'LSUN'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(db_path=args.dataroot, classes=['bedroom_val'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'CIFAR10') or (self.dataset_test_name == 'CIFAR100')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_test_name == 'CocoCaption') or (self.dataset_test_name == 'CocoDetection')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'STL10') or (self.dataset_test_name == 'SVHN')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, split='test', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'MNIST'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_test_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_test = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
        elif (self.dataset_test_name == 'FRGC'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'Folder'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'FileList'):
            # NOTE(review): `train=True` here looks like a copy-paste slip for a test split — confirm.
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_test_name == 'FolderList'):
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
    def create(self, flag=None):
        """Build the DataLoader(s).

        :param flag: 'Train' -> train loader only; 'Test' -> test loader only;
            None -> (train, test) tuple. Any other value returns None implicitly.
        """
        if (flag == 'Train'):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_train
        if (flag == 'Test'):
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_test
        if (flag == None):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return (dataloader_train, dataloader_test)
|
class FileList(data.Dataset):
    """Dataset reading image (and optional label) paths from text files.

    :param ifile: (str|None) Text file listing one input path per line.
    :param lfile: (str|None) Text file listing one label path per line.
    :param split_train: (float) Fraction of samples assigned to the train split.
    :param split_test: (float) Set to 1.0 to assign everything to the test split.
    :param train: (bool) Whether __len__/__getitem__ serve the train or test split.
    :param transform_train: (Callable|None) Transform applied to train samples.
    :param transform_test: (Callable|None) Transform applied to test samples.
    :param loader_input: Loader callable, or one of 'image'/'torch'/'numpy'.
    :param loader_label: Loader callable, or one of 'image'/'torch'/'numpy'.
    """

    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        self.ifile = ifile
        self.lfile = lfile
        self.train = train
        self.split_test = split_test
        self.split_train = split_train
        self.transform_test = transform_test
        self.transform_train = transform_train
        # Resolve string loader specs to the actual loader callables.
        self.loader_input = loader_input
        self.loader_label = loader_label
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        elif loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        elif loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        elif loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        elif loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        imagelist = []
        if ifile is not None:
            imagelist = [x.rstrip('\n') for x in utils.readtextfile(ifile)]
        labellist = []
        if lfile is not None:
            labellist = [x.rstrip('\n') for x in utils.readtextfile(lfile)]
        # NOTE(review): `shuffle` appears to be a project utility that shuffles
        # in place (jointly when given both lists) — confirm it preserves the
        # image/label correspondence; stdlib random.shuffle would not.
        if len(imagelist) == len(labellist):
            shuffle(imagelist, labellist)
        elif len(imagelist) > 0 and len(labellist) == 0:
            shuffle(imagelist)
        elif len(labellist) > 0 and len(imagelist) == 0:
            shuffle(labellist)
        # FIX: initialise all four splits so attribute access never raises when
        # a branch below leaves some of them unassigned.
        self.images_train = []
        self.images_test = []
        self.labels_train = []
        self.labels_test = []
        if 0.0 < self.split_train < 1.0:
            if len(imagelist) > 0:
                # FIX: was `self.split` (undefined) and `images` (undefined);
                # also `[(num + 1):]` silently dropped one sample.
                num = math.floor(self.split_train * len(imagelist))
                self.images_train = imagelist[:num]
                self.images_test = imagelist[num:]
            if len(labellist) > 0:
                num = math.floor(self.split_train * len(labellist))
                self.labels_train = labellist[:num]
                self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            self.images_train = imagelist
            self.labels_train = labellist
        elif self.split_test == 1.0:
            self.images_test = imagelist
            self.labels_test = labellist

    def __len__(self):
        """Number of samples in the active (train or test) split."""
        return len(self.images_train) if self.train else len(self.images_test)

    def __getitem__(self, index):
        """Load, transform and return the (input, target) pair at `index`."""
        if self.train:
            images, labels, transform = self.images_train, self.labels_train, self.transform_train
        else:
            images, labels, transform = self.images_test, self.labels_test, self.transform_test
        sample = {}
        if len(images) > 0:
            sample['inp'] = self.loader_input(images[index])
        if len(labels) > 0:
            sample['tgt'] = self.loader_label(labels[index])
        if transform is not None:
            sample = transform(sample)
        return (sample['inp'], sample['tgt'])
|
def is_image_file(filename):
    """Return True when `filename` ends with a known image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
|
def make_dataset(classlist, labellist=None):
    """Collect image filenames and per-image labels from listed class directories.

    :param classlist: (str) Text file listing one class directory per line.
    :param labellist: (str|None) Optional text file listing label directories;
        when given, labels are rebuilt from these directories instead.
    :return: (tuple[list, list]) Image filenames and their label entries.

    NOTE(review): reconstructed from broken code that referenced an undefined
    `ifile`, iterated `len(...)` directly (TypeError), and appended to `labels`
    while iterating it — confirm the intended `labellist` semantics with callers.
    """
    images = []
    labels = []
    classes = utils.readtextfile(classlist)  # FIX: was undefined `ifile`
    classes = [x.rstrip('\n') for x in classes]
    classes.sort()
    for cls_dir in classes:  # FIX: `for i in len(classes)` is not iterable
        for fname in os.listdir(cls_dir):
            if is_image_file(fname):
                images.append(fname)
                labels.append({'class': os.path.split(cls_dir)})
    if labellist is not None:
        # FIX: build label entries from the label directories in a fresh list
        # rather than mutating `labels` while iterating over it.
        label_dirs = [x.rstrip('\n') for x in utils.readtextfile(labellist)]
        label_dirs.sort()
        labels = []
        for lab_dir in label_dirs:
            for fname in os.listdir(lab_dir):
                if is_image_file(fname):
                    labels.append(os.path.split(lab_dir))
    return (images, labels)
|
class FolderList(data.Dataset):
    """Dataset built from class directories enumerated via `make_dataset`.

    :param ifile: (str) Text file listing class directories (see `make_dataset`).
    :param lfile: (str|None) Optional text file listing label directories.
    :param split_train: (float) Fraction of samples assigned to the train split.
    :param split_test: (float) Set to 1.0 to assign everything to the test split.
    :param train: (bool) Whether __len__/__getitem__ serve the train or test split.
    :param transform_train: (Callable|None) Transform applied to train samples.
    :param transform_test: (Callable|None) Transform applied to test samples.
    :param loader_input: Loader callable, or one of 'image'/'torch'/'numpy'.
    :param loader_label: Loader callable, or one of 'image'/'torch'/'numpy'.
    :raises RuntimeError: If no images or no labels were found.
    """

    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        (imagelist, labellist) = make_dataset(ifile, lfile)
        if len(imagelist) == 0:
            raise RuntimeError('No images found')
        if len(labellist) == 0:
            raise RuntimeError('No labels found')
        # FIX: `train` and the split fractions were never stored; the original
        # read them from an undefined module-level `args`.
        self.train = train
        self.split_train = split_train
        self.split_test = split_test
        # Resolve string loader specs to the actual loader callables.
        self.loader_input = loader_input
        self.loader_label = loader_label
        if loader_input == 'image':
            self.loader_input = loaders.loader_image
        elif loader_input == 'torch':
            self.loader_input = loaders.loader_torch
        elif loader_input == 'numpy':
            self.loader_input = loaders.loader_numpy
        if loader_label == 'image':
            self.loader_label = loaders.loader_image
        elif loader_label == 'torch':
            self.loader_label = loaders.loader_torch
        elif loader_label == 'numpy':
            self.loader_label = loaders.loader_numpy
        self.imagelist = imagelist
        self.labellist = labellist
        self.transform_test = transform_test
        self.transform_train = transform_train
        # NOTE(review): `shuffle` appears to be a project utility that shuffles
        # in place (jointly when given both lists) — confirm it preserves the
        # image/label correspondence; stdlib random.shuffle would not.
        if len(imagelist) == len(labellist):
            shuffle(imagelist, labellist)
        elif len(imagelist) > 0 and len(labellist) == 0:
            shuffle(imagelist)
        elif len(labellist) > 0 and len(imagelist) == 0:
            shuffle(labellist)
        # FIX: initialise all four splits so attribute access never raises.
        self.images_train = []
        self.images_test = []
        self.labels_train = []
        self.labels_test = []
        if 0.0 < self.split_train < 1.0:
            # FIX: was `args.split` (undefined) and `images` (undefined);
            # also `[(num + 1):]` silently dropped one sample.
            num = math.floor(self.split_train * len(imagelist))
            self.images_train = imagelist[:num]
            self.images_test = imagelist[num:]
            num = math.floor(self.split_train * len(labellist))
            self.labels_train = labellist[:num]
            self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            self.images_train = imagelist
            self.labels_train = labellist
        elif self.split_test == 1.0:
            self.images_test = imagelist
            self.labels_test = labellist

    def __len__(self):
        """Number of samples in the active (train or test) split."""
        return len(self.images_train) if self.train else len(self.images_test)

    def __getitem__(self, index):
        """Load, transform and return the (input, target) pair at `index`."""
        if self.train:
            images, labels, transform = self.images_train, self.labels_train, self.transform_train
        else:
            images, labels, transform = self.images_test, self.labels_test, self.transform_test
        sample = {}  # FIX: the original used `input` without ever creating the dict
        if len(images) > 0:
            sample['inp'] = self.loader_input(images[index])
        if len(labels) > 0:
            sample['tgt'] = self.loader_label(labels[index])
        if transform is not None:
            sample = transform(sample)
        return (sample['inp'], sample['tgt'])
|
def loader_image(path):
    """Load the image at `path` and force it to RGB."""
    img = Image.open(path)
    return img.convert('RGB')
|
def loader_torch(path):
    """Deserialise a torch object stored at `path`."""
    return torch.load(path)
|
def loader_numpy(path):
    """Load a numpy array stored at `path`."""
    return np.load(path)
|
class Classification():
    """Top-k classification accuracy (precision@k), reported in percent.

    :param topk: (tuple[int]) Values of k to report, e.g. (1, 5).
    """

    def __init__(self, topk=(1,)):
        self.topk = topk

    def forward(self, output, target):
        """Computes the precision@k for the specified values of k.

        :param output: (Tensor) (b, c) Class scores/logits.
        :param target: (Tensor) (b,) Ground-truth class indices.
        :return: (list[Tensor]) One scalar accuracy (in percent) per k in `topk`.
        """
        maxk = max(self.topk)
        batch_size = target.size(0)
        (_, pred) = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        res = []
        for k in self.topk:
            # FIX: use reshape — `correct[:k]` is non-contiguous for k > 1, and
            # `.view(-1)` raises a RuntimeError on non-contiguous tensors.
            correct_k = correct[:k].reshape(-1).float().sum(0)
            res.append(correct_k.mul_((100.0 / batch_size)))
        return res
|
class Classification(nn.Module):
    """Cross-entropy classification loss wrapped as a module."""

    def __init__(self):
        super(Classification, self).__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, input, target):
        """Return the cross-entropy loss between `input` logits and `target` indices."""
        return self.loss(input, target)
|
class Regression(nn.Module):
    """Mean-squared-error regression loss wrapped as a module."""

    def __init__(self):
        super(Regression, self).__init__()
        self.loss = nn.MSELoss()

    def forward(self, input, target):
        """Return the MSE between `input` and `target`."""
        return self.loss.forward(input, target)
|
def weights_init(m):
    """Initialise Conv2d with normal(0, sqrt(2/n)) (He-style, n = fan-out) and
    BatchNorm2d with unit weight / zero bias. Intended for `model.apply(...)`."""
    if isinstance(m, nn.Conv2d):
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
|
class Model():
    """Factory that builds the network and its loss from CLI args, restoring weights when resuming."""

    def __init__(self, args):
        self.cuda = args.cuda
        self.nfilters = args.nfilters
        self.nclasses = args.nclasses
        self.nchannels = args.nchannels
        self.nblocks = args.nblocks
        self.nlayers = args.nlayers
        self.level = args.level
        self.net_type = args.net_type
        self.avgpool = args.avgpool

    def setup(self, checkpoints):
        """Instantiate model + criterion; initialise weights or resume from a checkpoint.

        :param checkpoints: Checkpoint manager exposing `latest` and `load`.
        :return: (tuple) `(model, criterion)`, moved to GPU when `cuda` is set.
        """
        net_cls = getattr(models, self.net_type)
        model = net_cls(self.nchannels, self.nfilters, self.nclasses, self.avgpool, self.level)
        criterion = losses.Classification()
        resume_path = checkpoints.latest('resume')
        if resume_path is None:
            model.apply(weights_init)
        else:
            model.load_state_dict(checkpoints.load(resume_path))
        if self.cuda:
            model = model.cuda()
            criterion = criterion.cuda()
        return (model, criterion)
|
class NoiseLayer(nn.Module):
    """Adds uniform noise scaled by `level` to the input, then applies ReLU -> 1x1 conv -> BatchNorm.

    :param in_planes: (int) Input channel count.
    :param out_planes: (int) Output channel count.
    :param level: (float) Noise amplitude; noise is drawn uniformly from [-level, level].
    """

    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        # Placeholder noise; regenerated on the first forward whose shape differs.
        self.noise = torch.randn(1, in_planes, 1, 1)
        self.level = level
        self.layers = nn.Sequential(nn.ReLU(True), nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1), nn.BatchNorm2d(out_planes))

    def forward(self, x):
        # Regenerate the noise buffer whenever the input shape changes.
        if x.data.shape != self.noise.shape:
            self.noise = ((2 * torch.rand(x.data.shape)) - 1) * self.level
        # FIX: follow the input's device instead of unconditionally calling
        # `.cuda()`, which crashed on CPU-only runs.
        self.noise = self.noise.to(x.device)
        x.data = x.data + self.noise
        return self.layers(x)
|
class NoiseModel(nn.Module):
    """Plain stack of NoiseLayers with max-pooling, topped by a linear classifier.

    NOTE(review): `nchannels` is accepted but the stem is hard-wired to 3 input
    channels, and the flatten assumes the feature map reaches 1x1 spatially —
    confirm against the intended input size.
    """

    def __init__(self, nblocks, nlayers, nchannels, nfilters, nclasses, level):
        super(NoiseModel, self).__init__()
        self.num = nfilters
        self.level = level
        stages = [NoiseLayer(3, nfilters, self.level)]
        for _ in range(1, nlayers):
            stages.append(self._make_layer(nfilters, nfilters, nblocks, self.level))
            stages.append(nn.MaxPool2d(2, 2))
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Linear(self.num, nclasses)

    def _make_layer(self, in_planes, out_planes, nblocks, level):
        """Stack `nblocks` NoiseLayers into one sequential stage."""
        return nn.Sequential(*[NoiseLayer(in_planes, out_planes, level) for _ in range(nblocks)])

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(-1, self.num)
        return self.classifier(flat)
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
|
class NoiseLayer(nn.Module):
    """Perturbation layer: adds a fixed random noise mask, then ReLU -> BN -> 1x1 conv.

    The noise mask is drawn lazily on the first forward pass and reused
    afterwards; it is explicitly non-trainable.
    """

    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        # Empty placeholder, filled with uniform noise on the first forward.
        # NOTE(review): `device` is not defined in this block — presumably a
        # module-level global; confirm it exists at import time.
        self.noise = nn.Parameter(torch.Tensor(0), requires_grad=False).to(device)
        self.level = level  # noise amplitude: mask values lie in [-level, level]
        self.layers = nn.Sequential(nn.ReLU(True), nn.BatchNorm2d(in_planes), nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1))

    def forward(self, x):
        if self.noise.numel() == 0:
            # Lazily size the mask to one sample's shape, draw U[0,1), then
            # rescale to [-level, level]. The rebind below turns self.noise
            # into a plain tensor (it is no longer an nn.Parameter).
            self.noise.resize_(x.data[0].shape).uniform_()
            self.noise = (((2 * self.noise) - 1) * self.level)
        y = torch.add(x, self.noise)
        z = self.layers(y)
        return z
|
class NoiseBasicBlock(nn.Module):
    """Residual block whose convolutions are replaced by noise + 1x1 layers.

    Downsampling is done with max-pooling (stride) instead of strided convs.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBasicBlock, self).__init__()
        self.layers = nn.Sequential(
            NoiseLayer(in_planes, planes, level),
            nn.MaxPool2d(stride, stride),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            NoiseLayer(planes, planes, level),
            nn.BatchNorm2d(planes),
        )
        self.shortcut = shortcut  # optional projection for the skip path

    def forward(self, x):
        out = self.layers(x)
        identity = self.shortcut(x) if self.shortcut else x
        out += identity
        return F.relu(out)
|
class NoiseBottleneck(nn.Module):
    """Bottleneck residual block with a noise + 1x1 layer in the middle stage."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBottleneck, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            NoiseLayer(planes, planes, level),
            nn.MaxPool2d(stride, stride),
            nn.BatchNorm2d(planes),
            nn.ReLU(True),
            nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes * 4),
        )
        self.shortcut = shortcut  # optional projection for the skip path

    def forward(self, x):
        out = self.layers(x)
        identity = self.shortcut(x) if self.shortcut else x
        out += identity
        return F.relu(out)
|
class NoiseResNet(nn.Module):
    """ResNet-style backbone assembled from noise-perturbation blocks.

    :param block: block class exposing ``expansion`` and the usual ctor signature.
    :param nblocks: list of four stage depths.
    :param pool: kernel size of the final average pool.
    """

    def __init__(self, block, nblocks, nchannels, nfilters, nclasses, pool, level):
        super(NoiseResNet, self).__init__()
        self.in_planes = nfilters
        # Standard 7x7 stem: stride-2 conv followed by stride-2 max pool.
        self.pre_layers = nn.Sequential(
            nn.Conv2d(nchannels, nfilters, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(nfilters),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        self.layer1 = self._make_layer(block, 1 * nfilters, nblocks[0], level=level)
        self.layer2 = self._make_layer(block, 2 * nfilters, nblocks[1], stride=2, level=level)
        self.layer3 = self._make_layer(block, 4 * nfilters, nblocks[2], stride=2, level=level)
        self.layer4 = self._make_layer(block, 8 * nfilters, nblocks[3], stride=2, level=level)
        self.avgpool = nn.AvgPool2d(pool, stride=1)
        self.linear = nn.Linear(8 * nfilters * block.expansion, nclasses)

    def _make_layer(self, block, planes, nblocks, stride=1, level=0.2):
        """Build one stage: a (possibly projecting) first block plus nblocks-1 plain ones."""
        shortcut = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            shortcut = nn.Sequential(
                nn.Conv2d(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.in_planes, planes, stride, shortcut, level=level)]
        self.in_planes = planes * block.expansion
        for _ in range(1, nblocks):
            stage.append(block(self.in_planes, planes, level=level))
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
|
def noiseresnet18(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """18-layer noise ResNet: NoiseBasicBlock with a (2, 2, 2, 2) layout."""
    layout = [2, 2, 2, 2]
    return NoiseResNet(NoiseBasicBlock, layout, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet34(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """34-layer noise ResNet: NoiseBasicBlock with a (3, 4, 6, 3) layout."""
    layout = [3, 4, 6, 3]
    return NoiseResNet(NoiseBasicBlock, layout, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet50(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """50-layer noise ResNet: NoiseBottleneck with a (3, 4, 6, 3) layout."""
    layout = [3, 4, 6, 3]
    return NoiseResNet(NoiseBottleneck, layout, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet101(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """101-layer noise ResNet: NoiseBottleneck with a (3, 4, 23, 3) layout."""
    layout = [3, 4, 23, 3]
    return NoiseResNet(NoiseBottleneck, layout, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
def noiseresnet152(nchannels, nfilters, nclasses, pool=7, level=0.1):
    """152-layer noise ResNet: NoiseBottleneck with a (3, 8, 36, 3) layout."""
    layout = [3, 8, 36, 3]
    return NoiseResNet(NoiseBottleneck, layout, nchannels=nchannels,
                       nfilters=nfilters, nclasses=nclasses, pool=pool, level=level)
|
class Net(nn.Module):
    """Classic LeNet-style classifier: 2 conv+pool stages, 3 fc layers.

    Expects 3-channel 32x32 input (the 16*5*5 flatten fixes the input size)
    and produces 10 class logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
|
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample  # optional projection for the skip path
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        identity = x if self.downsample is None else self.downsample(x)
        out += identity
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # optional projection for the skip path
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        identity = x if self.downsample is None else self.downsample(x)
        out += identity
        return self.relu(out)
|
class ResNet(nn.Module):
    """ResNet backbone (He et al.) built from BasicBlock/Bottleneck units.

    :param block: block class exposing ``expansion`` and (inplanes, planes,
        stride, downsample) construction.
    :param layers: list of four stage depths.
    :param nchannels: input image channels.
    :param nfilters: width of the stem / first stage.
    :param nclasses: size of the final classification layer.
    """

    def __init__(self, block, layers, nchannels, nfilters, nclasses=1000):
        self.inplanes = nfilters
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(nchannels, nfilters, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(nfilters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, nfilters, layers[0])
        self.layer2 = self._make_layer(block, 2 * nfilters, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 4 * nfilters, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 8 * nfilters, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(8 * nfilters * block.expansion, nclasses)
        # He-style init for convs, unit-gamma/zero-beta for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage: a (possibly projecting) first block plus blocks-1 plain ones."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x0):
        """Return (N, nclasses) classification logits.

        BUGFIX: the original computed ``x6 = self.fc(x)`` and then discarded it,
        returning a list of intermediate feature maps — incompatible with the
        Trainer in this file, which feeds the output to CrossEntropyLoss and
        ``output.data.max(1)``.
        """
        x = self.conv1(x0)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
|
def resnet18(nchannels, nfilters, nclasses):
    """ResNet-18: BasicBlock with a (2, 2, 2, 2) layout."""
    layout = [2, 2, 2, 2]
    return ResNet(BasicBlock, layout, nchannels=nchannels, nfilters=nfilters, nclasses=nclasses)
|
def resnet34(nchannels, nfilters, nclasses):
    """ResNet-34: BasicBlock with a (3, 4, 6, 3) layout."""
    layout = [3, 4, 6, 3]
    return ResNet(BasicBlock, layout, nchannels=nchannels, nfilters=nfilters, nclasses=nclasses)
|
def resnet50(nchannels, nfilters, nclasses):
    """ResNet-50: Bottleneck with a (3, 4, 6, 3) layout."""
    layout = [3, 4, 6, 3]
    return ResNet(Bottleneck, layout, nchannels=nchannels, nfilters=nfilters, nclasses=nclasses)
|
def resnet101(nchannels, nfilters, nclasses):
    """ResNet-101: Bottleneck with a (3, 4, 23, 3) layout."""
    layout = [3, 4, 23, 3]
    return ResNet(Bottleneck, layout, nchannels=nchannels, nfilters=nfilters, nclasses=nclasses)
|
def resnet152(nchannels, nfilters, nclasses):
    """ResNet-152: Bottleneck with a (3, 8, 36, 3) layout."""
    layout = [3, 8, 36, 3]
    return ResNet(Bottleneck, layout, nchannels=nchannels, nfilters=nfilters, nclasses=nclasses)
|
class Image():
    """Plugin that saves registered image batches to disk as tiled grids."""

    def __init__(self, path, ext='png'):
        if not os.path.isdir(path):
            os.makedirs(path)
        self.path = path
        self.names = []       # one name per registered image stream
        self.ext = ext        # output image extension
        self.iteration = 1    # running save counter embedded in filenames
        self.num = 0          # number of registered image streams

    def register(self, modules):
        """Register a list of names, one per image batch later passed to update()."""
        self.num = self.num + len(modules)
        for name in modules:
            self.names.append(name)

    def update(self, modules):
        """Save each batch in `modules` (indexable, parallel to names) as a square grid.

        BUGFIX: the filename now honors ``self.ext`` instead of hard-coding .png.
        """
        for i in range(self.num):
            name = os.path.join(self.path, ('%s_%03d.%s' % (self.names[i], self.iteration, self.ext)))
            # Square-ish grid: rows = ceil(sqrt(batch size)).
            nrow = math.ceil(math.sqrt(modules[i].size(0)))
            vutils.save_image(modules[i], name, nrow=nrow, padding=0, normalize=True, scale_each=True)
        self.iteration = self.iteration + 1
|
class Logger():
    """Tab-separated text logger: register() writes a header row, update() appends values."""

    def __init__(self, path, filename):
        self.num = 0
        if not os.path.isdir(path):
            os.makedirs(path)
        self.filename = os.path.join(path, filename)
        # Truncate any previous log so each run starts fresh.
        self.fid = open(self.filename, 'w')
        self.fid.close()

    def register(self, modules):
        """Append a header row naming the logged columns (iterable of str)."""
        self.num = self.num + len(modules)
        header = ''
        for name in modules:
            header = header + name + '\t'
        header = header + '\n'
        self.fid = open(self.filename, 'a')
        self.fid.write(header)
        self.fid.close()

    def update(self, modules):
        """Append one row of values (dict of name -> number), 4-decimal formatted."""
        row = ''
        for name in modules:
            row = row + ('%.4f' % modules[name]) + '\t'
        row = row + '\n'
        self.fid = open(self.filename, 'a')
        self.fid.write(row)
        self.fid.close()
|
class Monitor():
    """Running-average (optionally exponentially smoothed) tracker for scalar metrics.

    register() declares the metric names; update() folds in one batch of
    values; getvalues() returns one metric or all of them in registration order.
    """

    def __init__(self, smoothing=True, smoothness=0.7):
        self.keys = []
        self.losses = {}
        self.smoothing = smoothing    # True: exponential smoothing; False: exact running mean
        self.smoothness = smoothness  # weight kept from the previous smoothed value
        self.num = 0                  # number of samples folded in so far

    def register(self, modules):
        """Declare metric names (iterable of str) and zero their stored values."""
        for key in modules:
            self.keys.append(key)
            self.losses[key] = 0

    def reset(self):
        """Zero the sample count and every tracked value.

        BUGFIX: the original looped ``for key, value in ...: value = 0``, which
        only rebinds the loop variable and never clears the stored values.
        """
        self.num = 0
        for key in self.losses:
            self.losses[key] = 0

    def update(self, modules, batch_size):
        """Fold a dict of per-batch metric values into the tracked averages."""
        if not self.smoothing:
            # Exact running mean, weighted by batch size.
            for (key, value) in modules.items():
                self.losses[key] = (((self.losses[key] * self.num) + (value * batch_size)) / (self.num + batch_size))
        else:
            # Exponential moving average (removed the original's unused `temp`).
            for (key, value) in modules.items():
                self.losses[key] = ((self.losses[key] * self.smoothness) + (value * (1 - self.smoothness)))
        self.num += batch_size

    def getvalues(self, key=None):
        """Return one metric's value, or an OrderedDict of all registered metrics."""
        if key is not None:
            return self.losses[key]
        return OrderedDict([(key, self.losses[key]) for key in self.keys])
|
class Visualizer():
    """Visdom front-end: streams scalar line plots and (grids of) images.

    register() declares each key with a data type ('scalar'/'image'/'images')
    and a view type ('plot'/'image'/'images'); update() stores new values and
    pushes every registered key to the visdom server.

    NOTE(review): ``viz.updateTrace`` was removed in newer visdom releases
    (replaced by ``viz.line(..., update='append')``); this code targets the
    old visdom API.
    """

    def __init__(self, port, title):
        # Requires a visdom server listening on `port`.
        self.keys = []
        self.values = {}
        self.viz = visdom.Visdom(port=port)
        self.iteration = 0
        self.title = title

    def register(self, modules):
        """Declare tracked keys; `modules` maps key -> {'dtype': ..., 'vtype': ...}."""
        for key in modules:
            self.keys.append(key)
            self.values[key] = {}
            self.values[key]['dtype'] = modules[key]['dtype']
            self.values[key]['vtype'] = modules[key]['vtype']
            if (modules[key]['vtype'] == 'plot'):
                # Scalars accumulate into a list and stream into one line plot.
                self.values[key]['value'] = []
                self.values[key]['win'] = self.viz.line(X=np.array([0]), Y=np.array([0]), opts=dict(title=self.title, xlabel='Epoch', ylabel=key))
            elif (modules[key]['vtype'] == 'image'):
                self.values[key]['value'] = None
            elif (modules[key]['vtype'] == 'images'):
                self.values[key]['value'] = None
            else:
                sys.exit('Data type not supported, please update the visualizer plugin and rerun !!')

    def update(self, modules):
        """Store the new values from `modules`, then redraw every registered key."""
        for key in modules:
            if (self.values[key]['dtype'] == 'scalar'):
                self.values[key]['value'].append(modules[key])
            elif (self.values[key]['dtype'] == 'image'):
                self.values[key]['value'] = modules[key]
            elif (self.values[key]['dtype'] == 'images'):
                self.values[key]['value'] = modules[key]
            else:
                sys.exit('Data type not supported, please update the visualizer plugin and rerun !!')
        for key in self.keys:
            if (self.values[key]['vtype'] == 'plot'):
                # Append only the latest scalar to the existing line plot.
                self.viz.updateTrace(X=np.array([self.iteration]), Y=np.array([self.values[key]['value'][(- 1)]]), win=self.values[key]['win'])
            elif (self.values[key]['vtype'] == 'image'):
                # Min-max normalize each channel in-place before display.
                temp = self.values[key]['value'].numpy()
                for i in range(temp.shape[0]):
                    temp[i] = (temp[i] - temp[i].min())
                    temp[i] = (temp[i] / temp[i].max())
                # First call creates the window; later calls redraw into it.
                if (self.iteration == 0):
                    self.values[key]['win'] = self.viz.image(temp, opts=dict(title=key, caption=self.iteration))
                else:
                    self.viz.image(temp, opts=dict(title=key, caption=self.iteration), win=self.values[key]['win'])
            elif (self.values[key]['vtype'] == 'images'):
                # Min-max normalize each image/channel of the batch before display.
                temp = self.values[key]['value'].numpy()
                for i in range(temp.shape[0]):
                    for j in range(temp.shape[1]):
                        temp[i][j] = (temp[i][j] - temp[i][j].min())
                        temp[i][j] = (temp[i][j] / temp[i][j].max())
                if (self.iteration == 0):
                    self.values[key]['win'] = self.viz.images(temp, opts=dict(title=key, caption=self.iteration))
                else:
                    self.viz.images(temp, opts=dict(title=key, caption=self.iteration), win=self.values[key]['win'])
            else:
                sys.exit('Visualization type not supported, please update the visualizer plugin and rerun !!')
        self.iteration = (self.iteration + 1)
|
class Trainer():
    """Training/evaluation loop driver for a classification model.

    Owns the optimizer, learning-rate schedule, text loggers, running-average
    monitors and visdom visualizers (from the `plugins` module).

    NOTE(review): this code targets the legacy (pre-0.4) PyTorch API —
    ``Variable``, ``.volatile``, ``loss.data[0]`` and ``data_iter.next()`` —
    and will not run unmodified on modern PyTorch.
    """

    def __init__(self, args, model, criterion):
        self.args = args
        self.model = model
        self.criterion = criterion
        self.port = args.port
        self.dir_save = args.save
        self.cuda = args.cuda
        self.nepochs = args.nepochs
        self.nclasses = args.nclasses
        self.nchannels = args.nchannels
        self.batch_size = args.batch_size
        self.resolution_high = args.resolution_high
        self.resolution_wide = args.resolution_wide
        self.lr = args.learning_rate
        self.momentum = args.momentum
        self.adam_beta1 = args.adam_beta1
        self.adam_beta2 = args.adam_beta2
        self.weight_decay = args.weight_decay
        self.optim_method = args.optim_method
        self.dataset_train_name = args.dataset_train
        # Only optimize parameters that require gradients.
        parameters = filter((lambda p: p.requires_grad), model.parameters())
        if (self.optim_method == 'Adam'):
            self.optimizer = optim.Adam(parameters, lr=self.lr, betas=(self.adam_beta1, self.adam_beta2), weight_decay=self.weight_decay)
        elif (self.optim_method == 'RMSprop'):
            self.optimizer = optim.RMSprop(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)
        elif (self.optim_method == 'SGD'):
            self.optimizer = optim.SGD(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay, nesterov=True)
        else:
            raise Exception('Unknown Optimization Method')
        # Pre-allocated input/label buffers; resized and refilled per batch.
        self.label = torch.zeros(self.batch_size).long()
        self.input = torch.zeros(self.batch_size, self.nchannels, self.resolution_high, self.resolution_wide)
        if args.cuda:
            self.label = self.label.cuda()
            self.input = self.input.cuda()
        self.input = Variable(self.input)
        self.label = Variable(self.label)
        # Tab-separated text loggers under args.logs.
        self.log_loss_train = plugins.Logger(args.logs, 'TrainLogger.txt')
        self.params_loss_train = ['Loss', 'Accuracy']
        self.log_loss_train.register(self.params_loss_train)
        self.log_loss_test = plugins.Logger(args.logs, 'TestLogger.txt')
        self.params_loss_test = ['Loss', 'Accuracy']
        self.log_loss_test.register(self.params_loss_test)
        # Running-average monitors for per-epoch statistics.
        self.monitor_train = plugins.Monitor()
        self.params_monitor_train = ['Loss', 'Accuracy']
        self.monitor_train.register(self.params_monitor_train)
        self.monitor_test = plugins.Monitor()
        self.params_monitor_test = ['Loss', 'Accuracy']
        self.monitor_test.register(self.params_monitor_test)
        # Visdom scalar plots for train and test curves.
        self.visualizer_train = plugins.Visualizer(self.port, 'Train')
        self.params_visualizer_train = {'Loss': {'dtype': 'scalar', 'vtype': 'plot'}, 'Accuracy': {'dtype': 'scalar', 'vtype': 'plot'}}
        self.visualizer_train.register(self.params_visualizer_train)
        self.visualizer_test = plugins.Visualizer(self.port, 'Test')
        self.params_visualizer_test = {'Loss': {'dtype': 'scalar', 'vtype': 'plot'}, 'Accuracy': {'dtype': 'scalar', 'vtype': 'plot'}}
        self.visualizer_test.register(self.params_visualizer_test)
        # Console format: "[epoch/nepochs][batch/nbatches] Loss x.xxxx Accuracy y.yyyy".
        self.print_train = '[%d/%d][%d/%d] '
        for item in self.params_loss_train:
            self.print_train = ((self.print_train + item) + ' %.4f ')
        self.print_test = '[%d/%d][%d/%d] '
        for item in self.params_loss_test:
            self.print_test = ((self.print_test + item) + ' %.4f ')
        self.evalmodules = []  # module types to keep in train() mode during eval
        self.giterations = 0
        self.losses_test = {}
        self.losses_train = {}

    def learning_rate(self, epoch):
        """Return the step-decayed learning rate for `epoch` for the active dataset.

        NOTE(review): returns None for dataset names not listed here.
        """
        if (self.dataset_train_name == 'CIFAR10'):
            return (self.lr * (((0.1 ** int((epoch >= 60))) * (0.1 ** int((epoch >= 90)))) * (0.1 ** int((epoch >= 120)))))
        elif (self.dataset_train_name == 'CIFAR100'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'MNIST'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'FRGC'):
            return (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'ImageNet'):
            # Decay by 10x every 30 epochs.
            decay = math.floor(((epoch - 1) / 30))
            return (self.lr * math.pow(0.1, decay))

    def get_optimizer(self, epoch, optimizer):
        """Write the scheduled learning rate into every param group; return the optimizer."""
        lr = self.learning_rate(epoch)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return optimizer

    def model_eval(self):
        """Switch to eval mode, keeping any whitelisted module types in train mode."""
        self.model.eval()
        for m in self.model.modules():
            for i in range(len(self.evalmodules)):
                if isinstance(m, self.evalmodules[i]):
                    m.train()

    def model_train(self):
        """Switch the model to training mode."""
        self.model.train()

    def train(self, epoch, dataloader):
        """Run one training epoch; return the epoch's running-average accuracy."""
        self.monitor_train.reset()
        data_iter = iter(dataloader)
        self.input.volatile = False
        self.label.volatile = False
        self.optimizer = self.get_optimizer((epoch + 1), self.optimizer)
        self.model_train()
        i = 0
        while (i < len(dataloader)):
            (input, label) = data_iter.next()
            i += 1
            batch_size = input.size(0)
            # Trailing partial batches are skipped (buffers are fixed-size).
            if (batch_size == self.batch_size):
                self.input.data.resize_(input.size()).copy_(input)
                self.label.data.resize_(label.size()).copy_(label)
                output = self.model(self.input)
                loss = self.criterion(output, self.label)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # Top-1 accuracy as a percentage of the batch.
                pred = output.data.max(1)[1]
                acc = (float((pred.eq(self.label.data).cpu().sum() * 100.0)) / float(batch_size))
                self.losses_train['Accuracy'] = float(acc)
                self.losses_train['Loss'] = float(loss.data[0])
                self.monitor_train.update(self.losses_train, batch_size)
                print((self.print_train % tuple(([epoch, self.nepochs, i, len(dataloader)] + [self.losses_train[key] for key in self.params_monitor_train]))))
        # Log and plot the epoch's running averages.
        loss = self.monitor_train.getvalues()
        self.log_loss_train.update(loss)
        self.visualizer_train.update(loss)
        return self.monitor_train.getvalues('Accuracy')

    def test(self, epoch, dataloader):
        """Evaluate on `dataloader`; return the running-average accuracy."""
        self.monitor_test.reset()
        data_iter = iter(dataloader)
        self.input.volatile = True
        self.label.volatile = True
        self.model_eval()
        i = 0
        while (i < len(dataloader)):
            (input, label) = data_iter.next()
            i += 1
            batch_size = input.size(0)
            # Trailing partial batches are skipped (buffers are fixed-size).
            if (batch_size == self.batch_size):
                self.input.data.resize_(input.size()).copy_(input)
                self.label.data.resize_(label.size()).copy_(label)
                self.model.zero_grad()
                output = self.model(self.input)
                loss = self.criterion(output, self.label)
                pred = output.data.max(1)[1]
                acc = (float((pred.eq(self.label.data).cpu().sum() * 100.0)) / float(batch_size))
                self.losses_test['Accuracy'] = float(acc)
                self.losses_test['Loss'] = float(loss.data[0])
                self.monitor_test.update(self.losses_test, batch_size)
                print((self.print_test % tuple(([epoch, self.nepochs, i, len(dataloader)] + [self.losses_test[key] for key in self.params_monitor_test]))))
        loss = self.monitor_test.getvalues()
        self.log_loss_test.update(loss)
        self.visualizer_test.update(loss)
        return self.monitor_test.getvalues('Accuracy')
|
def readtextfile(filename):
    """Return all lines of a text file as a list (trailing newlines kept).

    FIX: removed the redundant explicit ``f.close()`` inside the ``with`` block.
    """
    with open(filename) as f:
        return f.readlines()
|
def writetextfile(data, filename):
    """Write an iterable of strings to `filename` (no newlines are added).

    FIX: removed the redundant explicit ``f.close()`` inside the ``with`` block.
    """
    with open(filename, 'w') as f:
        f.writelines(data)
|
def delete_file(filename):
    """Remove `filename` if it exists and is a regular file (no-op otherwise)."""
    if os.path.isfile(filename):
        os.remove(filename)
|
def eformat(f, prec, exp_digits):
    """Scientific-notation format with a fixed-width exponent.

    `prec` mantissa digits; the exponent is sign-prefixed and zero-padded to
    `exp_digits` digits (e.g. eformat(12345.678, 2, 3) -> '1.23e+004').
    """
    mantissa, exp = ('%.*e' % (prec, f)).split('e')
    return '%se%+0*d' % (mantissa, exp_digits + 1, int(exp))
|
def saveargs(args):
    """Dump every parsed argument as 'name value' lines to <args.logs>/args.txt.

    :param args: namespace-like object whose ``logs`` attribute is the output directory.
    """
    path = args.logs
    if not os.path.isdir(path):
        os.makedirs(path)
    with open(os.path.join(path, 'args.txt'), 'w') as f:
        for arg in vars(args):
            f.write(arg + ' ' + str(getattr(args, arg)) + '\n')
|
class Dataloader():
    """Builds train/test datasets by name from CLI args and wraps them in DataLoaders.

    NOTE(review): the 'FileList'/'FolderList' branches read attributes such as
    ``self.input_filename_train`` and ``self.loader_input`` that are never
    assigned in this class — taking those branches would raise AttributeError;
    confirm intent. The CIFAR/MNIST/STL10/SVHN branches download data on
    first use.
    """

    def __init__(self, args, input_size):
        self.args = args
        self.dataset_test_name = args.dataset_test
        self.dataset_train_name = args.dataset_train
        self.input_size = input_size
        # ---- training dataset, selected by name ----
        if (self.dataset_train_name == 'LSUN'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(db_path=args.dataroot, classes=['bedroom_train'], transform=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'CIFAR10') or (self.dataset_train_name == 'CIFAR100')):
            # Random-crop + horizontal-flip augmentation with CIFAR statistics.
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(self.input_size, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_train_name == 'CocoCaption') or (self.dataset_train_name == 'CocoDetection')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'STL10') or (self.dataset_train_name == 'SVHN')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, split='train', download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'MNIST'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_train_name == 'ImageNet'):
            # Standard ImageNet augmentation and normalization statistics.
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_train = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_train), transform=transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        elif (self.dataset_train_name == 'FRGC'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'Folder'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'FileList'):
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_train_name == 'FolderList'):
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
        # ---- test dataset, selected by name (no augmentation) ----
        if (self.dataset_test_name == 'LSUN'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(db_path=args.dataroot, classes=['bedroom_val'], transform=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'CIFAR10') or (self.dataset_test_name == 'CIFAR100')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_test_name == 'CocoCaption') or (self.dataset_test_name == 'CocoDetection')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'STL10') or (self.dataset_test_name == 'SVHN')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, split='test', download=True, transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'MNIST'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_test_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_test = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
        elif (self.dataset_test_name == 'FRGC'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'Folder'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'FileList'):
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_test_name == 'FolderList'):
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.input_size), transforms.CenterCrop(self.input_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')

    def create(self, flag=None):
        """Return DataLoader(s): 'Train', 'Test', or both as a tuple when flag is None.

        Training loaders shuffle; test loaders do not. Both pin memory and use
        args.nthreads workers.
        """
        if (flag == 'Train'):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_train
        if (flag == 'Test'):
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return dataloader_test
        if (flag == None):
            dataloader_train = torch.utils.data.DataLoader(self.dataset_train, batch_size=self.args.batch_size, shuffle=True, num_workers=int(self.args.nthreads), pin_memory=True)
            dataloader_test = torch.utils.data.DataLoader(self.dataset_test, batch_size=self.args.batch_size, shuffle=False, num_workers=int(self.args.nthreads), pin_memory=True)
            return (dataloader_train, dataloader_test)
|
class FileList(data.Dataset):
def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
self.ifile = ifile
self.lfile = lfile
self.train = train
self.split_test = split_test
self.split_train = split_train
self.transform_test = transform_test
self.transform_train = transform_train
self.loader_input = loader_input
self.loader_label = loader_label
if (loader_input == 'image'):
self.loader_input = loaders.loader_image
if (loader_input == 'torch'):
self.loader_input = loaders.loader_torch
if (loader_input == 'numpy'):
self.loader_input = loaders.loader_numpy
if (loader_label == 'image'):
self.loader_label = loaders.loader_image
if (loader_label == 'torch'):
self.loader_label = loaders.loader_torch
if (loader_label == 'numpy'):
self.loader_label = loaders.loader_numpy
if (ifile != None):
imagelist = utils.readtextfile(ifile)
imagelist = [x.rstrip('\n') for x in imagelist]
else:
imagelist = []
if (lfile != None):
labellist = utils.readtextfile(lfile)
labellist = [x.rstrip('\n') for x in labellist]
else:
labellist = []
if (len(imagelist) == len(labellist)):
shuffle(imagelist, labellist)
if ((len(imagelist) > 0) and (len(labellist) == 0)):
shuffle(imagelist)
if ((len(labellist) > 0) and (len(imagelist) == 0)):
shuffle(labellist)
if ((self.split_train < 1.0) & (self.split_train > 0.0)):
if (len(imagelist) > 0):
num = math.floor((self.split * len(imagelist)))
self.images_train = imagelist[0:num]
self.images_test = images[(num + 1):len(imagelist)]
if (len(labellist) > 0):
num = math.floor((self.split * len(labellist)))
self.labels_train = labellist[0:num]
self.labels_test = labellist[(num + 1):len(labellist)]
elif (self.split_train == 1.0):
if (len(imagelist) > 0):
self.images_train = imagelist
if (len(labellist) > 0):
self.labels_train = labellist
elif (self.split_test == 1.0):
if (len(imagelist) > 0):
self.images_test = imagelist
if (len(labellist) > 0):
self.labels_test = labellist
def __len__(self):
if (self.train == True):
return len(self.images_train)
if (self.train == False):
return len(self.images_test)
    def __getitem__(self, index):
        """Return the (image, label) pair at *index* from the active split.

        Loads the input and label via the configured loaders, then applies the
        split-specific transform to the whole {'inp', 'tgt'} dict.

        NOTE(review): if the split has images but no labels, input['tgt'] is
        never set and `label = input['tgt']` raises KeyError -- confirm that
        labels are always present when this class is used.
        """
        input = {}  # NOTE: shadows the builtin `input`
        if (self.train == True):
            if (len(self.images_train) > 0):
                path = self.images_train[index]
                input['inp'] = self.loader_input(path)
            if (len(self.labels_train) > 0):
                path = self.labels_train[index]
                input['tgt'] = self.loader_label(path)
            # Transforms receive and return the whole sample dict.
            if (self.transform_train is not None):
                input = self.transform_train(input)
            image = input['inp']
            label = input['tgt']
        if (self.train == False):
            if (len(self.images_test) > 0):
                path = self.images_test[index]
                input['inp'] = self.loader_input(path)
            if (len(self.labels_test) > 0):
                path = self.labels_test[index]
                input['tgt'] = self.loader_label(path)
            if (self.transform_test is not None):
                input = self.transform_test(input)
            image = input['inp']
            label = input['tgt']
        return (image, label)
|
def is_image_file(filename):
    """Return True when *filename* ends with one of the known image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
|
def make_dataset(classlist, labellist=None):
    """Collect image file names and per-class labels from directory listings.

    :param classlist: text file listing one class directory per line.
    :param labellist: optional text file listing one label directory per line;
        when given, labels are rebuilt from those directories instead.
    :return: (images, labels) parallel lists.
    """
    images = []
    labels = []
    # was: utils.readtextfile(ifile) -- `ifile` is undefined here (NameError);
    # the parameter is `classlist`.
    classes = [x.rstrip('\n') for x in utils.readtextfile(classlist)]
    classes.sort()
    # was: `for i in len(classes)` -- iterating an int is a TypeError.
    for classdir in classes:
        for fname in os.listdir(classdir):
            if is_image_file(fname):
                # NOTE(review): os.path.split returns a (head, tail) tuple;
                # confirm callers expect the tuple rather than the dir name.
                images.append(fname)
                labels.append({'class': os.path.split(classdir)})
    if labellist is not None:
        # was: read from undefined `ifile`, then appended to `labels` while
        # iterating it and indexed `classes[i]` -- rebuilt from labellist.
        labeldirs = [x.rstrip('\n') for x in utils.readtextfile(labellist)]
        labeldirs.sort()
        labels = []
        for labeldir in labeldirs:
            for fname in os.listdir(labeldir):
                if is_image_file(fname):
                    labels.append(os.path.split(labeldir))
    return (images, labels)
|
class FolderList(data.Dataset):
    """Dataset built from class-directory listings via ``make_dataset``.

    Mirrors the text-file based dataset above, but discovers samples by
    walking the directories named in *ifile* (and optionally *lfile*).
    """

    def __init__(self, ifile, lfile=None, split_train=1.0, split_test=0.0, train=True, transform_train=None, transform_test=None, loader_input=loaders.loader_image, loader_label=loaders.loader_torch):
        (imagelist, labellist) = make_dataset(ifile, lfile)
        if (len(imagelist) == 0):
            raise RuntimeError('No images found')
        if (len(labellist) == 0):
            raise RuntimeError('No labels found')
        # was: the splitting logic below referenced an undefined global `args`
        # (args.split_train / args.split / args.split_test); use the
        # constructor parameters instead.
        self.train = train  # was never stored, breaking __len__/__getitem__
        self.split_train = split_train
        self.split_test = split_test
        # Resolve string aliases to the actual loader callables.
        _by_name = {'image': loaders.loader_image,
                    'torch': loaders.loader_torch,
                    'numpy': loaders.loader_numpy}
        self.loader_input = _by_name.get(loader_input, loader_input)
        self.loader_label = _by_name.get(loader_label, loader_label)
        self.imagelist = imagelist
        self.labellist = labellist
        self.transform_test = transform_test
        self.transform_train = transform_train
        # Shuffle jointly when paired, independently otherwise.
        if (len(imagelist) == len(labellist)):
            shuffle(imagelist, labellist)
        if ((len(imagelist) > 0) and (len(labellist) == 0)):
            shuffle(imagelist)
        if ((len(labellist) > 0) and (len(imagelist) == 0)):
            shuffle(labellist)
        # Default all splits to empty so the accessors below never crash.
        self.images_train, self.images_test = [], []
        self.labels_train, self.labels_test = [], []
        if 0.0 < self.split_train < 1.0:
            if len(imagelist) > 0:
                num = math.floor(self.split_train * len(imagelist))
                self.images_train = imagelist[0:num]
                # was: images[(num + 1):] -- undefined name AND dropped sample `num`
                self.images_test = imagelist[num:]
            if len(labellist) > 0:
                num = math.floor(self.split_train * len(labellist))
                self.labels_train = labellist[0:num]
                self.labels_test = labellist[num:]
        elif self.split_train == 1.0:
            if len(imagelist) > 0:
                self.images_train = imagelist
            if len(labellist) > 0:
                self.labels_train = labellist
        elif self.split_test == 1.0:
            if len(imagelist) > 0:
                self.images_test = imagelist
            if len(labellist) > 0:
                self.labels_test = labellist

    def __len__(self):
        """Number of samples in the currently selected split."""
        return len(self.images_train) if self.train else len(self.images_test)

    def __getitem__(self, index):
        """Return the (image, label) pair at *index* from the active split."""
        input = {}  # was missing, causing NameError on first assignment
        if self.train:
            if len(self.images_train) > 0:
                input['inp'] = self.loader_input(self.images_train[index])
            if len(self.labels_train) > 0:
                input['tgt'] = self.loader_label(self.labels_train[index])
            if self.transform_train is not None:
                input = self.transform_train(input)
        else:
            if len(self.images_test) > 0:
                input['inp'] = self.loader_input(self.images_test[index])
            if len(self.labels_test) > 0:
                input['tgt'] = self.loader_label(self.labels_test[index])
            if self.transform_test is not None:
                input = self.transform_test(input)
        image = input['inp']
        label = input['tgt']
        return (image, label)
|
def loader_image(path):
    """Open the image at *path* with PIL and force 3-channel RGB."""
    img = Image.open(path)
    return img.convert('RGB')
|
def loader_torch(path):
    """Deserialize a torch-saved object stored at *path*."""
    obj = torch.load(path)
    return obj
|
def loader_numpy(path):
    """Load a serialized numpy array from *path*."""
    arr = np.load(path)
    return arr
|
class Model():
    """Training/evaluation wrapper.

    Builds the network named by ``args.net_type`` (looked up in the ``models``
    module), its loss and optimizer, and exposes per-epoch train/test loops.
    """
    def __init__(self, args):
        # Hyper-parameters are taken verbatim from the parsed CLI args.
        self.cuda = torch.cuda.is_available()
        self.lr = args.learning_rate
        self.dataset_train_name = args.dataset_train
        self.nfilters = args.nfilters
        self.batch_size = args.batch_size
        self.level = args.level
        self.net_type = args.net_type
        self.nmasks = args.nmasks
        self.unique_masks = args.unique_masks
        self.filter_size = args.filter_size
        self.first_filter_size = args.first_filter_size
        self.scale_noise = args.scale_noise
        self.noise_type = args.noise_type
        self.act = args.act
        self.use_act = args.use_act
        self.dropout = args.dropout
        self.train_masks = args.train_masks
        self.debug = args.debug
        self.pool_type = args.pool_type
        self.mix_maps = args.mix_maps
        # Dataset-specific geometry (input resolution, class count, and the
        # average-pool size that yields a 1x1 map before the classifier).
        # NOTE(review): only CIFAR*/MNIST are handled; any other dataset name
        # leaves input_size/nclasses/avgpool unset and the model construction
        # below fails with AttributeError -- confirm intended.
        if self.dataset_train_name.startswith('CIFAR'):
            self.input_size = 32
            self.nclasses = 10
            if (self.filter_size < 7):
                self.avgpool = 4
            elif (self.filter_size == 7):
                self.avgpool = 1
        elif self.dataset_train_name.startswith('MNIST'):
            self.nclasses = 10
            self.input_size = 28
            if (self.filter_size < 7):
                self.avgpool = 14
            elif (self.filter_size == 7):
                self.avgpool = 7
        # Look up the factory (e.g. lenet, perturb_resnet18) by name.
        self.model = getattr(models, self.net_type)(nfilters=self.nfilters, avgpool=self.avgpool, nclasses=self.nclasses, nmasks=self.nmasks, unique_masks=self.unique_masks, level=self.level, filter_size=self.filter_size, first_filter_size=self.first_filter_size, act=self.act, scale_noise=self.scale_noise, noise_type=self.noise_type, use_act=self.use_act, dropout=self.dropout, train_masks=self.train_masks, pool_type=self.pool_type, debug=self.debug, input_size=self.input_size, mix_maps=self.mix_maps)
        self.loss_fn = nn.CrossEntropyLoss()
        if self.cuda:
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()
        # Optimize only trainable parameters (noise masks may be frozen).
        parameters = filter((lambda p: p.requires_grad), self.model.parameters())
        if (args.optim_method == 'Adam'):
            self.optimizer = optim.Adam(parameters, lr=self.lr, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.weight_decay)
        elif (args.optim_method == 'RMSprop'):
            self.optimizer = optim.RMSprop(parameters, lr=self.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        elif (args.optim_method == 'SGD'):
            self.optimizer = optim.SGD(parameters, lr=self.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
            "\n # use this to set different learning rates for training noise masks and regular parameters:\n self.optimizer = optim.SGD([{'params': [param for name, param in self.model.named_parameters() if 'noise' not in name]},\n {'params': [param for name, param in self.model.named_parameters() if 'noise' in name], 'lr': self.lr * 10},\n ], lr=self.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True) #"
        else:
            raise Exception('Unknown Optimization Method')
    def learning_rate(self, epoch):
        """Return the step-decayed learning rate for *epoch* (per dataset).

        NOTE(review): an unrecognized dataset name leaves ``new_lr`` unbound
        and raises UnboundLocalError -- confirm all names are covered upstream.
        """
        if (self.dataset_train_name == 'CIFAR10'):
            # 5x decay by 0.2 at epochs 150/250/300/350/400.
            new_lr = (self.lr * (((((0.2 ** int((epoch >= 150))) * (0.2 ** int((epoch >= 250)))) * (0.2 ** int((epoch >= 300)))) * (0.2 ** int((epoch >= 350)))) * (0.2 ** int((epoch >= 400)))))
        elif (self.dataset_train_name == 'CIFAR100'):
            new_lr = (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'MNIST'):
            new_lr = (self.lr * (((0.2 ** int((epoch >= 30))) * (0.2 ** int((epoch >= 60)))) * (0.2 ** int((epoch >= 90)))))
        elif (self.dataset_train_name == 'FRGC'):
            new_lr = (self.lr * (((0.1 ** int((epoch >= 80))) * (0.1 ** int((epoch >= 120)))) * (0.1 ** int((epoch >= 160)))))
        elif (self.dataset_train_name == 'ImageNet'):
            # Standard ImageNet schedule: divide by 10 every 30 epochs.
            decay = math.floor(((epoch - 1) / 30))
            new_lr = (self.lr * math.pow(0.1, decay))
        return new_lr
    def train(self, epoch, dataloader):
        """Run one training epoch; return (mean loss, mean accuracy %)."""
        self.model.train()
        # Apply the schedule for the upcoming epoch to every param group.
        lr = self.learning_rate((epoch + 1))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        losses = []
        accuracies = []
        for (i, (input, label)) in enumerate(dataloader):
            if self.cuda:
                label = label.cuda()
                input = input.cuda()
            output = self.model(input)
            loss = self.loss_fn(output, label)
            if self.debug:
                print('\nBatch:', i)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            pred = output.data.max(1)[1]
            # NOTE(review): divides by the configured batch_size, so a smaller
            # final batch under-reports accuracy -- confirm acceptable.
            acc = ((pred.eq(label.data).cpu().sum() * 100.0) / self.batch_size)
            losses.append(loss.item())
            accuracies.append(acc)
        return (np.mean(losses), np.mean(accuracies))
    def test(self, dataloader):
        """Evaluate on *dataloader*; return (mean loss, mean accuracy %)."""
        self.model.eval()
        losses = []
        accuracies = []
        with torch.no_grad():
            for (i, (input, label)) in enumerate(dataloader):
                if self.cuda:
                    label = label.cuda()
                    input = input.cuda()
                output = self.model(input)
                loss = self.loss_fn(output, label)
                pred = output.data.max(1)[1]
                # NOTE(review): same fixed batch_size denominator as train().
                acc = ((pred.eq(label.data).cpu().sum() * 100.0) / self.batch_size)
                losses.append(loss.item())
                accuracies.append(acc)
        return (np.mean(losses), np.mean(accuracies))
|
class PerturbLayerFirst(nn.Module):
    """First-layer perturbation module.

    When ``filter_size > 0`` it degenerates to a plain conv + BN + activation
    block; otherwise each input channel is perturbed by ``nmasks`` additive
    noise masks and the expanded maps are reduced with a 1x1 convolution.
    """
    def __init__(self, in_channels=None, out_channels=None, nmasks=None, level=None, filter_size=None, debug=False, use_act=False, stride=1, act=None, unique_masks=False, mix_maps=None, train_masks=False, noise_type='uniform', input_size=None):
        super(PerturbLayerFirst, self).__init__()
        self.nmasks = nmasks  # noise masks per input channel
        self.unique_masks = unique_masks  # per-channel mask sets vs one shared set
        self.train_masks = train_masks  # whether masks are learnable
        self.level = level  # noise amplitude scale
        self.filter_size = filter_size
        self.use_act = use_act
        # NOTE(review): hard-codes sigmoid and ignores the `act` argument
        # (PerturbLayer uses act_fn(act)) -- confirm intentional.
        self.act = act_fn('sigmoid')
        self.debug = debug
        self.noise_type = noise_type
        self.in_channels = in_channels
        self.input_size = input_size
        self.mix_maps = mix_maps  # optional extra 1x1 map-mixing layer
        # padding/bias are only defined for filter_size in {1, 3, 5, 7};
        # other positive values would hit a NameError below -- TODO confirm.
        if (filter_size == 1):
            padding = 0
            bias = True
        elif ((filter_size == 3) or (filter_size == 5)):
            padding = 1
            bias = False
        elif (filter_size == 7):
            stride = 2
            padding = 3
            bias = False
        if (self.filter_size > 0):
            # Conventional convolutional path (no noise masks).
            self.noise = None
            self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=filter_size, padding=padding, stride=stride, bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            # Noise-mask path: one fixed (or learnable) mask tensor that is
            # broadcast-added to the input in forward().
            noise_channels = (in_channels if self.unique_masks else 1)
            shape = (1, noise_channels, self.nmasks, input_size, input_size)
            self.noise = nn.Parameter(torch.Tensor(*shape), requires_grad=self.train_masks)
            if (noise_type == 'uniform'):
                self.noise.data.uniform_((- 1), 1)
            elif (self.noise_type == 'normal'):
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.format(self.noise_type))
            if (nmasks != 1):
                if ((out_channels % in_channels) != 0):
                    print('\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n')
                groups = in_channels  # grouped 1x1 conv: one group per input channel
            else:
                groups = 1
            self.layers = nn.Sequential(nn.BatchNorm2d((in_channels * self.nmasks)), self.act, nn.Conv2d((in_channels * self.nmasks), out_channels, kernel_size=1, stride=1, groups=groups), nn.BatchNorm2d(out_channels), self.act)
        if self.mix_maps:
            # Ungrouped 1x1 conv mixing feature maps across groups.
            self.mix_layers = nn.Sequential(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, groups=1), nn.BatchNorm2d(out_channels), self.act)
    def forward(self, x):
        if (self.filter_size > 0):
            return self.layers(x)
        else:
            # Broadcast-add scaled noise over a new mask dimension:
            # (B, C, 1, H, W) + (1, C|1, nmasks, H, W).
            y = torch.add(x.unsqueeze(2), (self.noise * self.level))
            if self.debug:
                print_values(x, self.noise, y, self.unique_masks)
            # Fold the mask dimension into channels for the 1x1 reduction.
            y = y.view((- 1), (self.in_channels * self.nmasks), self.input_size, self.input_size)
            y = self.layers(y)
            if self.mix_maps:
                y = self.mix_layers(y)
            return y
|
class PerturbLayer(nn.Module):
    """Perturbation layer: a regular conv block when ``filter_size > 0``,
    otherwise additive noise masks followed by an (optionally grouped) 1x1
    convolution that maps the expanded channels back to ``out_channels``.
    """
    def __init__(self, in_channels=None, out_channels=None, nmasks=None, level=None, filter_size=None, debug=False, use_act=False, stride=1, act=None, unique_masks=False, mix_maps=None, train_masks=False, noise_type='uniform', input_size=None):
        super(PerturbLayer, self).__init__()
        self.nmasks = nmasks  # noise masks per input channel
        self.unique_masks = unique_masks  # per-channel mask sets vs one shared set
        self.train_masks = train_masks  # whether masks are learnable
        self.level = level  # noise amplitude scale
        self.filter_size = filter_size
        self.use_act = use_act  # apply activation right after the noise add
        self.act = act_fn(act)
        self.debug = debug
        self.noise_type = noise_type
        self.in_channels = in_channels
        self.input_size = input_size
        self.mix_maps = mix_maps  # optional extra 1x1 map-mixing layer
        # padding/bias are only defined for filter_size in {1, 3, 5, 7};
        # other positive values would hit a NameError below -- TODO confirm.
        if (filter_size == 1):
            padding = 0
            bias = True
        elif ((filter_size == 3) or (filter_size == 5)):
            padding = 1
            bias = False
        elif (filter_size == 7):
            stride = 2
            padding = 3
            bias = False
        if (self.filter_size > 0):
            # Conventional convolutional path (no noise masks).
            self.noise = None
            self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=filter_size, padding=padding, stride=stride, bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            noise_channels = (in_channels if self.unique_masks else 1)
            shape = (1, noise_channels, self.nmasks, input_size, input_size)
            self.noise = nn.Parameter(torch.Tensor(*shape), requires_grad=self.train_masks)
            if (noise_type == 'uniform'):
                self.noise.data.uniform_((- 1), 1)
            elif (self.noise_type == 'normal'):
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.format(self.noise_type))
            if (nmasks != 1):
                if ((out_channels % in_channels) != 0):
                    print('\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n')
                groups = in_channels  # grouped 1x1 conv: one group per input channel
            else:
                groups = 1
            self.layers = nn.Sequential(nn.Conv2d((in_channels * self.nmasks), out_channels, kernel_size=1, stride=1, groups=groups), nn.BatchNorm2d(out_channels), self.act)
        if self.mix_maps:
            # Ungrouped 1x1 conv mixing feature maps across groups.
            self.mix_layers = nn.Sequential(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, groups=1), nn.BatchNorm2d(out_channels), self.act)
    def forward(self, x):
        if (self.filter_size > 0):
            return self.layers(x)
        else:
            # Broadcast-add scaled noise over a new mask dimension:
            # (B, C, 1, H, W) + (1, C|1, nmasks, H, W).
            y = torch.add(x.unsqueeze(2), (self.noise * self.level))
            if self.debug:
                print_values(x, self.noise, y, self.unique_masks)
            if self.use_act:
                y = self.act(y)
            # Fold the mask dimension into channels for the 1x1 reduction.
            y = y.view((- 1), (self.in_channels * self.nmasks), self.input_size, self.input_size)
            y = self.layers(y)
            if self.mix_maps:
                y = self.mix_layers(y)
            return y
|
class PerturbBasicBlock(nn.Module):
    """Residual block built from two PerturbLayers; pooling handles downsampling."""
    expansion = 1
    def __init__(self, in_channels=None, out_channels=None, stride=1, shortcut=None, nmasks=None, train_masks=False, level=None, use_act=False, filter_size=None, act=None, unique_masks=False, noise_type=None, input_size=None, pool_type=None, mix_maps=None):
        super(PerturbBasicBlock, self).__init__()
        self.shortcut = shortcut  # optional projection when shapes differ
        if (pool_type == 'max'):
            pool = nn.MaxPool2d
        elif (pool_type == 'avg'):
            pool = nn.AvgPool2d
        else:
            # NOTE(review): returning from __init__ leaves the module without
            # `layers`, so forward() would fail -- confirm acceptable.
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        # PerturbLayer -> pool(stride) -> PerturbLayer; the second layer sees
        # the pooled (input_size // stride) resolution.
        self.layers = nn.Sequential(PerturbLayer(in_channels=in_channels, out_channels=out_channels, nmasks=nmasks, input_size=input_size, level=level, filter_size=filter_size, use_act=use_act, train_masks=train_masks, act=act, unique_masks=unique_masks, noise_type=noise_type, mix_maps=mix_maps), pool(stride, stride), PerturbLayer(in_channels=out_channels, out_channels=out_channels, nmasks=nmasks, input_size=(input_size // stride), level=level, filter_size=filter_size, use_act=use_act, train_masks=train_masks, act=act, unique_masks=unique_masks, noise_type=noise_type, mix_maps=mix_maps))
    def forward(self, x):
        residual = x
        y = self.layers(x)
        if self.shortcut:
            residual = self.shortcut(x)
        y += residual  # residual connection
        y = F.relu(y)
        return y
|
class PerturbResNet(nn.Module):
    """ResNet-style network whose blocks are PerturbBasicBlocks.

    The stem is a PerturbLayerFirst (with boosted noise level and mask count)
    followed by a 1x1 conv reduction down to ``nfilters`` channels.
    """
    def __init__(self, block, nblocks=None, avgpool=None, nfilters=None, nclasses=None, nmasks=None, input_size=32, level=None, filter_size=None, first_filter_size=None, use_act=False, train_masks=False, mix_maps=None, act=None, scale_noise=1, unique_masks=False, debug=False, noise_type=None, pool_type=None):
        super(PerturbResNet, self).__init__()
        self.nfilters = nfilters
        self.unique_masks = unique_masks
        self.noise_type = noise_type
        self.train_masks = train_masks
        self.pool_type = pool_type
        self.mix_maps = mix_maps
        self.act = act_fn(act)
        # Stem: heavy first perturbation layer (noise level x20, nfilters*5 masks).
        layers = [PerturbLayerFirst(in_channels=3, out_channels=(3 * nfilters), nmasks=(nfilters * 5), level=((level * scale_noise) * 20), debug=debug, filter_size=first_filter_size, use_act=use_act, train_masks=train_masks, input_size=input_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, mix_maps=mix_maps)]
        if (first_filter_size == 7):
            layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        self.pre_layers = nn.Sequential(*layers, nn.Conv2d(((self.nfilters * 3) * 1), self.nfilters, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(self.nfilters), self.act)
        # Four stages; input_size is forwarded so noise masks match feature-map
        # size. NOTE(review): layer2 receives the full input_size while
        # layer3/layer4 get halved sizes -- verify the stage-2 size is intended.
        self.layer1 = self._make_layer(block, (1 * nfilters), nblocks[0], stride=1, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=input_size)
        self.layer2 = self._make_layer(block, (2 * nfilters), nblocks[1], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=input_size)
        self.layer3 = self._make_layer(block, (4 * nfilters), nblocks[2], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=(input_size // 2))
        self.layer4 = self._make_layer(block, (8 * nfilters), nblocks[3], stride=2, level=level, nmasks=nmasks, use_act=True, filter_size=filter_size, act=act, input_size=(input_size // 4))
        self.avgpool = nn.AvgPool2d(avgpool, stride=1)
        self.linear = nn.Linear(((8 * nfilters) * block.expansion), nclasses)
    def _make_layer(self, block, out_channels, nblocks, stride=1, level=0.2, nmasks=None, use_act=False, filter_size=None, act=None, input_size=None):
        """Build one stage; a 1x1 projection shortcut is added when the shape changes."""
        shortcut = None
        if ((stride != 1) or (self.nfilters != (out_channels * block.expansion))):
            shortcut = nn.Sequential(nn.Conv2d(self.nfilters, (out_channels * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((out_channels * block.expansion)))
        layers = []
        layers.append(block(self.nfilters, out_channels, stride, shortcut, level=level, nmasks=nmasks, use_act=use_act, filter_size=filter_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, train_masks=self.train_masks, input_size=input_size, pool_type=self.pool_type, mix_maps=self.mix_maps))
        self.nfilters = (out_channels * block.expansion)
        # Remaining blocks keep stride 1 and the post-downsampling resolution.
        for i in range(1, nblocks):
            layers.append(block(self.nfilters, out_channels, level=level, nmasks=nmasks, use_act=use_act, train_masks=self.train_masks, filter_size=filter_size, act=act, unique_masks=self.unique_masks, noise_type=self.noise_type, input_size=(input_size // stride), pool_type=self.pool_type, mix_maps=self.mix_maps))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.pre_layers(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.linear(x)
        return x
|
class LeNet(nn.Module):
    """LeNet-style classifier built from PerturbLayers.

    Three perturb+pool stages, then dropout/linear/BN/act/dropout/linear head.
    """
    def __init__(self, nfilters=None, nclasses=None, nmasks=None, level=None, filter_size=None, linear=128, input_size=28, debug=False, scale_noise=1, act='relu', use_act=False, first_filter_size=None, pool_type=None, dropout=None, unique_masks=False, train_masks=False, noise_type='uniform', mix_maps=None):
        super(LeNet, self).__init__()
        # `n` is the flattened spatial extent fed to the first linear layer.
        if (filter_size == 5):
            n = 5
        else:
            n = 4
        # Input channels inferred from resolution (32 -> RGB, 28 -> grayscale).
        # NOTE(review): other resolutions leave first_channels unset -- confirm.
        if (input_size == 32):
            first_channels = 3
        elif (input_size == 28):
            first_channels = 1
        if (pool_type == 'max'):
            pool = nn.MaxPool2d
        elif (pool_type == 'avg'):
            pool = nn.AvgPool2d
        else:
            # NOTE(review): returning from __init__ leaves a half-built module.
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        self.linear1 = nn.Linear(((nfilters * n) * n), linear)
        self.linear2 = nn.Linear(linear, nclasses)
        self.dropout = nn.Dropout(p=dropout)
        self.act = act_fn(act)
        self.batch_norm = nn.BatchNorm1d(linear)
        # First layer uses the boosted noise level (level * scale_noise); the
        # later layers see progressively halved resolutions.
        self.first_layers = nn.Sequential(PerturbLayer(in_channels=first_channels, out_channels=nfilters, nmasks=nmasks, level=(level * scale_noise), filter_size=first_filter_size, use_act=use_act, act=act, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=input_size, mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, debug=debug, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2), mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4), mix_maps=mix_maps), pool(kernel_size=3, stride=2, padding=1))
        self.last_layers = nn.Sequential(self.dropout, self.linear1, self.batch_norm, self.act, self.dropout, self.linear2)
    def forward(self, x):
        x = self.first_layers(x)
        x = x.view(x.size(0), (- 1))
        x = self.last_layers(x)
        return x
|
class CifarNet(nn.Module):
    """Deeper CIFAR-sized classifier built from PerturbLayers.

    Seven perturb layers interleaved with three pools, then a dropout/linear
    /BN/act/dropout/linear head.
    """
    def __init__(self, nfilters=None, nclasses=None, nmasks=None, level=None, filter_size=None, input_size=32, linear=256, scale_noise=1, act='relu', use_act=False, first_filter_size=None, pool_type=None, dropout=None, unique_masks=False, debug=False, train_masks=False, noise_type='uniform', mix_maps=None):
        super(CifarNet, self).__init__()
        # `n` is the flattened spatial extent fed to the first linear layer.
        if (filter_size == 5):
            n = 5
        else:
            n = 4
        # Input channels inferred from resolution (32 -> RGB, 28 -> grayscale).
        # NOTE(review): other resolutions leave first_channels unset -- confirm.
        if (input_size == 32):
            first_channels = 3
        elif (input_size == 28):
            first_channels = 1
        if (pool_type == 'max'):
            pool = nn.MaxPool2d
        elif (pool_type == 'avg'):
            pool = nn.AvgPool2d
        else:
            # NOTE(review): returning from __init__ leaves a half-built module.
            print('\n\nPool Type {} is not supported/understood\n\n'.format(pool_type))
            return
        self.linear1 = nn.Linear(((nfilters * n) * n), linear)
        self.linear2 = nn.Linear(linear, nclasses)
        self.dropout = nn.Dropout(p=dropout)
        self.act = act_fn(act)
        self.batch_norm = nn.BatchNorm1d(linear)
        # Layer stack; input_size is forwarded so noise masks match the
        # (progressively halved) feature-map sizes.
        self.first_layers = nn.Sequential(PerturbLayer(in_channels=first_channels, out_channels=nfilters, nmasks=nmasks, level=(level * scale_noise), unique_masks=unique_masks, filter_size=first_filter_size, use_act=use_act, input_size=input_size, act=act, train_masks=train_masks, noise_type=noise_type, mix_maps=mix_maps), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, debug=debug, use_act=True, act=act, mix_maps=mix_maps, unique_masks=unique_masks, train_masks=train_masks, noise_type=noise_type, input_size=input_size), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2)), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 2)), pool(kernel_size=3, stride=2, padding=1), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4)), PerturbLayer(in_channels=nfilters, out_channels=nfilters, nmasks=nmasks, level=level, filter_size=filter_size, use_act=True, act=act, unique_masks=unique_masks, mix_maps=mix_maps, train_masks=train_masks, noise_type=noise_type, input_size=(input_size // 4)), pool(kernel_size=3, stride=2, padding=1))
        self.last_layers = nn.Sequential(self.dropout, self.linear1, self.batch_norm, self.act, self.dropout, self.linear2)
    def forward(self, x):
        x = self.first_layers(x)
        x = x.view(x.size(0), (- 1))
        x = self.last_layers(x)
        return x
|
class NoiseLayer(nn.Module):
    """Adds fixed random noise to its input, then applies ReLU + BN + 1x1 conv.

    The noise buffer is allocated lazily on the first forward pass so it can
    match the incoming feature-map shape.
    """
    def __init__(self, in_planes, out_planes, level):
        super(NoiseLayer, self).__init__()
        # NOTE(review): relies on a module-global `device`; also, after the
        # first forward pass self.noise is rebound to a plain tensor (no
        # longer an nn.Parameter) -- confirm both are intended.
        self.noise = nn.Parameter(torch.Tensor(0), requires_grad=False).to(device)
        self.level = level  # noise amplitude
        self.layers = nn.Sequential(nn.ReLU(True), nn.BatchNorm2d(in_planes), nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1))
    def forward(self, x):
        if (self.noise.numel() == 0):
            # Lazily size the noise to one sample's shape, then rescale
            # uniform [0, 1) to [-level, level]. Runs only on the first call.
            self.noise.resize_(x.data[0].shape).uniform_()
            self.noise = (((2 * self.noise) - 1) * self.level)
        y = torch.add(x, self.noise)
        return self.layers(y)
|
class NoiseBasicBlock(nn.Module):
    """Residual block built from two NoiseLayers; max-pool handles downsampling."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, shortcut=None, level=0.2):
        super(NoiseBasicBlock, self).__init__()
        # NoiseLayer -> pool(stride) -> BN -> ReLU -> NoiseLayer -> BN
        self.layers = nn.Sequential(NoiseLayer(in_planes, planes, level), nn.MaxPool2d(stride, stride), nn.BatchNorm2d(planes), nn.ReLU(True), NoiseLayer(planes, planes, level), nn.BatchNorm2d(planes))
        self.shortcut = shortcut  # optional projection when shapes differ
    def forward(self, x):
        residual = x
        y = self.layers(x)
        if self.shortcut:
            residual = self.shortcut(x)
        y += residual  # residual connection
        y = F.relu(y)
        return y
|
class NoiseResNet(nn.Module):
    """ResNet-18-style trunk whose blocks are NoiseBasicBlocks.

    The first convolution's weights are frozen (requires_grad = False).
    """
    def __init__(self, block, nblocks, nfilters, nclasses, pool, level, first_filter_size=3):
        super(NoiseResNet, self).__init__()
        self.in_planes = nfilters
        # Stem depends on the first filter size; note it also overrides `pool`.
        if (first_filter_size == 7):
            pool = 1
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=2, padding=3, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        elif (first_filter_size == 3):
            pool = 4
            self.pre_layers = nn.Sequential(nn.Conv2d(3, nfilters, kernel_size=first_filter_size, stride=1, padding=1, bias=False), nn.BatchNorm2d(nfilters), nn.ReLU(True))
        elif (first_filter_size == 0):
            # NOTE(review): early return leaves the module without any layers.
            print('\n\nThe original noiseresnet18 model does not support noise masks in the first layer, use perturb_resnet18 model, or set first_filter_size to 3 or 7\n\n')
            return
        self.pre_layers[0].weight.requires_grad = False  # freeze the stem conv
        self.layer1 = self._make_layer(block, (1 * nfilters), nblocks[0], stride=1, level=level)
        self.layer2 = self._make_layer(block, (2 * nfilters), nblocks[1], stride=2, level=level)
        self.layer3 = self._make_layer(block, (4 * nfilters), nblocks[2], stride=2, level=level)
        self.layer4 = self._make_layer(block, (8 * nfilters), nblocks[3], stride=2, level=level)
        self.avgpool = nn.AvgPool2d(pool, stride=1)
        self.linear = nn.Linear(((8 * nfilters) * block.expansion), nclasses)
    def _make_layer(self, block, planes, nblocks, stride=1, level=0.2, filter_size=1):
        """Build one stage; a 1x1 projection shortcut is added when the shape changes."""
        shortcut = None
        if ((stride != 1) or (self.in_planes != (planes * block.expansion))):
            shortcut = nn.Sequential(nn.Conv2d(self.in_planes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.in_planes, planes, stride, shortcut, level=level))
        self.in_planes = (planes * block.expansion)
        for i in range(1, nblocks):
            layers.append(block(self.in_planes, planes, level=level))
        return nn.Sequential(*layers)
    def forward(self, x):
        x1 = self.pre_layers(x)
        x2 = self.layer1(x1)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)
        x6 = self.avgpool(x5)
        x7 = x6.view(x6.size(0), (- 1))
        x8 = self.linear(x7)
        return x8
|
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs plus identity/projection shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # Two 3x3 convolutions; the first one may downsample via `stride`.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes;
        # otherwise an empty Sequential acts as the identity.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)  # residual connection
        return F.relu(out)
|
class ResNet(nn.Module):
    """Generic ResNet trunk: stem conv -> four stages -> avg-pool -> linear head."""

    def __init__(self, block, num_blocks, nfilters=64, avgpool=4, nclasses=10):
        super(ResNet, self).__init__()
        self.in_planes = nfilters
        self.avgpool = avgpool  # kernel size of the final average pool
        self.conv1 = nn.Conv2d(3, nfilters, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(nfilters)
        self.layer1 = self._make_layer(block, nfilters, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, nfilters * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, nfilters * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, nfilters * 8, num_blocks[3], stride=2)
        self.linear = nn.Linear(nfilters * 8 * block.expansion, nclasses)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: the first block downsamples, the rest keep stride 1."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, self.avgpool)
        return self.linear(out.view(out.size(0), -1))
|
def resnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, noise_type='uniform', train_masks=False, debug=False, mix_maps=None):
    """Baseline ResNet-18. The perturbation-related arguments are accepted
    only for interface parity with the other factories and are ignored."""
    stage_depths = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_depths, nfilters=nfilters, avgpool=avgpool, nclasses=nclasses)
|
def noiseresnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=7, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """ResNet-18 built from NoiseBasicBlocks. Only the noise level and first
    filter size are used; the other arguments exist for interface parity."""
    stage_depths = [2, 2, 2, 2]
    return NoiseResNet(NoiseBasicBlock, stage_depths, nfilters=nfilters, pool=avgpool, nclasses=nclasses, level=level, first_filter_size=first_filter_size)
|
def perturb_resnet18(nfilters, avgpool=4, nclasses=10, nmasks=32, level=0.1, filter_size=0, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """ResNet-18 built from PerturbBasicBlocks (noise-mask convolutions)."""
    cfg = dict(nfilters=nfilters, avgpool=avgpool, nclasses=nclasses,
               pool_type=pool_type, scale_noise=scale_noise, nmasks=nmasks,
               level=level, filter_size=filter_size, train_masks=train_masks,
               first_filter_size=first_filter_size, act=act, use_act=use_act,
               unique_masks=unique_masks, debug=debug, noise_type=noise_type,
               input_size=input_size, mix_maps=mix_maps)
    return PerturbResNet(PerturbBasicBlock, [2, 2, 2, 2], **cfg)
|
def lenet(nfilters, avgpool=None, nclasses=10, nmasks=32, level=0.1, filter_size=3, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """LeNet-style perturbation network (``avgpool`` is unused here)."""
    cfg = dict(nfilters=nfilters, nclasses=nclasses, nmasks=nmasks, level=level,
               filter_size=filter_size, pool_type=pool_type, scale_noise=scale_noise,
               act=act, first_filter_size=first_filter_size, input_size=input_size,
               mix_maps=mix_maps, use_act=use_act, dropout=dropout,
               unique_masks=unique_masks, debug=debug, noise_type=noise_type,
               train_masks=train_masks)
    return LeNet(**cfg)
|
def cifarnet(nfilters, avgpool=None, nclasses=10, nmasks=32, level=0.1, filter_size=3, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """CIFAR-sized perturbation network (``avgpool`` is unused here)."""
    cfg = dict(nfilters=nfilters, nclasses=nclasses, nmasks=nmasks, level=level,
               filter_size=filter_size, pool_type=pool_type, scale_noise=scale_noise,
               act=act, use_act=use_act, first_filter_size=first_filter_size,
               input_size=input_size, dropout=dropout, unique_masks=unique_masks,
               debug=debug, noise_type=noise_type, train_masks=train_masks,
               mix_maps=mix_maps)
    return CifarNet(**cfg)
|
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(((9 * 6) * 6), 10)
self.noise = nn.Parameter(torch.Tensor(1, 1, 28, 28), requires_grad=True)
self.noise.data.uniform_((- 1), 1)
self.layers = nn.Sequential(nn.Conv2d(1, 9, kernel_size=5, stride=2, bias=False), nn.MaxPool2d(2, 2), nn.ReLU())
def forward(self, x):
x = torch.add(x, self.noise)
x = self.layers(x)
x = x.view(x.size(0), (- 1))
x = self.linear(x)
print('{:.5f}'.format(self.noise.data[(0, 0, 0, 0)].cpu().numpy()))
return x
|
def EmbedWord2Vec(walks, dimension):
    """Train a skip-gram Word2Vec model over the given walks.

    :param walks: iterable of walks (each a sequence of string tokens).
    :param dimension: embedding dimensionality.
    :return: (node_ids, node_embeddings) — the model vocabulary and its vectors.
    """
    started = time.time()
    print('Creating embeddings.')
    model = Word2Vec(walks, size=dimension, window=5, min_count=0, sg=1, workers=32, iter=1)
    elapsed = time.time() - started
    print('Embedding generation runtime: ', elapsed)
    return (model.wv.index2word, model.wv.vectors)
|
def EmbedPoincare(relations, epochs, dimension):
    """Train a Poincare (hyperbolic) embedding on the given relations.

    :param relations: iterable of (node, node) relation pairs.
    :param epochs: number of training epochs.
    :param dimension: embedding dimensionality.
    :return: (node_ids, node_embeddings) from the trained keyed vectors.

    Fix: the original read `model.index2entity` / `model.vectors`, but gensim's
    PoincareModel exposes these on `model.kv` (PoincareKeyedVectors); reading
    them on the model itself raises AttributeError.
    """
    model = PoincareModel(relations, size=dimension, workers=32)
    model.train(epochs)
    node_ids = model.kv.index2entity
    node_embeddings = model.kv.vectors
    return (node_ids, node_embeddings)
|
def TraverseAndSelect(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
    """Generate hyperedge-level random walks ("Traverse And Select").

    For each hyperedge, runs `num_walks` walks of `length` steps. At each step
    the current vertex is re-sampled from the current hyperedge with
    probability alpha / deg(vertex) + beta, the current hyperedge id is
    recorded, and the walk hops to a random hyperedge incident to the vertex.

    Fix: removed dead locals (`initial` was assigned and never read; the step
    index was unused).

    NOTE(review): `walk_hyperedge` is created once per hyperedge and the SAME
    list object is appended to the result after every inner walk, so later
    walks keep extending earlier entries. Behavior preserved as-is — confirm
    this aliasing is intended.

    :param length: steps per walk.
    :param num_walks: walks started per hyperedge.
    :param hyperedges: mapping edge_id -> {'members': [vertex, ...], ...}.
    :param vertexMemberships: mapping vertex -> list of incident edge ids.
    :return: list of walks, each a list of hyperedge ids as strings.
    """
    walksTAS = []
    for edge_id in hyperedges:
        edge = hyperedges[edge_id]
        walk_hyperedge = []
        for _ in range(num_walks):
            curr_vertex = random.choice(edge['members'])
            curr_edge_id = edge_id
            curr_edge = edge
            for _ in range(length):
                # Chance to re-select the vertex within the current hyperedge.
                proba = float(alpha) / len(vertexMemberships[curr_vertex]) + beta
                if random.random() < proba:
                    curr_vertex = random.choice(curr_edge['members'])
                walk_hyperedge.append(str(curr_edge_id))
                # Hop to a random hyperedge containing the current vertex.
                curr_edge_id = random.choice(vertexMemberships[curr_vertex])
                curr_edge = hyperedges[curr_edge_id]
            walksTAS.append(walk_hyperedge)
    return walksTAS
|
def SubsampleAndTraverse(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
    """Generate vertex-level random walks ("Subsample And Traverse").

    For each hyperedge, runs `num_walks` walks of `length` steps. At each step
    the current hyperedge is re-sampled from the vertex's memberships with
    probability alpha / |members(curr_edge)| + beta, the current vertex is
    recorded, and a new vertex is drawn from the current hyperedge.

    Fix: removed dead locals (`initial` was assigned and never read; the step
    index was unused; the edge-id variable only fed a single lookup, now
    inlined).

    NOTE(review): `walk_vertex` and `curr_vertex` persist across the
    `num_walks` inner walks of a hyperedge, and the SAME list object is
    appended to the result after every inner walk. Behavior preserved as-is —
    confirm this aliasing is intended.

    :param length: steps per walk.
    :param num_walks: walks started per hyperedge.
    :param hyperedges: mapping edge_id -> {'members': [vertex, ...], ...}.
    :param vertexMemberships: mapping vertex -> list of incident edge ids.
    :return: list of walks, each a list of vertex ids as strings.
    """
    walksSAT = []
    for edge_id in hyperedges:
        edge = hyperedges[edge_id]
        walk_vertex = []
        curr_vertex = random.choice(edge['members'])
        for _ in range(num_walks):
            curr_edge = edge
            for _ in range(length):
                proba = float(alpha) / len(curr_edge['members']) + beta
                if random.random() < proba:
                    # Hop to a random incident hyperedge of the current vertex.
                    curr_edge = hyperedges[random.choice(vertexMemberships[curr_vertex])]
                walk_vertex.append(str(curr_vertex))
                curr_vertex = random.choice(curr_edge['members'])
            walksSAT.append(walk_vertex)
    return walksSAT
|
def getFeaturesTrainingData():
    """Assemble a shuffled (features, labels) training set from per-vertex features.

    Reads module-level globals: G (graph with .nodes), vertex_features,
    vertex_labels, feature_dimension, and sklearn's `shuffle`.

    Fix: removed dead locals (`i` counter and `vertex_embedding_list` were
    assigned and never used).

    :return: (X_Features, Y_Features) numpy arrays, shuffled in unison.
    """
    rows = []
    labels = []
    for vertex in G.nodes:
        rows.append({'f': vertex_features[vertex].tolist()})
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for row in rows:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = row['f']
        X_unshuffled.append(x)
    X_arr, Y_arr = shuffle(X_unshuffled, np.asarray(labels))
    return (np.asarray(X_arr), np.asarray(Y_arr))
|
def getTrainingData():
    """Build the full (X, Y) training set for the transductive model.

    Each row x is laid out as:
      [hyperedge embedding | up to max_groupsize vertex embeddings | features]

    Reads module-level globals: hyperedges, vertex_embeddings, vertex_ids,
    hyperedge_embeddings, hyperedge_ids, vertex_features, num_categories,
    hyperedge_embedding_dimension, vertex_embedding_dimension, max_groupsize,
    feature_dimension, and sklearn's `shuffle`.

    Fixes: bare `except:` narrowed to ValueError (what `list.index` raises for
    a missing vertex); the `i` variable — reused as both a progress counter and
    an embedding slot index — split into distinct names; slot indexing via
    `enumerate`.

    :return: (X, Y) shuffled numpy arrays; Y is one-hot over num_categories.
    """
    processed = 0
    rows = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        member_embeddings = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress indicator for large datasets
            try:
                member_embeddings.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                # Vertex has no learned embedding (never appeared in the walks).
                print('Missed one: ', vertex)
        rows.append({'v': member_embeddings,
                     'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                     'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1  # categories are 1-based
        labels.append(label)
    X_unshuffled = []
    for row in rows:
        x = np.zeros((hyperedge_embedding_dimension + vertex_embedding_dimension * max_groupsize + feature_dimension,))
        x[:hyperedge_embedding_dimension] = row['h']
        x[hyperedge_embedding_dimension + vertex_embedding_dimension * max_groupsize:] = row['f']
        for slot, embedding in enumerate(np.asarray(row['v'])):
            start = hyperedge_embedding_dimension + slot * embedding.shape[0]
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    X_arr, Y_arr = shuffle(X_unshuffled, np.asarray(labels))
    return (np.asarray(X_arr), np.asarray(Y_arr))
|
def getMLPTrainingData():
    """Build (X, Y) for the MLP baseline: hyperedge embedding + features only.

    Each row x is laid out as: [hyperedge embedding | features].

    Reads module-level globals: hyperedges, hyperedge_embeddings,
    hyperedge_ids, vertex_features, num_categories,
    hyperedge_embedding_dimension, feature_dimension, and sklearn's `shuffle`.

    Fix: removed dead locals (`i`, `maxi`, `vertex_embedding_list` were
    assigned and never used).

    :return: (X_MLP, Y_MLP) shuffled numpy arrays; Y is one-hot.
    """
    rows = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        rows.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                     'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1  # categories are 1-based
        labels.append(label)
    X_unshuffled = []
    for row in rows:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = row['h']
        x[hyperedge_embedding_dimension:] = row['f']
        X_unshuffled.append(x)
    X_arr, Y_arr = shuffle(X_unshuffled, np.asarray(labels))
    return (np.asarray(X_arr), np.asarray(Y_arr))
|
def getDSTrainingData():
    """Build (X, Y) for the set-based model: vertex embeddings + features.

    Each row x is laid out as:
      [up to max_groupsize vertex embeddings | features]
    (no hyperedge embedding, unlike getTrainingData).

    Reads module-level globals: hyperedges, vertex_embeddings, vertex_ids,
    vertex_features, num_categories, vertex_embedding_dimension,
    max_groupsize, feature_dimension, and sklearn's `shuffle`.

    Fixes: removed a stray no-op statement (`lists.append` — a bare attribute
    reference that was never called); bare `except:` narrowed to ValueError
    (what `list.index` raises); removed unused `maxi`.

    :return: (X, Y) shuffled numpy arrays; Y is one-hot over num_categories.
    """
    processed = 0
    rows = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        member_embeddings = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress indicator for large datasets
            try:
                member_embeddings.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                # Vertex has no learned embedding (never appeared in the walks).
                print('Missed one: ', vertex)
        rows.append({'v': member_embeddings, 'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1  # categories are 1-based
        labels.append(label)
    X_unshuffled = []
    for row in rows:
        x = np.zeros((vertex_embedding_dimension * max_groupsize + feature_dimension,))
        x[vertex_embedding_dimension * max_groupsize:] = row['f']
        for slot, embedding in enumerate(np.asarray(row['v'])):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    X_arr, Y_arr = shuffle(X_unshuffled, np.asarray(labels))
    return (np.asarray(X_arr), np.asarray(Y_arr))
|
def hyperedgesTrain(X_train, Y_train, num_epochs):
    """Reload saved transductive-model weights and fine-tune on (X_train, Y_train).

    Reads module-level globals: deephyperedges_transductive_model,
    dataset_name, batch_size.

    Fix: the Keras History from `fit` was assigned and then discarded; it is
    now returned (backward-compatible — callers ignoring the return are
    unaffected).

    :param X_train: training inputs.
    :param Y_train: training targets.
    :param num_epochs: number of epochs to fit.
    :return: the History object produced by `fit`.
    """
    weights_path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(weights_path)
    history = deephyperedges_transductive_model.fit(X_train, Y_train, epochs=num_epochs, batch_size=batch_size, shuffle=True, validation_split=0, verbose=0)
    return history
|
# NOTE(review): the three lines below are web-page scraping residue, not Python
# code; they broke the module's syntax and are preserved here as comments only.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.