repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DFMGAN | DFMGAN-main/torch_utils/custom_ops.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import glob
import torch
import torch.utils.cpp_extension
import importlib
import hashlib
import shutil
from pathlib import Path
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.

verbosity = 'brief' # Verbosity level used by get_plugin(): 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()  # module_name => loaded module; each plugin is built at most once per process.

def get_plugin(module_name, sources, **build_kwargs):
    """Compile and load a custom C++/CUDA extension via torch.utils.cpp_extension.

    Args:
        module_name:    Name of the extension module to build and import.
        sources:        List of .cpp/.cu/.h source file paths.
        **build_kwargs: Extra keyword arguments forwarded to
                        torch.utils.cpp_extension.load().

    Returns:
        The imported extension module. Results are memoized in _cached_plugins.
    """
    assert verbosity in ['none', 'brief', 'full']

    # Already cached?
    if module_name in _cached_plugins:
        return _cached_plugins[module_name]

    # Print status.
    if verbosity == 'full':
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif verbosity == 'brief':
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)

    try: # pylint: disable=too-many-nested-blocks
        # Make sure we can find the necessary compiler binaries.
        # On Windows, cl.exe may not be on PATH; try to locate an MSVC install
        # and append its bin directory so the build below can find it.
        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
            compiler_bindir = _find_compiler_bindir()
            if compiler_bindir is None:
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += ';' + compiler_bindir

        # Compile and load.
        verbose_build = (verbosity == 'full')

        # Incremental build md5sum trickery. Copies all the input source files
        # into a cached build directory under a combined md5 digest of the input
        # source files. Copying is done only if the combined digest has changed.
        # This keeps input file timestamps and filenames the same as in previous
        # extension builds, allowing for fast incremental rebuilds.
        #
        # This optimization is done only in case all the source files reside in
        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
        # environment variable is set (we take this as a signal that the user
        # actually cares about this.)
        source_dirs_set = set(os.path.dirname(source) for source in sources)
        if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
            all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))

            # Compute a combined hash digest for all source files in the same
            # custom op directory (usually .cu, .cpp, .py and .h files).
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())
            build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
            digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())

            if not os.path.isdir(digest_build_dir):
                os.makedirs(digest_build_dir, exist_ok=True)
                # FileBaton guards against concurrent processes copying into
                # the same digest directory at the same time.
                baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
                if baton.try_acquire():
                    try:
                        for src in all_source_files:
                            shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
                    finally:
                        baton.release()
                else:
                    # Someone else is copying source files under the digest dir,
                    # wait until done and continue.
                    baton.wait()
            digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
                verbose=verbose_build, sources=digest_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
        module = importlib.import_module(module_name)

    except:
        # Bare except is deliberate: report failure for any interruption, then
        # re-raise unchanged.
        if verbosity == 'brief':
            print('Failed!')
        raise

    # Print status and add to cache.
    if verbosity == 'full':
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif verbosity == 'brief':
        print('Done.')
    _cached_plugins[module_name] = module
    return module
#----------------------------------------------------------------------------
| 5,644 | 43.448819 | 146 | py |
DFMGAN | DFMGAN-main/torch_utils/training_stats.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments    = 3              # Moments per statistic: [num_scalars, sum_of_scalars, sum_of_squares].
_reduce_dtype   = torch.float32  # Data type to use for initial per-tensor reduction.
_counter_dtype  = torch.float64  # Data type to use for the internal counters.
_rank           = 0              # Rank of the current process.
_sync_device    = None           # Device to use for multiprocess communication. None = single-process.
_sync_called    = False          # Has _sync() been called yet?
_counters       = dict()         # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative     = dict()         # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
    r"""Configure `torch_utils.training_stats` for multi-process collection.

    Must be called after `torch.distributed.init_process_group()` and before
    `Collector.update()`. Single-process runs may skip this call entirely.

    Args:
        rank:        Rank of the current process.
        sync_device: PyTorch device to use for inter-process communication,
                     typically `torch.device('cuda', rank)`, or None to
                     disable multi-process collection.
    """
    global _rank, _sync_device
    assert not _sync_called  # too late to reconfigure once stats have been synced
    _sync_device = sync_device
    _rank = rank
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
    r"""Accumulates an arbitrary set of scalars under the statistic `name`,
    making them visible to every interested `Collector` across device and
    process boundaries.

    Designed to be extremely cheap, so it can be called from anywhere in the
    training loop, loss function, or inside a `torch.nn.Module`.

    Warning: the set of unique names must be consistent across processes and
    reported in the same order. A process with nothing to contribute should
    still call `report(name, [])` (empty list).

    Args:
        name:   Arbitrary string identifying the statistic. Averages are
                accumulated separately for each unique name.
        value:  Arbitrary set of scalars: list, tuple, NumPy array, PyTorch
                tensor, or Python scalar.

    Returns:
        The unmodified `value`, for convenient inline use.
    """
    if name not in _counters:
        _counters[name] = dict()

    elems = torch.as_tensor(value)
    if elems.numel() == 0:
        return value  # nothing to accumulate

    flat = elems.detach().flatten().to(_reduce_dtype)
    moments = torch.stack([
        torch.ones_like(flat).sum(),  # count
        flat.sum(),                   # sum
        flat.square().sum(),          # sum of squares
    ])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)

    # Accumulate into the per-device running counter for this statistic.
    per_device = _counters[name]
    device = moments.device
    if device not in per_device:
        per_device[device] = torch.zeros_like(moments)
    per_device[device].add_(moments)
    return value
#----------------------------------------------------------------------------
def report0(name, value):
    r"""Like `report()`, but only the first process (`rank = 0`) contributes
    scalars; all other ranks participate with an empty contribution so the
    cross-process ordering stays consistent. See `report()` for details.
    """
    if _rank == 0:
        report(name, value)
    else:
        report(name, [])
    return value
#----------------------------------------------------------------------------
class Collector:
    r"""Collects the scalars broadcasted by `report()` and `report0()` and
    computes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    The averages are first collected into internal counters that are not
    directly visible to the user. They are then copied to the user-visible
    state as a result of calling `update()` and can then be queried using
    `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the
    internal counters for the next round, so that the user-visible state
    effectively reflects averages collected between the last two calls to
    `update()`.

    Args:
        regex:          Regular expression defining which statistics to
                        collect. The default is to collect everything.
        keep_previous:  Whether to retain the previous averages if no
                        scalars were collected on a given round
                        (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        self._cumulative = dict()  # name => moments seen so far; baseline for computing deltas.
        self._moments = dict()     # name => moments accumulated between the last two update() calls.
        self.update()              # Prime the baseline from the global counters...
        self._moments.clear()      # ...but discard anything collected before construction.

    def names(self):
        r"""Returns the names of all statistics broadcasted so far that
        match the regular expression specified at construction time.
        """
        return [name for name in _counters if self._regex.fullmatch(name)]

    def update(self):
        r"""Copies current values of the internal counters to the
        user-visible state and resets them for the next round.

        If `keep_previous=True` was specified at construction time, the
        operation is skipped for statistics that have received no scalars
        since the last update, retaining their previous averages.

        This method performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`. It is intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            if name not in self._cumulative:
                self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
            delta = cumulative - self._cumulative[name]
            self._cumulative[name].copy_(cumulative)
            # Only overwrite the visible moments if at least one scalar arrived.
            if float(delta[0]) != 0:
                self._moments[name] = delta

    def _get_delta(self, name):
        r"""Returns the raw moments that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]

    def num(self, name):
        r"""Returns the number of scalars that were accumulated for the given
        statistic between the last two calls to `update()`, or zero if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        return int(delta[0])

    def mean(self, name):
        r"""Returns the mean of the scalars that were accumulated for the
        given statistic between the last two calls to `update()`, or NaN if
        no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])

    def std(self, name):
        r"""Returns the standard deviation of the scalars that were
        accumulated for the given statistic between the last two calls to
        `update()`, or NaN if no scalars were collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if int(delta[0]) == 1:
            return float(0)
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        # Population std via E[x^2] - E[x]^2, clamped at 0 for numerical safety.
        return np.sqrt(max(raw_var - np.square(mean), 0))

    def as_dict(self):
        r"""Returns the averages accumulated between the last two calls to
        `update()` as an `dnnlib.EasyDict`. The contents are as follows:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats

    def __getitem__(self, name):
        r"""Convenience getter.
        `collector[name]` is a synonym for `collector.mean(name)`.
        """
        return self.mean(name)
#----------------------------------------------------------------------------
def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.
    """
    if len(names) == 0:
        return []
    global _sync_called
    _sync_called = True

    # Collect deltas within current rank.
    deltas = []
    device = _sync_device if _sync_device is not None else torch.device('cpu')
    for name in names:
        delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            delta.add_(counter.to(device))
            # Reset in-place so report() keeps accumulating into the same tensor.
            counter.copy_(torch.zeros_like(counter))
        deltas.append(delta)
    deltas = torch.stack(deltas)

    # Sum deltas across ranks.
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)

    # Update cumulative values.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])

    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,707 | 38.806691 | 118 | py |
DFMGAN | DFMGAN-main/torch_utils/persistence.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
_version            = 6         # Internal version number, checked on unpickling.
_decorators         = set()     # {decorator_class, ...} — all classes produced by persistent_class().
_import_hooks       = []        # [hook_function, ...] — registered via import_hook().
_module_to_src_dict = dict()    # {module: src, ...} — forward cache for _module_to_src().
_src_to_module_dict = dict()    # {src: module, ...} — reverse cache for _src_to_module().
#----------------------------------------------------------------------------
def persistent_class(orig_class):
    r"""Class decorator that extends a given class to save its source code
    when pickled.

    Example:

        from torch_utils import persistence

        @persistence.persistent_class
        class MyNetwork(torch.nn.Module):
            def __init__(self, num_inputs, num_outputs):
                super().__init__()
                self.fc = MyLayer(num_inputs, num_outputs)
                ...

        @persistence.persistent_class
        class MyLayer(torch.nn.Module):
            ...

    When pickled, any instance of `MyNetwork` and `MyLayer` will save its
    source code alongside other internal state (e.g., parameters, buffers,
    and submodules). This way, any previously exported pickle will remain
    usable even if the class definitions have been modified or are no
    longer available.

    The decorator saves the source code of the entire Python module
    containing the decorated class. It does *not* save the source code of
    any imported modules. Thus, the imported modules must be available
    during unpickling, also including `torch_utils.persistence` itself.

    It is ok to call functions defined in the same module from the
    decorated class. However, if the decorated class depends on other
    classes defined in the same module, they must be decorated as well.
    This is illustrated in the above example in the case of `MyLayer`.

    It is also possible to employ the decorator just-in-time before
    calling the constructor. For example:

        cls = MyLayer
        if want_to_make_it_persistent:
            cls = persistence.persistent_class(cls)
        layer = cls(num_inputs, num_outputs)

    As an additional feature, the decorator also keeps track of the
    arguments that were used to construct each instance of the decorated
    class. The arguments can be queried via `obj.init_args` and
    `obj.init_kwargs`, and they are automatically pickled alongside other
    object state. A typical use case is to first unpickle a previous
    instance of a persistent class, and then upgrade it to use the latest
    version of the source code:

        with open('old_pickle.pkl', 'rb') as f:
            old_net = pickle.load(f)
        new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    if is_persistent(orig_class):
        return orig_class  # already decorated; decorating twice would be redundant

    assert orig_class.__module__ in sys.modules
    orig_module = sys.modules[orig_class.__module__]
    orig_module_src = _module_to_src(orig_module)  # snapshot the module source at decoration time

    class Decorator(orig_class):
        # Class-level copies so they survive on unpickled/re-created instances.
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Remember construction arguments so the instance can be re-created later.
            self._init_args = copy.deepcopy(args)
            self._init_kwargs = copy.deepcopy(kwargs)
            assert orig_class.__name__ in orig_module.__dict__
            # Fail fast at construction time if the object would not pickle.
            _check_pickleable(self.__reduce__())

        @property
        def init_args(self):
            return copy.deepcopy(self._init_args)

        @property
        def init_kwargs(self):
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))

        def __reduce__(self):
            fields = list(super().__reduce__())
            fields += [None] * max(3 - len(fields), 0)  # pad to (func, args, state)
            if fields[0] is not _reconstruct_persistent_obj:
                # Route unpickling through _reconstruct_persistent_obj, embedding
                # the module source and object state into the pickle itself.
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj # reconstruct func
                fields[1] = (meta,) # reconstruct args
                fields[2] = None # state dict
            return tuple(fields)

    Decorator.__name__ = orig_class.__name__
    _decorators.add(Decorator)
    return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
    r"""Test whether the given object or class is persistent, i.e.,
    whether it will save its source code when pickled.
    """
    # `obj in _decorators` requires obj to be hashable; unhashable objects
    # can still be *instances* of a persistent class, so fall through.
    hashable = True
    try:
        hash(obj)
    except TypeError:
        hashable = False
    if hashable and obj in _decorators:
        return True
    return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
    r"""Register a callable invoked whenever a persistent object is being
    unpickled. A typical use case is to patch the pickled source code to
    avoid errors and inconsistencies after the API of an imported module
    has changed.

    The hook signature is `hook(meta) -> modified meta`, where `meta` is a
    `dnnlib.EasyDict` with fields:

        type:       Type of the persistent object, e.g. `'class'`.
        version:    Internal version number of `torch_utils.persistence`.
        module_src  Original source code of the Python module.
        class_name: Class name in the original Python module.
        state:      Internal state of the object.

    Example:

        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
    r"""Hook that is called internally by the `pickle` module to unpickle
    a persistent object.
    """
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    # Give registered import hooks a chance to patch the pickled metadata.
    for hook in _import_hooks:
        meta = hook(meta)
        assert meta is not None

    assert meta.version == _version
    module = _src_to_module(meta.module_src)  # re-create the module from its pickled source

    assert meta.type == 'class'
    orig_class = module.__dict__[meta.class_name]
    decorator_class = persistent_class(orig_class)
    obj = decorator_class.__new__(decorator_class)  # allocate without running __init__

    # Restore state, honoring a custom __setstate__ if the class defines one.
    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state) # pylint: disable=not-callable
    else:
        obj.__dict__.update(meta.state)
    return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
    r"""Return the source code of `module`, caching the result so repeated
    queries (and the reverse src-to-module mapping) stay consistent.
    """
    cached = _module_to_src_dict.get(module, None)
    if cached is not None:
        return cached
    src = inspect.getsource(module)
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
def _src_to_module(src):
    r"""Return a Python module whose contents were exec'd from `src`,
    creating a fresh uniquely-named module on first encounter.
    """
    existing = _src_to_module_dict.get(src, None)
    if existing is not None:
        return existing
    unique_name = "_imported_module_" + uuid.uuid4().hex
    module = types.ModuleType(unique_name)
    sys.modules[unique_name] = module
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    exec(src, module.__dict__) # pylint: disable=exec-used
    return module
#----------------------------------------------------------------------------
def _check_pickleable(obj):
    r"""Raise an exception if the given object is not pickleable.

    Considerably faster than pickling the full object graph: containers are
    walked recursively, while primitives, NumPy arrays, PyTorch tensors, and
    persistent objects are pruned to None before the final pickle.dump().
    """
    def prune(node):
        if isinstance(node, (list, tuple, set)):
            return [prune(item) for item in node]
        if isinstance(node, dict):
            return [[prune(k), prune(v)] for k, v in node.items()]
        if isinstance(node, (str, int, float, bool, bytes, bytearray)):
            return None # Python primitive types are pickleable.
        if f'{type(node).__module__}.{type(node).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
            return None # NumPy arrays and PyTorch tensors are pickleable.
        if is_persistent(node):
            return None # Persistent objects are pickleable, by virtue of the constructor check.
        return node
    with io.BytesIO() as buf:
        pickle.dump(prune(obj), buf)
#----------------------------------------------------------------------------
| 9,708 | 37.527778 | 144 | py |
DFMGAN | DFMGAN-main/torch_utils/misc.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
_constant_cache = dict()  # cache key => tensor; avoids repeated CPU=>GPU copies of the same constant.

def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a cached constant tensor for `value`.

    The tensor is built once per unique combination of value, shape, dtype,
    device, and memory format, then reused on subsequent calls.
    """
    arr = np.asarray(value)
    shape = tuple(shape) if shape is not None else None
    dtype = torch.get_default_dtype() if dtype is None else dtype
    device = torch.device('cpu') if device is None else device
    memory_format = torch.contiguous_format if memory_format is None else memory_format

    key = (arr.shape, arr.dtype, arr.tobytes(), shape, dtype, device, memory_format)
    cached = _constant_cache.get(key, None)
    if cached is None:
        cached = torch.as_tensor(arr.copy(), dtype=dtype, device=device)
        if shape is not None:
            # Broadcast up to the requested shape.
            cached, _ = torch.broadcast_tensors(cached, torch.empty(shape))
        cached = cached.contiguous(memory_format=memory_format)
        _constant_cache[key] = cached
    return cached
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.

try:
    nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
        # Fallback for PyTorch < 1.8: nansum over an inserted singleton dim
        # maps NaN -> 0, and clamping maps +/-Inf to the dtype's finite extremes.
        assert isinstance(input, torch.Tensor)
        if posinf is None:
            posinf = torch.finfo(input.dtype).max
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        assert nan == 0  # the fallback only supports mapping NaN to zero
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert: an assert that also works inside torch.jit.trace()
# (see assert_shape below). The private name changed across PyTorch versions.

try:
    symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to suppress known warnings in torch.jit.trace().

class suppress_tracer_warnings(warnings.catch_warnings):
    """Temporarily ignore `torch.jit.TracerWarning` within a `with` block.

    Builds on `warnings.catch_warnings`, which restores the previous warning
    filters on exit.
    """
    def __enter__(self):
        super().__enter__()
        warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
        return self
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().

def assert_shape(tensor, ref_shape):
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
        if ref_size is None:
            pass  # this dimension is allowed to vary
        elif isinstance(ref_size, torch.Tensor):
            # Reference size is dynamic (a tensor): assert symbolically so the
            # check is recorded by the tracer instead of evaluated eagerly.
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
        elif isinstance(size, torch.Tensor):
            # Actual size is dynamic (traced): same symbolic treatment.
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
        elif size != ref_size:
            raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
    """Decorator that runs `fn` inside a
    `torch.autograd.profiler.record_function` scope named after it, so the
    call shows up as a labeled range in profiler traces.
    """
    def wrapper(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    wrapper.__name__ = fn.__name__  # keep the original name for readable traces
    return wrapper
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
assert len(dataset) > 0
assert num_replicas > 0
assert 0 <= rank < num_replicas
assert 0 <= window_size <= 1
super().__init__(dataset)
self.dataset = dataset
self.rank = rank
self.num_replicas = num_replicas
self.shuffle = shuffle
self.seed = seed
self.window_size = window_size
def __iter__(self):
order = np.arange(len(self.dataset))
rnd = None
window = 0
if self.shuffle:
rnd = np.random.RandomState(self.seed)
rnd.shuffle(order)
window = int(np.rint(order.size * self.window_size))
idx = 0
while True:
i = idx % order.size
if idx % self.num_replicas == self.rank:
yield order[i]
if window >= 2:
j = (i - rnd.randint(window)) % order.size
order[i], order[j] = order[j], order[i]
idx += 1
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
    """Return all parameters followed by all buffers of `module` as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
def named_params_and_buffers(module):
    """Return (name, tensor) pairs for all parameters followed by all buffers."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
def copy_params_and_buffers(src_module, dst_module, require_all=False):
    """Copy every parameter/buffer of `dst_module` that has a same-named
    counterpart in `src_module`, preserving each destination tensor's
    requires_grad flag. With `require_all=True`, every destination tensor
    must have a source counterpart.

    Returns the list of names that were copied.
    """
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    src_tensors = dict(list(src_module.named_parameters()) + list(src_module.named_buffers()))
    copied = []
    for name, tensor in list(dst_module.named_parameters()) + list(dst_module.named_buffers()):
        assert (name in src_tensors) or (not require_all)
        if name in src_tensors:
            tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)
            copied.append(name)
    return copied
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager that enables (`sync=True`) or disables (`sync=False`)
    gradient synchronization for a DistributedDataParallel `module`. For any
    other module type it is a no-op wrapper.
    """
    assert isinstance(module, torch.nn.Module)
    if not sync and isinstance(module, torch.nn.parallel.DistributedDataParallel):
        with module.no_sync():
            yield
    else:
        yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
    """Assert that every parameter/buffer of `module` holds identical values
    on all ranks, using rank 0 as the reference.

    Requires `torch.distributed` to be initialized. Tensors whose full name
    matches `ignore_regex` are skipped. Values are passed through
    `nan_to_num` before comparison so that NaN entries compare equal.
    """
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        other = tensor.clone()
        torch.distributed.broadcast(tensor=other, src=0)  # fetch rank 0's copy
        assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` and print a table summarizing the module hierarchy.

    For every submodule invocation up to `max_nesting` levels deep, the table
    lists the number of parameters and buffers the submodule contributes, plus
    the shape and dtype of each of its output tensors. With `skip_redundant=True`
    entries that contribute no new parameters, buffers, or outputs are dropped.

    Args:
        module:         Module to summarize. Must not be a ScriptModule.
        inputs:         Tuple/list of positional arguments for the forward pass.
        max_nesting:    Maximum submodule depth to report.
        skip_redundant: Drop rows with nothing new to show.

    Returns:
        The outputs of `module(*inputs)`.
    """
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))

    # Register hooks to record every submodule invocation and its tensor outputs.
    entries = []
    nesting = [0]
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]

    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()

    # Identify unique outputs, parameters, and buffers (attribute each tensor
    # to the first entry that produced/owns it).
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}

    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]

    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        # Bug fix: report each output tensor's own shape. Previously this read
        # `e.outputs[0].shape` inside the comprehension, so every extra output
        # row repeated the first output's shape.
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]

    # Print table.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs
#----------------------------------------------------------------------------
| 11,073 | 40.631579 | 133 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/bias_act.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import warnings
import numpy as np
import torch
import dnnlib
import traceback
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Registry of activation functions supported by bias_act(). Each entry records:
#   func         -- reference PyTorch implementation used by _bias_act_ref().
#   def_alpha    -- default value for the `alpha` shape parameter.
#   def_gain     -- default output scaling (sqrt(2) for ReLU-family keeps unit variance).
#   cuda_idx     -- activation index passed to the compiled CUDA plugin.
#   ref          -- which tensor the backward pass needs: 'x' (input), 'y' (output), or '' (none).
#   has_2nd_grad -- whether the CUDA op supports second-order gradients for this activation.
activation_funcs = {
    'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
    'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_inited = False                 # True once a plugin build has been attempted (success or failure).
_plugin = None                  # Compiled CUDA extension module, or None if unavailable.
_null_tensor = torch.empty([0]) # Placeholder handed to the plugin in place of absent tensors.

def _init():
    """Build (at most once) and cache the bias_act CUDA plugin.

    Returns:
        True if the compiled plugin is available, False otherwise.
    """
    global _inited, _plugin
    if not _inited:
        _inited = True
        sources = ['bias_act.cpp', 'bias_act.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception: # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
            warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias and activation function.

    Optionally adds bias `b` to `x`, applies the activation `act`, scales the
    result by `gain`, and clamps it. The fused op is considerably faster than
    the equivalent sequence of standard PyTorch ops and supports first and
    second order gradients (but not third order).

    Args:
        x:     Input activation tensor of any shape.
        b:     Bias vector (1D tensor of the same dtype as `x`, length matching
               `x.shape[dim]`), or `None` to disable.
        dim:   Dimension of `x` that `b` is applied along; ignored when `b` is None.
        act:   Activation name, e.g. `"linear"`, `"relu"`, `"lrelu"`, `"tanh"`,
               `"sigmoid"`, `"swish"`. See `activation_funcs` for the full list.
        alpha: Shape parameter for the activation, or `None` for its default.
        gain:  Output scaling factor, or `None` for the activation's default.
        clamp: Clamp outputs to `[-clamp, +clamp]`, or `None` to disable.
        impl:  `"ref"` (pure PyTorch) or `"cuda"` (compiled plugin, default).

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    use_cuda = (impl == 'cuda') and (x.device.type == 'cuda') and _init()
    if use_cuda:
        return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard PyTorch ops.

    Performs, in order: bias add, activation, gain scaling, clamping.
    `clamp` is encoded internally as -1 when disabled.
    """
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Resolve defaults once; the redundant re-conversions of the original
    # (`alpha = float(alpha)`, `gain = float(gain)` further down) are removed.
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)

    # Add bias.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        # Reshape b so it broadcasts along `dim`.
        x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])

    # Evaluate activation function.
    x = spec.func(x, alpha=alpha)

    # Scale by gain.
    if gain != 1:
        x = x * gain

    # Clamp (clamp < 0 means disabled).
    if clamp >= 0:
        x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
    return x
#----------------------------------------------------------------------------
# Cache of autograd.Function classes, keyed by (dim, act, alpha, gain, clamp).
_bias_act_cuda_cache = dict()

def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns a torch.autograd.Function class specialized for the given
    parameter combination; call `.apply(x, b)` on the result.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)
    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]
    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels-last memory layout when the input uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the kernel entirely when the whole op is an identity.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only the tensors the backward pass will actually need
            # (spec.ref says whether grads are expressed via input x or output y).
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every dimension except `dim`.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])
            return dx, db
    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
            # NOTE(review): the integer argument (here 1, 2 below, 0 in the forward)
            # presumably selects the gradient order inside the CUDA kernel -- see bias_act.cu.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx
        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
            return d_dy, d_x, d_b, d_y
    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 10,047 | 46.173709 | 185 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/grid_sample_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import warnings
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.

#----------------------------------------------------------------------------

def grid_sample(input, grid):
    """Sample `input` at `grid` locations (bilinear, zero padding,
    align_corners=False), routing through the custom op when enabled."""
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
#----------------------------------------------------------------------------
def _should_use_custom_op():
    """Return True when the custom grid-sample op is enabled and the running
    PyTorch version is one the op is known to work on; warn otherwise."""
    if not enabled:
        return False
    known_good = ['1.7.', '1.8.', '1.9']
    if any(torch.__version__.startswith(prefix) for prefix in known_good):
        return True
    warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
    return False
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
    """Forward half of the custom grid-sample op. Its backward delegates to
    _GridSample2dBackward so that higher-order gradients remain expressible."""
    @staticmethod
    def forward(ctx, input, grid):
        # Only 4D (NCHW) inputs and 4D grids are supported.
        assert input.ndim == 4
        assert grid.ndim == 4
        output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        ctx.save_for_backward(input, grid)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        # Route through the custom backward op, whose own backward is again
        # differentiable (arbitrary-order gradients).
        grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
        return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
    """Backward half of the custom grid-sample op. Invokes the internal ATen
    backward kernel directly and expresses its own backward in terms of
    _GridSample2dForward."""
    @staticmethod
    def forward(ctx, grad_output, input, grid):
        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
        # Trailing args 0, 0, False match the forward call:
        # bilinear interpolation, zeros padding, align_corners=False.
        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
        ctx.save_for_backward(grid)
        return grad_input, grad_grid
    @staticmethod
    def backward(ctx, grad2_grad_input, grad2_grad_grid):
        _ = grad2_grad_grid # unused
        grid, = ctx.saved_tensors
        grad2_grad_output = None
        grad2_input = None
        grad2_grid = None
        if ctx.needs_input_grad[0]:
            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
        # Second-order gradients with respect to `grid` are not implemented.
        assert not ctx.needs_input_grad[2]
        return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,299 | 38.285714 | 138 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/conv2d_gradfix.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import warnings
import contextlib
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.

@contextlib.contextmanager
def no_weight_gradients():
    """Context manager that temporarily disables weight gradients in the
    gradfix conv ops by toggling `weight_gradients_disabled`.

    The previous value is restored even if the body raises, so an exception
    cannot leave weight gradients permanently disabled (the original code
    skipped the restore on exception).
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for `torch.nn.functional.conv2d` that routes through
    the gradfix op when `_should_use_custom_op()` allows it."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    fn = _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return fn.apply(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for `torch.nn.functional.conv_transpose2d` that routes
    through the gradfix op when `_should_use_custom_op()` allows it."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    fn = _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return fn.apply(input, weight, bias)
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
    """Return True when the gradfix path applies: op enabled, cuDNN enabled,
    input on CUDA, and PyTorch version known-good; warn on unknown versions."""
    assert isinstance(input, torch.Tensor)
    if (not enabled) or (not torch.backends.cudnn.enabled):
        return False
    if input.device.type != 'cuda':
        return False
    known_good = ['1.7.', '1.8.', '1.9']
    if any(torch.__version__.startswith(prefix) for prefix in known_good):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
# Cache of autograd.Function classes, keyed by the full parameter combination.
_conv2d_gradfix_cache = dict()

def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a cached autograd.Function implementing conv2d / conv_transpose2d
    for one specific parameter combination, with hand-written backward passes
    so that gradients of arbitrary order stay differentiable.
    """
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)
    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]
    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else: # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding required so that the gradient op (which flips the
        # transpose flag) reproduces exactly `input_shape`.
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]
    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            if not transpose:
                output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else: # transpose
                output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return output
        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            grad_input = None
            grad_weight = None
            grad_bias = None
            if ctx.needs_input_grad[0]:
                # Input gradient = convolution with the transpose flag flipped.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
                assert grad_input.shape == input.shape
            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
                assert grad_weight.shape == weight_shape
            if ctx.needs_input_grad[2]:
                # Bias gradient: reduce over batch and spatial dimensions.
                grad_bias = grad_output.sum([0, 2, 3])
            return grad_input, grad_weight, grad_bias
    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            # Call the internal cuDNN backward-weight kernel directly.
            op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            assert grad_weight.shape == weight_shape
            ctx.save_for_backward(grad_output, input)
            return grad_weight
        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad2_grad_output = None
            grad2_input = None
            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output.shape
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input.shape
            return grad2_grad_output, grad2_input
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
#----------------------------------------------------------------------------
| 7,677 | 43.900585 | 197 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/upfirdn2d.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import warnings
import numpy as np
import torch
import traceback
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_inited = False # True once a plugin build has been attempted (success or failure).
_plugin = None  # Compiled CUDA extension module, or None if unavailable.

def _init():
    """Build (at most once) and cache the upfirdn2d CUDA plugin.

    Returns:
        True if the compiled plugin is available, False otherwise.
    """
    global _inited, _plugin
    if not _inited:
        # Mark the attempt up front so a failed build is not retried (and the
        # warning not re-emitted) on every call. The original omitted this,
        # unlike the otherwise-identical bias_act._init().
        _inited = True
        sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
        sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
        try:
            _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
        except Exception: # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
            warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
    return _plugin is not None
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
    """Return `(width, height)` of the FIR filter `f`; a `None` filter counts
    as 1x1. Accepts 1D (separable) or 2D filters."""
    if f is None:
        return 1, 1
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    width = f.shape[-1]
    height = f.shape[0]
    with misc.suppress_tracer_warnings(): # sizes are treated as constants under the tracer
        width = int(width)
        height = int(height)
    misc.assert_shape(f, [height, width][:f.ndim])
    assert width >= 1 and height >= 1
    return width, height
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    For every channel the op performs, in order:
    1. Zero-insertion upsampling by `up` (N-1 zeros after each pixel).
    2. Zero padding by `padding` (negative values crop).
    3. Convolution with the 2D FIR filter `f`, keeping every output pixel's
       footprint inside the padded input.
    4. Downsampling by keeping every `down`-th pixel.

    Closely resembles scipy.signal.upfirdn(), but fused, considerably faster
    than composing standard PyTorch ops, and differentiable to arbitrary order.

    Args:
        x:           Input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`
                     (float16/float32/float64).
        f:           Float32 FIR filter: `[filter_height, filter_width]`
                     (non-separable), `[filter_taps]` (separable), or
                     `None` (identity).
        up:          Integer upsampling factor, single int or `[x, y]`
                     (default: 1).
        down:        Integer downsampling factor, single int or `[x, y]`
                     (default: 1).
        padding:     Padding w.r.t. the upsampled image: single number,
                     `[x, y]`, or `[x_before, x_after, y_before, y_after]`
                     (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    use_cuda = (impl == 'cuda') and (x.device.type == 'cuda') and _init()
    if use_cuda:
        return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.

    Same contract as `upfirdn2d()`: zero-insertion upsample, pad/crop,
    FIR filtering via grouped convolution, then pixel-dropping downsample.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Upsample by inserting zeros (pad each pixel with up-1 trailing zeros).
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad or crop (positive padding pads with zeros, negative padding crops).
    x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
    # Setup filter: fold the gain in, match dtype, and flip unless correlation
    # was requested (conv2d below performs correlation).
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter, one group per channel.
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
    else:
        # Separable filter: two 1D passes (horizontal then vertical).
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
#----------------------------------------------------------------------------
# Cache of autograd.Function classes, keyed by the full parameter combination.
_upfirdn2d_cuda_cache = dict()

def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Fast CUDA implementation of `upfirdn2d()` using custom ops.

    Returns a torch.autograd.Function class specialized for the given
    parameter combination; call `.apply(x, f)` on the result.
    """
    # Parse arguments.
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Lookup from cache.
    key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
    if key in _upfirdn2d_cuda_cache:
        return _upfirdn2d_cuda_cache[key]
    # Forward op.
    class Upfirdn2dCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, f): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            if f is None:
                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
            y = x
            if f.ndim == 2:
                y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
            else:
                # Separable filter: two 1D passes, splitting the gain evenly.
                y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
                y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
            ctx.save_for_backward(f)
            ctx.x_shape = x.shape
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            f, = ctx.saved_tensors
            _, _, ih, iw = ctx.x_shape
            _, _, oh, ow = dy.shape
            fw, fh = _get_filter_size(f)
            # Padding for the adjoint op so the gradient matches x's shape.
            p = [
                fw - padx0 - 1,
                iw * upx - ow * downx + padx0 - upx + 1,
                fh - pady0 - 1,
                ih * upy - oh * downy + pady0 - upy + 1,
            ]
            dx = None
            df = None
            if ctx.needs_input_grad[0]:
                # Gradient = upfirdn2d with up/down swapped and filter flipped.
                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
            # Gradients with respect to the filter are not implemented.
            assert not ctx.needs_input_grad[1]
            return dx, df
    # Add to cache.
    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
    return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Filter a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its shape matches the input;
    user-specified padding (negative = cropping) is applied on top of that.
    Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/float32/float64).
        f:           Float32 FIR filter (`[fh, fw]`, separable `[taps]`, or `None`).
        padding:     Padding w.r.t. the output: single number, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint so the output lines up with the input.
    pad_spec = [
        px0 + fw // 2,
        px1 + (fw - 1) // 2,
        py0 + fh // 2,
        py1 + (fh - 1) // 2,
    ]
    return upfirdn2d(x, f, padding=pad_spec, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Upsample a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its shape is a multiple of the input;
    user-specified padding (negative = cropping) is applied on top of that.
    Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/float32/float64).
        f:           Float32 FIR filter (`[fh, fw]`, separable `[taps]`, or `None`).
        up:          Integer upsampling factor, single int or `[x, y]` (default: 2).
        padding:     Padding w.r.t. the output: single number, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    ux, uy = _parse_scaling(up)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    pad_spec = [
        px0 + (fw + ux - 1) // 2,
        px1 + (fw - ux) // 2,
        py0 + (fh + uy - 1) // 2,
        py1 + (fh - uy) // 2,
    ]
    # Multiply gain by ux*uy so the upsampled signal keeps its magnitude.
    return upfirdn2d(x, f, up=up, padding=pad_spec, flip_filter=flip_filter, gain=gain*ux*uy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Downsample a batch of 2D images with the given 2D FIR filter.

    By default the result is padded so its shape is a fraction of the input;
    user-specified padding (negative = cropping) is applied on top of that.
    Pixels outside the image are treated as zero.

    Args:
        x:           Input tensor `[batch_size, num_channels, in_height, in_width]`
                     (float16/float32/float64).
        f:           Float32 FIR filter (`[fh, fw]`, separable `[taps]`, or `None`).
        down:        Integer downsampling factor, single int or `[x, y]` (default: 2).
        padding:     Padding w.r.t. the input: single number, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    dx, dy = _parse_scaling(down)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    pad_spec = [
        px0 + (fw - dx + 1) // 2,
        px1 + (fw - dx) // 2,
        py0 + (fh - dy + 1) // 2,
        py1 + (fh - dy) // 2,
    ]
    return upfirdn2d(x, f, down=down, padding=pad_spec, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
| 16,287 | 41.306494 | 157 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/conv2d_resample.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
    """Return the shape of weight tensor `w` as a plain list of Python ints."""
    # Cast under the tracer-warning guard so the result is treated as a
    # compile-time constant when the network is being traced.
    with misc.suppress_tracer_warnings():
        dims = []
        for extent in w.shape:
            dims.append(int(extent))
    misc.assert_shape(w, dims)
    return dims
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.

    Args:
        x: Input tensor `[batch_size, in_channels, height, width]`.
        w: Weight tensor `[out_channels, in_channels//groups, kh, kw]`.
        stride, padding, groups: Forwarded to the underlying convolution.
        transpose: Use `conv_transpose2d()` instead of `conv2d()`.
        flip_weight: True = correlation (PyTorch's native conv2d behavior),
            False = true convolution (weights flipped spatially first).
    """
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    # Flip weight if requested.
    if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
        w = w.flip([2, 3])
    # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
    # 1x1 kernel + memory_format=channels_last + less than 64 channels.
    if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
        # x.stride()[1] == 1 detects channels_last memory layout.
        if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
            if out_channels <= 4 and groups == 1:
                # Tiny output: express the 1x1 convolution as a plain matmul.
                in_shape = x.shape
                x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
                x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
            else:
                # Switch to contiguous layout so cuDNN picks a fast kernel.
                x = x.to(memory_format=torch.contiguous_format)
                w = w.to(memory_format=torch.contiguous_format)
                x = conv2d_gradfix.conv2d(x, w, groups=groups)
            return x.to(memory_format=torch.channels_last)
    # Otherwise => execute using conv2d_gradfix.
    op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.
    Padding is performed only once at the beginning, not between the operations.
    Args:
        x: Input tensor of shape
            `[batch_size, in_channels, in_height, in_width]`.
        w: Weight tensor of shape
            `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f: Low-pass filter for up/downsampling. Must be prepared beforehand by
            calling upfirdn2d.setup_filter(). None = identity (default).
        up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
        padding: Padding with respect to the upsampled image. Can be a single number
            or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        groups: Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).
    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)
    # Adjust padding to account for up/downsampling.
    # The extra padding centers the FIR filter footprint on the resampled grid.
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2
    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x
    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # conv_transpose2d() expects weights as [in, out//groups, kh, kw];
        # regroup accordingly for grouped convolutions.
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        # conv_transpose2d() only accepts symmetric non-negative padding, so
        # apply the largest amount it can handle and defer the rest to upfirdn2d.
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
#----------------------------------------------------------------------------
| 7,591 | 47.356688 | 130 | py |
DFMGAN | DFMGAN-main/torch_utils/ops/fma.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
    """Fused multiply-add `a * b + c` via a custom autograd Function
    (slightly faster gradients than `torch.addcmul()`, per module docstring)."""
    return _FusedMultiplyAdd.apply(a, b, c)
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
    """Autograd Function computing `a * b + c` with hand-written gradients
    that un-broadcast each grad back to its input's shape."""
    @staticmethod
    def forward(ctx, a, b, c): # pylint: disable=arguments-differ
        # addcmul evaluates c + a * b in one fused kernel.
        ctx.save_for_backward(a, b)
        ctx.c_shape = c.shape
        return torch.addcmul(c, a, b)
    @staticmethod
    def backward(ctx, dout): # pylint: disable=arguments-differ
        a, b = ctx.saved_tensors
        grad_a = grad_b = grad_c = None
        need_a, need_b, need_c = ctx.needs_input_grad
        if need_a:
            grad_a = _unbroadcast(dout * b, a.shape)
        if need_b:
            grad_b = _unbroadcast(dout * a, b.shape)
        if need_c:
            grad_c = _unbroadcast(dout, ctx.c_shape)
        return grad_a, grad_b, grad_c
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
    """Reduce a broadcasted tensor `x` back down to the target `shape` by
    summing over every axis that broadcasting expanded."""
    leading = x.ndim - len(shape)
    assert leading >= 0
    # Axes to reduce: extra leading axes, plus axes whose target extent is 1.
    reduce_axes = []
    for axis in range(x.ndim):
        if x.shape[axis] > 1 and (axis < leading or shape[axis - leading] == 1):
            reduce_axes.append(axis)
    if reduce_axes:
        x = x.sum(dim=reduce_axes, keepdim=True)
    if leading:
        # Collapse the (now size-1) leading axes.
        x = x.reshape(-1, *x.shape[leading + 1:])
    assert x.shape == shape
    return x
#----------------------------------------------------------------------------
| 2,034 | 32.360656 | 105 | py |
DFMGAN | DFMGAN-main/metrics/metric_utils.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib
#----------------------------------------------------------------------------
class MetricOptions:
    """Bundle of options shared by all metric computations.

    Args:
        G: Generator network (required by generator-based metrics).
        G_kwargs: Keyword arguments passed to `G` when sampling images.
        dataset_kwargs: Arguments for constructing the primary real dataset.
        dataset2_kwargs: Arguments for constructing a secondary real dataset
            (used by metrics comparing two image directories).
        num_gpus: Number of GPUs participating in the computation.
        rank: Rank of the current process, in `[0, num_gpus)`.
        device: Torch device; defaults to `cuda:<rank>`.
        progress: Optional parent ProgressMonitor; rank 0 gets a sub-monitor,
            other ranks get a silent fresh monitor.
        cache: Whether to cache dataset feature statistics on disk.
    """
    def __init__(self, G=None, G_kwargs=None, dataset_kwargs=None, dataset2_kwargs=None, num_gpus=1, rank=0, device=None, progress=None, cache=True):
        assert 0 <= rank < num_gpus
        self.G = G
        # Defaults are None instead of `{}` to avoid mutable default arguments;
        # each instance gets its own fresh EasyDict.
        self.G_kwargs = dnnlib.EasyDict(G_kwargs if G_kwargs is not None else {})
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs if dataset_kwargs is not None else {})
        self.dataset2_kwargs = dnnlib.EasyDict(dataset2_kwargs if dataset2_kwargs is not None else {})
        self.num_gpus = num_gpus
        self.rank = rank
        self.device = device if device is not None else torch.device('cuda', rank)
        self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
        self.cache = cache
#----------------------------------------------------------------------------
_feature_detector_cache = dict()
def get_feature_detector_name(url):
    """Derive a short detector name from `url`: the last path component
    with its file extension stripped."""
    basename = url.split('/')[-1]
    stem, _ext = os.path.splitext(basename)
    return stem
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    """Load (or fetch from the process-wide cache) a TorchScript feature detector.

    Detectors are cached per `(url, device)` pair. In multi-GPU runs, rank 0
    downloads first while the other ranks wait on a barrier, so the file is
    fetched only once.
    """
    assert 0 <= rank < num_gpus
    key = (url, device)
    if key not in _feature_detector_cache:
        is_leader = (rank == 0)
        if not is_leader and num_gpus > 1:
            torch.distributed.barrier() # leader goes first
        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
            _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
        if is_leader and num_gpus > 1:
            torch.distributed.barrier() # others follow
    return _feature_detector_cache[key]
#----------------------------------------------------------------------------
class FeatureStats:
    """Accumulates feature vectors and derived statistics for metric computation.

    Depending on the capture flags, keeps the raw features (`capture_all`)
    and/or running sums for mean/covariance estimation (`capture_mean_cov`).
    Accumulation stops silently once `max_items` features have been added.
    """
    def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
        self.capture_all = capture_all
        self.capture_mean_cov = capture_mean_cov
        self.max_items = max_items
        self.num_items = 0
        self.num_features = None    # determined lazily on first append()
        self.all_features = None
        self.raw_mean = None        # running sum of features (float64)
        self.raw_cov = None         # running sum of outer products (float64)
    def set_num_features(self, num_features):
        # Lazily allocate the accumulators; the feature dimensionality must
        # stay consistent across all subsequent appends.
        if self.num_features is not None:
            assert num_features == self.num_features
        else:
            self.num_features = num_features
            self.all_features = []
            self.raw_mean = np.zeros([num_features], dtype=np.float64)
            self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)
    def is_full(self):
        # True once max_items features have been accumulated.
        return (self.max_items is not None) and (self.num_items >= self.max_items)
    def append(self, x):
        """Append a batch of feature vectors of shape [batch, num_features]."""
        x = np.asarray(x, dtype=np.float32)
        assert x.ndim == 2
        if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
            if self.num_items >= self.max_items:
                return
            x = x[:self.max_items - self.num_items]    # keep only what still fits
        self.set_num_features(x.shape[1])
        self.num_items += x.shape[0]
        if self.capture_all:
            self.all_features.append(x)
        if self.capture_mean_cov:
            # Accumulate in float64 for numerical stability.
            x64 = x.astype(np.float64)
            self.raw_mean += x64.sum(axis=0)
            self.raw_cov += x64.T @ x64
    def append_torch(self, x, num_gpus=1, rank=0):
        """Append a batch of torch features, gathering across GPUs if needed."""
        assert isinstance(x, torch.Tensor) and x.ndim == 2
        assert 0 <= rank < num_gpus
        if num_gpus > 1:
            # Every rank broadcasts its batch so all ranks see identical data.
            ys = []
            for src in range(num_gpus):
                y = x.clone()
                torch.distributed.broadcast(y, src=src)
                ys.append(y)
            x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
        self.append(x.cpu().numpy())
    def get_all(self):
        # All captured features as one [num_items, num_features] array.
        assert self.capture_all
        return np.concatenate(self.all_features, axis=0)
    def get_all_torch(self):
        return torch.from_numpy(self.get_all())
    def get_mean_cov(self):
        # Finalize the running sums into a mean vector and covariance matrix.
        assert self.capture_mean_cov
        mean = self.raw_mean / self.num_items
        cov = self.raw_cov / self.num_items
        cov = cov - np.outer(mean, mean)
        return mean, cov
    def save(self, pkl_file):
        """Pickle the full accumulator state to `pkl_file`."""
        with open(pkl_file, 'wb') as f:
            pickle.dump(self.__dict__, f)
    @staticmethod
    def load(pkl_file):
        """Restore a FeatureStats object previously written by save()."""
        with open(pkl_file, 'rb') as f:
            s = dnnlib.EasyDict(pickle.load(f))
        # __dict__.update restores all fields, including capture_mean_cov.
        obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
        obj.__dict__.update(s)
        return obj
#----------------------------------------------------------------------------
class ProgressMonitor:
    """Tracks progress of a long-running computation, optionally printing
    throughput and forwarding progress to an external callback mapped onto
    the [pfn_lo, pfn_hi] sub-range of a global total."""
    def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
        # Reporting configuration.
        self.tag = tag
        self.num_items = num_items
        self.verbose = verbose
        self.flush_interval = flush_interval
        # External callback and the global range it maps onto.
        self.progress_fn = progress_fn
        self.pfn_lo = pfn_lo
        self.pfn_hi = pfn_hi
        self.pfn_total = pfn_total
        # Timing state for the current reporting window.
        self.start_time = time.time()
        self.batch_time = self.start_time
        self.batch_items = 0
        if self.progress_fn is not None:
            self.progress_fn(self.pfn_lo, self.pfn_total)
    def update(self, cur_items):
        """Record that `cur_items` items are done; report if a flush is due."""
        assert (self.num_items is None) or (cur_items <= self.num_items)
        finished = (self.num_items is not None) and (cur_items >= self.num_items)
        if not finished and cur_items < self.batch_items + self.flush_interval:
            return
        now = time.time()
        elapsed = now - self.start_time
        per_item = (now - self.batch_time) / max(cur_items - self.batch_items, 1)
        if self.verbose and self.tag is not None:
            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(elapsed):<12s} ms/item {per_item*1e3:.2f}')
        self.batch_time = now
        self.batch_items = cur_items
        if (self.progress_fn is not None) and (self.num_items is not None):
            frac = cur_items / self.num_items
            self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * frac, self.pfn_total)
    def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
        """Create a child monitor covering the [rel_lo, rel_hi] slice of this
        monitor's progress range."""
        span = self.pfn_hi - self.pfn_lo
        return ProgressMonitor(
            tag=tag,
            num_items=num_items,
            flush_interval=flush_interval,
            verbose=self.verbose,
            progress_fn=self.progress_fn,
            pfn_lo=self.pfn_lo + span * rel_lo,
            pfn_hi=self.pfn_lo + span * rel_hi,
            pfn_total=self.pfn_total,
        )
#----------------------------------------------------------------------------
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, num_data = 1, **stats_kwargs):
    """Run a feature detector over a real dataset and accumulate FeatureStats.

    Results are cached on disk, keyed by dataset/detector/stats arguments, so
    repeated metric evaluations skip the forward passes. `num_data` selects
    between `opts.dataset_kwargs` (1) and `opts.dataset2_kwargs` (2).
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs) if num_data == 1 else dnnlib.util.construct_class_by_name(**opts.dataset2_kwargs)
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
    # Try to lookup from cache.
    cache_file = None
    if opts.cache:
        # Choose cache file name.
        args = dict(dataset_kwargs=opts.dataset_kwargs if num_data == 1 else opts.dataset2_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')
        # Check if the file exists (all processes must agree).
        # Rank 0 decides and broadcasts the result so all ranks take the same path.
        flag = os.path.isfile(cache_file) if opts.rank == 0 else False
        if opts.num_gpus > 1:
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)
        # Load.
        if flag:
            return FeatureStats.load(cache_file)
    # Initialize.
    num_items = len(dataset)
    if max_items is not None:
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Main loop.
    # Round-robin shard: each rank processes every num_gpus-th item.
    item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
    for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])   # grayscale -> RGB
        if images.shape[1] == 4:
            images = images[:, :3, :, :]           # drop the 4th channel
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    # Save to cache.
    if cache_file is not None and opts.rank == 0:
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        # Write to a unique temp file first so the rename is atomic.
        temp_file = cache_file + '.' + uuid.uuid4().hex
        stats.save(temp_file)
        os.replace(temp_file, cache_file) # atomic
    return stats
#----------------------------------------------------------------------------
def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, jit=False, **stats_kwargs):
    """Sample images from the generator and accumulate detector FeatureStats.

    Images are generated in sub-batches of `batch_gen` and fed to the detector
    in batches of `batch_size`. Sampling continues until `stats_kwargs`'s
    `max_items` features have been collected.
    """
    if batch_gen is None:
        batch_gen = min(batch_size, 4)
    assert batch_size % batch_gen == 0
    # Setup generator and load labels.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    # Image generation func.
    def run_generator(z, c, defect_z = None):
        img = G(z=z, c=c, defect_z = defect_z, **opts.G_kwargs)
        # Map dynamic range [-1, 1] float -> [0, 255] uint8.
        img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        return img
    # JIT.
    if jit:
        z = torch.zeros([batch_gen, G.z_dim], device=opts.device)
        c = torch.zeros([batch_gen, G.c_dim], device=opts.device)
        input_list = [z, c]
        # Defect-aware generator variants take an extra defect latent input.
        if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
            defect_z = torch.zeros([batch_gen, G.z_dim], device=opts.device)
            input_list.append(defect_z)
        run_generator = torch.jit.trace(run_generator, input_list, check_trace=False)
    # Initialize.
    stats = FeatureStats(**stats_kwargs)
    assert stats.max_items is not None
    progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Main loop.
    while not stats.is_full():
        images = []
        for _i in range(batch_size // batch_gen):
            z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            # Sample class labels uniformly from the dataset.
            c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_gen)]
            c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
            defect_z = None
            if G.transfer in ['dual_mod', 'res_block', 'res_block_match_dis', 'res_block_uni_dis']:
                defect_z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            images.append(run_generator(z, c, defect_z))
        images = torch.cat(images)
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])   # grayscale -> RGB
        features = detector(images, **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    return stats
#----------------------------------------------------------------------------
| 12,605 | 43.076923 | 185 | py |
DFMGAN | DFMGAN-main/metrics/kernel_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Kernel Inception Distance (KID) from the paper "Demystifying MMD
GANs". Matches the original implementation by Binkowski et al. at
https://github.com/mbinkowski/MMD-GAN/blob/master/gan/compute_scores.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_kid(opts, max_real, num_gen, num_subsets, max_subset_size):
    """Kernel Inception Distance between real and generated images.

    Estimates the squared MMD with a cubic polynomial kernel, averaged over
    `num_subsets` random subsets of at most `max_subset_size` samples each.
    Only rank 0 returns the metric; other ranks return NaN.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all()
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all()
    if opts.rank != 0:
        return float('nan')
    n = real_features.shape[1]  # feature dimensionality
    m = min(min(real_features.shape[0], gen_features.shape[0]), max_subset_size)
    t = 0
    for _subset_idx in range(num_subsets):
        x = gen_features[np.random.choice(gen_features.shape[0], m, replace=False)]
        y = real_features[np.random.choice(real_features.shape[0], m, replace=False)]
        # Polynomial kernel k(u, v) = (u.v/n + 1)^3; unbiased MMD^2 estimate
        # (diagonal of the within-set kernel matrices is excluded).
        a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
        b = (x @ y.T / n + 1) ** 3
        t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
    kid = t / num_subsets / m
    return float(kid)
#----------------------------------------------------------------------------
def compute_kid_between_dir(opts, max_real, num_gen, num_subsets, max_subset_size):
    """Kernel Inception Distance between two real datasets (no generator).

    Same MMD estimator as `compute_kid`, but compares `opts.dataset_kwargs`
    against `opts.dataset2_kwargs` instead of generated images.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
    detector2_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all()
    # num_data=2 selects the secondary dataset (opts.dataset2_kwargs).
    real_features_2 = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector2_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real, num_data = 2).get_all()
    if opts.rank != 0:
        return float('nan')
    n = real_features.shape[1]  # feature dimensionality
    m = min(min(real_features.shape[0], real_features_2.shape[0]), max_subset_size)
    t = 0
    for _subset_idx in range(num_subsets):
        x = real_features_2[np.random.choice(real_features_2.shape[0], m, replace=False)]
        y = real_features[np.random.choice(real_features.shape[0], m, replace=False)]
        # Polynomial kernel k(u, v) = (u.v/n + 1)^3; unbiased MMD^2 estimate.
        a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
        b = (x @ y.T / n + 1) ** 3
        t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
    kid = t / num_subsets / m
    return float(kid)
#----------------------------------------------------------------------------
| 3,977 | 50 | 118 | py |
DFMGAN | DFMGAN-main/metrics/frechet_inception_distance.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Frechet Inception Distance (FID) from the paper
"GANs trained by a two time-scale update rule converge to a local Nash
equilibrium". Matches the original implementation by Heusel et al. at
https://github.com/bioinf-jku/TTUR/blob/master/fid.py"""
import numpy as np
import scipy.linalg
from . import metric_utils
#----------------------------------------------------------------------------
def compute_fid(opts, max_real, num_gen):
    """Frechet Inception Distance between real and generated images.

    FID = |mu_r - mu_g|^2 + Tr(S_r + S_g - 2 (S_r S_g)^{1/2}), computed from
    Inception feature means and covariances. Only rank 0 returns the metric.
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True) # Return raw features before the softmax layer.
    mu_real, sigma_real = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real).get_mean_cov()
    mu_gen, sigma_gen = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen).get_mean_cov()
    if opts.rank != 0:
        return float('nan')
    m = np.square(mu_gen - mu_real).sum()   # squared distance between means
    s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
    # np.real discards the tiny imaginary component sqrtm can introduce.
    fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
    return float(fid)
#----------------------------------------------------------------------------
| 2,040 | 47.595238 | 118 | py |
DFMGAN | DFMGAN-main/metrics/lpips.py | import lpips, torch
import itertools
import numpy as np
import dnnlib
from tqdm import tqdm
import copy
def compute_clpips(opts, num_gen):
    """Clustered LPIPS diversity metric.

    Generates `num_gen` images, assigns each to its perceptually closest
    training image (LPIPS-AlexNet distance), then averages the pairwise LPIPS
    distance within each cluster.

    Returns:
        Tuple `(clpips, clpips_rz)`: `clpips` averages over all clusters,
        `clpips_rz` averages only over clusters with a nonzero score.
    """
    dataset_kwargs = opts.dataset_kwargs
    device = opts.device
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(device)
    with torch.no_grad():
        loss_fn_alex = lpips.LPIPS(net='alex', verbose = opts.progress.verbose).to(device) # best forward scores
        # Load the entire real dataset into a single tensor on the device.
        data_list = []
        dataset = dnnlib.util.construct_class_by_name(**dataset_kwargs)
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
        for img, _labels in torch.utils.data.DataLoader(dataset=dataset, batch_size=64, **data_loader_kwargs):
            if img.shape[1] == 1:
                img = img.repeat([1, 3, 1, 1])   # grayscale -> RGB
            if img.shape[1] == 4:
                img = img[:, :3, :, :]           # drop the 4th channel
            data_list.append(img.to(device))
        data_list = torch.cat(data_list, dim = 0)
        # Assign each generated image to its closest real image.
        cluster = [[] for _ in range(data_list.shape[0])]
        label = torch.zeros([1, G.c_dim], device=device)
        iterator = tqdm(range(num_gen), desc = 'Clustering') if opts.progress.verbose else range(num_gen)
        for seed in iterator:
            z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
            img = G(z, label, defect_z = z, truncation_psi = 1, noise_mode = 'const')
            score_list = loss_fn_alex(img.repeat(data_list.shape[0], 1, 1, 1), data_list)
            closest_index = score_list.argmin().item()
            if len(cluster[closest_index]) < 200:   # cap cluster size to bound the pairwise cost
                cluster[closest_index].append(img)
        # Average pairwise LPIPS within each cluster.
        cluster_lpips = []
        iterator = tqdm(cluster, desc = 'Computing clustered LPIPS') if opts.progress.verbose else cluster
        for c in iterator:
            if len(c) <= 1:
                cluster_lpips.append(0.0)
                continue
            c_lpips = 0.0
            img = torch.cat(c, dim = 0)
            ref_img = img.clone()
            # Rotate the batch so every ordered pair is compared exactly once.
            for _ in range(img.shape[0] - 1):
                img = torch.cat([img[1:], img[0:1]], dim = 0)
                c_lpips += loss_fn_alex(img, ref_img).sum().item()
            cluster_lpips.append(c_lpips / (img.shape[0] * (img.shape[0] - 1)))
        if opts.progress.verbose:
            print('Cluster Statistics:')
            print([(len(cluster[i]), '%.4f' % cluster_lpips[i]) for i in range(len(data_list))])
    clpips = sum(cluster_lpips) / len(cluster_lpips)
    # Average over nonzero clusters only. Guard against all scores being zero
    # (e.g. every cluster has <= 1 member), which previously raised
    # ZeroDivisionError.
    nonzero_scores = [score for score in cluster_lpips if score != 0.0]
    clpips_rz = sum(nonzero_scores) / len(nonzero_scores) if nonzero_scores else 0.0
    return clpips, clpips_rz
| 3,201 | 41.131579 | 115 | py |
DFMGAN | DFMGAN-main/metrics/perceptual_path_length.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator
Architecture for Generative Adversarial Networks". Matches the original
implementation by Karras et al. at
https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py"""
import copy
import numpy as np
import torch
import dnnlib
from . import metric_utils
#----------------------------------------------------------------------------
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
    """Spherical interpolation between batches of vectors `a` and `b` at
    fraction `t`, along the great circle on the unit sphere."""
    a_unit = a / a.norm(dim=-1, keepdim=True)
    b_unit = b / b.norm(dim=-1, keepdim=True)
    cos_angle = (a_unit * b_unit).sum(dim=-1, keepdim=True)
    angle = t * torch.acos(cos_angle)
    # Component of b orthogonal to a, normalized to unit length.
    ortho = b_unit - cos_angle * a_unit
    ortho = ortho / ortho.norm(dim=-1, keepdim=True)
    out = a_unit * torch.cos(angle) + ortho * torch.sin(angle)
    return out / out.norm(dim=-1, keepdim=True)
#----------------------------------------------------------------------------
class PPLSampler(torch.nn.Module):
    """Samples PPL distances: generates pairs of images from latents that are
    `epsilon` apart (in Z or W space) and returns their squared LPIPS
    distance divided by epsilon^2 (a finite-difference estimate).
    """
    def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16):
        assert space in ['z', 'w']
        assert sampling in ['full', 'end']
        super().__init__()
        self.G = copy.deepcopy(G)
        self.G_kwargs = G_kwargs
        self.epsilon = epsilon
        self.space = space
        self.sampling = sampling
        self.crop = crop
        self.vgg16 = copy.deepcopy(vgg16)
    def forward(self, c):
        # Generate random latents and interpolation t-values.
        # sampling='end' forces t=0 (endpoints only); 'full' samples t in [0,1).
        t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0)
        z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2)
        # Interpolate in W or Z.
        if self.space == 'w':
            w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2)
            wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2))
            wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon)
        else: # space == 'z'
            zt0 = slerp(z0, z1, t.unsqueeze(1))
            zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon)
            wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2)
        # Randomize noise buffers.
        for name, buf in self.G.named_buffers():
            if name.endswith('.noise_const'):
                buf.copy_(torch.randn_like(buf))
        # Generate images.
        img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs)
        # Center crop.
        if self.crop:
            assert img.shape[2] == img.shape[3]
            c = img.shape[2] // 8
            img = img[:, :, c*3 : c*7, c*2 : c*6]
        # Downsample to 256x256.
        factor = self.G.img_resolution // 256
        if factor > 1:
            img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5])
        # Scale dynamic range from [-1,1] to [0,255].
        img = (img + 1) * (255 / 2)
        if self.G.img_channels == 1:
            img = img.repeat([1, 3, 1, 1])
        # Evaluate differential LPIPS.
        lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2)
        dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2
        return dist
#----------------------------------------------------------------------------
def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size, jit=False):
    """Perceptual Path Length: mean finite-difference LPIPS distance between
    images generated from latents `epsilon` apart, with the 1st/99th
    percentile outliers discarded. Only rank 0 returns the metric.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    vgg16_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)
    # Setup sampler.
    sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16)
    sampler.eval().requires_grad_(False).to(opts.device)
    if jit:
        c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
        sampler = torch.jit.trace(sampler, [c], check_trace=False)
    # Sampling loop.
    dist = []
    progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples)
    for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
        progress.update(batch_start)
        # Sample class labels uniformly from the dataset.
        c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)]
        c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
        x = sampler(c)
        # Gather results from all GPUs so every rank sees the full set.
        for src in range(opts.num_gpus):
            y = x.clone()
            if opts.num_gpus > 1:
                torch.distributed.broadcast(y, src=src)
            dist.append(y)
    progress.update(num_samples)
    # Compute PPL.
    if opts.rank != 0:
        return float('nan')
    dist = torch.cat(dist)[:num_samples].cpu().numpy()
    # Discard the extreme 1% tails before averaging.
    # NOTE(review): the `interpolation=` keyword of np.percentile was
    # deprecated in NumPy 1.22 and removed in NumPy 2.0 (renamed `method=`).
    # Confirm the pinned NumPy version before upgrading.
    lo = np.percentile(dist, 1, interpolation='lower')
    hi = np.percentile(dist, 99, interpolation='higher')
    ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean()
    return float(ppl)
#----------------------------------------------------------------------------
| 5,538 | 40.962121 | 131 | py |
DFMGAN | DFMGAN-main/metrics/inception_score.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Inception Score (IS) from the paper "Improved techniques for training
GANs". Matches the original implementation by Salimans et al. at
https://github.com/openai/improved-gan/blob/master/inception_score/model.py"""
import numpy as np
from . import metric_utils
#----------------------------------------------------------------------------
def compute_is(opts, num_gen, num_splits):
    """Inception Score over `num_gen` generated images.

    Softmax outputs are gathered from the reference Inception network, split
    into `num_splits` equal chunks, and exp(mean KL(p(y|x) || p(y))) is
    computed per chunk. Returns (mean, std) across the splits; ranks other
    than 0 return (nan, nan).
    """
    # Direct TorchScript translation of http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(no_output_bias=True) # Match the original implementation by not applying bias in the softmax layer.
    gen_probs = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        capture_all=True, max_items=num_gen).get_all()
    if opts.rank != 0:
        return float('nan'), float('nan')
    split_scores = []
    for split_idx in range(num_splits):
        lo = split_idx * num_gen // num_splits
        hi = (split_idx + 1) * num_gen // num_splits
        chunk = gen_probs[lo:hi]
        marginal = np.mean(chunk, axis=0, keepdims=True)
        kl_per_image = np.sum(chunk * (np.log(chunk) - np.log(marginal)), axis=1)
        split_scores.append(np.exp(np.mean(kl_per_image)))
    return float(np.mean(split_scores)), float(np.std(split_scores))
#----------------------------------------------------------------------------
| 1,874 | 47.076923 | 126 | py |
DFMGAN | DFMGAN-main/metrics/metric_main.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import lpips
#----------------------------------------------------------------------------
_metric_dict = dict() # name => fn
def register_metric(fn):
    """Decorator: record *fn* in the global metric registry under its own name."""
    assert callable(fn)
    _metric_dict[fn.__name__] = fn
    return fn

def is_valid_metric(metric):
    """True iff *metric* names a registered metric function."""
    return metric in _metric_dict

def list_valid_metrics():
    """All registered metric names, in registration order."""
    return [name for name in _metric_dict]
#----------------------------------------------------------------------------
def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments.
    """Run the registered metric `metric` and return its results plus timing metadata.

    `kwargs` are forwarded verbatim to metric_utils.MetricOptions (rank,
    num_gpus, device, etc. — see that class for the full list).
    """
    assert is_valid_metric(metric)
    opts = metric_utils.MetricOptions(**kwargs)
    # Calculate.
    start_time = time.time()
    results = _metric_dict[metric](opts)
    total_time = time.time() - start_time
    # Broadcast results.
    for key, value in list(results.items()):
        if opts.num_gpus > 1:
            # Replicate rank 0's numbers to every process so all ranks return
            # identical values.
            value = torch.as_tensor(value, dtype=torch.float64, device=opts.device)
            torch.distributed.broadcast(tensor=value, src=0)
            value = float(value.cpu())
        results[key] = value
    # Decorate with metadata.
    return dnnlib.EasyDict(
        results = dnnlib.EasyDict(results),
        metric = metric,
        total_time = total_time,
        total_time_str = dnnlib.util.format_time(total_time),
        num_gpus = opts.num_gpus,
    )
#----------------------------------------------------------------------------
def report_metric(result_dict, run_dir=None, snapshot_pkl=None):
    """Print a metric result as one JSON line, and append it to
    `<run_dir>/metric-<name>.jsonl` when `run_dir` exists.

    `snapshot_pkl` is rewritten relative to `run_dir` when both are given.
    """
    metric = result_dict['metric']
    assert is_valid_metric(metric)
    if run_dir is not None and snapshot_pkl is not None:
        snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)
    jsonl_line = json.dumps(dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time()))
    print(jsonl_line)
    if run_dir is not None and os.path.isdir(run_dir):
        with open(os.path.join(run_dir, f'metric-{metric}.jsonl'), 'at') as f:
            f.write(jsonl_line + '\n')
#----------------------------------------------------------------------------
# Primary metrics.
# Each function name below doubles as the metric name (via register_metric) and
# as the key prefix of the returned result dict. The *_full variants disable
# dataset truncation and x-flip augmentation before measuring.
@register_metric
def fid50k_full(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
    return dict(fid50k_full=fid)
@register_metric
def kid50k_full(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k_full=kid)
@register_metric
def fid5k_full(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=5000)
    return dict(fid5k_full=fid)
@register_metric
def kid5k_full(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(kid5k_full=kid)
# Directory-vs-directory variants (compare two image folders rather than
# generator vs. dataset).
@register_metric
def fid_between_dir(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid_between_dir(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(fid_between_dir=fid)
@register_metric
def kid_between_dir(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid_between_dir(opts, max_real=1000000, num_gen=5000, num_subsets=100, max_subset_size=1000)
    return dict(kid_between_dir=kid)
@register_metric
def clpips1k(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    clpips1k, clpips1k_rz = lpips.compute_clpips(opts, num_gen = 1000)
    return dict(clpips1k = clpips1k, clpips1k_rz = clpips1k_rz)
@register_metric
def pr50k3_full(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall)
@register_metric
def ppl2_wend(opts):
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
    return dict(ppl2_wend=ppl)
@register_metric
def is50k(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
    return dict(is50k_mean=mean, is50k_std=std)
@register_metric
def is5k(opts):
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=5000, num_splits=10)
    return dict(is5k_mean=mean, is5k_std=std)
#----------------------------------------------------------------------------
# Legacy metrics.
@register_metric
def fid50k(opts):
    opts.dataset_kwargs.update(max_size=None)
    fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
    return dict(fid50k=fid)
@register_metric
def kid50k(opts):
    opts.dataset_kwargs.update(max_size=None)
    kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k=kid)
@register_metric
def pr50k3(opts):
    opts.dataset_kwargs.update(max_size=None)
    precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_precision=precision, pr50k3_recall=recall)
@register_metric
def ppl_zfull(opts):
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='full', crop=True, batch_size=2)
    return dict(ppl_zfull=ppl)
@register_metric
def ppl_wfull(opts):
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='full', crop=True, batch_size=2)
    return dict(ppl_wfull=ppl)
@register_metric
def ppl_zend(opts):
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='z', sampling='end', crop=True, batch_size=2)
    return dict(ppl_zend=ppl)
@register_metric
def ppl_wend(opts):
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=True, batch_size=2)
    return dict(ppl_wend=ppl)
#----------------------------------------------------------------------------
| 7,212 | 36.963158 | 147 | py |
DFMGAN | DFMGAN-main/metrics/precision_recall.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Precision/Recall (PR) from the paper "Improved Precision and Recall
Metric for Assessing Generative Models". Matches the original implementation
by Kynkaanniemi et al. at
https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
import torch
from . import metric_utils
#----------------------------------------------------------------------------
def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
    """Pairwise Euclidean distances between row_features and col_features.

    The column features are processed in batches of at most `col_batch_size`
    rows, interleaved across GPUs; with num_gpus > 1 every rank broadcasts its
    partial result so rank 0 can assemble the full matrix. Returns the
    (num_rows, num_cols) distance matrix on CPU for rank 0 and None elsewhere.
    """
    assert 0 <= rank < num_gpus
    num_cols = col_features.shape[0]
    # Round the batch count up to a multiple of num_gpus so every rank gets
    # the same number of batches; zero-pad the columns to split evenly.
    num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
    pad_rows = -num_cols % num_batches
    padded = torch.nn.functional.pad(col_features, [0, 0, 0, pad_rows])
    pieces = []
    for chunk in padded.chunk(num_batches)[rank::num_gpus]:
        local = torch.cdist(row_features.unsqueeze(0), chunk.unsqueeze(0))[0]
        for src in range(num_gpus):
            shared = local.clone()
            if num_gpus > 1:
                torch.distributed.broadcast(shared, src=src)
            pieces.append(shared.cpu() if rank == 0 else None)
    if rank != 0:
        return None
    # Drop the zero-padded columns before returning.
    return torch.cat(pieces, dim=1)[:, :num_cols]
#----------------------------------------------------------------------------
def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
    """Improved Precision and Recall on VGG16 features.

    Returns (precision, recall) as floats on rank 0; other ranks get NaN.
    """
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    detector_kwargs = dict(return_features=True)
    # Feature embeddings are kept in float16 to reduce GPU memory.
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
    results = dict()
    # Precision: fraction of generated samples that fall inside the real
    # manifold; recall: the same computation with the roles swapped.
    for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
        kth = []
        for manifold_batch in manifold.split(row_batch_size):
            # k-th NN radius within the manifold; nhood_size + 1 because each
            # point's nearest neighbor in its own manifold is itself (distance 0).
            dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
        kth = torch.cat(kth) if opts.rank == 0 else None
        pred = []
        for probes_batch in probes.split(row_batch_size):
            # A probe is "covered" if it lies within the k-NN radius of any
            # manifold point.
            dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
        results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
    return results['precision'], results['recall']
#----------------------------------------------------------------------------
| 3,617 | 56.428571 | 159 | py |
Conditionial-SWF | Conditionial-SWF-main/main.py | import glob
import os
import shutil
import configargparse
import jax
import jax.numpy as jnp
import numpy as np
import dataset
import models
import plotting
import utils
parser = configargparse.ArgumentParser()
parser.add("-c", "--config", required=True, is_config_file=True, help="config file path")
utils.setup_parser(parser)
args = parser.parse_args()
# print configs and copy code for reproducibility
logger, dirname = utils.setup_logging(args)
files_to_copy = glob.glob(os.path.dirname(os.path.realpath(__file__)) + "/*.py")
for script_src in files_to_copy:
script_dst = os.path.abspath(os.path.join(dirname, "code", os.path.basename(script_src) + ".bak"))
shutil.copyfile(script_src, script_dst)
for k, v in sorted(vars(args).items()):
logger.info(" %30s: %s" % (k, v))
# experimental setups
n_devices = jax.device_count()
devices = jax.devices()
logger.info(f"{n_devices} devices found.")
utils.setup_seed(args.seed)
data_train, data_test, label_train, label_test, data_shape = dataset.get_dataset(args.dataset)
dim, n_train_data = data_train.shape
_, n_test_data = data_test.shape
n_labels = label_train.shape[0]
cdim = dim
hdim = args.hdim
hdim_per_conv = args.hdim_per_conv
layer_steps = args.layer_steps
step_size = args.step_size
n_batched_particles = args.n_batched_particles
n_offline_particles = args.n_offline_particles
n_bins_particles = args.n_bins_particles
n_bins_data = args.n_bins_data
init_std = args.init_std
max_layer = 100000
logger.info(f"dim={dim}, #data={n_train_data}, #test data={n_test_data}")
logger.info(f"hdim={hdim}, hdim_per_conv={hdim_per_conv}, #batched particles={n_batched_particles}, #offline particles={n_offline_particles}, #layer_steps={layer_steps}, stepsize={step_size}")
logger.info(f"forward: {args.forward}, inverse: {args.inverse}")
if args.forward == "rqspline":
logger.info(f"#bins for particles={n_bins_particles}")
if args.inverse == "rqspline":
logger.info(f"#bins for data={n_bins_data}")
assert dim == np.prod(data_shape)
nrow = int(np.sqrt(args.n_viz))
assert nrow * nrow == args.n_viz # use a square number for easy visualization
assert n_offline_particles // n_devices >= args.n_viz # make sure there are enough particles on device 0 for visualization
assert args.forward in ["rqspline", "sorting"]
assert args.inverse in ["rqspline", "sorting"]
assert n_batched_particles % n_devices == n_offline_particles % n_devices == 0
assert args.downsample.lower() in ["nearest", "lanczos3", "lanczos5"]
# for class-conditional generation, data/particles are in the XxY space
if args.cond and args.cond_type == "class":
amplifier = args.amplifier
data_train = np.concatenate([data_train, label_train * amplifier], axis=0)
cdim = dim + n_labels
# make sure the dataset can be evenly split across devices
if n_train_data % n_devices != 0:
data_train = np.concatenate([data_train, data_train[:, :n_devices - n_train_data % n_devices]], axis=1)
n_train_data = data_train.shape[1]
# initialize/restore particles
if args.restore_path:
if os.path.isfile(os.path.join(args.restore_path, "particles_batched.npy")) and os.path.isfile(os.path.join(args.restore_path, "particles_offline.npy")):
particles_batched = np.load(os.path.join(args.restore_path, "particles_batched.npy"))
particles_offline = np.load(os.path.join(args.restore_path, "particles_offline.npy"))
else:
raise ValueError(f"Cannot restore from {args.restore_path}")
else:
particles_batched = np.random.randn(cdim, n_batched_particles) * init_std
particles_offline = np.random.randn(cdim, n_offline_particles) * init_std
# generate mask and initialize particles for conditional tasks
# mask == 1 marks entries to be generated; mask == 0 marks observed entries
# that are copied from real data (see the blend below:
# particles * mask + data * (1.0 - mask)).
if args.cond:
    if args.cond_type.lower() == "bottom":
        # First half of the height axis is observed; the rest is generated.
        mask = np.ones(data_shape, dtype=np.float32)
        mask[:, :data_shape[1] // 2, :] = 0.0
    elif args.cond_type.lower() == "right":
        # NOTE(review): slices the width axis with data_shape[1] (the height);
        # correct only for square images — confirm intent.
        mask = np.ones(data_shape, dtype=np.float32)
        mask[:, :, :data_shape[1] // 2] = 0.0
    elif args.cond_type.lower() == "class":
        # Image part is generated; the appended label block is observed.
        mask = np.ones(cdim, dtype=np.float32)
        mask[dim:] = 0.0
    else:
        raise NotImplementedError(f"Condition type {args.cond_type} unknown.")
    mask = np.reshape(mask, (-1, 1))
    if args.cond_type.lower() == "class":
        # for class-conditional generation, we use uniform distribution of class labels
        batched_idx = np.tile(np.repeat(np.arange(n_labels), nrow), n_batched_particles // (n_labels * nrow) + 1)
        offline_idx = np.tile(np.repeat(np.arange(n_labels), nrow), n_offline_particles // (n_labels * nrow) + 1)
        onehot = np.eye(n_labels) * amplifier
        particles_batched[dim:, :] = onehot[:, batched_idx[:n_batched_particles]]
        particles_offline[dim:, :] = onehot[:, offline_idx[:n_offline_particles]]
    else:
        # for image inpainting, we create partially-observed images from the dataset
        # Repeat the training set (plus a partial copy) so every batched
        # particle has a conditioning image.
        n_copies = n_batched_particles // n_train_data
        data_train_samples = data_train[:, :n_batched_particles - n_copies * n_train_data]
        data_train_samples = np.concatenate([data_train] * n_copies + [data_train_samples], axis=1)
        if args.dequantize: # TODO check if necessary
            data_train_samples = data_train_samples + np.random.rand(*data_train_samples.shape) / 128.0
        particles_batched = particles_batched * mask + data_train_samples * (1.0 - mask)
        assert n_offline_particles % nrow == 0 and n_offline_particles // nrow <= n_test_data # for easy visualization
        # Offline particles condition on test images, nrow particles per image.
        data_test_samples = np.repeat(data_test[:, :n_offline_particles // nrow], nrow, axis=1)
        particles_offline = particles_offline * mask + data_test_samples * (1.0 - mask)
else:
    mask = None
# plot initial particles
samples_0 = np.concatenate([np.reshape(particles_batched[:dim, :args.n_viz].T, (nrow, nrow, -1)), np.reshape(particles_offline[:dim, :args.n_viz].T, (nrow, nrow, -1))], axis=1)
plotting.save_image(args, 0, samples_0, prefix="batched_offline", nrow=nrow * 2)
plotting.save_image(args, 0, data_train[:dim, :args.n_viz].T, prefix="data", nrow=nrow)
# copy data to devices
particles_batched_sh = jax.device_put_sharded(np.split(particles_batched, n_devices, axis=1), devices)
particles_offline_sh = jax.device_put_sharded(np.split(particles_offline, n_devices, axis=1), devices)
data_train_sh = jax.device_put_sharded(np.split(data_train, n_devices, axis=1), devices)
particles_batched_to_save = None
# the "model" defines locally-connected projections and pyramidal schedules
if args.dataset in ["mnist", "fashion"]:
model = models.mnist_model
elif args.dataset in ["cifar10"]:
model = models.cifar10_model
elif args.dataset in ["celeba"]:
model = models.celeba_model
else:
raise NotImplementedError(f"Model for {args.dataset} unknown.")
if args.baseline:
model = models.swf_model
transform_layers, transform_steps = model(
data_shape=data_shape, mask=mask, hdim=hdim, hdim_per_conv=hdim_per_conv, step_size=step_size, layer_steps=layer_steps, forward=args.forward, inverse=args.inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, downsample_method=args.downsample, dequantize=args.dequantize
)
# generate batched & offline samples
key = jax.random.PRNGKey(args.seed)
steps_mark = list(np.cumsum(transform_steps))
assert len(steps_mark) == len(transform_layers)
for i in range(1, max_layer + 1):
if args.pyramidal:
if i > steps_mark[0]:
steps_mark = steps_mark[1:]
transform_layers = transform_layers[1:]
if not transform_layers:
break
logger.info(f"Now use {transform_layers[0]}")
key, wkey = jax.random.split(key)
key, dkey = jax.random.split(key)
dkeys = jax.random.split(dkey, n_devices)
particles_batched_sh, particles_offline_sh, ws_dist_batched_sh, ws_dist_offline_sh, particles_batched_to_save = transform_layers[0](wkey, dkeys, data_train_sh, particles_batched_sh, particles_offline_sh)
else:
nf = len(transform_layers)
key, wkey = jax.random.split(key)
key, dkey = jax.random.split(key)
dkeys = jax.random.split(dkey, n_devices)
particles_batched_sh, particles_offline_sh, ws_dist_batched_sh, ws_dist_offline_sh, particles_batched_to_save = transform_layers[np.random.randint(nf)](wkey, dkeys, data_train_sh, particles_batched_sh, particles_offline_sh)
logger.info(f"Iter {i:3d}: ws_dist_batched={jnp.mean(ws_dist_batched_sh):.5f}, ws_dist_offline={jnp.mean(ws_dist_offline_sh):.5f}")
if i % args.viz_every == 0:
samples_i = jnp.concatenate([jnp.reshape(particles_batched_to_save[0, :dim, :args.n_viz].T, (nrow, nrow, -1)), jnp.reshape(particles_offline_sh[0, :dim, :args.n_viz].T, (nrow, nrow, -1))], axis=1)
plotting.save_image(args, i, samples_i, prefix="batched_offline", nrow=nrow * 2)
# save final particles and their nearest neighbors
particles_batched = np.moveaxis(np.array(particles_batched_to_save), 0, 1).reshape(cdim, -1)
particles_offline = np.moveaxis(np.array(particles_offline_sh), 0, 1).reshape(cdim, -1)
with open(os.path.join(dirname, "particles", "particles_batched.npy"), "wb") as f:
np.save(f, particles_batched)
logger.info(f"{f.name} saved.")
with open(os.path.join(dirname, "particles", "particles_offline.npy"), "wb") as f:
np.save(f, particles_offline)
logger.info(f"{f.name} saved.")
plotting.make_video(args, "batched_offline_samples", max_frame=max_layer)
del particles_batched_sh, particles_offline_sh, data_train_sh
data_train = jnp.array(data_train)
# save nearest neighbors of generated particles
all_find_neighbors = jax.vmap(utils.find_neighbors, in_axes=(1, None))
particles_batched_with_neighbors = jnp.reshape(all_find_neighbors(particles_batched[:dim, :args.n_viz], data_train[:dim]), (-1, dim))
particles_offline_with_neighbors = jnp.reshape(all_find_neighbors(particles_offline[:dim, :args.n_viz], data_train[:dim]), (-1, dim))
plotting.save_image(args, 0, particles_batched_with_neighbors, prefix="nn_batched", nrow=11)
plotting.save_image(args, 0, particles_offline_with_neighbors, prefix="nn_offline", nrow=11)
| 9,887 | 47.470588 | 287 | py |
Conditionial-SWF | Conditionial-SWF-main/plotting.py | import os
import imageio
import numpy as np
import torch
import torchvision
def save_image(args, i, data, prefix="", nrow=None):
    """Save a grid of samples to `<dirname>/images/<prefix>_samples_<i>.png`.

    `data` holds flattened images in [-1, 1); it is rescaled to [0, 1] and
    reshaped according to the dataset's image geometry. When `nrow` is None
    the grid is made square.

    Raises NotImplementedError for an unknown `args.dataset` (previously the
    independent `if` chain left `data_shape` unbound and died with NameError).
    """
    data = (np.array(data) + 1.0) / 2.0
    if args.dataset in ["mnist", "fashion"]:
        data_shape = (1, 28, 28)
    elif args.dataset == "cifar10":
        data_shape = (3, 32, 32)
    elif args.dataset == "celeba":
        data_shape = (3, 64, 64)
    else:
        # Consistent with dataset.get_dataset's error style.
        raise NotImplementedError(f"Dataset {args.dataset} unknown.")
    nrow = int(np.sqrt(data.shape[0])) if nrow is None else nrow
    torchvision.utils.save_image(torch.from_numpy(data.reshape(-1, *data_shape)), os.path.join(args.dirname, "images", prefix + f"_samples_{i:04d}.png"), nrow=nrow)
def make_video(args, prefix="", fps=24, max_frame=100000):
    """Stitch per-iteration PNGs into `<dirname>/videos/<prefix>.mp4`.

    Frame files that were never written (iterations without a snapshot) are
    silently skipped.
    """
    fileList = [os.path.join(args.dirname, "images", f"{prefix}_{i:04d}.png") for i in range(max_frame + 1)]
    writer = imageio.get_writer(os.path.join(args.dirname, "videos", f"{prefix}.mp4"), fps=fps)
    for im in fileList:
        if os.path.exists(im):
            writer.append_data(imageio.v2.imread(im))
    writer.close()
| 965 | 33.5 | 162 | py |
Conditionial-SWF | Conditionial-SWF-main/utils.py | import errno
import logging
import os
import random
import time
import coloredlogs
import jax
import jax.numpy as jnp
import numpy as np
# Default values for every command-line option. setup_parser turns each entry
# into a `--<key>` flag whose type is inferred from the default value
# (None -> str, bool -> str2bool); see add_dict_to_argparser below.
param_dict = dict(
    seed=0,
    hdim=10000,
    hdim_per_conv=10,
    layer_steps=200,
    step_size=1.0,
    n_batched_particles=250000,
    n_offline_particles=4000,
    forward="sorting",
    inverse="sorting",
    n_bins_particles=200,
    n_bins_data=200,
    downsample="lanczos5",
    dequantize=True,
    pyramidal=True,
    basedir="output",
    expname="experiment",
    dataset="mnist",
    n_viz=400,
    viz_every=100,
    restore_path=None,
    cond=False,
    cond_type="bottom",
    amplifier=1.0,
    init_std=0.1,
    baseline=False,
)
def str2bool(v):
    """Parse a boolean-ish command-line value.

    Booleans pass through unchanged; recognized truthy/falsy strings
    (case-insensitive) map to True/False; anything else raises ValueError.
    Based on https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError("Boolean value expected.")
def add_dict_to_argparser(parser, default_dict):
    """Register one `--<key>` option per entry of `default_dict`.

    The option type is inferred from the default value: None defaults parse
    as strings, bool defaults go through str2bool, everything else uses the
    default's own type.
    """
    for name, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            arg_type = str2bool
        else:
            arg_type = type(default)
        parser.add_argument(f"--{name}", default=default, type=arg_type)
def setup_parser(parser):
    # Register every entry of the module-level `param_dict` as a CLI option.
    add_dict_to_argparser(parser, default_dict=param_dict)
def setup_seed(seed):
    """Seed Python hash randomization, NumPy's global RNG, and the stdlib RNG."""
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
def setup_logging(args):
    """Create a colored console + file logger and a unique run directory.

    The directory name encodes the run configuration
    (dataset / conditioning / transform / hyper-parameters / seed) under
    `args.basedir`, with `code`, `images`, `videos`, and `particles`
    subdirectories. Sets `args.dirname` and returns (logger, dirname).
    """
    dirname_base = args.basedir if hasattr(args, "basedir") else "basedir"
    logger = logging.getLogger("COLOREDLOGS")
    FORMAT = "[%(asctime)s] %(message)s"
    DATEFMT = "%H:%M:%S"
    LEVEL_STYLES = dict(
        debug=dict(color="blue"),
        info=dict(color="green"),
        verbose=dict(),
        warning=dict(color="yellow"),
        error=dict(color="red"),
        critical=dict(color="magenta"),
    )
    coloredlogs.install(logger=logger, level="info", fmt=FORMAT, datefmt=DATEFMT, level_styles=LEVEL_STYLES)
    # Determine suffix
    # NOTE: in `suffix += X if cond else ""` the conditional spans the entire
    # right-hand side, so nothing is appended when the attribute is missing.
    suffix = ""
    suffix += args.dataset if hasattr(args, "dataset") else ""
    suffix += "-" if suffix else ""
    suffix += args.cond_type if hasattr(args, "cond") and hasattr(args, "cond_type") and args.cond else "uncond"
    suffix += "-" if suffix else ""
    suffix += args.forward if hasattr(args, "forward") else ""
    suffix += str(args.n_bins_particles) if hasattr(args, "forward") and hasattr(args, "n_bins_particles") and args.forward == "rqspline" else ""
    suffix += "-" if suffix else ""
    suffix += args.inverse if hasattr(args, "inverse") else ""
    suffix += str(args.n_bins_data) if hasattr(args, "inverse") and hasattr(args, "n_bins_data") and args.inverse == "rqspline" else ""
    suffix += "-" if suffix else ""
    suffix += args.downsample if hasattr(args, "downsample") else ""
    suffix += "-" if suffix else ""
    suffix += "{{" + (str(args.expname if args.expname else "debug") if hasattr(args, "expname") else "") + "}}"
    suffix += "-hd" + str(args.hdim) if hasattr(args, "hdim") else ""
    suffix += "-hdc" + str(args.hdim_per_conv) if hasattr(args, "hdim_per_conv") else ""
    suffix += "-lst" + str(args.layer_steps) if hasattr(args, "layer_steps") else ""
    suffix += "-lr" + str(args.step_size) if hasattr(args, "step_size") else ""
    suffix += "-std" + str(args.init_std) if hasattr(args, "init_std") else ""
    suffix += "-np" + str(args.n_batched_particles) if hasattr(args, "n_batched_particles") else ""
    suffix += "-xi" + str(args.amplifier) if hasattr(args, "amplifier") and hasattr(args, "cond") and args.cond else ""
    suffix += "-seed" + str(args.seed) if hasattr(args, "seed") else ""
    # Determine prefix
    prefix = time.strftime("%Y-%m-%d--%H-%M")
    prefix_counter = 0
    dirname = dirname_base + "/%s.%s" % (prefix, suffix)
    # Retry with a "+<n>" disambiguator when the directory already exists
    # (e.g. two runs started within the same minute); give up after 10 tries.
    while True:
        try:
            os.makedirs(dirname)
            os.makedirs(os.path.join(dirname, "code"))
            os.makedirs(os.path.join(dirname, "images"))
            os.makedirs(os.path.join(dirname, "videos"))
            os.makedirs(os.path.join(dirname, "particles"))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise e
            prefix_counter += 1
            dirname = dirname_base + "/%s+%d.%s" % (prefix, prefix_counter, suffix)
            if prefix_counter >= 10:
                exit()
        else:
            break
    # Mirror all log output into <dirname>/logfile.txt.
    formatter = logging.Formatter(FORMAT, DATEFMT)
    logger_fname = os.path.join(dirname, "logfile.txt")
    fh = logging.FileHandler(logger_fname)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # logger.propagate = False
    args.dirname = dirname
    return logger, dirname
def find_neighbors(x, data):
    """Stack the query vector `x` on top of its 10 nearest dataset columns.

    `data` has one sample per column; the result is an (11, dim) array whose
    first row is `x` and whose remaining rows are the nearest neighbors in
    order of increasing squared distance.
    """
    # ||x - d||^2 expanded as ||x||^2 + ||d||^2 - 2 x.d (constant ||x||^2 kept
    # so the values equal true squared distances, though only order matters).
    cross = jnp.matmul(x, data)
    sq_dists = jnp.sum(jnp.square(x)) + jnp.sum(jnp.square(data), axis=0) - 2 * cross
    _, nearest = jax.lax.top_k(-sq_dists, 10)
    return jnp.vstack([x, data[:, nearest].T])
| 4,759 | 30.111111 | 143 | py |
Conditionial-SWF | Conditionial-SWF-main/dataset.py | import numpy as np
import torch
import torchvision
def _load_mnist_like(dataset_cls):
    """Shared loader for 28x28 grayscale torchvision datasets (MNIST-style).

    Returns (train_x, test_x, train_y, test_y): images flattened to
    (n_samples, 784) float arrays scaled to [-1, 1); labels as one-hot
    (n_samples, 10) float arrays.
    """
    ds = dataset_cls(root="./data", train=True, download=True)
    dst = dataset_cls(root="./data", train=False, download=True)
    mx = ds.data.float()
    mxt = dst.data.float()
    my = torch.nn.functional.one_hot(ds.targets, num_classes=10).float().numpy()
    myt = torch.nn.functional.one_hot(dst.targets, num_classes=10).float().numpy()
    # Scale pixel values from [0, 256) to [-1, 1) and flatten each image.
    mx = mx / 256.0
    mxt = mxt / 256.0
    mx = mx.flatten(1).numpy() * 2.0 - 1.0
    mxt = mxt.flatten(1).numpy() * 2.0 - 1.0
    return mx, mxt, my, myt
def mnist():
    """Load MNIST (was duplicated with fashionmnist(); now shares a helper)."""
    return _load_mnist_like(torchvision.datasets.MNIST)
def fashionmnist():
    """Load FashionMNIST; same preprocessing as mnist()."""
    return _load_mnist_like(torchvision.datasets.FashionMNIST)
def cifar10(flip=True):
    """Load CIFAR-10, optionally doubling the training set with mirrored copies.

    Returns (train_x, test_x, train_y, test_y): images flattened to
    (n_samples, 3072) float arrays in [-1, 1); labels as one-hot arrays.
    """
    ds = torchvision.datasets.CIFAR10(root="./data", train=True, download=True)
    dst = torchvision.datasets.CIFAR10(root="./data", train=False, download=True)
    # HWC uint8 -> CHW float.
    mx = np.moveaxis(ds.data.astype(np.float32), 3, 1)
    mxt = np.moveaxis(dst.data.astype(np.float32), 3, 1)
    eye = np.eye(10)
    my = eye[ds.targets]
    myt = eye[dst.targets]
    if flip:
        # Append a horizontally mirrored copy of every training image
        # (reverse the width axis); labels are duplicated to match.
        mx_flip = mx[:, :, :, ::-1]
        mx = np.concatenate([mx, mx_flip], axis=0)
        my = np.concatenate([my, my], axis=0)
    # Scale pixel values from [0, 256) to [-1, 1) and flatten each image.
    mx = mx / 256.0
    mxt = mxt / 256.0
    mx = mx.reshape(mx.shape[0], -1) * 2.0 - 1.0
    mxt = mxt.reshape(mxt.shape[0], -1) * 2.0 - 1.0
    return mx, mxt, my, myt
def get_dataset(name):
    """Load dataset `name` and return (data_train, data_test, label_train,
    label_test, data_shape).

    Data arrays are returned transposed to (dim, n_samples) with pixel values
    in [-1, 1); both splits are shuffled and marked read-only.
    Raises NotImplementedError for an unknown name.
    """
    if name == "mnist":
        mx, mxt, my, myt = mnist()
        data_shape = (1, 28, 28)
    elif name == "fashion":
        mx, mxt, my, myt = fashionmnist()
        data_shape = (1, 28, 28)
    elif name == "cifar10":
        mx, mxt, my, myt = cifar10()
        data_shape = (3, 32, 32)
    elif name == "celeba":
        # Preprocessed arrays in [0, 1]; rescaled to [-1, 1) like the others.
        mx = np.load("./data/celeba_train.npy") * 255.0 / 256.0 * 2.0 - 1.0
        mxt = np.load("./data/celeba_eval.npy") * 255.0 / 256.0 * 2.0 - 1.0
        data_shape = (3, 64, 64)
        # CelebA has no class labels here; use dummy all-zero labels.
        my = np.zeros((mx.shape[0], 1))
        myt = np.zeros((mxt.shape[0], 1))
    else:
        raise NotImplementedError(f"Dataset {name} unknown.")
    perm = np.random.permutation(mx.shape[0])
    mx = mx[perm]
    my = my[perm]
    permt = np.random.permutation(mxt.shape[0])
    mxt = mxt[permt]
    myt = myt[permt]
    assert mx.shape[0] == my.shape[0]
    assert mxt.shape[0] == myt.shape[0]
    assert mx.shape[1] == mxt.shape[1]
    assert my.shape[1] == myt.shape[1]
    # Guard against accidental in-place modification downstream.
    mx.flags.writeable = False
    my.flags.writeable = False
    mxt.flags.writeable = False
    myt.flags.writeable = False
    return mx.T, mxt.T, my.T, myt.T, data_shape
| 2,912 | 31.730337 | 84 | py |
Conditionial-SWF | Conditionial-SWF-main/slicers.py | import jax
import jax.numpy as jnp
import numpy as np
def uniform(key, dim, hdim, **kwargs):
    """Draw `hdim` uniformly-random unit-norm projection directions in R^dim."""
    directions = jax.random.normal(key, shape=(hdim, dim))
    norms = jnp.linalg.norm(directions, axis=1, keepdims=True)
    return directions / norms
def conv(key, input_shape, hdim, n_filters, kernel_sizes, strides=1, paddings="SAME", dilations=1, normalize=True, **kwargs):
    """Random locally-connected projection directions built from conv layers.

    A stack of random convolutions is defined, and the equivalent dense
    projection rows are recovered by pulling one-hot output vectors back
    through the stack with vector-Jacobian products. When the conv output has
    more entries than `hdim`, a random subset of output positions is kept.
    Returns an array of shape (hdim, prod(input_shape)); rows are
    unit-normalized unless `normalize` is False.
    """
    # Broadcast scalar hyper-parameters to per-conv-layer tuples.
    kernel_sizes = kernel_sizes if isinstance(kernel_sizes, (list, tuple)) else (kernel_sizes,)
    n_filters = n_filters if isinstance(n_filters, (list, tuple)) else (n_filters,) * len(kernel_sizes)
    strides = strides if isinstance(strides, (list, tuple)) else (strides,) * len(kernel_sizes)
    paddings = paddings if isinstance(paddings, (list, tuple)) else (paddings,) * len(kernel_sizes)
    dilations = dilations if isinstance(dilations, (list, tuple)) else (dilations,) * len(kernel_sizes)
    assert len(n_filters) == len(kernel_sizes) == len(strides) == len(paddings) == len(dilations)
    n_convs = len(n_filters)
    # Prepend the input channel count so kernels chain correctly.
    n_filters = (input_shape[0],) + n_filters
    kernels = []
    for i in range(n_convs):
        key, subkey = jax.random.split(key)
        kernels.append(jax.random.normal(subkey, shape=(n_filters[i + 1], n_filters[i], kernel_sizes[i], kernel_sizes[i])) * 0.1)
        # kernels.append(jax.random.laplace(subkey, shape=(n_filters[i + 1], n_filters[i], kernel_sizes[i], kernel_sizes[i])) * 0.1)
    # we obtain the equivalent projections through vjp through the forward mapping
    def f(x):
        for i in range(n_convs):
            stride = (strides[i],) * 2
            padding = paddings[i] if isinstance(paddings[i], str) else (paddings[i],) * 2
            dilation = (dilations[i],) * 2
            x = jax.lax.conv_general_dilated(x, kernels[i], window_strides=stride, padding=padding, rhs_dilation=dilation)
        return x
    x_dummy = jnp.zeros((1, *input_shape))
    f_value, f_vjp = jax.vjp(f, x_dummy)
    outdim = np.prod(f_value.shape)
    hdim = outdim if hdim is None else hdim
    assert outdim >= hdim
    if outdim > hdim:
        # Keep a random subset of hdim output positions.
        key, subkey = jax.random.split(key)
        perm = jax.random.permutation(subkey, outdim)
        I = jax.nn.one_hot(perm[:hdim], outdim)
    else:
        I = jnp.eye(outdim)
    def wi(v):
        # One VJP per one-hot output -> one row of the dense projection.
        return f_vjp(v.reshape(f_value.shape))[0]
    w = jax.vmap(wi, in_axes=0)(I)
    w = jnp.reshape(w, (w.shape[0], np.prod(input_shape)))
    if normalize:
        w_norm = jnp.linalg.norm(w, axis=1, keepdims=True)
        w = w / w_norm
    return w
# a wrapper function to obtain the upsampled projection from lower resolutions
def downsample_slicer(key, slice_fn, input_shape, down_size, **kwargs):
    """Wrap `slice_fn`: draw projections on a low-resolution grid, then upsample.

    The wrapped slicer is invoked on a ``(channels, *down_size)`` input shape;
    its directions are resized back to ``input_shape`` and L2-normalized, so
    the returned rows live in the full-resolution space.
    """
    low_shape = (input_shape[0], *down_size)
    # Override the spatial arguments seen by the wrapped slicer.
    kwargs["dim"] = np.prod(low_shape)
    kwargs["input_shape"] = low_shape
    low_w = slice_fn(key, **kwargs)
    low_w = jnp.reshape(low_w, (low_w.shape[0], *low_shape))
    resize_method = kwargs.get("method", "lanczos3")
    full_w = jax.image.resize(low_w, (low_w.shape[0], *input_shape), method=resize_method)
    full_w = jnp.reshape(full_w, (full_w.shape[0], np.prod(input_shape)))
    row_norms = jnp.linalg.norm(full_w, axis=1, keepdims=True)
    return full_w / row_norms
| 3,025 | 37.303797 | 128 | py |
Conditionial-SWF | Conditionial-SWF-main/layers.py | import functools
import jax
import jax.numpy as jnp
import jax.scipy
def sorting_forward(xs, x):
    """Empirical CDF of the sorted 1-D samples `xs`, evaluated at `x`.

    Linearly interpolates between neighbouring order statistics; values
    outside the sample range are clamped to the first/last bin.
    """
    n = xs.shape[0]
    pos = jnp.searchsorted(xs, x)
    lo = jnp.clip(pos - 1, 0, n - 1)
    hi = jnp.clip(pos, 0, n - 1)
    gap = xs[hi] - xs[lo]
    # nan_to_num guards the 0/0 case when both neighbours coincide.
    frac = jnp.clip(jnp.nan_to_num((x - xs[lo]) / gap), 0.0, 1.0)
    return (lo + frac + 0.5) / n
def sorting_inverse(ys, cdf):
    """Inverse empirical CDF: map quantile levels `cdf` onto sorted samples `ys`.

    Linearly interpolates between the two order statistics bracketing `cdf`.
    """
    n = ys.shape[0]
    lo = jnp.clip(jnp.int32(jnp.floor(cdf * n)), 0, n - 1)
    hi = jnp.clip(lo + 1, 0, n - 1)
    return ys[lo] + (cdf - lo / n) * (ys[hi] - ys[lo])
def rq_spline_compute_shared(bin_widths, bin_heights, knot_slopes, x_or_y, range_min=0.0, is_x=True):
    """Shared knot bookkeeping for the rational-quadratic spline passes.

    Builds the knot grid from cumulative bin sizes (anchored at `range_min`,
    boundary slopes fixed to 1), locates the bin containing each query, and
    returns per-query bin quantities used by both the forward and inverse maps.
    """
    assert bin_widths.ndim == bin_heights.ndim == knot_slopes.ndim == 1
    knots_x = jnp.concatenate([jnp.full((1,), range_min), jnp.cumsum(bin_widths, axis=-1) + range_min], axis=-1)
    knots_y = jnp.concatenate([jnp.full((1,), range_min), jnp.cumsum(bin_heights, axis=-1) + range_min], axis=-1)
    slopes = jnp.concatenate([jnp.full((1,), 1.0), knot_slopes, jnp.full((1,), 1.0)], axis=-1)
    grid = knots_x if is_x else knots_y
    out_of_bounds = (x_or_y <= grid[0]) | (x_or_y >= grid[-1])
    # Out-of-range queries are parked on the first knot; callers restore them.
    query = jnp.where(out_of_bounds, grid[0], x_or_y)
    k = jnp.clip(jnp.searchsorted(grid, query) - 1, 0, grid.shape[0] - 2)
    x_k = knots_x[k]
    y_k = knots_y[k]
    d_k = slopes[k]
    d_kp1 = slopes[k + 1]
    h_k = knots_y[k + 1] - y_k
    w_k = knots_x[k + 1] - x_k
    s_k = h_k / w_k
    return out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k
def rq_spline_forward(bin_widths, bin_heights, knot_slopes, x, range_min=0.0):
    """Evaluate the rational-quadratic spline at `x` (identity outside the knots)."""
    out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k = rq_spline_compute_shared(
        bin_widths, bin_heights, knot_slopes, x, range_min=range_min, is_x=True)
    t = (x - x_k) / w_k
    numer = h_k * (s_k * t**2 + d_k * t * (1 - t))
    denom = s_k + (d_kp1 + d_k - 2 * s_k) * t * (1 - t)
    return jnp.where(out_of_bounds, x, y_k + numer / denom)
def rq_spline_inverse(bin_widths, bin_heights, knot_slopes, y, range_min=0.0):
    """Invert the rational-quadratic spline at `y` (identity outside the knots)."""
    out_of_bounds, x_k, y_k, d_k, d_kp1, h_k, w_k, s_k = rq_spline_compute_shared(
        bin_widths, bin_heights, knot_slopes, y, range_min=range_min, is_x=False)
    rely = jnp.where(out_of_bounds, 0.0, y - y_k)
    shared_term = rely * (d_kp1 + d_k - 2 * s_k)
    # Coefficients of the quadratic in the bin-relative coordinate.
    qa = h_k * (s_k - d_k) + shared_term
    qb = h_k * d_k - shared_term
    qc = -s_k * rely
    # This root form behaves better numerically for small 4*qa*qc.
    relx = jnp.where((rely == 0.0), 0.0, (2 * qc) / (-qb - jnp.sqrt(qb**2 - 4 * qa * qc)))
    return jnp.where(out_of_bounds, y, relx * w_k + x_k)
def layer(wkey, dkey, data_train, x_batched, x_offline, slicer_dict, dim, hdim, mask=None, step_size=1.0, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, dequantize=True, multi_devices=True, clip=None, fix_slopes=False):
    """One sliced-Wasserstein-flow step.

    Projects particles and data onto `hdim` random directions, moves each
    projected particle along the 1-D optimal-transport map from the particle
    distribution to the data distribution, and pulls the movement back to the
    ambient space via the transposed projection matrix.

    Args:
        wkey, dkey: PRNG keys for the slicers and the dequantization noise.
        data_train: target samples, columns are samples (shape
            (dim [+ n_labels], n_data); established by `jnp.matmul(w, data_train)`).
        x_batched, x_offline: particle batches with the same row layout.
        slicer_dict: {slicer_fn: count}; each slicer maps a PRNG key to a
            block of projection directions reshaped to (-1, dim).
        dim, hdim: data dimensionality and number of slicing directions.
        mask: optional elementwise multiplier on the update (e.g. inpainting).
        step_size: scale of the transport step.
        forward / inverse: "rqspline" or "sorting" — representation of the
            particle CDF and the data inverse CDF respectively.
        n_bins_particles, n_bins_data: histogram/spline resolution per side.
        dequantize: add U[0, 1/128) noise to the data before projecting.
        multi_devices: synchronize statistics across the "device" pmap axis
            (the function must then run inside jax.pmap with that axis name).
        clip: optional symmetric clipping range applied to updated particles.
        fix_slopes: replace estimated knot slopes with ones (linear splines).

    Returns:
        (x_batched, x_offline, ws_dist_batched, ws_dist_offline,
        x_batched_to_save) where the ws_dist_* values are mean absolute
        projected movements and x_batched_to_save is a small slice of the
        updated batch kept for storage.
    """
    assert isinstance(slicer_dict, dict)
    # Build the (hdim, dim) projection matrix from the requested slicers.
    ws = []
    for slicer, num in slicer_dict.items():
        wkey, subkey = jax.random.split(wkey)
        skeys = jax.random.split(subkey, num)
        wi = jax.vmap(slicer)(skeys)
        ws.append(jnp.reshape(wi, (-1, dim)))
        print(f"Slicer {slicer}: hdim = {ws[-1].shape[0]}")
    w = jnp.vstack(ws)
    print(f"Image slicer shape = {w.shape}")
    if w.shape[0] > hdim:
        # More directions than requested: keep a random subset of hdim rows.
        wkey, subkey = jax.random.split(wkey)
        w = jax.random.choice(subkey, w, (hdim,), replace=False)
        print(f"[After subsampling] Slicer shape = {w.shape}")
    assert w.shape[0] == hdim
    # generate projections for labels: rows of data_train beyond `dim` are
    # treated as label coordinates with their own Laplace-distributed directions
    if data_train.shape[0] > dim:
        n_labels = data_train.shape[0] - dim
        wkey, subkey = jax.random.split(wkey)
        w_labels = jax.random.laplace(subkey, shape=(hdim, n_labels))
        w_labels_norm = jnp.linalg.norm(w_labels, axis=1, keepdims=True)
        w_labels = w_labels / w_labels_norm
        print(f"Label slicer shape = {w_labels.shape}")
        # Weight the image/label sub-blocks by the fraction of coordinates each covers.
        w = jnp.concatenate([w * jnp.sqrt(dim / data_train.shape[0]), w_labels * jnp.sqrt(n_labels / data_train.shape[0])], axis=1)
        print(f"Final slicer shape = {w.shape}")
    # compute projection
    x_proj = jnp.matmul(w, x_batched)
    if dequantize:
        data_train = data_train + jax.random.uniform(dkey, data_train.shape) / 128.0
    data_proj = jnp.matmul(w, data_train)
    # prepare forward and inverse functions
    if forward == "rqspline":
        # Per-direction particle histogram on projections rescaled to [0, 1];
        # interior bin edges sit at empirical quantiles of the projections.
        x_proj = jnp.sort(x_proj)
        x_min = x_proj[:, :1]
        x_max = x_proj[:, -1:]
        if multi_devices:
            x_min = jax.lax.pmin(x_min, axis_name="device")
            x_max = jax.lax.pmax(x_max, axis_name="device")
        x_proj = (x_proj - x_min) / (x_max - x_min)
        bin_edges_idx_x = jnp.int32(jnp.linspace(0.0, 1.0, num=n_bins_particles + 1)[1:-1] * x_proj.shape[-1])
        bin_edges_x = x_proj[:, bin_edges_idx_x]
        if multi_devices:
            bin_edges_x = jax.lax.pmean(bin_edges_x, axis_name="device")
        bin_edges_x = jnp.concatenate([jnp.full(bin_edges_x.shape[:-1] + (1,), 0.0), bin_edges_x, jnp.full(bin_edges_x.shape[:-1] + (1,), 1.0)], axis=-1)
        hist_x, _ = jax.vmap(functools.partial(jnp.histogram, range=(0.0, 1.0), density=False))(x_proj, bin_edges_x)
        hist_x = hist_x / x_proj.shape[1]
        if multi_devices:
            hist_x = jax.lax.pmean(hist_x, axis_name="device")
        bin_widths_x = bin_edges_x[:, 1:] - bin_edges_x[:, :-1]
        # Interior knot slopes from adjacent bin densities.
        knot_slopes_x = (hist_x[:, :-1] + hist_x[:, 1:]) / (bin_widths_x[:, :-1] + bin_widths_x[:, 1:])
        if fix_slopes:
            knot_slopes_x = jnp.ones_like(knot_slopes_x)
    if forward == "sorting":
        # Sorting-based CDF keeps every projected sample, gathered across devices.
        if multi_devices:
            x_proj = jax.lax.all_gather(x_proj, axis_name="device", axis=1)
            x_proj = jnp.reshape(x_proj, (hdim, -1))
        x_proj = jnp.sort(x_proj)
    if inverse == "rqspline":
        # Same histogram construction, applied to the data projections.
        data_proj = jnp.sort(data_proj)
        data_min = data_proj[:, :1]
        data_max = data_proj[:, -1:]
        if multi_devices:
            data_min = jax.lax.pmin(data_min, axis_name="device")
            data_max = jax.lax.pmax(data_max, axis_name="device")
        data_proj = (data_proj - data_min) / (data_max - data_min)
        bin_edges_idx_data = jnp.int32(jnp.linspace(0.0, 1.0, num=n_bins_data + 1)[1:-1] * data_proj.shape[-1])
        bin_edges_data = data_proj[:, bin_edges_idx_data]
        if multi_devices:
            bin_edges_data = jax.lax.pmean(bin_edges_data, axis_name="device")
        bin_edges_data = jnp.concatenate([jnp.full(bin_edges_data.shape[:-1] + (1,), 0.0), bin_edges_data, jnp.full(bin_edges_data.shape[:-1] + (1,), 1.0)], axis=-1)
        hist_data, _ = jax.vmap(functools.partial(jnp.histogram, range=(0.0, 1.0), density=False))(data_proj, bin_edges_data)
        hist_data = hist_data / data_proj.shape[1]
        if multi_devices:
            hist_data = jax.lax.pmean(hist_data, axis_name="device")
        bin_widths_data = bin_edges_data[:, 1:] - bin_edges_data[:, :-1]
        knot_slopes_data = (hist_data[:, :-1] + hist_data[:, 1:]) / (bin_widths_data[:, :-1] + bin_widths_data[:, 1:])
        if fix_slopes:
            knot_slopes_data = jnp.ones_like(knot_slopes_data)
    if inverse == "sorting":
        if multi_devices:
            data_proj = jax.lax.all_gather(data_proj, axis_name="device", axis=1)
            data_proj = jnp.reshape(data_proj, (hdim, -1))
        data_proj = jnp.sort(data_proj)
    # prepare unidimensional optimal transport functions
    # (composition: inverse data CDF applied to the forward particle CDF)
    if forward == "sorting" and inverse == "sorting":
        def unidim_transport(xs, ys, x):
            return sorting_inverse(ys, sorting_forward(xs, x))
    elif forward == "sorting" and inverse == "rqspline":
        def unidim_transport(xs, bin_widths_data, bin_heights_data, knot_slopes_data, x):
            return rq_spline_inverse(bin_widths_data, bin_heights_data, knot_slopes_data, sorting_forward(xs, x))
    elif forward == "rqspline" and inverse == "sorting":
        def unidim_transport(bin_widths_x, bin_heights_x, knot_slopes_x, ys, x):
            return sorting_inverse(ys, rq_spline_forward(bin_widths_x, bin_heights_x, knot_slopes_x, x))
    elif forward == "rqspline" and inverse == "rqspline":
        def unidim_transport(bin_widths_x, bin_heights_x, knot_slopes_x, bin_widths_data, bin_heights_data, knot_slopes_data, x):
            return rq_spline_inverse(bin_widths_data, bin_heights_data, knot_slopes_data, rq_spline_forward(bin_widths_x, bin_heights_x, knot_slopes_x, x))
    else:
        raise NotImplementedError(f"forward method {forward} or inverse method {inverse} unknown.")
    print(f"Forward method: {forward}, Inverse method: {inverse}")
    def transport(x):
        # Project, move along the per-direction 1-D OT map, and pull back.
        y = jnp.matmul(w, x)
        if forward == "sorting" and inverse == "sorting":
            a = jax.vmap(unidim_transport)(x_proj, data_proj, y)
        elif forward == "sorting" and inverse == "rqspline":
            a_normalized = jax.vmap(unidim_transport)(x_proj, bin_widths_data, hist_data, knot_slopes_data, y)
            a = a_normalized * (data_max - data_min) + data_min
        elif forward == "rqspline" and inverse == "sorting":
            y_normalized = (y - x_min) / (x_max - x_min)
            a = jax.vmap(unidim_transport)(bin_widths_x, hist_x, knot_slopes_x, data_proj, y_normalized)
        elif forward == "rqspline" and inverse == "rqspline":
            y_normalized = (y - x_min) / (x_max - x_min)
            a_normalized = jax.vmap(unidim_transport)(bin_widths_x, hist_x, knot_slopes_x, bin_widths_data, hist_data, knot_slopes_data, y_normalized)
            a = a_normalized * (data_max - data_min) + data_min
        movement = a - y
        # Pull the projected movement back via w^T; the dim/hdim factor
        # compensates for slicing only hdim of dim directions.
        delta_x = jnp.matmul(w.T, movement) * (step_size * dim / hdim)
        if mask is not None:
            z = x + delta_x * mask
        else:
            z = x + delta_x
        if clip is not None:
            print(f"Enabled data clipping = {clip}")
            z = jnp.clip(z, -clip, clip)
        ws_dist = jnp.mean(jnp.abs(movement))
        return z, ws_dist
    x_batched, ws_dist_batched = transport(x_batched)
    x_offline, ws_dist_offline = transport(x_offline)
    # a workaround to prevent copying all particles to device 0 for storage
    n_save_device = 50000 // jax.device_count()
    x_batched_to_save = x_batched[:, :n_save_device]
    return x_batched, x_offline, ws_dist_batched, ws_dist_offline, x_batched_to_save
| 10,259 | 43.034335 | 251 | py |
Conditionial-SWF | Conditionial-SWF-main/models.py | import functools
import jax
import numpy as np
import layers
import slicers
# Default number of convolutional filters shared by the model builders below
# (shadowed by the same-named `nfs` parameter inside the builder functions).
nfs = 20
def downsample_kxk_dense_layer(layer, data_shape, k, hdim, step_size=1.0, method="lanczos3"):
    """Build a pmap-ed SWF layer whose dense slicing directions live on a
    k x k downsampled grid and are upsampled back to `data_shape`."""
    small_grid = (k, k)
    # Fraction of the full spatial resolution covered by the small grid.
    resolution_ratio = np.prod(small_grid) / np.prod(data_shape[1:])
    grid_slicer = functools.partial(
        slicers.downsample_slicer,
        slice_fn=slicers.uniform,
        input_shape=data_shape,
        down_size=small_grid,
        hdim=hdim,
        method=method,
    )
    # Clamp the step size to hdim / (k*k*channels), then rescale by the ratio.
    effective_step = np.minimum(step_size, hdim / np.prod(small_grid) / data_shape[0]) * resolution_ratio
    return jax.pmap(
        functools.partial(layer, slicer_dict={grid_slicer: 1}, step_size=effective_step),
        axis_name="device",
        in_axes=(None, 0, 0, 0, 0),
    )
def downsample_kxk_conv_layer(layer, data_shape, k, hdim, hdim_per_conv, n_filters, kernel_sizes, strides=1, paddings="SAME", dilations=1, step_size=1.0, method="lanczos3"):
    """Build a pmap-ed SWF layer whose convolutional slicing directions are
    drawn on a k x k grid and upsampled back to `data_shape`."""
    small_grid = (k, k)
    resolution_ratio = np.prod(small_grid) / np.prod(data_shape[1:])
    conv_slicer = functools.partial(
        slicers.downsample_slicer,
        slice_fn=slicers.conv,
        input_shape=data_shape,
        down_size=small_grid,
        hdim=hdim_per_conv,
        n_filters=n_filters,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        dilations=dilations,
        method=method,
    )
    # Clamp the step size to hdim / (k*k*channels), then rescale by the ratio.
    effective_step = np.minimum(step_size, hdim / np.prod(small_grid) / data_shape[0]) * resolution_ratio
    # One conv slicer is instantiated per hdim_per_conv directions.
    return jax.pmap(
        functools.partial(layer, slicer_dict={conv_slicer: hdim // hdim_per_conv}, step_size=effective_step),
        axis_name="device",
        in_axes=(None, 0, 0, 0, 0),
    )
def low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method="lanczos3", rezs=(1, 2, 3, 4, 5, 6), steps=(10, 100, 200, 300, 300, 300)):
    """Build one dense downsampled layer per resolution in `rezs`, with the
    matching iteration counts from `steps`."""
    assert len(rezs) == len(steps)
    transform_layers = [
        downsample_kxk_dense_layer(
            layer=layer,
            data_shape=data_shape,
            k=rez,
            hdim=hdim,
            step_size=step_size,
            method=downsample_method,
        )
        for rez in rezs
    ]
    transform_steps = list(steps)
    return transform_layers, transform_steps
def downsample_kxk_model(layer, data_shape, k, hdim, hdim_per_conv, step_size, nfs, kss, sts, pds, dls, steps, min_convs=1, downsample_method="lanczos3", init_dense=False):
    """Stack of downsampled conv layers at resolution k x k, optionally
    preceded by one dense layer. Scalar per-layer specs are broadcast to the
    (shrinking) number of convs in each sub-network."""
    assert len(nfs) == len(kss) == len(sts) == len(pds) == len(dls)
    assert len(steps) == len(nfs) + (1 if init_dense else 0)
    assert min_convs >= 1
    n_layers = len(nfs)

    def _broadcast(spec, width):
        # Repeat scalar specs across all convs of the sub-network.
        return spec if isinstance(spec, (list, tuple)) else (spec,) * width

    transform_layers, transform_steps = [], []
    if init_dense:
        transform_layers.append(downsample_kxk_dense_layer(layer=layer, data_shape=data_shape, k=k, hdim=hdim, step_size=step_size, method=downsample_method))
        transform_steps.append(steps[0])
        steps = steps[1:]
    for i in range(n_layers):
        # Later layers get progressively shallower conv stacks.
        width = n_layers - i + min_convs - 1
        transform_layers.append(downsample_kxk_conv_layer(
            layer=layer,
            data_shape=data_shape,
            k=k,
            hdim=hdim,
            hdim_per_conv=hdim_per_conv,
            n_filters=_broadcast(nfs[i], width),
            kernel_sizes=_broadcast(kss[i], width),
            strides=_broadcast(sts[i], width),
            paddings=_broadcast(pds[i], width),
            dilations=_broadcast(dls[i], width),
            step_size=step_size,
            method=downsample_method,
        ))
        transform_steps.append(steps[i])
    return transform_layers, transform_steps
def kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs, kss, sts, pds, dls, steps, min_convs=1, init_dense=False):
    """Full-resolution stack of conv layers (no downsampling), optionally
    preceded by one dense layer."""
    assert len(nfs) == len(kss) == len(sts) == len(pds) == len(dls)
    assert len(steps) == len(nfs) + (1 if init_dense else 0)
    assert min_convs >= 1
    n_layers = len(nfs)
    dim = np.prod(data_shape)
    effective_step = np.minimum(step_size, hdim / dim)

    def _broadcast(spec, width):
        # Repeat scalar specs across all convs of the sub-network.
        return spec if isinstance(spec, (list, tuple)) else (spec,) * width

    transform_layers, transform_steps = [], []
    if init_dense:
        dense_slicer = functools.partial(
            slicers.uniform,
            dim=dim,
            hdim=hdim,
        )
        transform_layers.append(jax.pmap(
            functools.partial(layer, slicer_dict={dense_slicer: 1}, step_size=effective_step),
            axis_name="device", in_axes=(None, 0, 0, 0, 0)))
        transform_steps.append(steps[0])
        steps = steps[1:]
    for i in range(n_layers):
        width = n_layers - i + min_convs - 1
        conv_slicer = functools.partial(
            slicers.conv,
            input_shape=data_shape,
            hdim=hdim_per_conv,
            n_filters=_broadcast(nfs[i], width),
            kernel_sizes=_broadcast(kss[i], width),
            strides=_broadcast(sts[i], width),
            paddings=_broadcast(pds[i], width),
            dilations=_broadcast(dls[i], width),
        )
        transform_layers.append(jax.pmap(
            functools.partial(layer, slicer_dict={conv_slicer: hdim // hdim_per_conv}, step_size=effective_step),
            axis_name="device", in_axes=(None, 0, 0, 0, 0)))
        transform_steps.append(steps[i])
    return transform_layers, transform_steps
def swf_model(data_shape, mask, hdim, step_size, layer_steps=200, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, dequantize=True, **kwargs):
    """Plain SWF baseline: a single dense-slicer layer run `layer_steps` times."""
    dim = np.prod(data_shape)
    effective_step = np.minimum(step_size, hdim / dim)
    base_layer = functools.partial(
        layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse,
        n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    dense_slicer = functools.partial(
        slicers.uniform,
        dim=dim,
        hdim=hdim,
    )
    dense_layer = jax.pmap(
        functools.partial(base_layer, slicer_dict={dense_slicer: 1}, step_size=effective_step),
        axis_name="device", in_axes=(None, 0, 0, 0, 0))
    return [dense_layer], [layer_steps]
def mnist_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=200, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Hand-tuned 28x28 (MNIST-style) SWF schedule.

    Dense layers at growing low resolutions, then dilated + plain conv stacks
    at 7x7, 11x11, 14x14, 21x21 and finally full 28x28 resolution.
    Returns (transform_layers, transform_steps): pmap-ed layer callables and
    how many iterations to run each.
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    transform_layers, transform_steps = [], []
    # Dense layers on 1x1 .. 6x6 grids (few steps at 1x1, layer_steps after).
    lowres_layers, lowres_steps = low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 7)), steps=[20] + [layer_steps] * 5)
    transform_layers.extend(lowres_layers)
    transform_steps.extend(lowres_steps)
    # 7x7 stage: dilated convs first, then plain convs with an initial dense layer.
    res_7x7_dl2_layers, res_7x7_dl2_steps = downsample_kxk_model(layer, data_shape, 7, hdim, hdim_per_conv, step_size, nfs=[nfs] * 2, kss=[3] * 2, sts=[1] * 2, pds=["SAME"] * 2, dls=[2] * 2, steps=[layer_steps] * 2, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_7x7_dl2_layers)
    transform_steps.extend(res_7x7_dl2_steps)
    res_7x7_layers, res_7x7_steps = downsample_kxk_model(layer, data_shape, 7, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[1] * 3, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=True)
    transform_layers.extend(res_7x7_layers)
    transform_steps.extend(res_7x7_steps)
    # 11x11 stage.
    res_11x11_dl2_layers, res_11x11_dl2_steps = downsample_kxk_model(layer, data_shape, 11, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_11x11_dl2_layers)
    transform_steps.extend(res_11x11_dl2_steps)
    res_11x11_layers, res_11x11_steps = downsample_kxk_model(layer, data_shape, 11, hdim, hdim_per_conv, step_size, nfs=[nfs] * 5, kss=[3] * 5, sts=[1] * 5, pds=["SAME"] * 5, dls=[1] * 5, steps=[layer_steps] * 5, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_11x11_layers)
    transform_steps.extend(res_11x11_steps)
    # 14x14 stage.
    res_14x14_dl2_layers, res_14x14_dl2_steps = downsample_kxk_model(layer, data_shape, 14, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_14x14_dl2_layers)
    transform_steps.extend(res_14x14_dl2_steps)
    res_14x14_layers, res_14x14_steps = downsample_kxk_model(layer, data_shape, 14, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_14x14_layers)
    transform_steps.extend(res_14x14_steps)
    # 21x21 stage.
    res_21x21_dl2_layers, res_21x21_dl2_steps = downsample_kxk_model(layer, data_shape, 21, hdim, hdim_per_conv, step_size, nfs=[nfs] * 4, kss=[3] * 4, sts=[1] * 4, pds=["SAME"] * 4, dls=[2] * 4, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_21x21_dl2_layers)
    transform_steps.extend(res_21x21_dl2_steps)
    res_21x21_layers, res_21x21_steps = downsample_kxk_model(layer, data_shape, 21, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_21x21_layers)
    transform_steps.extend(res_21x21_steps)
    # Full 28x28 resolution (no downsampling).
    res_28x28_layers, res_28x28_steps = kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, init_dense=False)
    transform_layers.extend(res_28x28_layers)
    transform_steps.extend(res_28x28_steps)
    return transform_layers, transform_steps
def cifar10_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=300, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Hand-tuned 32x32 (CIFAR-10-style) SWF schedule.

    Dense layers at growing low resolutions, then dilated + plain conv stacks
    at 8x8, 12x12, 16x16, 24x24 and finally full 32x32 resolution (the last
    two full-resolution layers run for 1000 steps).
    Returns (transform_layers, transform_steps).
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize)
    transform_layers, transform_steps = [], []
    # Dense layers on 1x1 .. 7x7 grids.
    lowres_layers, lowres_steps = low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 8)), steps=[20] + [layer_steps] * 6)
    transform_layers.extend(lowres_layers)
    transform_steps.extend(lowres_steps)
    # 8x8 stage.
    res_8x8_dl2_layers, res_8x8_dl2_steps = downsample_kxk_model(layer, data_shape, 8, hdim, hdim_per_conv, step_size, nfs=[nfs] * 2, kss=[3] * 2, sts=[1] * 2, pds=["SAME"] * 2, dls=[2] * 2, steps=[layer_steps] * 2, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_8x8_dl2_layers)
    transform_steps.extend(res_8x8_dl2_steps)
    res_8x8_layers, res_8x8_steps = downsample_kxk_model(layer, data_shape, 8, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[1] * 3, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=True)
    transform_layers.extend(res_8x8_layers)
    transform_steps.extend(res_8x8_steps)
    # 12x12 stage.
    res_12x12_dl2_layers, res_12x12_dl2_steps = downsample_kxk_model(layer, data_shape, 12, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_12x12_dl2_layers)
    transform_steps.extend(res_12x12_dl2_steps)
    res_12x12_layers, res_12x12_steps = downsample_kxk_model(layer, data_shape, 12, hdim, hdim_per_conv, step_size, nfs=[nfs] * 5, kss=[3] * 5, sts=[1] * 5, pds=["SAME"] * 5, dls=[1] * 5, steps=[layer_steps] * 5, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_12x12_layers)
    transform_steps.extend(res_12x12_steps)
    # 16x16 stage.
    res_16x16_dl2_layers, res_16x16_dl2_steps = downsample_kxk_model(layer, data_shape, 16, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_16x16_dl2_layers)
    transform_steps.extend(res_16x16_dl2_steps)
    res_16x16_layers, res_16x16_steps = downsample_kxk_model(layer, data_shape, 16, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_16x16_layers)
    transform_steps.extend(res_16x16_steps)
    # 24x24 stage.
    res_24x24_layers, res_24x24_steps = downsample_kxk_model(layer, data_shape, 24, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_24x24_layers)
    transform_steps.extend(res_24x24_steps)
    # Full 32x32 resolution; the final two layers run longer (1000 steps).
    res_32x32_layers, res_32x32_steps = kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 5 + [1000] * 2, min_convs=1, init_dense=False)
    transform_layers.extend(res_32x32_layers)
    transform_steps.extend(res_32x32_steps)
    return transform_layers, transform_steps
def celeba_model(data_shape, mask, hdim, hdim_per_conv, step_size, layer_steps=300, forward="rqspline", inverse="rqspline", n_bins_particles=200, n_bins_data=200, downsample_method="lanczos3", dequantize=True):
    """Hand-tuned 64x64 (CelebA-style) SWF schedule.

    Same structure as the CIFAR schedule with extra 32x32 and 64x64 stages;
    particle values are clipped to [-1, 1] inside every layer (clip=1.0).
    Returns (transform_layers, transform_steps).
    """
    dim = np.prod(data_shape)
    layer = functools.partial(layers.layer, dim=dim, hdim=hdim, mask=mask, forward=forward, inverse=inverse, n_bins_particles=n_bins_particles, n_bins_data=n_bins_data, dequantize=dequantize, clip=1.0)
    transform_layers, transform_steps = [], []
    # Dense layers on 1x1 .. 7x7 grids.
    lowres_layers, lowres_steps = low_rez_dense_model(layer, data_shape, hdim, step_size, downsample_method, rezs=list(range(1, 8)), steps=[50] + [layer_steps] * 6)
    transform_layers.extend(lowres_layers)
    transform_steps.extend(lowres_steps)
    # 8x8 stage.
    res_8x8_dl2_layers, res_8x8_dl2_steps = downsample_kxk_model(layer, data_shape, 8, hdim, hdim_per_conv, step_size, nfs=[nfs] * 2, kss=[3] * 2, sts=[1] * 2, pds=["SAME"] * 2, dls=[2] * 2, steps=[layer_steps] * 2, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_8x8_dl2_layers)
    transform_steps.extend(res_8x8_dl2_steps)
    res_8x8_layers, res_8x8_steps = downsample_kxk_model(layer, data_shape, 8, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[1] * 3, steps=[layer_steps] * 4, min_convs=1, downsample_method=downsample_method, init_dense=True)
    transform_layers.extend(res_8x8_layers)
    transform_steps.extend(res_8x8_steps)
    # 12x12 stage.
    res_12x12_dl2_layers, res_12x12_dl2_steps = downsample_kxk_model(layer, data_shape, 12, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_12x12_dl2_layers)
    transform_steps.extend(res_12x12_dl2_steps)
    res_12x12_layers, res_12x12_steps = downsample_kxk_model(layer, data_shape, 12, hdim, hdim_per_conv, step_size, nfs=[nfs] * 5, kss=[3] * 5, sts=[1] * 5, pds=["SAME"] * 5, dls=[1] * 5, steps=[layer_steps] * 5, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_12x12_layers)
    transform_steps.extend(res_12x12_steps)
    # 16x16 stage.
    res_16x16_dl2_layers, res_16x16_dl2_steps = downsample_kxk_model(layer, data_shape, 16, hdim, hdim_per_conv, step_size, nfs=[nfs] * 3, kss=[3] * 3, sts=[1] * 3, pds=["SAME"] * 3, dls=[2] * 3, steps=[layer_steps] * 3, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_16x16_dl2_layers)
    transform_steps.extend(res_16x16_dl2_steps)
    res_16x16_layers, res_16x16_steps = downsample_kxk_model(layer, data_shape, 16, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_16x16_layers)
    transform_steps.extend(res_16x16_steps)
    # 24x24 stage.
    res_24x24_layers, res_24x24_steps = downsample_kxk_model(layer, data_shape, 24, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_24x24_layers)
    transform_steps.extend(res_24x24_steps)
    # 32x32 stage (still downsampled relative to the 64x64 target).
    res_32x32_layers, res_32x32_steps = downsample_kxk_model(layer, data_shape, 32, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 7, min_convs=1, downsample_method=downsample_method, init_dense=False)
    transform_layers.extend(res_32x32_layers)
    transform_steps.extend(res_32x32_steps)
    # Full 64x64 resolution; the final two layers run longer (1000 steps).
    res_64x64_layers, res_64x64_steps = kxk_model(layer, data_shape, hdim, hdim_per_conv, step_size, nfs=[nfs] * 7, kss=[3] * 7, sts=[1] * 7, pds=["SAME"] * 7, dls=[1] * 7, steps=[layer_steps] * 5 + [1000] * 2, min_convs=1, init_dense=False)
    transform_layers.extend(res_64x64_layers)
    transform_steps.extend(res_64x64_steps)
    return transform_layers, transform_steps
| 18,274 | 60.949153 | 286 | py |
blockchain-explorer | blockchain-explorer-main/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: Apache-2.0
#
# Configuration file for the Sphinx documentation builder.
#
# This file contains only a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Project metadata rendered into the generated documentation.
project = u'Hyperledger Explorer'
copyright = u'Hyperledger Explorer Project source code is released under the Apache 2.0 license'
author = u'Hyperledger Explorer'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.todo',
# 'sphinx.ext.coverage',
# 'sphinx.ext.mathjax',
# 'sphinx.ext.ifconfig',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
#]
# Sphinx extensions enabled for this documentation build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.ifconfig',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.coverage',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# recommonmark is a python utility that allows markdown to be used within
# Sphinx projects.
# Installed version as per directive in docs/requirement.txt
from recommonmark.parser import CommonMarkParser
# NOTE(review): `source_parsers` is deprecated in newer Sphinx releases —
# confirm the pinned Sphinx version still supports it.
source_parsers = {
    '.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# NOTE(review): '.xxrst' looks like a placeholder pattern — confirm whether
# common excludes such as '_build' were intended here.
exclude_patterns = ['.xxrst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Read the Docs theme is the active choice; the alternatives below are kept
# for reference.
html_theme = 'sphinx_rtd_theme'
#html_theme = 'sphinxdoc'
# html_theme = 'traditional'
# html_theme = 'bizstyle'
# html_theme = 'haiku'
# html_theme = 'nature'
# html_theme = 'pyramid'
# html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'canonical_url': '',
    'analytics_id': 'UA-XXXXXXX-1',  # Provided by Google in your dashboard
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'both',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperledgerExplorerDocument'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HyperledgerExplorer.tex', u'Hyperledger Explorer Documentation',
u'Explorer', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hyperledgerexplorer', u'Hyperledger Explorer Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HyperledgerExplorer', u'Hyperledger Explorer Documentation',
author, 'HyperledgerExplorer', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Append rather than reassign: the original `extensions = [...]` here silently
# discarded the autodoc/napoleon/etc. extension list configured earlier.
extensions.append('sphinxcontrib.contentui')
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 7,264 | 28.653061 | 96 | py |
DLFuzz | DLFuzz-master/ImageNet/utils_tmp.py | # -*- coding: utf-8 -*-
import random
from collections import defaultdict
import numpy as np
from datetime import datetime
from keras import backend as K
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.models import Model
from keras.preprocessing import image
model_layer_weights_top_k = []
def preprocess_image(img_path):
    """Load an image from disk and turn it into a VGG-ready batch.

    Returns a (1, 224, 224, 3) float array, zero-centered by Keras'
    ImageNet ``preprocess_input`` (channels end up in BGR order).
    """
    img = image.load_img(img_path, target_size=(224, 224))
    input_img_data = image.img_to_array(img)
    # Add the batch dimension expected by model.predict.
    input_img_data = np.expand_dims(input_img_data, axis=0)
    input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)
    return input_img_data
def deprocess_image(x):
    """Invert VGG-style preprocessing into a displayable RGB uint8 image.

    Re-adds the ImageNet per-channel means (BGR order), flips the channel
    axis back to RGB, and quantizes to uint8. Mutates the input buffer via
    an in-place add on the reshaped view.
    """
    img = x.reshape((224, 224, 3))
    # Undo zero-centering; means match Keras' preprocess_input (BGR order).
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        img[:, :, channel] += mean
    # Reverse the channel axis: 'BGR' -> 'RGB'.
    rgb = img[:, :, ::-1]
    return np.clip(rgb, 0, 255).astype('uint8')
def decode_label(pred):
    """Return the human-readable top-1 ImageNet class name for a prediction batch."""
    return decode_predictions(pred)[0][0][1]
def normalize(x):
    """Scale a gradient tensor so its RMS magnitude is ~1 (stabilizes step size)."""
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Restrict gradients to a rectangular occlusion patch.

    Everything outside the patch anchored at `start_point` with size
    `rect_shape` is zeroed; the patch itself is copied through unchanged.
    """
    row0, col0 = start_point
    row1 = row0 + rect_shape[0]
    col1 = col0 + rect_shape[1]
    masked = np.zeros_like(gradients)
    masked[:, row0:row1, col0:col1] = gradients[:, row0:row1, col0:col1]
    return masked
def constraint_light(gradients):
    """Uniform "lighting" perturbation: the same value everywhere.

    The value is 1e4 times the mean gradient, so the whole image is
    brightened or darkened in the direction of the average gradient.
    """
    mean_term = np.mean(gradients) * 1e4
    return mean_term * np.ones_like(gradients)
def constraint_black(gradients, rect_shape=(10, 10)):
    """Random black-patch perturbation.

    Picks a random rect_shape patch position; if the mean gradient inside
    the patch is negative, returns -1s there (darkening that region) and
    zeros elsewhere, otherwise returns all zeros.
    """
    rows, cols = rect_shape
    # Same random-call order as the original: row offset first, then column.
    r0 = random.randint(0, gradients.shape[1] - rows)
    c0 = random.randint(0, gradients.shape[2] - cols)
    out = np.zeros_like(gradients)
    region = gradients[:, r0:r0 + rows, c0:c0 + cols]
    if np.mean(region) < 0:
        out[:, r0:r0 + rows, c0:c0 + cols] = -np.ones_like(region)
    return out
def init_coverage_tables(model1, model2, model3):
    # NOTE(review): this three-model variant is immediately shadowed by the
    # one-argument definition below, so it is unreachable dead code; kept
    # as-is to avoid interface changes.
    model_layer_dict1 = defaultdict(bool)
    model_layer_dict2 = defaultdict(bool)
    model_layer_dict3 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    init_dict(model2, model_layer_dict2)
    init_dict(model3, model_layer_dict3)
    return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_coverage_tables(model1):
    """Create a fresh {(layer_name, neuron_index): bool} coverage table for one model."""
    model_layer_dict1 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    return model_layer_dict1
def init_dict(model, model_layer_dict):
    """Register every neuron of every relevant layer as uncovered (False).

    Layers whose name contains 'flatten' or 'input' carry no activations
    of interest and are skipped. Keys are (layer_name, neuron_index).
    """
    relevant_layers = (layer for layer in model.layers
                       if 'flatten' not in layer.name and 'input' not in layer.name)
    for layer in relevant_layers:
        for neuron_idx in range(layer.output_shape[-1]):
            model_layer_dict[(layer.name, neuron_idx)] = False
def init_coverage_times(model):
    """Per-neuron counter table: how many inputs have activated each neuron."""
    model_layer_times = defaultdict(int)
    init_times(model,model_layer_times)
    return model_layer_times
def init_coverage_value(model):
    """Per-neuron table holding the most recent scaled activation value."""
    model_layer_value = defaultdict(float)
    init_times(model, model_layer_value)
    return model_layer_value
def init_times(model,model_layer_times):
    """Zero an entry for every neuron, skipping flatten/input layers."""
    for layer in model.layers:
        if 'flatten' in layer.name or 'input' in layer.name:
            continue
        for index in range(layer.output_shape[-1]):
            model_layer_times[(layer.name, index)] = 0
def neuron_to_cover(model_layer_dict):
    # NOTE(review): shadowed by the two-argument neuron_to_cover defined just
    # below, so this variant is never reachable. Also note that
    # random.choice(model_layer_dict.keys()) only works on Python 2.
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(model_layer_dict.keys())
    return layer_name, index
def neuron_to_cover(not_covered, model_layer_dict):
    """Pick the next neuron to target.

    Prefers a random entry from `not_covered` (removing it so it cannot be
    picked twice); when every neuron has been covered, falls back to a
    uniformly random neuron from the coverage table.

    Returns:
        (layer_name, index) tuple identifying the chosen neuron.
    """
    if not_covered:
        layer_name, index = random.choice(not_covered)
        not_covered.remove((layer_name, index))
    else:
        # list(...) keeps this working on Python 3, where dict.keys() is a
        # view object that random.choice cannot index.
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def random_strategy(model, model_layer_times, neuron_to_cover_num):
    """Fallback neuron-selection strategy.

    Picks `neuron_to_cover_num` neurons (preferring never-covered ones via
    neuron_to_cover) and returns one Keras mean-activation loss tensor per
    chosen neuron.
    """
    loss_neuron = []
    not_covered = [(layer_name, index)
                   for (layer_name, index), v in model_layer_times.items() if v == 0]
    # range instead of Python-2-only xrange: identical behavior, portable.
    for _ in range(neuron_to_cover_num):
        layer_name, index = neuron_to_cover(not_covered, model_layer_times)
        loss_neuron.append(K.mean(model.get_layer(layer_name).output[..., index]))
    return loss_neuron
def neuron_select_high_weight(model, layer_names, top_k):
    """Fill the global model_layer_weights_top_k cache with the top_k neurons
    whose mean (positive) incoming weights are largest, in descending order.
    """
    global model_layer_weights_top_k
    model_layer_weights_dict = {}
    for layer_name in layer_names:
        weights = model.get_layer(layer_name).get_weights()
        if len(weights) <= 0:
            continue
        w = np.asarray(weights[0]) # 0 is weights, 1 is biases
        w = w.reshape(w.shape)  # NOTE(review): no-op reshape, kept from the original
        for index in range(model.get_layer(layer_name).output_shape[-1]):
            index_w = np.mean(w[..., index])
            if index_w <= 0:
                # only neurons fed by positive mean weights are candidates
                continue
            model_layer_weights_dict[(layer_name,index)]=index_w
    # notice! sorted descending by mean weight
    model_layer_weights_list = sorted(model_layer_weights_dict.items(), key=lambda x: x[1], reverse=True)
    k = 0
    # top_k may be a float (0.1 * total); the `>=` comparison still bounds the loop
    for (layer_name, index),weight in model_layer_weights_list:
        if k >= top_k:
            break
        model_layer_weights_top_k.append([layer_name,index])
        k += 1
def neuron_selection(model, model_layer_times, model_layer_value, neuron_select_strategy, neuron_to_cover_num,threshold):
    """Pick `neuron_to_cover_num` neurons and return one Keras mean-activation
    loss tensor per pick.

    `neuron_select_strategy` is a string of strategy digits that share the
    budget equally:
        '0' - prefer frequently covered neurons,
        '1' - prefer rarely covered neurons,
        '2' - prefer neurons fed by the largest positive mean weights,
        '3' - prefer neurons whose current activation lies just around
              `threshold` (pushed up from below, down from above).
    The literal string 'None' falls back to a purely random pick.
    """
    if neuron_select_strategy == 'None':
        return random_strategy(model, model_layer_times, neuron_to_cover_num)

    num_strategy = len([x for x in neuron_select_strategy if x in ['0', '1', '2', '3']])
    # Floor division keeps the per-strategy budget an int on Python 3 as well
    # (np.random.choice rejects a float size); identical result on Python 2.
    neuron_to_cover_num_each = neuron_to_cover_num // num_strategy

    loss_neuron = []
    # shared initialization for strategies '0' and '1': flatten the coverage
    # counters into an array plus a position -> (layer, index) lookup
    if ('0' in list(neuron_select_strategy)) or ('1' in list(neuron_select_strategy)):
        i = 0
        neurons_covered_times = []
        neurons_key_pos = {}
        for (layer_name, index), time in model_layer_times.items():
            neurons_covered_times.append(time)
            neurons_key_pos[i] = (layer_name, index)
            i += 1
        neurons_covered_times = np.asarray(neurons_covered_times)
        times_total = sum(neurons_covered_times)

    # strategy '0': sample neurons proportionally to how often they were covered
    if '0' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)  # the beginning: no neurons covered yet
        neurons_covered_percentage = neurons_covered_times / float(times_total)
        num_neuron0 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False, p=neurons_covered_percentage)
        for num in num_neuron0:
            layer_name0, index0 = neurons_key_pos[num]
            loss0_neuron = K.mean(model.get_layer(layer_name0).output[..., index0])
            loss_neuron.append(loss0_neuron)

    # strategy '1': sample neurons proportionally to how rarely they were covered
    if '1' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)
        neurons_covered_times_inverse = np.subtract(max(neurons_covered_times), neurons_covered_times)
        neurons_covered_percentage_inverse = neurons_covered_times_inverse / float(sum(neurons_covered_times_inverse))
        num_neuron1 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False,
                                       p=neurons_covered_percentage_inverse)
        for num in num_neuron1:
            layer_name1, index1 = neurons_key_pos[num]
            loss1_neuron = K.mean(model.get_layer(layer_name1).output[..., index1])
            loss_neuron.append(loss1_neuron)

    # strategy '2': neurons with largest weights (feature maps with largest filter weights)
    if '2' in list(neuron_select_strategy):
        layer_names = [layer.name for layer in model.layers if
                       'flatten' not in layer.name and 'input' not in layer.name]
        k = 0.1
        top_k = k * len(model_layer_times)  # number of neurons to be selected within
        global model_layer_weights_top_k
        if len(model_layer_weights_top_k) == 0:
            neuron_select_high_weight(model, layer_names, top_k)  # lazily fill the global cache
        num_neuron2 = np.random.choice(range(len(model_layer_weights_top_k)), neuron_to_cover_num_each, replace=False)
        for i in num_neuron2:
            layer_name2 = model_layer_weights_top_k[i][0]
            index2 = model_layer_weights_top_k[i][1]
            loss2_neuron = K.mean(model.get_layer(layer_name2).output[..., index2])
            loss_neuron.append(loss2_neuron)

    # strategy '3': neurons whose activation sits just around the threshold
    if '3' in list(neuron_select_strategy):
        above_threshold = []
        below_threshold = []
        # floor division keeps the half-budgets integral (see note above)
        above_num = neuron_to_cover_num_each // 2
        below_num = neuron_to_cover_num_each - above_num
        above_i = 0
        below_i = 0
        for (layer_name, index), value in model_layer_value.items():
            if threshold + 0.25 > value > threshold and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and above_i < above_num:
                above_threshold.append([layer_name, index])
                above_i += 1
            elif threshold > value > threshold - 0.2 and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and below_i < below_num:
                below_threshold.append([layer_name, index])
                below_i += 1
        # NOTE(review): kept from the original — strategy '3' resets loss_neuron,
        # discarding any picks made by strategies '0'/'1'/'2' when combined.
        loss_neuron = []
        if len(above_threshold) > 0:
            for above_item in range(len(above_threshold)):
                loss_neuron.append(K.mean(
                    model.get_layer(above_threshold[above_item][0]).output[..., above_threshold[above_item][1]]))
        if len(below_threshold) > 0:
            for below_item in range(len(below_threshold)):
                loss_neuron.append(-K.mean(
                    model.get_layer(below_threshold[below_item][0]).output[..., below_threshold[below_item][1]]))

    # Bug fix: the original tested `loss_neuron == 0`, which is never true for
    # a list, so an empty selection leaked through to the caller.
    if len(loss_neuron) == 0:
        return random_strategy(model, model_layer_times, 1)  # no neuron selected
    return loss_neuron
def neuron_scale(loss_neuron):
    """Normalize each neuron loss by the sum of all losses (Keras tensors)."""
    loss_neuron_new = []
    loss_sum = K.sum(loss_neuron)
    for loss_each in loss_neuron:
        loss_each /= loss_sum
        loss_neuron_new.append(loss_each)
    return loss_neuron_new
def neuron_scale_maxmin(loss_neuron):
    """Min-max normalize the neuron losses into [0, 1] (Keras tensors)."""
    max_loss = K.max(loss_neuron)
    min_loss = K.min(loss_neuron)
    base = max_loss - min_loss
    loss_neuron_new = []
    for loss_each in loss_neuron:
        loss_each_new = (loss_each - min_loss) / base
        loss_neuron_new.append(loss_each_new)
    return loss_neuron_new
def neuron_covered(model_layer_times):
    """Summarize a coverage counter table.

    Returns (covered_count, total_count, coverage_ratio), where a neuron
    counts as covered when its hit counter is positive.
    """
    total = len(model_layer_times)
    covered = sum(1 for count in model_layer_times.values() if count > 0)
    return covered, total, covered / float(total)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max rescale an activation map into the range [rmin, rmax].

    Note: divides by (max - min), so a constant-valued input divides by zero,
    matching the original behavior.
    """
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    unit = (intermediate_layer_output - lo) / (hi - lo)
    return unit * (rmax - rmin) + rmin
def update_coverage(input_data, model, model_layer_times, threshold=0):
    """Run the model on input_data and bump the hit counter of every neuron
    whose scaled mean activation exceeds `threshold`.

    Returns the list of raw intermediate layer outputs.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # one model whose outputs are every interesting layer's activations
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        scaled = scale(intermediate_layer_output[0])
        # NOTE(review): xrange is Python-2 only
        for num_neuron in xrange(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold: #and model_layer_dict[(layer_names[i], num_neuron)] == 0:
                model_layer_times[(layer_names[i], num_neuron)] += 1
    return intermediate_layer_outputs
def update_coverage_value(input_data, model, model_layer_value):
    """Run the model on input_data and record every neuron's scaled mean
    activation into model_layer_value. Returns the raw layer outputs.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        scaled = scale(intermediate_layer_output[0])
        # NOTE(review): xrange is Python-2 only
        for num_neuron in xrange(scaled.shape[-1]):
            model_layer_value[(layer_names[i], num_neuron)] = np.mean(scaled[..., num_neuron])
    return intermediate_layer_outputs
'''
def update_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
# xrange(scaled.shape[-1])
for num_neuron in xrange(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
return intermediate_layer_outputs
'''
def full_coverage(model_layer_dict):
    """True iff no neuron in the table is still marked False (uncovered)."""
    return False not in model_layer_dict.values()
def fired(model, layer_name, index, input_data, threshold=0):
    """True iff the given neuron's scaled mean activation on input_data
    exceeds `threshold`."""
    intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]
    scaled = scale(intermediate_layer_output)
    if np.mean(scaled[..., index]) > threshold:
        return True
    return False
def diverged(predictions1, predictions2, predictions3, target):
    """True when the three model predictions do not all agree.

    `target` is unused; kept for interface compatibility with callers.
    """
    all_agree = predictions1 == predictions2 == predictions3
    return not all_agree
def get_signature():
    """Millisecond-resolution timestamp string (elapsed since 2015-06-06),
    used to give generated images unique file names."""
    epoch = datetime(2015, 6, 6, 0, 0, 0, 0)
    elapsed = datetime.now() - epoch
    return str(int(elapsed.total_seconds() * 1000))
| 15,272 | 40.167116 | 144 | py |
DLFuzz | DLFuzz-master/ImageNet/gen_diff.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import shutil
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.layers import Input
from scipy.misc import imsave
from utils_tmp import *
import sys
import os
import time
# input image dimensions
img_rows, img_cols = 224, 224
input_shape = (img_rows, img_cols, 3)
# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)
# load multiple models sharing same input tensor
K.set_learning_phase(0)
model_name = sys.argv[6]
if model_name == 'vgg16':
model1 = VGG16(input_tensor=input_tensor)
elif model_name == 'vgg19':
model1 = VGG19(input_tensor=input_tensor)
elif model_name == 'resnet50':
model1 = ResNet50(input_tensor=input_tensor)
else:
print('please specify model name')
os._exit(0)
print(model1.name)
# model_layer_dict1 = init_coverage_tables(model1)
model_layer_times1 = init_coverage_times(model1) # times of each neuron covered
model_layer_times2 = init_coverage_times(model1) # update when new image and adversarial images found
model_layer_value1 = init_coverage_value(model1)
# start gen inputs
img_dir = './seeds_20'
img_paths = os.listdir(img_dir)
img_num = len(img_paths)
# e.g.[0,1,2] None for neurons not covered, 0 for covered often, 1 for covered rarely, 2 for high weights
neuron_select_strategy = sys.argv[1]
threshold = float(sys.argv[2])
neuron_to_cover_num = int(sys.argv[3])
subdir = sys.argv[4]
iteration_times = int(sys.argv[5])
predict_weight = 0.5
neuron_to_cover_weight = 0.5
learning_step = 0.5
save_dir = './generated_inputs/' + subdir + '/'
if os.path.exists(save_dir):
for i in os.listdir(save_dir):
path_file = os.path.join(save_dir, i)
if os.path.isfile(path_file):
os.remove(path_file)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# start = time.clock()
total_time = 0
total_norm = 0
adversial_num = 0
total_perturb_adversial = 0
for i in xrange(img_num):
start_time = time.clock()
img_list = []
img_path = os.path.join(img_dir,img_paths[i])
print(img_path)
tmp_img = preprocess_image(img_path)
orig_img = tmp_img.copy()
img_list.append(tmp_img)
update_coverage(tmp_img, model1, model_layer_times2, threshold)
while len(img_list) > 0:
gen_img = img_list[0]
img_list.remove(gen_img)
# first check if input already induces differences
pred1 = model1.predict(gen_img)
label1 = np.argmax(pred1[0])
label_top5 = np.argsort(pred1[0])[-5:]
update_coverage_value(gen_img, model1, model_layer_value1)
update_coverage(gen_img, model1, model_layer_times1, threshold)
orig_label = label1
orig_pred = pred1
if model1.name == 'resnet50':
loss_1 = K.mean(model1.get_layer('fc1000').output[..., orig_label])
loss_2 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-2]])
loss_3 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-3]])
loss_4 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-4]])
loss_5 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-5]])
else:
loss_1 = K.mean(model1.get_layer('predictions').output[..., orig_label])
loss_2 = K.mean(model1.get_layer('predictions').output[..., label_top5[-2]])
loss_3 = K.mean(model1.get_layer('predictions').output[..., label_top5[-3]])
loss_4 = K.mean(model1.get_layer('predictions').output[..., label_top5[-4]])
loss_5 = K.mean(model1.get_layer('predictions').output[..., label_top5[-5]])
layer_output = (predict_weight * (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)
# neuron coverage loss
loss_neuron = neuron_selection(model1, model_layer_times1, model_layer_value1, neuron_select_strategy,
neuron_to_cover_num,threshold)
# extreme value means the activation value for a neuron can be as high as possible ...
EXTREME_VALUE = False
if EXTREME_VALUE:
neuron_to_cover_weight = 2
layer_output += neuron_to_cover_weight * K.sum(loss_neuron)
# for adversarial image generation
final_loss = K.mean(layer_output)
# we compute the gradient of the input picture wrt this loss
grads = normalize(K.gradients(final_loss, input_tensor)[0])
grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]
grads_tensor_list.extend(loss_neuron)
grads_tensor_list.append(grads)
# this function returns the loss and grads given the input picture
iterate = K.function([input_tensor], grads_tensor_list)
# we run gradient ascent for some steps
for iters in xrange(iteration_times):
loss_neuron_list = iterate([gen_img])
perturb = loss_neuron_list[-1] * learning_step
gen_img += perturb
# previous accumulated neuron coverage
previous_coverage = neuron_covered(model_layer_times1)[2]
pred1 = model1.predict(gen_img)
label1 = np.argmax(pred1[0])
update_coverage(gen_img, model1, model_layer_times1, threshold) # for seed selection
current_coverage = neuron_covered(model_layer_times1)[2]
diff_img = gen_img - orig_img
L2_norm = np.linalg.norm(diff_img)
orig_L2_norm = np.linalg.norm(orig_img)
perturb_adversial = L2_norm / orig_L2_norm
if current_coverage - previous_coverage > 0.01 / (i + 1) and perturb_adversial < 0.02:
img_list.append(gen_img)
# print('coverage diff = ', current_coverage - previous_coverage, 'perturb_adversial = ', perturb_adversial)
if label1 != orig_label:
update_coverage(gen_img, model1, model_layer_times2, threshold)
total_norm += L2_norm
total_perturb_adversial += perturb_adversial
# print('L2 norm : ' + str(L2_norm))
# print('ratio perturb = ', perturb_adversial)
gen_img_tmp = gen_img.copy()
gen_img_deprocessed = deprocess_image(gen_img_tmp)
save_img = save_dir + decode_label(pred1) + '-' + decode_label(orig_pred) + '-' + str(get_signature()) + '.png'
imsave(save_img, gen_img_deprocessed)
adversial_num += 1
end_time = time.clock()
print('covered neurons percentage %d neurons %.3f'
% (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
duration = end_time - start_time
print('used time : ' + str(duration))
total_time += duration
print('covered neurons percentage %d neurons %.3f'
      % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
print('total_time = ' + str(total_time))
# Guard the averages: the original divided unconditionally and raised
# ZeroDivisionError when no adversarial example was generated.
if adversial_num > 0:
    print('average_norm = ' + str(total_norm / adversial_num))
    print('adversial num = ' + str(adversial_num))
    print('average perb adversial = ' + str(total_perturb_adversial / adversial_num))
else:
    print('adversial num = 0 (no adversarial examples generated)')
| 7,199 | 30.858407 | 127 | py |
DLFuzz | DLFuzz-master/MNIST/Model2.py | '''
LeNet-4
'''
# usage: python MNISTModel2.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
def Model2(input_tensor=None, train=False):
    """Build (and optionally train on MNIST) the LeNet-4 style model.

    When train=True, loads MNIST, trains for 10 epochs and saves weights to
    ./Model2.h5; otherwise loads weights from that file. `input_tensor` is
    required when train=False.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)
    if train:
        batch_size = 256
        nb_epoch = 10
        # input image dimensions
        img_rows, img_cols = 28, 28
        # the data, shuffled and split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
        # scale pixels to [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)
        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        print('you have to proved input_tensor when testing')
        exit()
    # block1
    print("in Model2 input_tensor = ",input_tensor)
    x = Convolution2D(6, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    # block2
    x = Convolution2D(16, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(84, activation='relu', name='fc1')(x)
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)
    model = Model(input_tensor, x)
    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        # training
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)
        # save model
        model.save_weights('./Model2.h5')
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model2.h5')
        print('Model2 loaded')
    return model
if __name__ == '__main__':
Model2(train=True)
| 2,636 | 30.023529 | 120 | py |
DLFuzz | DLFuzz-master/MNIST/Model3.py | '''
LeNet-5
'''
# usage: python MNISTModel3.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
def Model3(input_tensor=None, train=False):
    """Build (and optionally train on MNIST) the LeNet-5 style model.

    When train=True, loads MNIST, trains for 10 epochs and saves weights to
    ./Model3.h5; otherwise loads weights from that file. `input_tensor` is
    required when train=False.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)
    if train:
        batch_size = 256
        nb_epoch = 10
        # input image dimensions
        img_rows, img_cols = 28, 28
        # the data, shuffled and split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
        # scale pixels to [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)
        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        print('you have to proved input_tensor when testing')
        exit()
    # block1
    x = Convolution2D(6, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    # block2
    x = Convolution2D(16, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(120, activation='relu', name='fc1')(x)
    x = Dense(84, activation='relu', name='fc2')(x)
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)
    model = Model(input_tensor, x)
    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        # training
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)
        # save model
        model.save_weights('./Model3.h5')
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model3.h5')
        print('Model3 loaded')
    return model
print('Model3 loaded')
return model
if __name__ == '__main__':
Model3(train=True)
| 2,637 | 30.035294 | 120 | py |
DLFuzz | DLFuzz-master/MNIST/utils_tmp.py | # -*- coding: utf-8 -*-
import random
from collections import defaultdict
import numpy as np
from datetime import datetime
from keras import backend as K
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.models import Model
from keras.preprocessing import image
model_layer_weights_top_k = []
def preprocess_image(img_path):
    """Load an image as 28x28 grayscale and return a (1, 28, 28, 1) float
    batch scaled into [0, 1] (MNIST-style preprocessing)."""
    img = image.load_img(img_path, target_size=(28, 28), grayscale=True)
    input_img_data = image.img_to_array(img)
    input_img_data = input_img_data.reshape(1, 28, 28, 1)
    input_img_data = input_img_data.astype('float32')
    input_img_data /= 255
    # input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)
    return input_img_data
def deprocess_image(x):
    """Map a [0, 1] float batch of shape (1, rows, cols, 1) back to a 2-D
    uint8 grayscale image.

    Note: scales `x` in place (matching the original contract), so callers
    that still need the 0-1 range should pass a copy.
    """
    x *= 255
    pixels = np.clip(x, 0, 255).astype('uint8')
    rows, cols = pixels.shape[1], pixels.shape[2]
    return pixels.reshape(rows, cols)
def decode_label(pred):
    # NOTE(review): decode_predictions is the ImageNet decoder; calling this
    # on 10-class MNIST predictions would fail — appears to be leftover code
    # copied from the ImageNet variant of this module.
    return decode_predictions(pred)[0][0][1]
def normalize(x):
    """Scale a gradient tensor so its RMS magnitude is ~1 (stabilizes step size)."""
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
    """Keep gradients only inside the rect_shape patch anchored at start_point."""
    new_grads = np.zeros_like(gradients)
    new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
    start_point[1]:start_point[1] + rect_shape[1]] = gradients[:, start_point[0]:start_point[0] + rect_shape[0],
                                                     start_point[1]:start_point[1] + rect_shape[1]]
    return new_grads
def constraint_light(gradients):
    """Uniform perturbation: 1e4 x mean gradient, broadcast everywhere."""
    new_grads = np.ones_like(gradients)
    grad_mean = 1e4 * np.mean(gradients)
    return grad_mean * new_grads
def constraint_black(gradients, rect_shape=(10, 10)):
    """Random black-patch perturbation: -1s in a random patch when the mean
    gradient there is negative, otherwise all zeros."""
    start_point = (
        random.randint(0, gradients.shape[1] - rect_shape[0]), random.randint(0, gradients.shape[2] - rect_shape[1]))
    new_grads = np.zeros_like(gradients)
    patch = gradients[:, start_point[0]:start_point[0] + rect_shape[0], start_point[1]:start_point[1] + rect_shape[1]]
    if np.mean(patch) < 0:
        new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
        start_point[1]:start_point[1] + rect_shape[1]] = -np.ones_like(patch)
    return new_grads
def init_coverage_tables(model1, model2, model3):
    # NOTE(review): this three-model variant is immediately shadowed by the
    # one-argument definition below, so it is unreachable dead code; kept
    # as-is to avoid interface changes.
    model_layer_dict1 = defaultdict(bool)
    model_layer_dict2 = defaultdict(bool)
    model_layer_dict3 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    init_dict(model2, model_layer_dict2)
    init_dict(model3, model_layer_dict3)
    return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_coverage_tables(model1):
    """Create a fresh {(layer_name, neuron_index): bool} coverage table for one model."""
    model_layer_dict1 = defaultdict(bool)
    init_dict(model1, model_layer_dict1)
    return model_layer_dict1
def init_dict(model, model_layer_dict):
    """Register every neuron of every relevant layer as uncovered (False);
    flatten/input layers are skipped."""
    for layer in model.layers:
        if 'flatten' in layer.name or 'input' in layer.name:
            continue
        for index in range(layer.output_shape[-1]):
            model_layer_dict[(layer.name, index)] = False
def init_coverage_times(model):
    """Per-neuron counter table: how many inputs have activated each neuron."""
    model_layer_times = defaultdict(int)
    init_times(model,model_layer_times)
    return model_layer_times
def init_coverage_value(model):
    """Per-neuron table holding the most recent scaled activation value."""
    model_layer_value = defaultdict(float)
    init_times(model, model_layer_value)
    return model_layer_value
def init_times(model,model_layer_times):
    """Zero an entry for every neuron, skipping flatten/input layers."""
    for layer in model.layers:
        if 'flatten' in layer.name or 'input' in layer.name:
            continue
        for index in range(layer.output_shape[-1]):
            model_layer_times[(layer.name, index)] = 0
def neuron_to_cover(model_layer_dict):
    # NOTE(review): shadowed by the two-argument neuron_to_cover defined just
    # below, so this variant is never reachable. Also note that
    # random.choice(model_layer_dict.keys()) only works on Python 2.
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
    if not_covered:
        layer_name, index = random.choice(not_covered)
    else:
        layer_name, index = random.choice(model_layer_dict.keys())
    return layer_name, index
def neuron_to_cover(not_covered, model_layer_dict):
    """Pick the next neuron to target.

    Prefers a random entry from `not_covered` (removing it so it cannot be
    picked twice); when every neuron has been covered, falls back to a
    uniformly random neuron from the coverage table.

    Returns:
        (layer_name, index) tuple identifying the chosen neuron.
    """
    if not_covered:
        layer_name, index = random.choice(not_covered)
        not_covered.remove((layer_name, index))
    else:
        # list(...) keeps this working on Python 3, where dict.keys() is a
        # view object that random.choice cannot index.
        layer_name, index = random.choice(list(model_layer_dict.keys()))
    return layer_name, index
def random_strategy(model,model_layer_times, neuron_to_cover_num):
    """Select ``neuron_to_cover_num`` neurons at random (uncovered first)
    and return one Keras mean-activation tensor per selected neuron.

    Uses the two-argument ``neuron_to_cover`` helper, which pops from the
    ``not_covered`` list, so repeated picks are distinct while any remain.
    """
    loss_neuron = []
    # neurons with a zero hit counter are preferred candidates
    not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_times.items() if v == 0]
    for _ in xrange(neuron_to_cover_num):
        layer_name, index = neuron_to_cover(not_covered, model_layer_times)
        # mean activation of the chosen feature map as a symbolic loss term
        loss00_neuron = K.mean(model.get_layer(layer_name).output[..., index])
        # if loss_neuron == 0:
        #     loss_neuron = loss00_neuron
        # else:
        #     loss_neuron += loss00_neuron
        # loss_neuron += loss1_neuron
        loss_neuron.append(loss00_neuron)
    return loss_neuron
def neuron_select_high_weight(model, layer_names, top_k):
    """Populate the module-level ``model_layer_weights_top_k`` list with the
    ``top_k`` neurons whose mean incoming weight is largest (and positive).

    Mutates the global list in place; callers compute it once and reuse it
    (see strategy '2' in ``neuron_selection``).
    """
    global model_layer_weights_top_k
    model_layer_weights_dict = {}
    for layer_name in layer_names:
        weights = model.get_layer(layer_name).get_weights()
        if len(weights) <= 0:
            # layer has no trainable weights (e.g. pooling/activation)
            continue
        w = np.asarray(weights[0])  # 0 is weights, 1 is biases
        w = w.reshape(w.shape)  # NOTE(review): no-op reshape, kept for fidelity
        for index in range(model.get_layer(layer_name).output_shape[-1]):
            # mean of all weights feeding this output channel
            index_w = np.mean(w[..., index])
            if index_w <= 0:
                # only neurons with a positive mean weight are candidates
                continue
            model_layer_weights_dict[(layer_name,index)]=index_w
    # notice!
    # sort candidates by mean weight, largest first, and keep the top_k
    model_layer_weights_list = sorted(model_layer_weights_dict.items(), key=lambda x: x[1], reverse=True)
    k = 0
    for (layer_name, index),weight in model_layer_weights_list:
        if k >= top_k:
            break
        model_layer_weights_top_k.append([layer_name,index])
        k += 1
def neuron_selection(model, model_layer_times, model_layer_value, neuron_select_strategy, neuron_to_cover_num, threshold):
    """Select neurons to target and return their symbolic activation losses.

    Parameters
    ----------
    model : keras.Model
        Model under test.
    model_layer_times : dict
        (layer_name, index) -> number of times the neuron was covered.
    model_layer_value : dict
        (layer_name, index) -> last scaled activation value of the neuron.
    neuron_select_strategy : str
        Any combination of '0' (often covered), '1' (rarely covered),
        '2' (high weights), '3' (near the activation threshold);
        the literal string 'None' selects randomly.
    neuron_to_cover_num : int
        Total number of neurons to select, split evenly across strategies.
    threshold : float
        Activation threshold used by strategy '3'.

    Returns
    -------
    list
        Keras scalar tensors, one per selected neuron.
    """
    if neuron_select_strategy == 'None':
        return random_strategy(model, model_layer_times, neuron_to_cover_num)

    num_strategy = len([x for x in neuron_select_strategy if x in ['0', '1', '2', '3']])
    # Floor division keeps the per-strategy count an int on Python 3 as well
    # (a plain '/' would yield a float and break np.random.choice's size arg);
    # under Python 2 int division this is behaviourally identical.
    neuron_to_cover_num_each = neuron_to_cover_num // num_strategy

    loss_neuron = []
    # shared initialization for strategies '0'/'1': flatten the hit counters
    # into an array plus an index -> key lookup table
    if ('0' in list(neuron_select_strategy)) or ('1' in list(neuron_select_strategy)):
        i = 0
        neurons_covered_times = []
        neurons_key_pos = {}
        for (layer_name, index), time in model_layer_times.items():
            neurons_covered_times.append(time)
            neurons_key_pos[i] = (layer_name, index)
            i += 1
        neurons_covered_times = np.asarray(neurons_covered_times)
        times_total = sum(neurons_covered_times)

    # strategy '0': sample neurons proportionally to how often they were covered
    if '0' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)  # the beginning: no neurons covered yet
        neurons_covered_percentage = neurons_covered_times / float(times_total)
        num_neuron0 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False, p=neurons_covered_percentage)
        for num in num_neuron0:
            layer_name0, index0 = neurons_key_pos[num]
            loss0_neuron = K.mean(model.get_layer(layer_name0).output[..., index0])
            loss_neuron.append(loss0_neuron)

    # strategy '1': sample neurons proportionally to how rarely they were covered
    if '1' in list(neuron_select_strategy):
        if times_total == 0:
            return random_strategy(model, model_layer_times, 1)
        neurons_covered_times_inverse = np.subtract(max(neurons_covered_times), neurons_covered_times)
        neurons_covered_percentage_inverse = neurons_covered_times_inverse / float(sum(neurons_covered_times_inverse))
        num_neuron1 = np.random.choice(range(len(neurons_covered_times)), neuron_to_cover_num_each, replace=False,
                                       p=neurons_covered_percentage_inverse)
        for num in num_neuron1:
            layer_name1, index1 = neurons_key_pos[num]
            loss1_neuron = K.mean(model.get_layer(layer_name1).output[..., index1])
            loss_neuron.append(loss1_neuron)

    # strategy '2': neurons with the largest incoming filter weights
    if '2' in list(neuron_select_strategy):
        layer_names = [layer.name for layer in model.layers if
                       'flatten' not in layer.name and 'input' not in layer.name]
        k = 0.1
        top_k = k * len(model_layer_times)  # number of neurons to be selected within
        global model_layer_weights_top_k
        if len(model_layer_weights_top_k) == 0:
            # computed once per process, then reused
            neuron_select_high_weight(model, layer_names, top_k)
        num_neuron2 = np.random.choice(range(len(model_layer_weights_top_k)), neuron_to_cover_num_each, replace=False)
        for i in num_neuron2:
            layer_name2 = model_layer_weights_top_k[i][0]
            index2 = model_layer_weights_top_k[i][1]
            loss2_neuron = K.mean(model.get_layer(layer_name2).output[..., index2])
            loss_neuron.append(loss2_neuron)

    # strategy '3': neurons whose last activation sits near the coverage
    # threshold; push the ones just above it further up (positive loss) and
    # the ones just below it down (negative loss)
    if '3' in list(neuron_select_strategy):
        above_threshold = []
        below_threshold = []
        # floor division keeps both counts integral (matches Python 2 '/')
        above_num = neuron_to_cover_num_each // 2
        below_num = neuron_to_cover_num_each - above_num
        above_i = 0
        below_i = 0
        for (layer_name, index), value in model_layer_value.items():
            # dense/classification layers are excluded from this strategy
            if threshold + 0.25 > value > threshold and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and layer_name != 'before_softmax' \
                    and above_i < above_num:
                above_threshold.append([layer_name, index])
                above_i += 1
            elif threshold > value > threshold - 0.2 and layer_name != 'fc1' and layer_name != 'fc2' and \
                    layer_name != 'predictions' and layer_name != 'fc1000' and layer_name != 'before_softmax' \
                    and below_i < below_num:
                below_threshold.append([layer_name, index])
                below_i += 1
        # NOTE(review): this reset discards any losses collected by the
        # other strategies when '3' is combined with them; kept as-is to
        # preserve the original selection behaviour.
        loss_neuron = []
        if len(above_threshold) > 0:
            for above_item in range(len(above_threshold)):
                loss_neuron.append(K.mean(
                    model.get_layer(above_threshold[above_item][0]).output[..., above_threshold[above_item][1]]))
        if len(below_threshold) > 0:
            for below_item in range(len(below_threshold)):
                loss_neuron.append(-K.mean(
                    model.get_layer(below_threshold[below_item][0]).output[..., below_threshold[below_item][1]]))

    # Fall back to a random neuron when no strategy selected anything.
    # The original tested 'loss_neuron == 0', which compares a list to an
    # int and is always False, so this fallback could never trigger and an
    # empty loss list leaked to the caller.
    if not loss_neuron:
        return random_strategy(model, model_layer_times, 1)

    return loss_neuron
def neuron_scale(loss_neuron):
    """Normalize each neuron loss term by the (symbolic) sum of all terms.

    Returns a new list of Keras tensors; the input list is not modified.
    """
    total = K.sum(loss_neuron)
    return [term / total for term in loss_neuron]
def neuron_scale_maxmin(loss_neuron):
    """Min-max normalize the neuron loss terms to the symbolic [0, 1] range.

    Returns a new list of Keras tensors; the input list is not modified.
    """
    max_loss = K.max(loss_neuron)
    min_loss = K.min(loss_neuron)
    span = max_loss - min_loss
    return [(term - min_loss) / span for term in loss_neuron]
def neuron_covered(model_layer_times):
    """Summarize coverage of a hit-counter table.

    Returns
    -------
    tuple
        (covered_count, total_count, coverage_ratio) where a neuron counts
        as covered when its hit counter is positive.
    """
    total_neurons = len(model_layer_times)
    covered_neurons = sum(1 for hits in model_layer_times.values() if hits > 0)
    return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max rescale an activation array into the [rmin, rmax] range.

    NOTE: a constant input array makes the denominator zero, which numpy
    turns into NaN/inf values (same as the original behaviour).
    """
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    normalized = (intermediate_layer_output - lo) / (hi - lo)
    return normalized * (rmax - rmin) + rmin
def update_coverage(input_data, model, model_layer_times, threshold=0):
    """Run ``input_data`` through the model and bump the hit counter of
    every neuron whose scaled mean activation exceeds ``threshold``.

    Mutates ``model_layer_times`` in place and returns the raw per-layer
    outputs of the probing model. Assumes ``input_data`` is a single-sample
    batch (only element [0] of each layer output is inspected).
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # probing model exposing every monitored layer output at once
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        # min-max scale per layer so the threshold is comparable across layers
        scaled = scale(intermediate_layer_output[0])
        # xrange(scaled.shape[-1])
        for num_neuron in xrange(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold: #and model_layer_dict[(layer_names[i], num_neuron)] == 0:
                model_layer_times[(layer_names[i], num_neuron)] += 1
    return intermediate_layer_outputs
def update_coverage_value(input_data, model, model_layer_value):
    """Record the scaled mean activation of every monitored neuron.

    Mutates ``model_layer_value`` in place ((layer_name, index) -> value)
    and returns the raw per-layer outputs of the probing model. Assumes
    ``input_data`` is a single-sample batch.
    """
    layer_names = [layer.name for layer in model.layers
                   if 'flatten' not in layer.name and 'input' not in layer.name]
    probe = Model(inputs=model.input,
                  outputs=[model.get_layer(name).output for name in layer_names])
    outputs = probe.predict(input_data)
    for layer_name, layer_output in zip(layer_names, outputs):
        scaled = scale(layer_output[0])
        for num_neuron in xrange(scaled.shape[-1]):
            model_layer_value[(layer_name, num_neuron)] = np.mean(scaled[..., num_neuron])
    return outputs
'''
def update_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
# xrange(scaled.shape[-1])
for num_neuron in xrange(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
return intermediate_layer_outputs
'''
def full_coverage(model_layer_dict):
    """Return True when no neuron in the coverage table is marked False."""
    return False not in model_layer_dict.values()
def fired(model, layer_name, index, input_data, threshold=0):
    """Return True when neuron ``index`` of ``layer_name`` activates above
    ``threshold`` (after per-layer min-max scaling) for ``input_data``."""
    probe = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    layer_output = probe.predict(input_data)[0]
    scaled = scale(layer_output)
    return bool(np.mean(scaled[..., index]) > threshold)
def diverged(predictions1, predictions2, predictions3, target):
    """Return True when the three predictions do not all agree.

    ``target`` is accepted for interface compatibility but is not used
    (see the commented-out original condition).
    """
    # if predictions2 == predictions3 == target and predictions1 != target:
    all_agree = predictions1 == predictions2 == predictions3
    return not all_agree
def get_signature():
    """Return a unique-ish string signature: milliseconds elapsed since a
    fixed epoch (2015-06-06), used to de-duplicate saved image names."""
    epoch = datetime(2015, 6, 6, 0, 0, 0, 0)
    elapsed = datetime.now() - epoch
    return str(int(elapsed.total_seconds() * 1000))
| 16,769 | 41.671756 | 144 | py |
DLFuzz | DLFuzz-master/MNIST/gen_diff.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from keras.layers import Input
from scipy.misc import imsave
from utils_tmp import *
import sys
import os
import time
from Model1 import Model1
from Model2 import Model2
from Model3 import Model3
def load_data(path="../MNIST_data/mnist.npz"):
    """Load the MNIST arrays from a local ``.npz`` archive.

    Returns
    -------
    tuple
        ((x_train, y_train), (x_test, y_test)) numpy arrays.
    """
    archive = np.load(path)
    train_split = (archive['x_train'], archive['y_train'])
    test_split = (archive['x_test'], archive['y_test'])
    archive.close()
    return train_split, test_split
# ---------------------------------------------------------------------------
# Experiment setup: build the selected model, the coverage bookkeeping
# tables, and the output directory.
# CLI: gen_diff.py <strategies> <threshold> <neuron_num> <subdir> <iters> <model>
# ---------------------------------------------------------------------------
# input image dimensions
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)
# load multiple models sharing same input tensor
K.set_learning_phase(0)
model_name = sys.argv[6]
if model_name == 'model1':
    model1 = Model1(input_tensor=input_tensor)
elif model_name == 'model2':
    model1 = Model2(input_tensor=input_tensor)
elif model_name == 'model3':
    model1 = Model3(input_tensor=input_tensor)
else:
    print('please specify model name')
    os._exit(0)
print(model1.name)
# model_layer_dict1 = init_coverage_tables(model1)
model_layer_times1 = init_coverage_times(model1)  # times of each neuron covered
model_layer_times2 = init_coverage_times(model1)  # update when new image and adversarial images found
model_layer_value1 = init_coverage_value(model1)
# start gen inputs
# img_paths = image.list_pictures('../seeds_20', ext='JPEG')
img_dir = './seeds_50'
img_paths = os.listdir(img_dir)
img_num = len(img_paths)
# e.g.[0,1,2] None for neurons not covered, 0 for covered often, 1 for covered rarely, 2 for high weights
neuron_select_strategy = sys.argv[1]
threshold = float(sys.argv[2])
neuron_to_cover_num = int(sys.argv[3])
subdir = sys.argv[4]
iteration_times = int(sys.argv[5])
# relative weights of the coverage term and the prediction term in the loss
neuron_to_cover_weight = 0.5
predict_weight = 0.5
learning_step = 0.02
save_dir = './generated_inputs/' + subdir + '/'
# clear any previously generated inputs for this sub-directory
if os.path.exists(save_dir):
    for i in os.listdir(save_dir):
        path_file = os.path.join(save_dir, i)
        if os.path.isfile(path_file):
            os.remove(path_file)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
# start = time.clock()
# running totals reported at the end of the script
total_time = 0
total_norm = 0
adversial_num = 0
total_perturb_adversial = 0
# Main fuzzing loop: for every seed image, repeatedly perturb it along the
# gradient of (prediction-flip loss + neuron-coverage loss) and save any
# perturbed image whose predicted label differs from the original.
for i in xrange(img_num):
    start_time = time.clock()
    img_list = []
    img_path = os.path.join(img_dir,img_paths[i])
    img_name = img_paths[i].split('.')[0]
    # ground-truth label encoded in the file name (currently unused)
    mannual_label = int(img_name.split('_')[1])
    print(img_path)
    tmp_img = preprocess_image(img_path)
    orig_img = tmp_img.copy()
    img_list.append(tmp_img)
    update_coverage(tmp_img, model1, model_layer_times2, threshold)
    # img_list acts as a work queue of promising seeds derived from this image
    while len(img_list) > 0:
        gen_img = img_list[0]
        img_list.remove(gen_img)
        # first check if input already induces differences
        pred1 = model1.predict(gen_img)
        label1 = np.argmax(pred1[0])
        label_top5 = np.argsort(pred1[0])[-5:]
        update_coverage_value(gen_img, model1, model_layer_value1)
        update_coverage(gen_img, model1, model_layer_times1, threshold)
        orig_label = label1
        orig_pred = pred1
        # pre-softmax logits: push the top label down and runners-up labels up
        loss_1 = K.mean(model1.get_layer('before_softmax').output[..., orig_label])
        loss_2 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-2]])
        loss_3 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-3]])
        loss_4 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-4]])
        loss_5 = K.mean(model1.get_layer('before_softmax').output[..., label_top5[-5]])
        layer_output = (predict_weight * (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)
        # neuron coverage loss
        loss_neuron = neuron_selection(model1, model_layer_times1, model_layer_value1, neuron_select_strategy,
                                       neuron_to_cover_num, threshold)
        # loss_neuron = neuron_scale(loss_neuron) # useless, and negative result
        # extreme value means the activation value for a neuron can be as high as possible ...
        EXTREME_VALUE = False
        if EXTREME_VALUE:
            neuron_to_cover_weight = 2
        layer_output += neuron_to_cover_weight * K.sum(loss_neuron)
        # for adversarial image generation
        final_loss = K.mean(layer_output)
        # we compute the gradient of the input picture wrt this loss
        grads = normalize(K.gradients(final_loss, input_tensor)[0])
        grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]
        grads_tensor_list.extend(loss_neuron)
        grads_tensor_list.append(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_tensor], grads_tensor_list)
        # we run gradient ascent for 3 steps
        for iters in xrange(iteration_times):
            loss_neuron_list = iterate([gen_img])
            # last element of the fetched list is the input gradient
            perturb = loss_neuron_list[-1] * learning_step
            gen_img += perturb
            # previous accumulated neuron coverage
            previous_coverage = neuron_covered(model_layer_times1)[2]
            pred1 = model1.predict(gen_img)
            label1 = np.argmax(pred1[0])
            update_coverage(gen_img, model1, model_layer_times1, threshold) # for seed selection
            current_coverage = neuron_covered(model_layer_times1)[2]
            diff_img = gen_img - orig_img
            L2_norm = np.linalg.norm(diff_img)
            orig_L2_norm = np.linalg.norm(orig_img)
            perturb_adversial = L2_norm / orig_L2_norm
            # keep the perturbed image as a new seed if it raised coverage
            # enough while staying visually close to the original
            if current_coverage - previous_coverage > 0.01 / (i + 1) and perturb_adversial < 0.02:
                img_list.append(gen_img)
                # print('coverage diff = ', current_coverage - previous_coverage, 'perturb_adversial = ', perturb_adversial)
            # a flipped label means an adversarial example has been found
            if label1 != orig_label:
                update_coverage(gen_img, model1, model_layer_times2, threshold)
                total_norm += L2_norm
                total_perturb_adversial += perturb_adversial
                # print('L2 norm : ' + str(L2_norm))
                # print('ratio perturb = ', perturb_adversial)
                gen_img_tmp = gen_img.copy()
                gen_img_deprocessed = deprocess_image(gen_img_tmp)
                save_img = save_dir + img_name + '_' + str(get_signature()) + '.png'
                imsave(save_img, gen_img_deprocessed)
                adversial_num += 1
    end_time = time.clock()
    print('covered neurons percentage %d neurons %.3f'
          % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
    duration = end_time - start_time
    print('used time : ' + str(duration))
    total_time += duration
# final summary over all seed images
print('covered neurons percentage %d neurons %.3f'
      % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))
print('total_time = ' + str(total_time))
print('average_norm = ' + str(total_norm / adversial_num))
print('adversial num = ' + str(adversial_num))
print('average perb adversial = ' + str(total_perturb_adversial / adversial_num))
| 7,071 | 29.614719 | 124 | py |
DLFuzz | DLFuzz-master/MNIST/Model1.py | '''
LeNet-1
'''
# usage: python MNISTModel1.py - train the model
from __future__ import print_function
from keras.datasets import mnist
from keras.layers import Convolution2D, MaxPooling2D, Input, Dense, Activation, Flatten
from keras.models import Model
from keras.utils import to_categorical
from keras import backend as K
import numpy as np
def load_data(path="MNIST_data/mnist.npz"):
    """Load the MNIST train/test arrays from a local ``.npz`` archive.

    Returns
    -------
    tuple
        ((x_train, y_train), (x_test, y_test)) numpy arrays.
    """
    archive = np.load(path)
    train_pair = (archive['x_train'], archive['y_train'])
    test_pair = (archive['x_test'], archive['y_test'])
    archive.close()
    return train_pair, test_pair
def Model1(input_tensor=None, train=False):
    """Build (and optionally train) the LeNet-1 style MNIST classifier.

    Parameters
    ----------
    input_tensor : keras tensor, optional
        Shared input placeholder; required when ``train`` is False.
    train : bool
        When True, train on MNIST for 10 epochs and save weights to
        ./Model1.h5; when False, load weights from that file.

    Returns
    -------
    keras.Model
        The compiled/loaded model.
    """
    nb_classes = 10
    # convolution kernel size
    kernel_size = (5, 5)
    if train:
        batch_size = 256
        nb_epoch = 10
        # input image dimensions
        img_rows, img_cols = 28, 28
        # the data, shuffled and split between train and test sets
        # (x_train, y_train), (x_test, y_test) = mnist.load_data()
        (x_train, y_train), (x_test, y_test) = load_data()
        print(x_train.shape)
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
        # scale pixel values to [0, 1]
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # convert class vectors to binary class matrices
        y_train = to_categorical(y_train, nb_classes)
        y_test = to_categorical(y_test, nb_classes)
        input_tensor = Input(shape=input_shape)
    elif input_tensor is None:
        # NOTE(review): message typo, presumably "provide"
        print('you have to proved input_tensor when testing')
        exit()
    # block1
    # print("in Model1 input_tensor = ",input_tensor)
    x = Convolution2D(4, kernel_size, activation='relu', padding='same', name='block1_conv1')(input_tensor)
    # print("in Model1 x = ", x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    # block2
    x = Convolution2D(12, kernel_size, activation='relu', padding='same', name='block2_conv1')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool1')(x)
    x = Flatten(name='flatten')(x)
    # logits layer is named so coverage tools can fetch it pre-softmax
    x = Dense(nb_classes, name='before_softmax')(x)
    x = Activation('softmax', name='predictions')(x)
    model = Model(input_tensor, x)
    if train:
        # compiling
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        # trainig
        model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=nb_epoch, verbose=1)
        # save model
        model.save_weights('./Model1.h5')
        score = model.evaluate(x_test, y_test, verbose=0)
        print('\n')
        print('Overall Test score:', score[0])
        print('Overall Test accuracy:', score[1])
    else:
        model.load_weights('./Model1.h5')
        print('Model1 loaded')
    # K.clear_session()
    return model
# When run as a script, train the network and save the weights.
if __name__ == '__main__':
    Model1(train=True)
| 3,009 | 29.714286 | 120 | py |
pylops | pylops-master/tutorials/torchop.py | r"""
19. Automatic Differentiation
=============================
This tutorial focuses on the use of :class:`pylops.TorchOperator` to allow performing
Automatic Differentiation (AD) on chains of operators which can be:
- native PyTorch mathematical operations (e.g., :func:`torch.log`,
:func:`torch.sin`, :func:`torch.tan`, :func:`torch.pow`, ...)
- neural network operators in :mod:`torch.nn`
- PyLops linear operators
This opens up many opportunities, such as easily including linear regularization
terms to nonlinear cost functions or using linear preconditioners with nonlinear
modelling operators.
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import gradcheck
import pylops
plt.close("all")
np.random.seed(10)
torch.manual_seed(10)
###############################################################################
# In this example we consider a simple multidimensional functional:
#
# .. math::
# \mathbf{y} = \mathbf{A} sin(\mathbf{x})
#
# and we use AD to compute the gradient with respect to the input vector
# evaluated at :math:`\mathbf{x}=\mathbf{x}_0` :
# :math:`\mathbf{g} = d\mathbf{y} / d\mathbf{x} |_{\mathbf{x}=\mathbf{x}_0}`.
#
# Let's start by defining the Jacobian:
#
# .. math::
# \textbf{J} = \begin{bmatrix}
# dy_1 / dx_1 & ... & dy_1 / dx_M \\
# ... & ... & ... \\
# dy_N / dx_1 & ... & dy_N / dx_M
# \end{bmatrix} = \begin{bmatrix}
# a_{11} cos(x_1) & ... & a_{1M} cos(x_M) \\
# ... & ... & ... \\
# a_{N1} cos(x_1) & ... & a_{NM} cos(x_M)
# \end{bmatrix} = \textbf{A} cos(\mathbf{x})
#
# Since both input and output are multidimensional,
# PyTorch ``backward`` actually computes the product between the transposed
# Jacobian and a vector :math:`\mathbf{v}`:
# :math:`\mathbf{g}=\mathbf{J^T} \mathbf{v}`.
#
# To validate the correctness of the AD result, we can in this simple case
# also compute the Jacobian analytically and apply it to the same vector
# :math:`\mathbf{v}` that we have provided to PyTorch ``backward``.
nx, ny = 10, 6
x0 = torch.arange(nx, dtype=torch.double, requires_grad=True)
# Forward: y = A sin(x0), with the matrix product done by a PyLops operator
A = np.random.normal(0.0, 1.0, (ny, nx))
At = torch.from_numpy(A)
Aop = pylops.TorchOperator(pylops.MatrixMult(A))
y = Aop.apply(torch.sin(x0))
# AD: backward computes g = J^T v for the supplied vector v
v = torch.ones(ny, dtype=torch.double)
y.backward(v, retain_graph=True)
adgrad = x0.grad
# Analytical: J = A cos(x0), applied to the same v for comparison
J = At * torch.cos(x0)
anagrad = torch.matmul(J.T, v)
print("Input: ", x0)
print("AD gradient: ", adgrad)
print("Analytical gradient: ", anagrad)
###############################################################################
# Similarly we can use the :func:`torch.autograd.gradcheck` directly from
# PyTorch. Note that doubles must be used for this to succeed with very small
# `eps` and `atol`
# NOTE(review): the name `input` shadows the builtin; harmless in a demo
input = (
    torch.arange(nx, dtype=torch.double, requires_grad=True),
    Aop.matvec,
    Aop.rmatvec,
    Aop.device,
    "cpu",
)
test = gradcheck(Aop.Top, input, eps=1e-6, atol=1e-4)
print(test)
###############################################################################
# Note that while matrix-vector multiplication could have been performed using
# the native PyTorch operator :func:`torch.matmul`, in this case we have shown
# that we are also able to use a PyLops operator wrapped in
# :class:`pylops.TorchOperator`. As already mentioned, this gives us the
# ability to use much more complex linear operators provided by PyLops within
# a chain of mixed linear and nonlinear AD-enabled operators.
# To conclude, let's see how we can chain a torch convolutional network
# with PyLops :class:`pylops.Smoothing2D` operator. First of all, we consider
# a single training sample.
class Network(nn.Module):
    """Small two-layer convolutional network used in the AD demo.

    Halves the channel count twice (``input_channels`` ->
    ``input_channels // 2`` -> ``input_channels // 4``) while preserving
    spatial dimensions (3x3 convolutions with padding 1).
    """

    def __init__(self, input_channels):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(
            input_channels, input_channels // 2, kernel_size=3, padding=1
        )
        self.conv2 = nn.Conv2d(
            input_channels // 2, input_channels // 4, kernel_size=3, padding=1
        )
        self.activation = nn.LeakyReLU(0.2)
        # NOTE: the original also created an unused nn.MaxPool2d attribute;
        # it held no parameters and was never called in forward, so it has
        # been removed.

    def forward(self, x):
        """Apply conv1 -> LeakyReLU -> conv2 -> LeakyReLU."""
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return x
net = Network(4)
# chain the CNN with a PyLops 2D smoothing operator (single sample)
Cop = pylops.TorchOperator(pylops.Smoothing2D((5, 5), dims=(32, 32)))
# Forward
x = torch.randn(1, 4, 32, 32).requires_grad_()
y = Cop.apply(net(x).view(-1)).reshape(32, 32)
# Backward
loss = y.sum()
loss.backward()
# visualize the smoothed output and the gradient w.r.t. the input
fig, axs = plt.subplots(1, 2, figsize=(12, 3))
axs[0].imshow(y.detach().numpy())
axs[0].set_title("Forward")
axs[0].axis("tight")
axs[1].imshow(x.grad.reshape(4 * 32, 32).T)
axs[1].set_title("Gradient")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# And finally we do the same with a batch of 3 training samples.
net = Network(4)
# batch=True applies the operator to every flattened sample in the batch
Cop = pylops.TorchOperator(pylops.Smoothing2D((5, 5), dims=(32, 32)), batch=True)
# Forward
x = torch.randn(3, 4, 32, 32).requires_grad_()
y = Cop.apply(net(x).reshape(3, 32 * 32)).reshape(3, 32, 32)
# Backward
loss = y.sum()
loss.backward()
# show only the first sample of the batch
fig, axs = plt.subplots(1, 2, figsize=(12, 3))
axs[0].imshow(y[0].detach().numpy())
axs[0].set_title("Forward")
axs[0].axis("tight")
axs[1].imshow(x.grad[0].reshape(4 * 32, 32).T)
axs[1].set_title("Gradient")
axs[1].axis("tight")
plt.tight_layout()
pylops | pylops-master/docs/source/conf.py | # -*- coding: utf-8 -*-
import sys
import os
import datetime
from sphinx_gallery.sorting import ExampleTitleSortKey
from pylops import __version__
# Sphinx needs to be able to import the package to use autodoc and get the version number
sys.path.insert(0, os.path.abspath("../../pylops"))
# Sphinx extensions used to build the PyLops documentation.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.doctest",
    "sphinx.ext.viewcode",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "matplotlib.sphinxext.plot_directive",
    "numpydoc",
    "nbsphinx",
    "sphinx_gallery.gen_gallery",
    # 'sphinx.ext.napoleon',
]
# intersphinx configuration: cross-link to the docs of these projects
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
    "sklearn": ("http://scikit-learn.org/stable/", None),
    "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
    "matplotlib": ("https://matplotlib.org/", None),
    "pyfftw": ("https://pyfftw.readthedocs.io/en/latest/", None),
    "spgl1": ("https://spgl1.readthedocs.io/en/latest/", None),
}
# Generate autodoc stubs with summaries from code
autosummary_generate = True
# Include Python objects as they appear in source files
autodoc_member_order = "bysource"
# Default flags used by autodoc directives
autodoc_default_flags = ["members"]
# Avoid showing typing annotations in doc
autodoc_typehints = "none"
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
sphinx_gallery_conf = {
    # path to your examples scripts
    "examples_dirs": [
        "../../examples",
        "../../tutorials",
    ],
    # path where to save gallery generated examples
    "gallery_dirs": ["gallery", "tutorials"],
    "filename_pattern": r"\.py",
    # Remove the "Download all examples" button from the top level gallery
    "download_all_examples": False,
    # Sort gallery example by file name instead of number of lines (default)
    "within_subsection_order": ExampleTitleSortKey,
    # directory where function granular galleries are stored
    "backreferences_dir": "api/generated/backreferences",
    # Modules for which function level galleries are created.
    "doc_module": "pylops",
    # Insert links to documentation of objects in the examples
    "reference_url": {"pylops": None},
}
# Always show the source code that generates a plot
plot_include_source = True
plot_formats = ["png"]
# Sphinx project configuration
templates_path = ["_templates"]
exclude_patterns = ["_build", "**.ipynb_checkpoints", "**.ipynb", "**.md5"]
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8-sig"
master_doc = "index"
# General information about the project
year = datetime.date.today().year
project = "PyLops"
copyright = "{}, PyLops Development Team".format(year)
# Version
version = __version__
# development builds are labelled "dev" rather than a local version tag
if len(version.split("+")) > 1 or version == "unknown":
    version = "dev"
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |year| replace:: {year}
""".format(
    year=year
)
html_static_path = ["_static"]
html_last_updated_fmt = "%b %d, %Y"
html_title = "PyLops"
html_short_title = "PyLops"
html_logo = "_static/pylops.png"
html_favicon = "_static/favicon.ico"
html_extra_path = []
pygments_style = "default"
add_function_parentheses = False
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
# Theme config
html_theme = "pydata_sphinx_theme"
html_theme_options = {
    # NOTE(review): "logo_only"/"display_version" are sphinx_rtd_theme
    # options; confirm pydata_sphinx_theme honours them.
    "logo_only": True,
    "display_version": True,
    "logo": {
        "image_light": "pylops_b.png",
        "image_dark": "pylops.png",
    }
}
html_css_files = [
    'css/custom.css',
]
html_context = {
    "menu_links_name": "Repository",
    "menu_links": [
        (
            '<i class="fa fa-github fa-fw"></i> Source Code',
            "https://github.com/PyLops/pylops",
        ),
        (
            '<i class="fa fa-users fa-fw"></i> Contributing',
            "https://github.com/PyLops/pylops/blob/master/CONTRIBUTING.md",
        ),
    ],
    # Custom variables to enable "Improve this page"" and "Download notebook"
    # links
    "doc_path": "docs/source",
    "galleries": sphinx_gallery_conf["gallery_dirs"],
    "gallery_dir": dict(
        zip(sphinx_gallery_conf["gallery_dirs"], sphinx_gallery_conf["examples_dirs"])
    ),
    "github_project": "PyLops",
    "github_repo": "pylops",
    "github_version": "master",
}
# Load the custom CSS files (needs sphinx >= 1.6 for this to work)
def setup(app):
    """Sphinx extension entry point: register the custom stylesheet."""
    app.add_css_file("style.css")
pylops | pylops-master/pytests/test_torchoperator.py | import numpy as np
import pytest
import torch
from numpy.testing import assert_array_equal
from pylops import MatrixMult, TorchOperator
par1 = {"ny": 11, "nx": 11, "dtype": np.float32} # square
par2 = {"ny": 21, "nx": 11, "dtype": np.float32} # overdetermined
np.random.seed(0)
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator(par):
"""Apply forward and gradient. As for linear operators the gradient
must equal the adjoint of operator applied to the same vector, the two
results are also checked to be the same.
"""
Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])))
Top = TorchOperator(Dop, batch=False)
x = np.random.normal(0.0, 1.0, par["nx"])
xt = torch.from_numpy(x).view(-1)
xt.requires_grad = True
v = torch.randn(par["ny"])
# pylops operator
y = Dop * x
xadj = Dop.H * v
# torch operator
yt = Top.apply(xt)
yt.backward(v, retain_graph=True)
assert_array_equal(y, yt.detach().cpu().numpy())
assert_array_equal(xadj, xt.grad.cpu().numpy())
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator_batch(par):
    """Apply forward for input with multiple samples (= batch) and flattened arrays"""
    Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])))
    Top = TorchOperator(Dop, batch=True)
    # batch of 4 flattened samples
    x = np.random.normal(0.0, 1.0, (4, par["nx"]))
    xt = torch.from_numpy(x)
    xt.requires_grad = True
    # reference: matmat applies the operator column-wise, hence the transposes
    y = Dop.matmat(x.T).T
    yt = Top.apply(xt)
    assert_array_equal(y, yt.detach().cpu().numpy())
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator_batch_nd(par):
    """Apply forward for input with multiple samples (= batch) and nd-arrays"""
    Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])), otherdims=(2,))
    Top = TorchOperator(Dop, batch=True, flatten=False)
    # batch of 4 samples, each an (nx, 2) array (flatten=False keeps the shape)
    x = np.random.normal(0.0, 1.0, (4, par["nx"], 2))
    xt = torch.from_numpy(x)
    xt.requires_grad = True
    # reference: move the batch axis last for the operator, then back
    y = (Dop @ x.transpose(1, 2, 0)).transpose(2, 0, 1)
    yt = Top.apply(xt)
    assert_array_equal(y, yt.detach().cpu().numpy())
| 2,110 | 29.157143 | 88 | py |
pylops | pylops-master/pylops/_torchoperator.py | import logging
from pylops.utils import deps
if deps.torch_enabled:
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
if deps.cupy_enabled:
import cupy as cp
class _TorchOperator(torch.autograd.Function):
"""Wrapper class for PyLops operators into Torch functions"""
@staticmethod
def forward(ctx, x, forw, adj, device, devicetorch):
ctx.forw = forw
ctx.adj = adj
ctx.device = device
ctx.devicetorch = devicetorch
# check if data is moved to cpu and warn user
if ctx.device == "cpu" and ctx.devicetorch != "cpu":
logging.warning(
"pylops operator will be applied on the cpu "
"whilst the input torch vector is on "
"%s, this may lead to poor performance" % ctx.devicetorch
)
# prepare input
if ctx.device == "cpu":
# bring x to cpu and numpy
x = x.cpu().detach().numpy()
else:
# pass x to cupy using DLPack
x = cp.fromDlpack(to_dlpack(x))
# apply forward operator
y = ctx.forw(x)
# prepare output
if ctx.device == "cpu":
# move y to torch and device
y = torch.from_numpy(y).to(ctx.devicetorch)
else:
# move y to torch and device
y = from_dlpack(y.toDlpack())
return y
@staticmethod
def backward(ctx, y):
# prepare input
if ctx.device == "cpu":
y = y.cpu().detach().numpy()
else:
# pass x to cupy using DLPack
y = cp.fromDlpack(to_dlpack(y))
# apply adjoint operator
x = ctx.adj(y)
# prepare output
if ctx.device == "cpu":
x = torch.from_numpy(x).to(ctx.devicetorch)
else:
x = from_dlpack(x.toDlpack())
return x, None, None, None, None, None
| 1,923 | 26.884058 | 73 | py |
pylops | pylops-master/pylops/__init__.py | """
PyLops
======
Linear operators and inverse problems are at the core of many of the most used
algorithms in signal processing, image processing, and remote sensing.
When dealing with small-scale problems, the Python numerical scientific
libraries `numpy <http://www.numpy.org>`_
and `scipy <http://www.scipy.org/scipylib/index.html>`_ make it possible to
perform most of the underlying matrix operations (e.g., computation of
matrix-vector products and manipulation of matrices) in a simple and
expressive way.
Many useful operators, however, do not lend themselves to an explicit matrix
representation when used to solve large-scale problems. PyLops operators,
on the other hand, still represent a matrix and can be treated in a similar
way, but do not rely on the explicit creation of a dense (or sparse) matrix
itself. Conversely, the forward and adjoint operators are represented by small
pieces of codes that mimic the effect of the matrix on a vector or
another matrix.
Luckily, many iterative methods (e.g. cg, lsqr) do not need to know the
individual entries of a matrix to solve a linear system. Such solvers only
require the computation of forward and adjoint matrix-vector products as
done for any of the PyLops operators.
PyLops provides
1. A general construct for creating Linear Operators
2. An extensive set of commonly used linear operators
3. A set of least-squares and sparse solvers for linear operators.
Available subpackages
---------------------
basicoperators
Basic Linear Operators
signalprocessing
Linear Operators for Signal Processing operations
avo
Linear Operators for Seismic Reservoir Characterization
waveeqprocessing
Linear Operators for Wave Equation oriented processing
optimization
Solvers
utils
Utility routines
"""
from .config import *
from .linearoperator import *
from .torchoperator import *
from .basicoperators import *
from . import (
avo,
basicoperators,
optimization,
signalprocessing,
utils,
waveeqprocessing,
)
from .avo.poststack import *
from .avo.prestack import *
from .optimization.basic import *
from .optimization.leastsquares import *
from .optimization.sparsity import *
from .utils.seismicevents import *
from .utils.tapers import *
from .utils.utils import *
from .utils.wavelets import *
try:
    from .version import version as __version__
except ImportError:
    # The version module is generated at install time; running from a raw
    # checkout falls back to a date-stamped placeholder instead of failing.
    from datetime import datetime

    __version__ = f"unknown-{datetime.today():%Y%m%d}"
| 2,654 | 32.607595 | 79 | py |
pylops | pylops-master/pylops/torchoperator.py | __all__ = [
"TorchOperator",
]
from typing import Optional
import numpy as np
from pylops import LinearOperator
from pylops.utils import deps
if deps.torch_enabled:
    from pylops._torchoperator import _TorchOperator
else:
    # Defer the hard failure to TorchOperator.__init__ so importing pylops
    # without torch installed still works.
    # BUGFIX: trailing spaces added so the concatenated message no longer
    # reads "...to usethe twoway module run "pip install torch" or"conda...".
    torch_message = (
        "Torch package not installed. In order to be able to use "
        'the twoway module run "pip install torch" or '
        '"conda install -c pytorch torch".'
    )
from pylops.utils.typing import TensorTypeLike
class TorchOperator:
    """Wrap a PyLops operator into a Torch function.

    This class can be used to wrap a pylops operator into a
    torch function. Doing so, users can mix native torch functions (e.g.
    basic linear algebra operations, neural networks, etc.) and pylops
    operators.

    Since all operators in PyLops are linear operators, a Torch function is
    simply implemented by using the forward operator for its forward pass
    and the adjoint operator for its backward (gradient) pass.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        PyLops operator
    batch : :obj:`bool`, optional
        Input has single sample (``False``) or batch of samples (``True``).
        If ``batch==False`` the input must be a 1-d Torch tensor or a tensor of
        size equal to ``Op.dims``; if ``batch==True`` the input must be a 2-d Torch
        tensor with batches along the first dimension or a tensor of size equal to
        ``[nbatch, *Op.dims]`` where ``nbatch`` is the size of the batch
    flatten : :obj:`bool`, optional
        Input is flattened along ``Op.dims`` (``True``) or not (``False``)
    device : :obj:`str`, optional
        Device to be used when applying operator (``cpu`` or ``gpu``)
    devicetorch : :obj:`str`, optional
        Device to be assigned the output of the operator to (any Torch-compatible device)

    """

    def __init__(
        self,
        Op: LinearOperator,
        batch: bool = False,
        # was Optional[bool]: None is never handled below, so plain bool
        flatten: bool = True,
        device: str = "cpu",
        devicetorch: str = "cpu",
    ) -> None:
        if not deps.torch_enabled:
            raise NotImplementedError(torch_message)
        self.device = device
        self.devicetorch = devicetorch
        self.dtype = np.dtype(Op.dtype)
        self.dims, self.dimsd = Op.dims, Op.dimsd
        self.name = Op.name
        # define transpose indices to bring batch to last dimension before applying
        # pylops forward and adjoint (this will call matmat and rmatmat)
        self.transpf = np.roll(np.arange(2 if flatten else len(self.dims) + 1), -1)
        self.transpb = np.roll(np.arange(2 if flatten else len(self.dims) + 1), 1)
        if not batch:
            # single sample: plain forward/adjoint applications
            self.matvec = lambda x: Op @ x
            self.rmatvec = lambda x: Op.H @ x
        else:
            # batched samples: move the batch axis last so pylops dispatches
            # to matmat/rmatmat, then move it back to the front
            self.matvec = lambda x: (Op @ x.transpose(self.transpf)).transpose(
                self.transpb
            )
            self.rmatvec = lambda x: (Op.H @ x.transpose(self.transpf)).transpose(
                self.transpb
            )
        self.Top = _TorchOperator.apply

    def __call__(self, x: TensorTypeLike) -> TensorTypeLike:
        return self.apply(x)

    def apply(self, x: TensorTypeLike) -> TensorTypeLike:
        """Apply forward pass to input vector

        Parameters
        ----------
        x : :obj:`torch.Tensor`
            Input array

        Returns
        -------
        y : :obj:`torch.Tensor`
            Output array resulting from the application of the operator to ``x``.

        """
        return self.Top(x, self.matvec, self.rmatvec, self.device, self.devicetorch)
| 3,580 | 33.76699 | 89 | py |
pylops | pylops-master/pylops/utils/typing.py | __all__ = [
"IntNDArray",
"NDArray",
"InputDimsLike",
"SamplingLike",
"ShapeLike",
"DTypeLike",
"TensorTypeLike",
]
from typing import Sequence, Tuple, Union
import numpy as np
import numpy.typing as npt
from pylops.utils.deps import torch_enabled
if torch_enabled:
import torch
IntNDArray = npt.NDArray[np.int_]  # integer-dtype numpy arrays
NDArray = npt.NDArray  # numpy arrays of any dtype
InputDimsLike = Union[Sequence[int], IntNDArray]  # operator dims specifier
SamplingLike = Union[Sequence[float], NDArray]  # sampling-step specifier
ShapeLike = Tuple[int, ...]
DTypeLike = npt.DTypeLike
if torch_enabled:
    TensorTypeLike = torch.Tensor
else:
    # NOTE(review): when torch is missing this makes annotations such as
    # ``x: TensorTypeLike`` evaluate to None; typing.Any may be a safer
    # fallback for static checkers — confirm before changing.
    TensorTypeLike = None
| 608 | 17.454545 | 48 | py |
pylops | pylops-master/pylops/utils/deps.py | __all__ = [
"cupy_enabled",
"cusignal_enabled",
"devito_enabled",
"numba_enabled",
"pyfftw_enabled",
"pywt_enabled",
"skfmm_enabled",
"spgl1_enabled",
"sympy_enabled",
"torch_enabled",
]
import os
from importlib import util
# check package availability
# cupy/cusignal can additionally be force-disabled by setting the
# CUPY_PYLOPS / CUSIGNAL_PYLOPS environment variables to 0.
cupy_enabled = (
    util.find_spec("cupy") is not None and int(os.getenv("CUPY_PYLOPS", 1)) == 1
)
cusignal_enabled = (
    util.find_spec("cusignal") is not None and int(os.getenv("CUSIGNAL_PYLOPS", 1)) == 1
)
# find_spec only checks that a package is discoverable; the *_import helpers
# below verify that the import actually succeeds.
devito_enabled = util.find_spec("devito") is not None
numba_enabled = util.find_spec("numba") is not None
pyfftw_enabled = util.find_spec("pyfftw") is not None
pywt_enabled = util.find_spec("pywt") is not None
skfmm_enabled = util.find_spec("skfmm") is not None
spgl1_enabled = util.find_spec("spgl1") is not None
sympy_enabled = util.find_spec("sympy") is not None
torch_enabled = util.find_spec("torch") is not None
def devito_import(message):
    """Return ``None`` if devito imports cleanly, else an error message."""
    if not devito_enabled:
        # package not discoverable at all
        return (
            f"Devito not available. "
            f"In order to be able to use "
            f'{message} run "pip install devito".'
        )
    try:
        import devito  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import devito (error:{e})."
    return None
def numba_import(message):
    """Return ``None`` if numba imports cleanly, else a fallback message."""
    if not numba_enabled:
        # package not discoverable at all
        return (
            "Numba not available, reverting to numpy. "
            "In order to be able to use "
            f"{message} run "
            f'"pip install numba" or '
            f'"conda install numba".'
        )
    try:
        import numba  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import numba (error:{e}), use numpy."
    return None
def pyfftw_import(message):
    """Return ``None`` if pyfftw imports cleanly, else a fallback message."""
    if not pyfftw_enabled:
        # package not discoverable at all
        return (
            "Pyfftw not available, reverting to numpy. "
            "In order to be able to use "
            f"{message} run "
            f'"pip install pyFFTW" or '
            f'"conda install -c conda-forge pyfftw".'
        )
    try:
        import pyfftw  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import pyfftw (error:{e}), use numpy."
    return None
def pywt_import(message):
    """Return ``None`` if pywt imports cleanly, else an error message."""
    if not pywt_enabled:
        # package not discoverable at all
        return (
            "Pywt not available. "
            "In order to be able to use "
            f"{message} run "
            f'"pip install PyWavelets" or '
            f'"conda install pywavelets".'
        )
    try:
        import pywt  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import pywt (error:{e})."
    return None
def skfmm_import(message):
    """Return ``None`` if skfmm imports cleanly, else an error message."""
    if not skfmm_enabled:
        # package not discoverable at all
        return (
            f"Skfmm package not installed. In order to be able to use "
            f"{message} run "
            f'"pip install scikit-fmm" or '
            f'"conda install -c conda-forge scikit-fmm".'
        )
    try:
        import skfmm  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import skfmm (error:{e})."
    return None
def spgl1_import(message):
    """Return ``None`` if spgl1 imports cleanly, else an error message."""
    if not spgl1_enabled:
        # package not discoverable at all
        return (
            f"Spgl1 package not installed. In order to be able to use "
            f"{message} run "
            f'"pip install spgl1".'
        )
    try:
        import spgl1  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import spgl1 (error:{e})."
    return None
def sympy_import(message):
    """Return ``None`` if sympy imports cleanly, else an error message."""
    if not sympy_enabled:
        # package not discoverable at all
        return (
            f"Sympy package not installed. In order to be able to use "
            f"{message} run "
            f'"pip install sympy".'
        )
    try:
        import sympy  # noqa: F401
    except Exception as e:
        # installed but broken (e.g. ABI mismatch)
        return f"Failed to import sympy (error:{e})."
    return None
| 4,378 | 26.540881 | 88 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/sotabench.py | import os
import numpy as np
import PIL
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import ImageNet
from efficientnet_pytorch import EfficientNet
from sotabencheval.image_classification import ImageNetEvaluator
from sotabencheval.utils import is_server
# BUGFIX: the original had a duplicated assignment (``DATA_ROOT = DATA_ROOT = ...``).
if is_server():
    DATA_ROOT = os.environ.get('IMAGENET_DIR', './imagenet')  # './.data/vision/imagenet'
else: # local settings
    DATA_ROOT = os.environ['IMAGENET_DIR']
    assert bool(DATA_ROOT), 'please set IMAGENET_DIR environment variable'
    print('Local data root: ', DATA_ROOT)

model_name = 'EfficientNet-B5'
model = EfficientNet.from_pretrained(model_name.lower())
image_size = EfficientNet.get_image_size(model_name.lower())

# Standard ImageNet eval preprocessing at the model's native resolution.
input_transform = transforms.Compose([
    transforms.Resize(image_size, PIL.Image.BICUBIC),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

test_dataset = ImageNet(
    DATA_ROOT,
    split="val",
    transform=input_transform,
    target_transform=None,
)

test_loader = DataLoader(
    test_dataset,
    batch_size=128,
    shuffle=False,
    num_workers=4,
    pin_memory=True,
)

model = model.cuda()
model.eval()

evaluator = ImageNetEvaluator(model_name=model_name,
                              paper_arxiv_id='1905.11946')


def get_img_id(image_name):
    """Strip directory and '.JPEG' extension from an ImageNet file path."""
    return image_name.split('/')[-1].replace('.JPEG', '')


with torch.no_grad():
    # ``images`` renamed from ``input`` to avoid shadowing the builtin.
    for i, (images, target) in enumerate(test_loader):
        images = images.to(device='cuda', non_blocking=True)
        # NOTE(review): targets are moved to GPU but never used — the
        # evaluator derives labels from the image ids; confirm intentional.
        target = target.to(device='cuda', non_blocking=True)
        output = model(images)
        # Recover file-name based ids for the current (non-shuffled) batch.
        image_ids = [get_img_id(img[0]) for img in test_loader.dataset.imgs[i*test_loader.batch_size:(i+1)*test_loader.batch_size]]
        evaluator.add(dict(zip(image_ids, list(output.cpu().numpy()))))
        # Stop early once the evaluator has cached results for this model.
        if evaluator.cache_exists:
            break

if not is_server():
    print("Results:")
    print(evaluator.get_results())

evaluator.save()
| 2,094 | 28.097222 | 131 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'efficientnet_pytorch'
DESCRIPTION = 'EfficientNet implemented in PyTorch.'
URL = 'https://github.com/lukemelas/EfficientNet-PyTorch'
EMAIL = 'lmelaskyriazi@college.harvard.edu'
AUTHOR = 'Luke'
REQUIRES_PYTHON = '>=3.5.0'
# Single source of truth; __version__.py is only consulted when this is empty.
VERSION = '0.7.1'
# What packages are required for this module to be executed?
REQUIRED = [
    'torch'
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    # Fall back to the short description when building without the README.
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print(f'\033[1m{s}\033[0m')

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Start from a clean slate so stale artifacts are never uploaded.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass

        self.status('Building Source and Wheel (universal) distribution…')
        os.system(f'{sys.executable} setup.py sdist bdist_wheel --universal')

        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system('git tag v{}'.format(about['__version__']))
        os.system('git push --tags')

        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    # py_modules=['model'], # If your package is a single module, use this instead of 'packages'
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='Apache',
    classifiers=[
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
    # $ setup.py publish support.
    # Registers ``python setup.py upload`` to run UploadCommand above.
    cmdclass={
        'upload': UploadCommand,
    },
)
| 3,543 | 27.580645 | 96 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/hubconf.py | from efficientnet_pytorch import EfficientNet as _EfficientNet
dependencies = ['torch']
def _create_model_fn(model_name):
    # Factory producing a torch.hub entry point bound to one model variant;
    # the closure captures ``model_name`` so each hub function is independent.
    def _model_fn(num_classes=1000, in_channels=3, pretrained='imagenet'):
        """Create Efficient Net.
        Described in detail here: https://arxiv.org/abs/1905.11946
        Args:
            num_classes (int, optional): Number of classes, default is 1000.
            in_channels (int, optional): Number of input channels, default
                is 3.
            pretrained (str, optional): One of [None, 'imagenet', 'advprop']
                If None, no pretrained model is loaded.
                If 'imagenet', models trained on imagenet dataset are loaded.
                If 'advprop', models trained using adversarial training called
                advprop are loaded. It is important to note that the
                preprocessing required for the advprop pretrained models is
                slightly different from normal ImageNet preprocessing
        """
        # hub entry points use underscores, EfficientNet names use dashes
        model_name_ = model_name.replace('_', '-')
        if pretrained is not None:
            # from_pretrained handles in_channels itself
            model = _EfficientNet.from_pretrained(
                model_name=model_name_,
                advprop=(pretrained == 'advprop'),
                num_classes=num_classes,
                in_channels=in_channels)
        else:
            model = _EfficientNet.from_name(
                model_name=model_name_,
                override_params={'num_classes': num_classes},
            )
            # from_name does not accept in_channels; adjust the stem here
            model._change_in_channels(in_channels)
        return model
    return _model_fn
# Register efficientnet_b0 ... efficientnet_b8 as module-level hub entry
# points (mutating locals() is valid here because this runs at module scope).
for model_name in ['efficientnet_b' + str(i) for i in range(9)]:
    locals()[model_name] = _create_model_fn(model_name)
| 1,709 | 37.863636 | 78 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/efficientnet_pytorch/utils.py | """utils.py - Helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
################################################################################
# Help functions for model architecture
################################################################################
# GlobalParams and BlockArgs: Two namedtuples
# Swish and MemoryEfficientSwish: Two implementations of the method
# round_filters and round_repeats:
# Functions to calculate params for scaling model width and depth ! ! !
# get_width_and_height_from_size and calculate_output_image_size
# drop_connect: A structural design
# get_same_padding_conv2d:
# Conv2dDynamicSamePadding
# Conv2dStaticSamePadding
# get_same_padding_maxPool2d:
# MaxPool2dDynamicSamePadding
# MaxPool2dStaticSamePadding
# It's an additional function, not used in EfficientNet,
# but can be used in other model (such as EfficientDet).
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
    'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate',
    'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon',
    'drop_connect_rate', 'depth_divisor', 'min_depth', 'include_top'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
    'num_repeat', 'kernel_size', 'stride', 'expand_ratio',
    'input_filters', 'output_filters', 'se_ratio', 'id_skip'])
# Set GlobalParams and BlockArgs's defaults
# (all-None defaults allow constructing partially-specified tuples)
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
# Swish activation function
if hasattr(nn, 'SiLU'):
    # PyTorch >= 1.7 ships Swish natively as nn.SiLU
    Swish = nn.SiLU
else:
    # For compatibility with old PyTorch versions
    class Swish(nn.Module):
        def forward(self, x):
            return x * torch.sigmoid(x)
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
    """Autograd-aware Swish (``x * sigmoid(x)``) that saves only the input.

    Recomputing the sigmoid in backward trades a little compute for a
    smaller autograd graph, lowering peak memory during training.
    """
    @staticmethod
    def forward(ctx, i):
        # keep the raw input so backward can rebuild sigmoid(i)
        ctx.save_for_backward(i)
        return i * torch.sigmoid(i)

    @staticmethod
    def backward(ctx, grad_output):
        (inp,) = ctx.saved_tensors
        sig = torch.sigmoid(inp)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sig * (1 + inp * (1 - sig)))
class MemoryEfficientSwish(nn.Module):
    """Swish activation backed by the memory-efficient autograd function."""

    def forward(self, x):
        out = SwishImplementation.apply(x)
        return out
def round_filters(filters, global_params):
    """Scale a filter count by the width multiplier and round to the divisor.

    Args:
        filters (int): Filters number to be calculated.
        global_params (namedtuple): Global params of the model
            (uses ``width_coefficient``, ``depth_divisor`` and ``min_depth``).

    Returns:
        int: The scaled and rounded filter count.
    """
    width_mult = global_params.width_coefficient
    if not width_mult:
        # no width scaling requested
        return filters
    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * width_mult
    # round to the nearest multiple of ``divisor`` (TF reference formula)
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # never round down by more than 10% of the scaled value
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Scale a block's repeat count by the depth multiplier (rounding up).

    Args:
        repeats (int): num_repeat to be calculated.
        global_params (namedtuple): Global params of the model
            (uses ``depth_coefficient``).

    Returns:
        int: The scaled repeat count.
    """
    depth_mult = global_params.depth_coefficient
    if not depth_mult:
        # no depth scaling requested
        return repeats
    # ceil, following the official TensorFlow implementation
    return int(math.ceil(depth_mult * repeats))
def drop_connect(inputs, p, training):
    """Randomly zero out whole samples of a batch (stochastic depth).

    Args:
        inputs (tensor: BCWH): Input of this structure.
        p (float: 0.0~1.0): Probability of drop connection.
        training (bool): The running mode.

    Returns:
        tensor: ``inputs`` untouched at eval time, otherwise a masked and
        rescaled copy.
    """
    assert 0 <= p <= 1, 'p must be in range of [0,1]'
    if not training:
        return inputs
    keep_prob = 1 - p
    n = inputs.shape[0]
    # per-sample Bernoulli(keep_prob) mask, broadcast over C/H/W
    noise = keep_prob + torch.rand([n, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    mask = torch.floor(noise)
    # scale by 1/keep_prob so the expected activation is unchanged
    return inputs / keep_prob * mask
def get_width_and_height_from_size(x):
    """Normalize a size specification to a height/width pair.

    Args:
        x (int, tuple or list): Data size; an int means a square size.

    Returns:
        size: ``(x, x)`` for an int input, otherwise ``x`` unchanged.

    Raises:
        TypeError: If ``x`` is neither an int nor a list/tuple.
    """
    if isinstance(x, int):
        return x, x
    if isinstance(x, (list, tuple)):
        return x
    # BUGFIX: fail with context instead of a bare ``raise TypeError()``.
    raise TypeError('size must be an int, list or tuple, got %r' % (x,))
def calculate_output_image_size(input_image_size, stride):
    """Output image size of a Conv2dSamePadding layer with the given stride.

    Necessary for static padding. Thanks to mannatsingh for pointing this out.

    Args:
        input_image_size (int, tuple or list): Size of input image.
        stride (int, tuple or list): Conv2d operation's stride.

    Returns:
        output_image_size: A list [H,W], or ``None`` for a ``None`` input.
    """
    if input_image_size is None:
        return None
    h, w = get_width_and_height_from_size(input_image_size)
    # only square strides are meaningful for 'SAME' padding here
    s = stride if isinstance(stride, int) else stride[0]
    return [int(math.ceil(h / s)), int(math.ceil(w / s))]
# Note:
# The following 'SamePadding' functions make output size equal ceil(input size/stride).
# Only when stride equals 1, can the output size be the same as input size.
# Don't be confused by their function names ! ! !
def get_same_padding_conv2d(image_size=None):
    """Select a 'SAME'-padding Conv2d implementation.

    A fixed ``image_size`` allows padding to be precomputed (static), which
    is required for ONNX export; otherwise padding is computed per call.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        Conv2dDynamicSamePadding or Conv2dStaticSamePadding.
    """
    if image_size is not None:
        return partial(Conv2dStaticSamePadding, image_size=image_size)
    return Conv2dDynamicSamePadding
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding computed per call.

    Output spatial size is ``ceil(input / stride)``; the padding needed to
    achieve it is derived from the actual input shape on every forward pass
    (p = (o-1)*s + (k-1)*d + 1 - i, clamped at zero).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # construct with zero padding; 'SAME' padding is applied manually
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

    def forward(self, x):
        ih, iw = x.size()[-2:]
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        # target output size under 'SAME' semantics
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * sh + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * sw + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h or pad_w:
            # split as evenly as possible; any extra pixel goes right/bottom
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding, precomputed.

    Padding amounts are derived once from a fixed ``image_size`` at
    construction time (required e.g. for ONNX export) using the same
    formula as :class:`Conv2dDynamicSamePadding`.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * sh + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * sw + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h or pad_w:
            self.static_padding = nn.ZeroPad2d(
                (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        else:
            # no padding needed: keep forward() uniform with a no-op module
            self.static_padding = nn.Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.conv2d(padded, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
def get_same_padding_maxPool2d(image_size=None):
    """Select a 'SAME'-padding MaxPool2d implementation.

    A fixed ``image_size`` allows padding to be precomputed (static), which
    is required for ONNX export; otherwise padding is computed per call.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        MaxPool2dDynamicSamePadding or MaxPool2dStaticSamePadding.
    """
    if image_size is not None:
        return partial(MaxPool2dStaticSamePadding, image_size=image_size)
    return MaxPool2dDynamicSamePadding
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
    """2D max pooling with TensorFlow-style 'SAME' padding computed per call.

    Output spatial size is ``ceil(input / stride)``; padding is derived from
    the actual input shape on every forward pass.
    """

    def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False):
        super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
        # normalize scalar hyper-parameters to per-axis pairs
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation

    def forward(self, x):
        ih, iw = x.size()[-2:]
        kh, kw = self.kernel_size
        sh, sw = self.stride
        # target output size under 'SAME' semantics
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * sh + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * sw + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h or pad_w:
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
    """2D max pooling with TensorFlow-style 'SAME' padding, precomputed.

    Padding amounts are derived once from a fixed ``image_size`` at
    construction time, using the same formula as the dynamic variant.
    """

    def __init__(self, kernel_size, stride, image_size=None, **kwargs):
        super().__init__(kernel_size, stride, **kwargs)
        # normalize scalar hyper-parameters to per-axis pairs
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation

        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.kernel_size
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * sh + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * sw + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h or pad_w:
            self.static_padding = nn.ZeroPad2d(
                (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        else:
            # no padding needed: keep forward() uniform with a no-op module
            self.static_padding = nn.Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.max_pool2d(padded, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
################################################################################
# Helper functions for loading model params
################################################################################
# BlockDecoder: A Class for encoding and decoding BlockArgs
# efficientnet_params: A function to query compound coefficient
# get_model_params and efficientnet:
# Functions to get BlockArgs and GlobalParams for efficientnet
# url_map and url_map_advprop: Dicts of url_map for pretrained weights
# load_pretrained_weights: A function to load pretrained weights
class BlockDecoder(object):
    """Block Decoder for readability,
    straight from the official TensorFlow repository.

    Converts between BlockArgs namedtuples and compact string notations
    such as 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """Get a block through a string notation of arguments.

        Args:
            block_string (str): A string notation of arguments.
                                Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.

        Returns:
            BlockArgs: The namedtuple defined at the top of this file.
        """
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            # split each token into a letter key and a numeric value
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride (only square strides are supported)
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))

        return BlockArgs(
            num_repeat=int(options['r']),
            kernel_size=int(options['k']),
            stride=[int(options['s'][0])],
            expand_ratio=int(options['e']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            se_ratio=float(options['se']) if 'se' in options else None,
            id_skip=('noskip' not in block_string))

    @staticmethod
    def _encode_block_string(block):
        """Encode a block to a string.

        Args:
            block (namedtuple): A BlockArgs type argument.

        Returns:
            block_string: A String form of BlockArgs.
        """
        # BUGFIX: the namedtuple field is ``stride`` (not ``strides``) and
        # _decode_block_string stores it as a one-element list, so the
        # second digit must fall back to the first.
        sh = block.stride[0]
        sw = block.stride[1] if len(block.stride) > 1 else sh
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (sh, sw),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        # BUGFIX: guard against se_ratio=None before the numeric comparison.
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """Decode a list of string notations to specify blocks inside the network.

        Args:
            string_list (list[str]): A list of strings, each string is a notation of block.

        Returns:
            blocks_args: A list of BlockArgs namedtuples of block args.
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """Encode a list of BlockArgs to a list of strings.

        Args:
            blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args.

        Returns:
            block_strings: A list of strings, each string is a notation of block.
        """
        return [BlockDecoder._encode_block_string(block) for block in blocks_args]
def efficientnet_params(model_name):
    """Map EfficientNet model name to parameter coefficients.

    Args:
        model_name (str): Model name to be queried.

    Returns:
        A (width_coefficient, depth_coefficient, resolution, dropout_rate)
        tuple for the requested variant.
    """
    # Compound-scaling table: width multiplier, depth multiplier,
    # input resolution, dropout rate.
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None,
                 dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000, include_top=True):
    """Create BlockArgs and GlobalParams for an efficientnet model.

    Args:
        width_coefficient (float)
        depth_coefficient (float)
        image_size (int)
        dropout_rate (float)
        drop_connect_rate (float)
        num_classes (int)
            Meaning as the name suggests.
        include_top (bool): Whether the final dropout + fc head is built.

    Returns:
        blocks_args, global_params.
    """
    # String-encoded architecture of the efficientnet-b0 baseline; the
    # EfficientNet class scales it per-variant during construction.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25',
        'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25',
        'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25',
        'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    decoded_blocks = BlockDecoder.decode(block_strings)

    params = GlobalParams(
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        image_size=image_size,
        dropout_rate=dropout_rate,
        num_classes=num_classes,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        drop_connect_rate=drop_connect_rate,
        depth_divisor=8,
        min_depth=None,
        include_top=include_top,
    )
    return decoded_blocks, params
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name.

    Args:
        model_name (str): Model's name.
        override_params (dict): A dict to modify global_params.

    Returns:
        blocks_args, global_params

    Raises:
        NotImplementedError: If model_name is not an efficientnet variant.
    """
    # Only the efficientnet family is supported here.
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: {}'.format(model_name))

    width, depth, resolution, dropout = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=width, depth_coefficient=depth,
        dropout_rate=dropout, image_size=resolution)

    if override_params:
        # ValueError will be raised here if override_params has fields
        # not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# Checkpoints trained with standard methods (AutoAugment preprocessing).
# See the paper "EfficientNet: Rethinking Model Scaling for Convolutional
# Neural Networks" for details.
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}

# Checkpoints trained with Adversarial Examples (AdvProp); these expect a
# different input normalization. See the paper "Adversarial Examples
# Improve Image Recognition".
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
# TODO: add the pretrained weights url map of 'efficientnet-l2'
def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False, verbose=True):
    """Loads pretrained weights from weights path or download using url.

    Args:
        model (Module): The whole model of efficientnet.
        model_name (str): Model name of efficientnet.
        weights_path (None or str):
            str: path to pretrained weights file on the local disk.
            None: use pretrained weights downloaded from the Internet.
        load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
        advprop (bool): Whether to load pretrained weights
                        trained with advprop (valid when weights_path is None).
        verbose (bool): Whether to print a confirmation message on success.

    Raises:
        AssertionError: If the checkpoint keys do not match the model
                        in the way the chosen load_fc mode expects.
    """
    if isinstance(weights_path, str):
        state_dict = torch.load(weights_path)
    else:
        # AutoAugment or Advprop (different preprocessing)
        url_map_ = url_map_advprop if advprop else url_map
        state_dict = model_zoo.load_url(url_map_[model_name])

    if load_fc:
        ret = model.load_state_dict(state_dict, strict=False)
        assert not ret.missing_keys, 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    else:
        # Drop the classifier head; the caller supplies its own fc layer.
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        ret = model.load_state_dict(state_dict, strict=False)
        assert set(ret.missing_keys) == set(
            ['_fc.weight', '_fc.bias']), 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    # BUGFIX: this message previously said 'Missing keys' while reporting
    # unexpected keys, which made failures confusing to diagnose.
    assert not ret.unexpected_keys, 'Unexpected keys when loading pretrained weights: {}'.format(ret.unexpected_keys)

    if verbose:
        print('Loaded pretrained weights for {}'.format(model_name))
| 24,957 | 39.450567 | 130 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/efficientnet_pytorch/model.py | """model.py - Model and module class for EfficientNet.
They are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import torch
from torch import nn
from torch.nn import functional as F
from .utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
calculate_output_image_size
)
# Model names accepted by EfficientNet.from_name / from_pretrained.
VALID_MODELS = (
    'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
    'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
    'efficientnet-b8',
    # Support the construction of 'efficientnet-l2' without pretrained weights
    'efficientnet-l2'
)
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck Block.

    Structure: (optional) 1x1 expansion conv -> depthwise conv ->
    (optional) squeeze-and-excitation -> 1x1 projection conv, with an
    identity skip (and drop-connect) when shapes allow.

    Args:
        block_args (namedtuple): BlockArgs, defined in utils.py.
        global_params (namedtuple): GlobalParam, defined in utils.py.
        image_size (tuple or list): [image_height, image_width].

    References:
        [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
        [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
        [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
    """

    def __init__(self, block_args, global_params, image_size=None):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
        self._bn_eps = global_params.batch_norm_epsilon
        # Squeeze-and-excitation is enabled only for a valid ratio in (0, 1].
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # whether to use skip connection and drop connect

        # Expansion phase (Inverted Bottleneck): 1x1 conv widens the channels.
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        if self._block_args.expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        image_size = calculate_output_image_size(image_size, s)

        # Squeeze and Excitation layer, if desired; operates on a 1x1 pooled map.
        if self.has_se:
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Pointwise convolution phase: 1x1 conv projects back down to output_filters.
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """MBConvBlock's forward function.

        Args:
            inputs (tensor): Input tensor.
            drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).

        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)

        x = self._depthwise_conv(x)
        x = self._bn1(x)
        x = self._swish(x)

        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            # Channel-wise gating of the depthwise output.
            x = torch.sigmoid(x_squeezed) * x

        # Pointwise Convolution (no activation after projection, per MobileNetV2)
        x = self._project_conv(x)
        x = self._bn2(x)

        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        # NOTE(review): `stride` may be a one-element list (e.g. [1]) for the first
        # block of a stage (see BlockDecoder._decode_block_string), in which case
        # `stride == 1` is False and the skip path is not taken even though spatial
        # size is preserved — confirm this matches the intended behavior.
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            # The combination of skip connection and drop connect brings about stochastic depth.
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).

        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
    """EfficientNet model.
    Most easily loaded with the .from_name or .from_pretrained methods.

    Args:
        blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
        global_params (namedtuple): A set of GlobalParams shared between blocks.

    References:
        [1] https://arxiv.org/abs/1905.11946 (EfficientNet)

    Example:
        >>> import torch
        >>> from efficientnet.model import EfficientNet
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> model = EfficientNet.from_pretrained('efficientnet-b0')
        >>> model.eval()
        >>> outputs = model(inputs)
    """

    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Get stem static or dynamic convolution depending on image size
        image_size = global_params.image_size
        Conv2d = get_same_padding_conv2d(image_size=image_size)

        # Stem: 3x3 stride-2 conv halves the spatial resolution.
        in_channels = 3 # rgb
        out_channels = round_filters(32, self._global_params) # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        image_size = calculate_output_image_size(image_size, 2)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
            image_size = calculate_output_image_size(image_size, block_args.stride)
            if block_args.num_repeat > 1: # modify block_args to keep same output size
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
                # image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1

        # Head: 1x1 conv expands to the final feature width.
        in_channels = block_args.output_filters # output of final block
        out_channels = round_filters(1280, self._global_params)
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Final linear layer (only built when include_top is set).
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        if self._global_params.include_top:
            self._dropout = nn.Dropout(self._global_params.dropout_rate)
            self._fc = nn.Linear(out_channels, self._global_params.num_classes)

        # set activation to memory efficient swish by default
        self._swish = MemoryEfficientSwish()

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).

        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
        for block in self._blocks:
            block.set_swish(memory_efficient)

    def extract_endpoints(self, inputs):
        """Use convolution layer to extract features
        from reduction levels i in [1, 2, 3, 4, 5].

        Args:
            inputs (tensor): Input tensor.

        Returns:
            Dictionary of last intermediate features
            with reduction levels i in [1, 2, 3, 4, 5].

        Example:
            >>> import torch
            >>> from efficientnet.model import EfficientNet
            >>> inputs = torch.rand(1, 3, 224, 224)
            >>> model = EfficientNet.from_pretrained('efficientnet-b0')
            >>> endpoints = model.extract_endpoints(inputs)
            >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])
            >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])
            >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])
            >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])
            >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 320, 7, 7])
            >>> print(endpoints['reduction_6'].shape) # torch.Size([1, 1280, 7, 7])
        """
        endpoints = dict()

        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        prev_x = x

        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)
            # A shrink in spatial size marks a new reduction level; record the
            # feature map from just before the shrink.
            if prev_x.size(2) > x.size(2):
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
            elif idx == len(self._blocks) - 1:
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
            prev_x = x

        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        endpoints['reduction_{}'.format(len(endpoints) + 1)] = x

        return endpoints

    def extract_features(self, inputs):
        """use convolution layer to extract feature .

        Args:
            inputs (tensor): Input tensor.

        Returns:
            Output of the final convolution
            layer in the efficientnet model.
        """
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))

        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                # Linearly increasing drop-connect depth-wise (stochastic depth).
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)

        # Head
        x = self._swish(self._bn1(self._conv_head(x)))

        return x

    def forward(self, inputs):
        """EfficientNet's forward function.
        Calls extract_features to extract features, applies final linear layer, and returns logits.

        Args:
            inputs (tensor): Input tensor.

        Returns:
            Output of this model after processing.
        """
        # Convolution layers
        x = self.extract_features(inputs)
        # Pooling and final linear layer (skipped without the top head,
        # in which case pooled features are returned).
        x = self._avg_pooling(x)
        if self._global_params.include_top:
            x = x.flatten(start_dim=1)
            x = self._dropout(x)
            x = self._fc(x)
        return x

    @classmethod
    def from_name(cls, model_name, in_channels=3, **override_params):
        """Create an efficientnet model according to name.

        Args:
            model_name (str): Name for efficientnet.
            in_channels (int): Input data's channel number.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'num_classes', 'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'

        Returns:
            An efficientnet model.
        """
        cls._check_model_name_is_valid(model_name)
        blocks_args, global_params = get_model_params(model_name, override_params)
        model = cls(blocks_args, global_params)
        model._change_in_channels(in_channels)
        return model

    @classmethod
    def from_pretrained(cls, model_name, weights_path=None, advprop=False,
                        in_channels=3, num_classes=1000, **override_params):
        """Create an efficientnet model according to name.

        Args:
            model_name (str): Name for efficientnet.
            weights_path (None or str):
                str: path to pretrained weights file on the local disk.
                None: use pretrained weights downloaded from the Internet.
            advprop (bool):
                Whether to load pretrained weights
                trained with advprop (valid when weights_path is None).
            in_channels (int): Input data's channel number.
            num_classes (int):
                Number of categories for classification.
                It controls the output size for final linear layer.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'

        Returns:
            A pretrained efficientnet model.
        """
        model = cls.from_name(model_name, num_classes=num_classes, **override_params)
        # The fc head is only loaded when keeping the default 1000-class output.
        load_pretrained_weights(model, model_name, weights_path=weights_path,
                                load_fc=(num_classes == 1000), advprop=advprop)
        model._change_in_channels(in_channels)
        return model

    @classmethod
    def get_image_size(cls, model_name):
        """Get the input image size for a given efficientnet model.

        Args:
            model_name (str): Name for efficientnet.

        Returns:
            Input image size (resolution).
        """
        cls._check_model_name_is_valid(model_name)
        _, _, res, _ = efficientnet_params(model_name)
        return res

    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """Validates model name.

        Args:
            model_name (str): Name for efficientnet.

        Raises:
            ValueError: If model_name is not in VALID_MODELS.
        """
        if model_name not in VALID_MODELS:
            raise ValueError('model_name should be one of: ' + ', '.join(VALID_MODELS))

    def _change_in_channels(self, in_channels):
        """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.

        Args:
            in_channels (int): Input data's channel number.
        """
        if in_channels != 3:
            # Rebuild the stem conv with the requested channel count;
            # pretrained stem weights (if any) are discarded.
            Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
            out_channels = round_filters(32, self._global_params)
            self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
| 17,388 | 40.402381 | 107 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/examples/imagenet/main.py | """
Evaluate on ImageNet. Note that at the moment, training is not implemented (I am working on it).
That being said, evaluation is working.
"""
import argparse
import os
import random
import shutil
import time
import warnings
import PIL
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from efficientnet_pytorch import EfficientNet
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    help='model architecture (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--image_size', default=224, type=int,
                    help='image size')
parser.add_argument('--advprop', default=False, action='store_true',
                    help='use advprop or not')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')

# Best top-1 accuracy observed so far; updated as a module-level global
# by the training workers.
best_acc1 = 0
def main():
    """Parse CLI arguments and launch one training worker per process."""
    args = parser.parse_args()

    if args.seed is not None:
        _seed_run(args.seed)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    gpus_on_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single process: run the worker inline on the chosen (or default) GPU.
        main_worker(args.gpu, gpus_on_node, args)
        return

    # One process per GPU: the effective world size grows accordingly.
    # Use torch.multiprocessing.spawn to launch distributed processes; the
    # main_worker process function is the entry point for each of them.
    args.world_size = gpus_on_node * args.world_size
    mp.spawn(main_worker, nprocs=gpus_on_node, args=(gpus_on_node, args))


def _seed_run(seed):
    """Seed the RNGs for reproducibility (at a CUDNN-performance cost)."""
    random.seed(seed)
    torch.manual_seed(seed)
    cudnn.deterministic = True
    warnings.warn('You have chosen to seed training. '
                  'This will turn on the CUDNN deterministic setting, '
                  'which can slow down your training considerably! '
                  'You may see unexpected behavior when restarting '
                  'from checkpoints.')
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point.

    Builds the model (optionally distributed / data-parallel), restores a
    checkpoint if requested, sets up the ImageNet data loaders, then either
    evaluates once or runs the full training loop.

    Args:
        gpu (int or None): GPU index this worker should use (None = all/CPU).
        ngpus_per_node (int): Number of GPUs on this node.
        args: Parsed command-line arguments.
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)

    # create model
    if 'efficientnet' in args.arch:  # NEW
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch, advprop=args.advprop)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)

    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    if args.advprop:
        # AdvProp checkpoints expect inputs scaled to [-1, 1] rather than
        # the usual per-channel ImageNet normalization.
        normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

    if 'efficientnet' in args.arch:
        image_size = EfficientNet.get_image_size(args.arch)
    else:
        image_size = args.image_size

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_transforms = transforms.Compose([
        transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        normalize,
    ])
    print('Using image size', image_size)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_transforms),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        res = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(res, file=f)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Only the rank-0 process (per node) writes checkpoints.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch over `train_loader`, updating `model` in place.
    Tracks batch/data timing, loss and top-1/top-5 accuracy, and prints a
    progress line every `args.print_freq` batches.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss (per-batch stats weighted by batch size)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
def validate(val_loader, model, criterion, args):
    """Evaluate `model` on `val_loader` and return the average top-1 accuracy."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.print(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; mirror it to 'model_best.pth.tar' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.count = 0
        self.sum = 0
        self.avg = 0
        self.val = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        # e.g. fmt=':6.2f' -> 'Loss   1.23 (  0.98)'
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a one-line, tab-separated progress report for meters."""
    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
    def print(self, batch):
        """Print the prefix, the batch counter, and every meter."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))
    def _get_batch_fmtstr(self, num_batches):
        """Return e.g. '[{:3d}/100]' so counters right-align to the total's width."""
        width = len(str(num_batches))
        fmt = '{:%dd}' % width
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate: args.lr divided by 10 every 30 epochs."""
    decayed = args.lr * 0.1 ** (epoch // 30)
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (percent) of `output` logits for each k in `topk`.
    Returns a list of 1-element tensors, one per requested k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk highest-scoring classes per sample, one row per rank.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch_size)
            for k in topk
        ]
if __name__ == '__main__':
    # Script entry point: delegate to main(), defined earlier in this file.
    main()
| 17,107 | 37.531532 | 96 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tests/test_model.py | from collections import OrderedDict
import pytest
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
# -- fixtures -------------------------------------------------------------------------------------
@pytest.fixture(scope='module', params=list(range(4)))
def model(request):
    """Parametrized fixture yielding model names 'efficientnet-b0'..'efficientnet-b3'."""
    return f'efficientnet-b{request.param}'
@pytest.fixture(scope='module', params=[True, False])
def pretrained(request):
    """Boolean fixture: whether the network is built with pretrained weights."""
    return request.param
@pytest.fixture(scope='function')
def net(model, pretrained):
    """Build a fresh EfficientNet for each test, pretrained or randomly initialized."""
    if pretrained:
        return EfficientNet.from_pretrained(model)
    return EfficientNet.from_name(model)
# -- tests ----------------------------------------------------------------------------------------
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_forward(net, img_size):
    """A forward pass on a zero image must run and produce no NaNs."""
    batch = torch.zeros(1, 3, img_size, img_size)
    out = net(batch)
    assert not torch.isnan(out).any()
def test_dropout_training(net):
    """Dropout must report training mode after net.train()."""
    net.train()
    assert net._dropout.training
def test_dropout_eval(net):
    """Dropout must report eval mode after net.eval()."""
    net.eval()
    assert not net._dropout.training
def test_dropout_update(net):
    """Dropout mode must track repeated train()/eval() toggles on the parent module."""
    for _ in range(2):
        net.train()
        assert net._dropout.training
        net.eval()
        assert not net._dropout.training
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_modify_dropout(net, img_size):
    """Test ability to modify dropout and fc modules of network"""
    # Swap the single dropout layer for a small BN/Dropout/Linear/ReLU head
    # whose input width matches the backbone's final BN (_bn1) features.
    dropout = nn.Sequential(OrderedDict([
        ('_bn2', nn.BatchNorm1d(net._bn1.num_features)),
        ('_drop1', nn.Dropout(p=net._global_params.dropout_rate)),
        ('_linear1', nn.Linear(net._bn1.num_features, 512)),
        ('_relu', nn.ReLU()),
        ('_bn3', nn.BatchNorm1d(512)),
        ('_drop2', nn.Dropout(p=net._global_params.dropout_rate / 2))
    ]))
    fc = nn.Linear(512, net._global_params.num_classes)
    net._dropout = dropout
    net._fc = fc
    # Batch size 2 here (presumably because BatchNorm1d needs >1 sample in
    # training mode) -- TODO confirm.
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_modify_pool(net, img_size):
    """Test ability to modify pooling module of network"""
    # Custom pooling that concatenates adaptive-avg and adaptive-max outputs
    # along the channel axis, doubling the feature width.
    class AdaptiveMaxAvgPool(nn.Module):
        def __init__(self):
            super().__init__()
            self.ada_avgpool = nn.AdaptiveAvgPool2d(1)
            self.ada_maxpool = nn.AdaptiveMaxPool2d(1)
        def forward(self, x):
            avg_x = self.ada_avgpool(x)
            max_x = self.ada_maxpool(x)
            x = torch.cat((avg_x, max_x), dim=1)
            return x
    avg_pooling = AdaptiveMaxAvgPool()
    # fc input is doubled to match the concatenated avg+max features.
    fc = nn.Linear(net._fc.in_features * 2, net._global_params.num_classes)
    net._avg_pooling = avg_pooling
    net._fc = fc
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_extract_endpoints(net, img_size):
    """Test `.extract_endpoints()` doesn't throw an error"""
    data = torch.zeros((1, 3, img_size, img_size))
    endpoints = net.extract_endpoints(data)
    # Every endpoint must be finite (no NaNs propagated through the backbone).
    assert not torch.isnan(endpoints['reduction_1']).any()
    assert not torch.isnan(endpoints['reduction_2']).any()
    assert not torch.isnan(endpoints['reduction_3']).any()
    assert not torch.isnan(endpoints['reduction_4']).any()
    assert not torch.isnan(endpoints['reduction_5']).any()
    # reduction_i downsamples the input spatial size by a factor of 2**i.
    assert endpoints['reduction_1'].size(2) == img_size // 2
    assert endpoints['reduction_2'].size(2) == img_size // 4
    assert endpoints['reduction_3'].size(2) == img_size // 8
    assert endpoints['reduction_4'].size(2) == img_size // 16
    assert endpoints['reduction_5'].size(2) == img_size // 32
| 4,122 | 31.984 | 99 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/load_tf_weights_tf1.py | import numpy as np
import tensorflow as tf
import torch
def load_param(checkpoint_file, conversion_table, model_name):
    """
    Load parameters according to conversion_table.
    Copies each TensorFlow checkpoint variable into the matching PyTorch
    tensor in place, transposing layouts where TF and PyTorch differ.
    Args:
        checkpoint_file (string): pretrained checkpoint model file in tensorflow
        conversion_table (dict): { pytorch tensor in a model : checkpoint variable name }
        model_name (string): prefix prepended to every checkpoint variable name
    """
    for pyt_param, tf_param_name in conversion_table.items():
        tf_param_name = str(model_name) + '/' + tf_param_name
        tf_param = tf.train.load_variable(checkpoint_file, tf_param_name)
        # Conv kernels: TF stores HWIO, PyTorch expects OIHW.
        if 'conv' in tf_param_name and 'kernel' in tf_param_name:
            tf_param = np.transpose(tf_param, (3, 2, 0, 1))
            # Depthwise kernels additionally need the first two axes swapped.
            if 'depthwise' in tf_param_name:
                tf_param = np.transpose(tf_param, (1, 0, 2, 3))
        elif tf_param_name.endswith('kernel'): # for weight(kernel), we should do transpose
            tf_param = np.transpose(tf_param)
        assert pyt_param.size() == tf_param.shape, \
            'Dim Mismatch: %s vs %s ; %s' % (tuple(pyt_param.size()), tf_param.shape, tf_param_name)
        pyt_param.data = torch.from_numpy(tf_param)
def load_efficientnet(model, checkpoint_file, model_name):
    """
    Load PyTorch EfficientNet from TensorFlow checkpoint file.
    Builds a mapping {pytorch parameter tensor: TF checkpoint variable name}
    covering the stem, head, classifier and every MBConv block, then copies
    the checkpoint values into `model` in place via `load_param`.
    The original implementation spelled out the per-block tables by hand
    (including a redundant explicit table for block 0 that the loop rebuilt
    identically); this version derives them in one loop.
    Args:
        model: EfficientNet, target PyTorch model (modified in place).
        checkpoint_file (string): path of the TensorFlow checkpoint.
        model_name (string): variable-name prefix inside the checkpoint.
    Returns:
        dict: the full conversion table that was loaded.
    """
    # Weights outside the MBConv blocks (stem, head, classifier).
    conversion_table = {
        model._conv_stem.weight: 'stem/conv2d/kernel',
        model._bn0.bias: 'stem/tpu_batch_normalization/beta',
        model._bn0.weight: 'stem/tpu_batch_normalization/gamma',
        model._bn0.running_mean: 'stem/tpu_batch_normalization/moving_mean',
        model._bn0.running_var: 'stem/tpu_batch_normalization/moving_variance',
        model._conv_head.weight: 'head/conv2d/kernel',
        model._bn1.bias: 'head/tpu_batch_normalization/beta',
        model._bn1.weight: 'head/tpu_batch_normalization/gamma',
        model._bn1.running_mean: 'head/tpu_batch_normalization/moving_mean',
        model._bn1.running_var: 'head/tpu_batch_normalization/moving_variance',
        model._fc.bias: 'head/dense/bias',
        model._fc.weight: 'head/dense/kernel',
    }
    for idx, block in enumerate(model._blocks):
        prefix = 'blocks_%d/' % idx
        # Blocks without _expand_conv (expand_ratio == 1) shift the TF layer
        # numbering: project conv becomes conv2d, and there is no _bn0.
        has_expand = '_expand_conv.weight' in [n for n, _ in block.named_parameters()]
        if has_expand:
            conversion_table[block._expand_conv.weight] = prefix + 'conv2d/kernel'
            conversion_table[block._project_conv.weight] = prefix + 'conv2d_1/kernel'
            bn_layers = [block._bn0, block._bn1, block._bn2]
        else:
            conversion_table[block._project_conv.weight] = prefix + 'conv2d/kernel'
            bn_layers = [block._bn1, block._bn2]
        conversion_table[block._depthwise_conv.weight] = prefix + 'depthwise_conv2d/depthwise_kernel'
        conversion_table[block._se_reduce.bias] = prefix + 'se/conv2d/bias'
        conversion_table[block._se_reduce.weight] = prefix + 'se/conv2d/kernel'
        conversion_table[block._se_expand.bias] = prefix + 'se/conv2d_1/bias'
        conversion_table[block._se_expand.weight] = prefix + 'se/conv2d_1/kernel'
        # TF batch-norm layers are numbered '', '_1', '_2' in block order.
        for bn_idx, bn in enumerate(bn_layers):
            suffix = '' if bn_idx == 0 else '_%d' % bn_idx
            bn_prefix = prefix + 'tpu_batch_normalization' + suffix
            conversion_table[bn.bias] = bn_prefix + '/beta'
            conversion_table[bn.weight] = bn_prefix + '/gamma'
            conversion_table[bn.running_mean] = bn_prefix + '/moving_mean'
            conversion_table[bn.running_var] = bn_prefix + '/moving_variance'
    # Load TensorFlow parameters into PyTorch model.
    load_param(checkpoint_file, conversion_table, model_name)
    return conversion_table
def load_and_save_temporary_tensorflow_model(model_name, model_ckpt, example_img= '../../example/img.jpg'):
    """ Loads and saves a TensorFlow model.
    Restores `model_ckpt` into a freshly built eval graph and re-saves it
    under tmp/model.ckpt so its variables can be read back later.
    NOTE(review): relies on the module-global `eval_ckpt_main`, which is only
    imported inside the __main__ block below; calling this function from
    another module would raise NameError -- TODO confirm intended usage.
    """
    image_files = [example_img]
    eval_ckpt_driver = eval_ckpt_main.EvalCkptDriver(model_name)
    with tf.Graph().as_default(), tf.Session() as sess:
        images, labels = eval_ckpt_driver.build_dataset(image_files, [0] * len(image_files), False)
        # probs itself is unused; building the model populates the graph variables.
        probs = eval_ckpt_driver.build_model(images, is_training=False)
        sess.run(tf.global_variables_initializer())
        print(model_ckpt)
        eval_ckpt_driver.restore_model(sess, model_ckpt)
        tf.train.Saver().save(sess, 'tmp/model.ckpt')
if __name__ == '__main__':
    # CLI driver: convert a TF EfficientNet checkpoint into a PyTorch .pth file.
    import sys
    import argparse
    sys.path.append('original_tf')
    import eval_ckpt_main
    from efficientnet_pytorch import EfficientNet
    parser = argparse.ArgumentParser(
        description='Convert TF model to PyTorch model and save for easier future loading')
    parser.add_argument('--model_name', type=str, default='efficientnet-b0',
                        help='efficientnet-b{N}, where N is an integer 0 <= N <= 8')
    parser.add_argument('--tf_checkpoint', type=str, default='pretrained_tensorflow/efficientnet-b0/',
                        help='checkpoint file path')
    parser.add_argument('--output_file', type=str, default='pretrained_pytorch/efficientnet-b0.pth',
                        help='output PyTorch model file name')
    args = parser.parse_args()
    # Build model
    model = EfficientNet.from_name(args.model_name)
    # Load and save temporary TensorFlow file due to TF nuances
    print(args.tf_checkpoint)
    load_and_save_temporary_tensorflow_model(args.model_name, args.tf_checkpoint)
    # Load weights
    load_efficientnet(model, 'tmp/model.ckpt', model_name=args.model_name)
    print('Loaded TF checkpoint weights')
    # Save PyTorch file
    torch.save(model.state_dict(), args.output_file)
    print('Saved model to', args.output_file)
| 10,344 | 58.797688 | 126 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/load_tf_weights.py | import numpy as np
import tensorflow as tf
import torch
tf.compat.v1.disable_v2_behavior()
def load_param(checkpoint_file, conversion_table, model_name):
    """
    Load parameters according to conversion_table.
    Copies each TensorFlow checkpoint variable into the matching PyTorch
    tensor in place, transposing layouts where TF and PyTorch differ.
    Args:
        checkpoint_file (string): pretrained checkpoint model file in tensorflow
        conversion_table (dict): { pytorch tensor in a model : checkpoint variable name }
        model_name (string): prefix prepended to every checkpoint variable name
    """
    for pyt_param, tf_param_name in conversion_table.items():
        tf_param_name = str(model_name) + '/' + tf_param_name
        tf_param = tf.train.load_variable(checkpoint_file, tf_param_name)
        # Conv kernels: TF stores HWIO, PyTorch expects OIHW.
        if 'conv' in tf_param_name and 'kernel' in tf_param_name:
            tf_param = np.transpose(tf_param, (3, 2, 0, 1))
            # Depthwise kernels additionally need the first two axes swapped.
            if 'depthwise' in tf_param_name:
                tf_param = np.transpose(tf_param, (1, 0, 2, 3))
        elif tf_param_name.endswith('kernel'): # for weight(kernel), we should do transpose
            tf_param = np.transpose(tf_param)
        assert pyt_param.size() == tf_param.shape, \
            'Dim Mismatch: %s vs %s ; %s' % (tuple(pyt_param.size()), tf_param.shape, tf_param_name)
        pyt_param.data = torch.from_numpy(tf_param)
def load_efficientnet(model, checkpoint_file, model_name):
    """
    Load PyTorch EfficientNet from TensorFlow checkpoint file.
    Builds a mapping {pytorch parameter tensor: TF checkpoint variable name}
    covering the stem, head, classifier and every MBConv block, then copies
    the checkpoint values into `model` in place via `load_param`.
    The original implementation spelled out the per-block tables by hand
    (including a redundant explicit table for block 0 that the loop rebuilt
    identically); this version derives them in one loop.
    Args:
        model: EfficientNet, target PyTorch model (modified in place).
        checkpoint_file (string): path of the TensorFlow checkpoint.
        model_name (string): variable-name prefix inside the checkpoint.
    Returns:
        dict: the full conversion table that was loaded.
    """
    # Weights outside the MBConv blocks (stem, head, classifier).
    conversion_table = {
        model._conv_stem.weight: 'stem/conv2d/kernel',
        model._bn0.bias: 'stem/tpu_batch_normalization/beta',
        model._bn0.weight: 'stem/tpu_batch_normalization/gamma',
        model._bn0.running_mean: 'stem/tpu_batch_normalization/moving_mean',
        model._bn0.running_var: 'stem/tpu_batch_normalization/moving_variance',
        model._conv_head.weight: 'head/conv2d/kernel',
        model._bn1.bias: 'head/tpu_batch_normalization/beta',
        model._bn1.weight: 'head/tpu_batch_normalization/gamma',
        model._bn1.running_mean: 'head/tpu_batch_normalization/moving_mean',
        model._bn1.running_var: 'head/tpu_batch_normalization/moving_variance',
        model._fc.bias: 'head/dense/bias',
        model._fc.weight: 'head/dense/kernel',
    }
    for idx, block in enumerate(model._blocks):
        prefix = 'blocks_%d/' % idx
        # Blocks without _expand_conv (expand_ratio == 1) shift the TF layer
        # numbering: project conv becomes conv2d, and there is no _bn0.
        has_expand = '_expand_conv.weight' in [n for n, _ in block.named_parameters()]
        if has_expand:
            conversion_table[block._expand_conv.weight] = prefix + 'conv2d/kernel'
            conversion_table[block._project_conv.weight] = prefix + 'conv2d_1/kernel'
            bn_layers = [block._bn0, block._bn1, block._bn2]
        else:
            conversion_table[block._project_conv.weight] = prefix + 'conv2d/kernel'
            bn_layers = [block._bn1, block._bn2]
        conversion_table[block._depthwise_conv.weight] = prefix + 'depthwise_conv2d/depthwise_kernel'
        conversion_table[block._se_reduce.bias] = prefix + 'se/conv2d/bias'
        conversion_table[block._se_reduce.weight] = prefix + 'se/conv2d/kernel'
        conversion_table[block._se_expand.bias] = prefix + 'se/conv2d_1/bias'
        conversion_table[block._se_expand.weight] = prefix + 'se/conv2d_1/kernel'
        # TF batch-norm layers are numbered '', '_1', '_2' in block order.
        for bn_idx, bn in enumerate(bn_layers):
            suffix = '' if bn_idx == 0 else '_%d' % bn_idx
            bn_prefix = prefix + 'tpu_batch_normalization' + suffix
            conversion_table[bn.bias] = bn_prefix + '/beta'
            conversion_table[bn.weight] = bn_prefix + '/gamma'
            conversion_table[bn.running_mean] = bn_prefix + '/moving_mean'
            conversion_table[bn.running_var] = bn_prefix + '/moving_variance'
    # Load TensorFlow parameters into PyTorch model.
    load_param(checkpoint_file, conversion_table, model_name)
    return conversion_table
def load_and_save_temporary_tensorflow_model(model_name, model_ckpt, example_img= '../../example/img.jpg'):
    """ Loads and saves a TensorFlow model.
    Restores `model_ckpt` into a freshly built eval graph (TF2's compat.v1
    graph mode) and re-saves it under tmp/model.ckpt for later reading.
    NOTE(review): relies on the module-global `eval_ckpt_main`, which is only
    imported inside the __main__ block below; calling this function from
    another module would raise NameError -- TODO confirm intended usage.
    """
    image_files = [example_img]
    eval_ckpt_driver = eval_ckpt_main.EvalCkptDriver(model_name)
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
        images, labels = eval_ckpt_driver.build_dataset(image_files, [0] * len(image_files), False)
        # probs itself is unused; building the model populates the graph variables.
        probs = eval_ckpt_driver.build_model(images, is_training=False)
        sess.run(tf.compat.v1.global_variables_initializer())
        print(model_ckpt)
        eval_ckpt_driver.restore_model(sess, model_ckpt)
        tf.compat.v1.train.Saver().save(sess, 'tmp/model.ckpt')
if __name__ == '__main__':
    # CLI driver: convert a TF EfficientNet checkpoint into a PyTorch .pth file.
    import sys
    import argparse
    sys.path.append('original_tf')
    import eval_ckpt_main
    from efficientnet_pytorch import EfficientNet
    parser = argparse.ArgumentParser(
        description='Convert TF model to PyTorch model and save for easier future loading')
    parser.add_argument('--model_name', type=str, default='efficientnet-b0',
                        help='efficientnet-b{N}, where N is an integer 0 <= N <= 8')
    parser.add_argument('--tf_checkpoint', type=str, default='pretrained_tensorflow/efficientnet-b0/',
                        help='checkpoint file path')
    parser.add_argument('--output_file', type=str, default='pretrained_pytorch/efficientnet-b0.pth',
                        help='output PyTorch model file name')
    args = parser.parse_args()
    # Build model
    model = EfficientNet.from_name(args.model_name)
    # Load and save temporary TensorFlow file due to TF nuances
    print(args.tf_checkpoint)
    load_and_save_temporary_tensorflow_model(args.model_name, args.tf_checkpoint)
    # Load weights
    load_efficientnet(model, 'tmp/model.ckpt', model_name=args.model_name)
    print('Loaded TF checkpoint weights')
    # Save PyTorch file
    torch.save(model.state_dict(), args.output_file)
    print('Saved model to', args.output_file)
| 10,410 | 58.491429 | 126 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/efficientnet_model.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
from absl import logging
import numpy as np
import six
from six.moves import xrange
import tensorflow.compat.v1 as tf
import utils
# from condconv import condconv_layers
# Architecture-wide hyperparameters shared by the whole model.
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format',
    'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor',
    'min_depth', 'survival_prob', 'relu_fn', 'batch_norm', 'use_se',
    'local_pooling', 'condconv_num_experts', 'clip_projection_output',
    'blocks_args'
])
# Default every field to None so partially specified configs are valid.
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
# Per-block arguments describing a single MBConv stage.
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'strides', 'se_ratio', 'conv_type', 'fused_conv',
    'super_pixel', 'condconv'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Initialization for convolutional kernels.
  Draws from a (non-truncated) normal distribution with standard deviation
  sqrt(2 / fan_out), where fan_out is computed from the kernel's spatial size
  and output channels. This differs from tf.variance_scaling_initializer,
  which uses a truncated normal with an uncorrected standard deviation.
  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused
  Returns:
    an initialization for the variable
  """
  del partition_info
  kh, kw, _, n_out = shape
  fan_out = int(kh * kw * n_out)
  stddev = np.sqrt(2.0 / fan_out)
  return tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=dtype)
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
  """Uniform initialization for dense kernels.
  Equal to tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
  distribution='uniform'), written out explicitly for clarity: samples
  uniformly from [-1/sqrt(fan_out), 1/sqrt(fan_out)].
  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused
  Returns:
    an initialization for the variable
  """
  del partition_info
  limit = 1.0 / np.sqrt(shape[1])
  return tf.random_uniform(shape, -limit, limit, dtype=dtype)
def superpixel_kernel_initializer(shape, dtype='float32', partition_info=None):
  """Initializes superpixel kernels.
  Builds a fixed 2x2 kernel that reproduces a space-to-depth transform as a
  convolution: input channel c at spatial offset (row, col) maps to output
  channel 4*c + 2*row + col. Making the layer trainable but initializing it
  this way keeps the model mathematically equivalent to space-to-depth at the
  start, while letting training move away from it.
  Args:
    shape: shape of variable (only the input-depth axis, shape[-2], is used)
    dtype: dtype of variable
    partition_info: unused
  Returns:
    an initialization for the variable
  """
  del partition_info
  # use input depth to make superpixel kernel.
  depth = shape[-2]
  filters = np.zeros([2, 2, depth, 4 * depth], dtype=dtype)
  for row in range(2):
    for col in range(2):
      for channel in range(depth):
        filters[row, col, channel, 4 * channel + 2 * row + col] = 1
  return filters
def round_filters(filters, global_params):
  """Scale `filters` by the width multiplier and round to a multiple of the divisor."""
  multiplier = global_params.width_coefficient
  if not multiplier:
    return filters
  divisor = global_params.depth_divisor
  min_depth = global_params.min_depth or divisor
  scaled = filters * multiplier
  new_filters = max(min_depth, int(scaled + divisor / 2) // divisor * divisor)
  # Make sure that round down does not go down by more than 10%.
  if new_filters < 0.9 * scaled:
    new_filters += divisor
  logging.info('round_filter input=%s output=%s', filters, new_filters)
  return int(new_filters)
def round_repeats(repeats, global_params):
  """Scale the block repeat count by the depth multiplier, rounding up."""
  multiplier = global_params.depth_coefficient
  return repeats if not multiplier else int(math.ceil(multiplier * repeats))
class MBConvBlock(tf.keras.layers.Layer):
  """A class of MBConv: Mobile Inverted Residual Bottleneck.

  Pipeline: (optional super-pixel conv) -> expansion 1x1 conv (or a
  single fused conv) -> depthwise conv -> squeeze-and-excitation ->
  projection 1x1 conv, with an optional identity skip connection.
  All sub-layers are created eagerly in _build(); which ones actually
  run is decided per call() from `block_args`.

  Attributes:
    endpoints: dict. A list of internal tensors.
  """
  def __init__(self, block_args, global_params):
    """Initializes a MBConv block.
    Args:
      block_args: BlockArgs, arguments to create a Block.
      global_params: GlobalParams, a set of global parameters.
    """
    super(MBConvBlock, self).__init__()
    self._block_args = block_args
    self._batch_norm_momentum = global_params.batch_norm_momentum
    self._batch_norm_epsilon = global_params.batch_norm_epsilon
    self._batch_norm = global_params.batch_norm
    self._condconv_num_experts = global_params.condconv_num_experts
    self._data_format = global_params.data_format
    # Channel axis / spatial dims depend on NCHW vs NHWC layout.
    if self._data_format == 'channels_first':
      self._channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      self._channel_axis = -1
      self._spatial_dims = [1, 2]
    self._relu_fn = global_params.relu_fn or tf.nn.swish
    # SE is enabled only when globally requested AND the ratio is in (0, 1].
    self._has_se = (
        global_params.use_se and self._block_args.se_ratio is not None and
        0 < self._block_args.se_ratio <= 1)
    self._clip_projection_output = global_params.clip_projection_output
    self.endpoints = None
    self.conv_cls = tf.layers.Conv2D
    self.depthwise_conv_cls = utils.DepthwiseConv2D
    if self._block_args.condconv:
      # CondConv variant: conv layers take per-example routing weights,
      # which are computed in call() and bound via functools.partial there.
      self.conv_cls = functools.partial(
          condconv_layers.CondConv2D, num_experts=self._condconv_num_experts)
      self.depthwise_conv_cls = functools.partial(
          condconv_layers.DepthwiseCondConv2D,
          num_experts=self._condconv_num_experts)
    # Builds the block according to arguments.
    self._build()
  def block_args(self):
    # Accessor for the BlockArgs this block was constructed with.
    return self._block_args
  def _build(self):
    """Builds block according to the arguments."""
    # Super-pixel (space-to-depth) front conv, only for super_pixel == 1.
    if self._block_args.super_pixel == 1:
      self._superpixel = tf.layers.Conv2D(
          self._block_args.input_filters,
          kernel_size=[2, 2],
          strides=[2, 2],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=False)
      self._bnsp = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon)
    if self._block_args.condconv:
      # Add the example-dependent routing function
      self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
          data_format=self._data_format)
      self._routing_fn = tf.layers.Dense(
          self._condconv_num_experts, activation=tf.nn.sigmoid)
    filters = self._block_args.input_filters * self._block_args.expand_ratio
    kernel_size = self._block_args.kernel_size
    # Fused expansion phase. Called if using fused convolutions.
    self._fused_conv = self.conv_cls(
        filters=filters,
        kernel_size=[kernel_size, kernel_size],
        strides=self._block_args.strides,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    # Expansion phase. Called if not using fused convolutions and expansion
    # phase is necessary.
    self._expand_conv = self.conv_cls(
        filters=filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn0 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
    # Depth-wise convolution phase. Called if not using fused convolutions.
    self._depthwise_conv = self.depthwise_conv_cls(
        kernel_size=[kernel_size, kernel_size],
        strides=self._block_args.strides,
        depthwise_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
    if self._has_se:
      # Reduced width is derived from the block INPUT filters, not the
      # expanded width (matches the reference EfficientNet implementation).
      num_reduced_filters = max(
          1, int(self._block_args.input_filters * self._block_args.se_ratio))
      # Squeeze and Excitation layer.
      self._se_reduce = tf.layers.Conv2D(
          num_reduced_filters,
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=True)
      self._se_expand = tf.layers.Conv2D(
          filters,
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=True)
    # Output phase.
    filters = self._block_args.output_filters
    self._project_conv = self.conv_cls(
        filters=filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn2 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
  def _call_se(self, input_tensor):
    """Call Squeeze and Excitation layer.
    Args:
      input_tensor: Tensor, a single input tensor for Squeeze/Excitation layer.
    Returns:
      A output tensor, which should have the same shape as input.
    """
    # Global average pool over the spatial dims, then reduce/expand 1x1
    # convs produce a per-channel gate applied via sigmoid.
    se_tensor = tf.reduce_mean(input_tensor, self._spatial_dims, keepdims=True)
    se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))
    logging.info('Built Squeeze and Excitation with tensor shape: %s',
                 (se_tensor.shape))
    return tf.sigmoid(se_tensor) * input_tensor
  def call(self, inputs, training=True, survival_prob=None):
    """Implementation of call().
    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 to 1, drop connect rate.
    Returns:
      A output tensor.
    """
    logging.info('Block input: %s shape: %s', inputs.name, inputs.shape)
    logging.info('Block input depth: %s output depth: %s',
                 self._block_args.input_filters,
                 self._block_args.output_filters)
    x = inputs
    fused_conv_fn = self._fused_conv
    expand_conv_fn = self._expand_conv
    depthwise_conv_fn = self._depthwise_conv
    project_conv_fn = self._project_conv
    if self._block_args.condconv:
      pooled_inputs = self._avg_pooling(inputs)
      routing_weights = self._routing_fn(pooled_inputs)
      # Capture routing weights as additional input to CondConv layers
      fused_conv_fn = functools.partial(
          self._fused_conv, routing_weights=routing_weights)
      expand_conv_fn = functools.partial(
          self._expand_conv, routing_weights=routing_weights)
      depthwise_conv_fn = functools.partial(
          self._depthwise_conv, routing_weights=routing_weights)
      project_conv_fn = functools.partial(
          self._project_conv, routing_weights=routing_weights)
    # creates conv 2x2 kernel
    if self._block_args.super_pixel == 1:
      with tf.variable_scope('super_pixel'):
        x = self._relu_fn(
            self._bnsp(self._superpixel(x), training=training))
      logging.info(
          'Block start with SuperPixel: %s shape: %s', x.name, x.shape)
    if self._block_args.fused_conv:
      # If use fused mbconv, skip expansion and use regular conv.
      x = self._relu_fn(self._bn1(fused_conv_fn(x), training=training))
      logging.info('Conv2D: %s shape: %s', x.name, x.shape)
    else:
      # Otherwise, first apply expansion and then apply depthwise conv.
      if self._block_args.expand_ratio != 1:
        x = self._relu_fn(self._bn0(expand_conv_fn(x), training=training))
        logging.info('Expand: %s shape: %s', x.name, x.shape)
      x = self._relu_fn(self._bn1(depthwise_conv_fn(x), training=training))
      logging.info('DWConv: %s shape: %s', x.name, x.shape)
    if self._has_se:
      with tf.variable_scope('se'):
        x = self._call_se(x)
    self.endpoints = {'expansion_output': x}
    x = self._bn2(project_conv_fn(x), training=training)
    # Add identity so that quantization-aware training can insert quantization
    # ops correctly.
    x = tf.identity(x)
    if self._clip_projection_output:
      x = tf.clip_by_value(x, -6, 6)
    if self._block_args.id_skip:
      # Skip connection only when spatial size and channel count are preserved.
      if all(
          s == 1 for s in self._block_args.strides
      ) and self._block_args.input_filters == self._block_args.output_filters:
        # Apply only if skip connection presents.
        if survival_prob:
          x = utils.drop_connect(x, training, survival_prob)
        x = tf.add(x, inputs)
    logging.info('Project: %s shape: %s', x.name, x.shape)
    return x
class MBConvBlockWithoutDepthwise(MBConvBlock):
  """MBConv-like block without depthwise convolution and squeeze-and-excite.

  Simplified variant: expansion is a 3x3 conv (when expand_ratio != 1)
  and projection is a strided 1x1 conv; no depthwise conv, no SE, no
  condconv/super-pixel handling.
  """
  def _build(self):
    """Builds block according to the arguments."""
    filters = self._block_args.input_filters * self._block_args.expand_ratio
    if self._block_args.expand_ratio != 1:
      # Expansion phase:
      self._expand_conv = tf.layers.Conv2D(
          filters,
          kernel_size=[3, 3],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          use_bias=False)
      self._bn0 = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon)
    # Output phase:
    filters = self._block_args.output_filters
    # Stride is applied here (projection), unlike MBConvBlock where the
    # depthwise conv carries the stride.
    self._project_conv = tf.layers.Conv2D(
        filters,
        kernel_size=[1, 1],
        strides=self._block_args.strides,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
  def call(self, inputs, training=True, survival_prob=None):
    """Implementation of call().
    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 to 1, drop connect rate.
    Returns:
      A output tensor.
    """
    logging.info('Block input: %s shape: %s', inputs.name, inputs.shape)
    if self._block_args.expand_ratio != 1:
      x = self._relu_fn(self._bn0(self._expand_conv(inputs), training=training))
    else:
      x = inputs
    logging.info('Expand: %s shape: %s', x.name, x.shape)
    self.endpoints = {'expansion_output': x}
    x = self._bn1(self._project_conv(x), training=training)
    # Add identity so that quantization-aware training can insert quantization
    # ops correctly.
    x = tf.identity(x)
    if self._clip_projection_output:
      x = tf.clip_by_value(x, -6, 6)
    if self._block_args.id_skip:
      # Skip connection only when spatial size and channel count are preserved.
      if all(
          s == 1 for s in self._block_args.strides
      ) and self._block_args.input_filters == self._block_args.output_filters:
        # Apply only if skip connection presents.
        if survival_prob:
          x = utils.drop_connect(x, training, survival_prob)
        x = tf.add(x, inputs)
    logging.info('Project: %s shape: %s', x.name, x.shape)
    return x
class Model(tf.keras.Model):
  """A class implements tf.keras.Model for MNAS-like model.

  Stem conv -> stack of MBConv blocks -> head conv -> global pool ->
  (optional dropout + fully-connected classifier). Intermediate tensors
  are exposed through `self.endpoints` after call().

  Reference: https://arxiv.org/abs/1807.11626
  """
  def __init__(self, blocks_args=None, global_params=None):
    """Initializes an `Model` instance.
    Args:
      blocks_args: A list of BlockArgs to construct block modules.
      global_params: GlobalParams, a set of global parameters.
    Raises:
      ValueError: when blocks_args is not specified as a list.
    """
    super(Model, self).__init__()
    if not isinstance(blocks_args, list):
      raise ValueError('blocks_args should be a list.')
    self._global_params = global_params
    self._blocks_args = blocks_args
    self._relu_fn = global_params.relu_fn or tf.nn.swish
    self._batch_norm = global_params.batch_norm
    self.endpoints = None
    self._build()
  def _get_conv_block(self, conv_type):
    # Maps BlockArgs.conv_type to the block class to instantiate.
    conv_block_map = {0: MBConvBlock, 1: MBConvBlockWithoutDepthwise}
    return conv_block_map[conv_type]
  def _build(self):
    """Builds a model."""
    self._blocks = []
    batch_norm_momentum = self._global_params.batch_norm_momentum
    batch_norm_epsilon = self._global_params.batch_norm_epsilon
    if self._global_params.data_format == 'channels_first':
      channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      channel_axis = -1
      self._spatial_dims = [1, 2]
    # Stem part.
    self._conv_stem = tf.layers.Conv2D(
        filters=round_filters(32, self._global_params),
        kernel_size=[3, 3],
        strides=[2, 2],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False)
    self._bn0 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon)
    # Builds blocks.
    for block_args in self._blocks_args:
      assert block_args.num_repeat > 0
      assert block_args.super_pixel in [0, 1, 2]
      # Update block input and output filters based on depth multiplier.
      input_filters = round_filters(block_args.input_filters,
                                    self._global_params)
      output_filters = round_filters(block_args.output_filters,
                                     self._global_params)
      kernel_size = block_args.kernel_size
      block_args = block_args._replace(
          input_filters=input_filters,
          output_filters=output_filters,
          num_repeat=round_repeats(block_args.num_repeat, self._global_params))
      # The first block needs to take care of stride and filter size increase.
      conv_block = self._get_conv_block(block_args.conv_type)
      if not block_args.super_pixel:  # no super_pixel at all
        self._blocks.append(conv_block(block_args, self._global_params))
      else:
        # if superpixel, adjust filters, kernels, and strides.
        depth_factor = int(4 / block_args.strides[0] / block_args.strides[1])
        block_args = block_args._replace(
            input_filters=block_args.input_filters * depth_factor,
            output_filters=block_args.output_filters * depth_factor,
            kernel_size=((block_args.kernel_size + 1) // 2 if depth_factor > 1
                         else block_args.kernel_size))
        # if the first block has stride-2 and super_pixel transformation
        if (block_args.strides[0] == 2 and block_args.strides[1] == 2):
          block_args = block_args._replace(strides=[1, 1])
          self._blocks.append(conv_block(block_args, self._global_params))
          block_args = block_args._replace(  # sp stops at stride-2
              super_pixel=0,
              input_filters=input_filters,
              output_filters=output_filters,
              kernel_size=kernel_size)
        elif block_args.super_pixel == 1:
          self._blocks.append(conv_block(block_args, self._global_params))
          block_args = block_args._replace(super_pixel=2)
        else:
          self._blocks.append(conv_block(block_args, self._global_params))
      if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
        # pylint: disable=protected-access
        block_args = block_args._replace(
            input_filters=block_args.output_filters, strides=[1, 1])
        # pylint: enable=protected-access
      # NOTE: `range` (not py2-only `xrange`) so this runs under Python 3.
      for _ in range(block_args.num_repeat - 1):
        self._blocks.append(conv_block(block_args, self._global_params))
    # Head part.
    self._conv_head = tf.layers.Conv2D(
        filters=round_filters(1280, self._global_params),
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon)
    self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
        data_format=self._global_params.data_format)
    if self._global_params.num_classes:
      self._fc = tf.layers.Dense(
          self._global_params.num_classes,
          kernel_initializer=dense_kernel_initializer)
    else:
      self._fc = None
    if self._global_params.dropout_rate > 0:
      self._dropout = tf.keras.layers.Dropout(self._global_params.dropout_rate)
    else:
      self._dropout = None
  def call(self,
           inputs,
           training=True,
           features_only=None,
           pooled_features_only=False):
    """Implementation of call().
    Args:
      inputs: input tensors.
      training: boolean, whether the model is constructed for training.
      features_only: build the base feature network only.
      pooled_features_only: build the base network for features extraction
        (after 1x1 conv layer and global pooling, but before dropout and fc
        head).
    Returns:
      output tensors.
    """
    outputs = None
    self.endpoints = {}
    reduction_idx = 0
    # Calls Stem layers
    with tf.variable_scope('stem'):
      outputs = self._relu_fn(
          self._bn0(self._conv_stem(inputs), training=training))
    logging.info('Built stem layers with output shape: %s', outputs.shape)
    self.endpoints['stem'] = outputs
    # Calls blocks.
    for idx, block in enumerate(self._blocks):
      is_reduction = False  # reduction flag for blocks after the stem layer
      # If the first block has super-pixel (space-to-depth) layer, then stem is
      # the first reduction point.
      if (block.block_args().super_pixel == 1 and idx == 0):
        reduction_idx += 1
        self.endpoints['reduction_%s' % reduction_idx] = outputs
      elif ((idx == len(self._blocks) - 1) or
            self._blocks[idx + 1].block_args().strides[0] > 1):
        is_reduction = True
        reduction_idx += 1
      with tf.variable_scope('blocks_%s' % idx):
        survival_prob = self._global_params.survival_prob
        if survival_prob:
          # Linearly decay survival prob with block depth (stochastic depth).
          drop_rate = 1.0 - survival_prob
          survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
          logging.info('block_%s survival_prob: %s', idx, survival_prob)
        outputs = block.call(
            outputs, training=training, survival_prob=survival_prob)
        self.endpoints['block_%s' % idx] = outputs
        if is_reduction:
          self.endpoints['reduction_%s' % reduction_idx] = outputs
        if block.endpoints:
          # Plain dict.items() (works on both py2 and py3; no six needed).
          for k, v in block.endpoints.items():
            self.endpoints['block_%s/%s' % (idx, k)] = v
            if is_reduction:
              self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v
    self.endpoints['features'] = outputs
    if not features_only:
      # Calls final layers and returns logits.
      with tf.variable_scope('head'):
        outputs = self._relu_fn(
            self._bn1(self._conv_head(outputs), training=training))
        self.endpoints['head_1x1'] = outputs
        if self._global_params.local_pooling:
          shape = outputs.get_shape().as_list()
          kernel_size = [
              1, shape[self._spatial_dims[0]], shape[self._spatial_dims[1]], 1]
          outputs = tf.nn.avg_pool(
              outputs, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
          self.endpoints['pooled_features'] = outputs
          if not pooled_features_only:
            if self._dropout:
              outputs = self._dropout(outputs, training=training)
            self.endpoints['global_pool'] = outputs
            if self._fc:
              outputs = tf.squeeze(outputs, self._spatial_dims)
              outputs = self._fc(outputs)
            self.endpoints['head'] = outputs
        else:
          outputs = self._avg_pooling(outputs)
          self.endpoints['pooled_features'] = outputs
          if not pooled_features_only:
            if self._dropout:
              outputs = self._dropout(outputs, training=training)
            self.endpoints['global_pool'] = outputs
            if self._fc:
              outputs = self._fc(outputs)
            self.endpoints['head'] = outputs
    return outputs
| 26,027 | 35.453782 | 80 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
def build_learning_rate(initial_lr,
                        global_step,
                        steps_per_epoch=None,
                        lr_decay_type='exponential',
                        decay_factor=0.97,
                        decay_epochs=2.4,
                        total_steps=None,
                        warmup_epochs=5):
  """Builds a learning-rate tensor with optional linear warmup.
  Args:
    initial_lr: float, peak learning rate (after warmup).
    global_step: scalar int tensor, current training step.
    steps_per_epoch: int, required for 'exponential' decay and warmup.
    lr_decay_type: one of 'exponential', 'cosine', 'constant'.
    decay_factor: float, multiplicative factor for 'exponential' decay.
    decay_epochs: float, epochs between decays for 'exponential'.
    total_steps: int, total training steps, required for 'cosine'.
    warmup_epochs: float, epochs of linear warmup; falsy disables warmup.
  Returns:
    A scalar tensor holding the learning rate for `global_step`.
  """
  if lr_decay_type == 'exponential':
    assert steps_per_epoch is not None
    lr = tf.train.exponential_decay(
        initial_lr,
        global_step,
        steps_per_epoch * decay_epochs,
        decay_factor,
        staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    lr = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
  elif lr_decay_type == 'constant':
    lr = initial_lr
  else:
    assert False, 'Unknown lr_decay_type : %s' % lr_decay_type
  if warmup_epochs:
    logging.info('Learning rate warmup_epochs: %d', warmup_epochs)
    # Ramp linearly from 0 to initial_lr over the warmup steps.
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    warmup_lr = (
        initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
            warmup_steps, tf.float32))
    lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
  return lr
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Builds the training optimizer.
  Args:
    learning_rate: float or scalar tensor, the learning rate.
    optimizer_name: one of 'sgd', 'momentum', 'rmsprop'.
    decay: float, RMSProp discounting factor.
    epsilon: float, RMSProp numerical-stability term.
    momentum: float, momentum for 'momentum'/'rmsprop'.
  Returns:
    A tf.train Optimizer. Any other name is a fatal error (absl
    logging.fatal aborts the process).
  """
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  if optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    return tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  if optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    return tf.train.RMSPropOptimizer(learning_rate, decay, momentum, epsilon)
  logging.fatal('Unknown optimizer: %s', optimizer_name)
class TpuBatchNormalization(tf.layers.BatchNormalization):
  # class TpuBatchNormalization(tf.layers.BatchNormalization):
  """Cross replica batch normalization.

  Overrides _moments so batch statistics are averaged across groups of
  TPU replicas via cross_replica_sum, instead of being computed per
  shard only.
  """
  def __init__(self, fused=False, **kwargs):
    # fused kernels bypass the overridden _moments, so they must be off.
    if fused in (True, None):
      raise ValueError('TpuBatchNormalization does not support fused=True.')
    super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
  def _cross_replica_average(self, t, num_shards_per_group):
    """Calculates the average value of input tensor across TPU replicas."""
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      # Partition shard ids into contiguous groups of num_shards_per_group.
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
    return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
        num_shards_per_group, t.dtype)
  def _moments(self, inputs, reduction_axes, keep_dims):
    """Compute the mean and variance: it overrides the original _moments."""
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)
    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
      num_shards_per_group = 1
    else:
      num_shards_per_group = max(8, num_shards // 8)
    logging.info('TpuBatchNormalization with num_shards_per_group %s',
                 num_shards_per_group)
    if num_shards_per_group > 1:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance)
class BatchNormalization(tf.layers.BatchNormalization):
  """Fixed default name of BatchNormalization to match TpuBatchNormalization."""
  # Same default variable-scope name as the TPU variant so checkpoints
  # trained with either class restore into the other.
  def __init__(self, name='tpu_batch_normalization', **kwargs):
    super(BatchNormalization, self).__init__(name=name, **kwargs)
def drop_connect(inputs, is_training, survival_prob):
  """Drop the entire conv with given survival probability.
  "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
  Args:
    inputs: 4-D tensor, one residual branch output per batch element.
    is_training: bool; at eval time inputs pass through unchanged.
    survival_prob: float in (0, 1], probability a sample is kept.
  Returns:
    `inputs` with whole samples randomly zeroed, rescaled so no extra
    compute is needed at test time.
  """
  if not is_training:
    return inputs
  # One Bernoulli(survival_prob) draw per batch element:
  # floor(survival_prob + U[0,1)) is 1 with probability survival_prob.
  batch_size = tf.shape(inputs)[0]
  keep_mask = tf.floor(
      survival_prob +
      tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype))
  # Unlike conventional way that multiply survival_prob at test time, here we
  # divide survival_prob at training time, such that no addition compute is
  # needed at test time.
  return tf.div(inputs, survival_prob) * keep_mask
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
  """Archive a checkpoint if the metric is better.

  Copies the checkpoint files into `<ckpt_dir>/archive` (replacing any
  previous archive) and records the new best objective, but only when
  `ckpt_objective` is at least as good as the previously saved best.

  Args:
    ckpt_eval: eval result to record alongside the archived checkpoint.
    ckpt_objective: float, the objective value of this checkpoint.
    ckpt_path: path prefix of the checkpoint files to archive.

  Returns:
    True if the checkpoint was archived, False otherwise.
  """
  ckpt_dir, ckpt_name = os.path.split(ckpt_path)
  saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
  saved_objective = float('-inf')
  if tf.gfile.Exists(saved_objective_path):
    with tf.gfile.GFile(saved_objective_path, 'r') as f:
      saved_objective = float(f.read())
  if saved_objective > ckpt_objective:
    logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
    return False
  filenames = tf.gfile.Glob(ckpt_path + '.*')
  if filenames is None:
    logging.info('No files to copy for checkpoint %s', ckpt_path)
    return False
  # Clear the old folder.
  dst_dir = os.path.join(ckpt_dir, 'archive')
  if tf.gfile.Exists(dst_dir):
    tf.gfile.DeleteRecursively(dst_dir)
  tf.gfile.MakeDirs(dst_dir)
  # Write checkpoints.
  for f in filenames:
    dest = os.path.join(dst_dir, os.path.basename(f))
    tf.gfile.Copy(f, dest, overwrite=True)
  # Write a 'checkpoint' state file so the archive is directly loadable.
  ckpt_state = tf.train.generate_checkpoint_state_proto(
      dst_dir,
      model_checkpoint_path=ckpt_name,
      all_model_checkpoint_paths=[ckpt_name])
  with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
    f.write(str(ckpt_state))
  with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
    f.write('%s' % ckpt_eval)
  # Update the best objective.
  with tf.gfile.GFile(saved_objective_path, 'w') as f:
    f.write('%f' % ckpt_objective)
  logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
  return True
def get_ema_vars():
  """Get all exponential moving average (ema) variables.
  Returns:
    A de-duplicated list containing the trainable variables, everything
    in the 'moving_vars' collection, and all batch-norm moving
    mean/variance variables.
  """
  candidates = tf.trainable_variables() + tf.get_collection('moving_vars')
  # We maintain mva for batch norm moving mean and variance as well.
  candidates += [
      v for v in tf.global_variables()
      if 'moving_mean' in v.name or 'moving_variance' in v.name
  ]
  return list(set(candidates))
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
  """Wrap keras DepthwiseConv2D to tf.layers."""
  # Mixing in tf.layers.Layer lets the Keras layer be used where the
  # TF1 tf.layers API is expected; no behavior is added.
  pass
class EvalCkptDriver(object):
  """A driver for running eval inference.

  Subclasses must implement build_model() and get_preprocess_fn().

  Attributes:
    model_name: str. Model name to eval.
    batch_size: int. Eval batch size.
    image_size: int. Input image size, determined by model name.
    num_classes: int. Number of classes, default to 1000 for ImageNet.
    include_background_label: whether to include extra background label.
  """
  def __init__(self,
               model_name,
               batch_size=1,
               image_size=224,
               num_classes=1000,
               include_background_label=False):
    """Initialize internal variables."""
    self.model_name = model_name
    self.batch_size = batch_size
    self.num_classes = num_classes
    self.include_background_label = include_background_label
    self.image_size = image_size
  def restore_model(self, sess, ckpt_dir, enable_ema=True, export_ckpt=None):
    """Restore variables from checkpoint dir.

    When `enable_ema` is set, EMA shadow values are restored in place of
    the raw variables. When `export_ckpt` is given, the restored model is
    re-saved to that path.
    """
    sess.run(tf.global_variables_initializer())
    checkpoint = tf.train.latest_checkpoint(ckpt_dir)
    if enable_ema:
      # decay=0.0 because we only use EMA machinery for name mapping here.
      ema = tf.train.ExponentialMovingAverage(decay=0.0)
      ema_vars = get_ema_vars()
      var_dict = ema.variables_to_restore(ema_vars)
      ema_assign_op = ema.apply(ema_vars)
    else:
      var_dict = get_ema_vars()
      ema_assign_op = None
    tf.train.get_or_create_global_step()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(var_dict, max_to_keep=1)
    saver.restore(sess, checkpoint)
    if export_ckpt:
      if ema_assign_op is not None:
        sess.run(ema_assign_op)
      saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
      saver.save(sess, export_ckpt)
  def build_model(self, features, is_training):
    """Build model with input features."""
    del features, is_training
    raise ValueError('Must be implemented by subclasses.')
  def get_preprocess_fn(self):
    """Return the per-image preprocessing function."""
    # Fixed typo in the original error message ('subclsses').
    raise ValueError('Must be implemented by subclasses.')
  def build_dataset(self, filenames, labels, is_training):
    """Build input dataset."""
    batch_drop_remainder = False
    if 'condconv' in self.model_name and not is_training:
      # CondConv layers can only be called with known batch dimension. Thus, we
      # must drop all remaining examples that do not make up one full batch.
      # To ensure all examples are evaluated, use a batch size that evenly
      # divides the number of files.
      batch_drop_remainder = True
      num_files = len(filenames)
      if num_files % self.batch_size != 0:
        tf.logging.warn('Remaining examples in last batch are not being '
                        'evaluated.')
    filenames = tf.constant(filenames)
    labels = tf.constant(labels)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    def _parse_function(filename, label):
      # Decode + preprocess a single image file.
      image_string = tf.read_file(filename)
      preprocess_fn = self.get_preprocess_fn()
      image_decoded = preprocess_fn(
          image_string, is_training, image_size=self.image_size)
      image = tf.cast(image_decoded, tf.float32)
      return image, label
    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(self.batch_size,
                            drop_remainder=batch_drop_remainder)
    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()
    return images, labels
  def run_inference(self,
                    ckpt_dir,
                    image_files,
                    labels,
                    enable_ema=True,
                    export_ckpt=None):
    """Build and run inference on the target images and labels."""
    label_offset = 1 if self.include_background_label else 0
    with tf.Graph().as_default(), tf.Session() as sess:
      images, labels = self.build_dataset(image_files, labels, False)
      probs = self.build_model(images, is_training=False)
      if isinstance(probs, tuple):
        probs = probs[0]
      self.restore_model(sess, ckpt_dir, enable_ema, export_ckpt)
      prediction_idx = []
      prediction_prob = []
      for _ in range(len(image_files) // self.batch_size):
        out_probs = sess.run(probs)
        idx = np.argsort(out_probs)[::-1]
        prediction_idx.append(idx[:5] - label_offset)
        prediction_prob.append([out_probs[pid] for pid in idx[:5]])
      # Return the top 5 predictions (idx and prob) for each image.
      return prediction_idx, prediction_prob
  def eval_example_images(self,
                          ckpt_dir,
                          image_files,
                          labels_map_file,
                          enable_ema=True,
                          export_ckpt=None):
    """Eval a list of example images.
    Args:
      ckpt_dir: str. Checkpoint directory path.
      image_files: List[str]. A list of image file paths.
      labels_map_file: str. The labels map file path.
      enable_ema: enable exponential moving average.
      export_ckpt: export ckpt folder.
    Returns:
      A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
      index and pred_prob is the top 5 prediction probability.
    """
    classes = json.loads(tf.gfile.Open(labels_map_file).read())
    pred_idx, pred_prob = self.run_inference(
        ckpt_dir, image_files, [0] * len(image_files), enable_ema, export_ckpt)
    for i in range(len(image_files)):
      print('predicted class for image {}: '.format(image_files[i]))
      for j, idx in enumerate(pred_idx[i]):
        print('  -> top_{} ({:4.2f}%): {}  '.format(j, pred_prob[i][j] * 100,
                                                    classes[str(idx)]))
    return pred_idx, pred_prob
  def eval_imagenet(self, ckpt_dir, imagenet_eval_glob,
                    imagenet_eval_label, num_images, enable_ema, export_ckpt):
    """Eval ImageNet images and report top1/top5 accuracy.
    Args:
      ckpt_dir: str. Checkpoint directory path.
      imagenet_eval_glob: str. File path glob for all eval images.
      imagenet_eval_label: str. File path for eval label.
      num_images: int. Number of images to eval: -1 means eval the whole
        dataset.
      enable_ema: enable exponential moving average.
      export_ckpt: export checkpoint folder.
    Returns:
      A tuple (top1, top5) for top1 and top5 accuracy.
    """
    imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
    imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
    if num_images < 0:
      num_images = len(imagenet_filenames)
    image_files = imagenet_filenames[:num_images]
    labels = imagenet_val_labels[:num_images]
    pred_idx, _ = self.run_inference(
        ckpt_dir, image_files, labels, enable_ema, export_ckpt)
    top1_cnt, top5_cnt = 0.0, 0.0
    for i, label in enumerate(labels):
      top1_cnt += label in pred_idx[i][:1]
      top5_cnt += label in pred_idx[i][:5]
      if i % 100 == 0:
        print('Step {}: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(
            i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
        sys.stdout.flush()
    top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
    print('Final: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(top1, top5))
    return top1, top5
| 15,742 | 37.775862 | 91 | py |
neuron-importance-zsl | neuron-importance-zsl-master/mod2alpha.py | # Code to map from any modality to alphas.
# Train using class_info and alphas from a trained network
import argparse
import numpy as np
import random
random.seed(1234)
from random import shuffle
import pickle
from pprint import pprint
from dotmap import DotMap
import pdb
import csv
import os
import json
import tensorflow as tf
from scipy.io import loadmat
import ntpath
from scipy.stats import spearmanr
import glob
from tqdm import tqdm
import torchfile
import scipy.io as scio
# Fix CUB names due to mismatch in Scott Reed's caption dataset
# Maps CUB-200 class directory names to the possessive spellings used in
# Scott Reed's caption dataset, e.g. "Clark_" -> "Clarks_".
CUB_FNAME_FIX = {'093.Clark_Nutcracker': '093.Clarks_Nutcracker',
                 '124.Le_Conte_Sparrow': '124.Le_Contes_Sparrow',
                 '180.Wilson_Warbler': '180.Wilsons_Warbler',
                 '125.Lincoln_Sparrow': '125.Lincolns_Sparrow',
                 '023.Brandt_Cormorant': '023.Brandts_Cormorant',
                 '178.Swainson_Warbler': '178.Swainsons_Warbler',
                 '122.Harris_Sparrow': '122.Harriss_Sparrow',
                 '113.Baird_Sparrow': '113.Bairds_Sparrow',
                 '123.Henslow_Sparrow': '123.Henslows_Sparrow',
                 '098.Scott_Oriole': '098.Scotts_Oriole',
                 '061.Heermann_Gull': '061.Heermanns_Gull',
                 '022.Chuck_will_Widow': '022.Chuck_wills_Widow',
                 '193.Bewick_Wren': '193.Bewicks_Wren',
                 '067.Anna_Hummingbird': '067.Annas_Hummingbird',
                 '126.Nelson_Sharp_tailed_Sparrow': '126.Nelsons_Sparrow',
                 '115.Brewer_Sparrow': '115.Brewers_Sparrow',
                 '009.Brewer_Blackbird': '009.Brewers_Blackbird'}
# Command-line interface: path to the experiment's JSON configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--config_json', default='')
def encode_attributes_class(attrdir, imlabelist, config):
    """Parse a whitespace-separated class-attribute matrix file.

    Args:
        attrdir: path to a text file with one row of attribute values per class.
        imlabelist: unused here; kept so the caller interface stays unchanged.
        config: needs ``supervision`` ('class' enables parsing) and ``a2t``
            ('True' casts entries to int, anything else to float).

    Returns:
        (im_attr, cls_attr): ``im_attr`` is always empty (see fix note below);
        ``cls_attr`` maps class index -> list of attribute values, with the
        missing-value marker -1.00 replaced by 0.
    """
    im_attr = {}
    cls_attr = {}
    # Use class level supervision
    if config.supervision == 'class':
        with open(attrdir) as f:
            for n, line in enumerate(f):
                tokens = [x for x in line.rstrip().split(" ") if x]
                if config.a2t == "True":
                    row = [int(x) if float(x) != -1.00 else 0 for x in tokens]
                else:
                    row = [float(x) if float(x) != -1.00 else 0 for x in tokens]
                cls_attr[n] = row
    # Fix: the original also looped over im_attr to build per-image attribute
    # entries, but im_attr was always empty at that point, so that loop could
    # never execute (dead code). It has been removed; im_attr is still
    # returned empty so the (im_attr, cls_attr) interface is unchanged.
    return im_attr, cls_attr
def parse_json(json_file):
    """Load and return the deserialized contents of a JSON file."""
    with open(json_file, 'r') as handle:
        return json.load(handle)
def encode_tfidf(tf_file, imlabelist, config):
    """Load per-class TF-IDF vectors and assign one to every image.

    Returns a dict mapping image index (1-based, as a string) to
    {'att': <TF-IDF row for that image's class>}.
    """
    # Function to encode the TF-IDF features from wikipedia articles
    # Make this compatible with Ram's attribute encoding function
    # NOTE(review): the ``tf_file`` parameter is ignored — the path below is
    # hard-coded; confirm this is intentional.
    attrdir = './data/CUB/11083D_TFIDF.mat'
    tf_idf = scio.loadmat(attrdir)['PredicateMatrix']
    im_attr = {}
    print('Encoding TF-IDF....')
    for i in tqdm(range(len(imlabelist))):
        #print(tf_idf[imlabelist[i]-1].tolist())
        # NOTE(review): callers pass labels that were already shifted to
        # 0-based; the extra ``-1`` here looks like a double decrement — verify.
        im_attr[str(i+1)] = {}
        im_attr[str(i+1)]['att'] = tf_idf[imlabelist[i]-1].tolist()
    return im_attr
def encode_captions(cap_dir, imlist_new, imlabelist, config):
    """Encode per-image caption features (mean of the w2v caption matrix).

    Returns a dict mapping image index (1-based, as a string) to
    {'att': mean caption embedding for that image}.
    """
    # config.attrdir has to be 2 directories joined as strings default argument to use
    # In interest of time, we're only doing w2v captions
    # Get caption text dir and feature dir
    # NOTE(review): the ``cap_dir`` parameter is immediately overwritten by
    # this hard-coded pair of paths — confirm intentional.
    attrdir = './data/CUB/text_c10,./data/CUB/w2v_c10'
    cap_dir = attrdir.split(',')[0]
    feat_dir = attrdir.split(',')[1]
    # Load appropriate mapping: caption .txt paths, normalized so they line up
    # with the image paths in imlist_new.
    all_f = glob.glob(cap_dir + '/*/*.txt')
    all_f = sorted(all_f)
    all_f = [x.replace('./','').replace('.txt', '.jpg').replace(cap_dir + '/', '') for x in all_f]
    # Load all class t7 files (one Torch7 feature file per class; class names
    # that differ in Reed's dataset are remapped via CUB_FNAME_FIX).
    t7_dict = {}
    class_names = list(set([x.split('/')[0] for x in imlist_new]))
    print('Loading feature files..')
    for i in class_names:
        fname = i
        if fname in list(CUB_FNAME_FIX.keys()):
            fname = CUB_FNAME_FIX[fname]
        t7_dict[i] = torchfile.load(feat_dir + '/' + fname + '.t7')
    im_attr = {}
    # Do this iteratively
    print('Encoding captions...')
    for i in tqdm(range(len(imlist_new))):
        imname = imlist_new[i]
        # Image name to class-t7 file
        class_name = imname.split('/')[0]
        data = t7_dict[class_name]
        # Position of this image within its class (t7 rows are per-class).
        imind = all_f.index(imname)
        indlist = sorted([all_f.index(x) for x in all_f if class_name in x])
        pos = indlist.index(imind)
        feat = data[pos].T
        im_attr[str(i+1)] = {}
        # Average over the (presumably per-caption) rows — TODO confirm axis.
        im_attr[str(i+1)]['att'] = np.mean(feat, axis=0).tolist()
    return im_attr
def im_imid_map(imgmap):
    """Map image basenames to image IDs from an '<id> <path>' list file."""
    mapping = {}
    with open(imgmap) as handle:
        for entry in handle:
            fields = entry.rstrip('\n').split(" ")
            mapping[ntpath.basename(fields[1])] = fields[0]
    return mapping
def load_class_splits(config):
    """Split the seen (train) classes into train/val subsets.

    Reads seen-class IDs from the .mat split file, collects
    (classID-1, class_name) pairs for every seen class in the class list,
    shuffles them, and cuts the shuffled list at ``config.n_train_class``.

    Returns:
        (train_seen_class_split, val_seen_class_split) lists of tuples.
    """
    class_split = loadmat(config.split_file)
    train_cid = class_split['train_cid'][0].tolist()
    test_cid = class_split['test_cid'][0].tolist()  # loaded but unused here
    seen_classes = []
    with open(config.class_listf, 'r') as handle:
        for line in handle:
            fields = line.strip('\n').split(' ')
            cid = int(fields[0])
            if cid in train_cid:
                seen_classes.append((cid - 1, fields[1]))
    # Shuffle once, then split train/val at the configured boundary.
    random.shuffle(seen_classes)
    cut = int(config.n_train_class)
    return seen_classes[:cut], seen_classes[cut:]
def create_splits(config):
    """Build (image_id, class, attribute, alpha) train/val splits.

    Reads per-image alpha JSON files from the alpha directory, attaches the
    class-level modality embedding to each image, and splits by whether the
    image's class is in the train-seen class list.

    Returns:
        (train_split, val_split): lists of (image_id, gt_class, attr, alpha).
    """
    # Create proper train,val and test splits from CUB alphas dataset
    im_imid = im_imid_map(config.imagelist)
    train_seen_class, val_seen_class = load_class_splits(config)
    imlist_new = [x.strip('\n').split(' ')[1] for x in open('./data/CUB/images.txt', 'r').readlines()]
    imlabelist = [int(y.strip('\n').split(' ')[1])-1 for y in open(config.imagelabellist, 'r').readlines()]
    # modality specific data loader
    if config.modality == 'attributes':
        if config.a2t == 'True':
            im_attr, cls_attr = encode_attributes_class(config.classattrdir_binary, imlabelist, config)
        else:
            im_attr, cls_attr = encode_attributes_class(config.classattrdir, imlabelist, config)
    elif config.modality == 'wikipedia':
        im_attr = encode_tfidf(config.attrdir, imlabelist, config)
    elif config.modality == 'captions':
        # NOTE(review): encode_captions returns a per-IMAGE dict keyed by image
        # index, yet below it is indexed with str(gt_class+1) — looks like a
        # class/image index mix-up; verify before using the captions modality.
        cls_attr = encode_captions(config.attrdir, imlist_new, imlabelist, config)
    else:
        print("Modality not supported")
    imlist = []
    imlabellist = []
    imattrlist = []
    imalphaslist = []
    train_val_split = []
    trainval_alphadir = config.alphadir.format(config.dataset, config.dataset, config.alpha_model_name, int(config.n_seen_train), config.alpha_layer_name)
    print('alphadir: ', trainval_alphadir)
    # One JSON per image: {'image', 'gt_cid', 'gt_alpha'}.
    for filename in tqdm(os.listdir(trainval_alphadir)):
        if filename.endswith(".json") :
            with open(trainval_alphadir+filename,'r') as fj:
                data = json.load(fj)
                image = ntpath.basename(data['image'])
                image_id = im_imid[str(image)]
                imlist.append(image_id)
                gt_class = int(data['gt_cid'])
                if config.modality =='attributes':
                    attr = cls_attr[gt_class]
                elif config.modality == 'captions':
                    attr = cls_attr[str(gt_class+1)]['att']
                imattrlist.append(attr)
                # Train on all train and val attributes and test on test attributes
                # train = 1
                if gt_class in [x for (x,_) in train_seen_class]:
                    train = 1
                else:
                    train = 0
                imlabellist.append(gt_class)
                gt_class_alpha = data['gt_alpha']
                imalphaslist.append(gt_class_alpha)
                train_val_split.append(train)
    whole_dataset = list(zip(imlist, imlabellist, imattrlist, imalphaslist, train_val_split))
    train_split = [x for x in whole_dataset if x[4] == 1]
    val_split = [x for x in whole_dataset if x[4] == 0]
    shuffle(train_split)
    # split into train and validation set (drop the split flag from each tuple)
    train_split = [(x[0], x[1],x[2], x[3]) for x in train_split]
    val_split = [(x[0], x[1],x[2], x[3]) for x in val_split]
    # NOTE(review): '%d' here is printed literally — print() does not
    # %-format; the count is printed as a second argument.
    print('#train_instances: %d', len(train_split))
    print('#val_instances: %d', len(val_split))
    return train_split, val_split
def main(args):
    """Train the modality->alpha regressor described by the JSON config.

    Builds a TF1 graph (linear / 2-layer / 3-layer MLP), trains it with an
    alpha-regression loss (cosine distance, L1, or both), validates each
    epoch, and checkpoints whenever the validation loss improves.
    """
    # Load config JSON and use the arguments
    config = parse_json(args.config_json)
    pprint(config)
    config = DotMap(config)
    train_split, val_split = create_splits(config)
    train_im, train_class, train_attr, train_alphas = map(list, zip(*train_split))
    val_im, val_class, val_attr, val_alphas = map(list, zip(*val_split))
    graph = tf.Graph()
    with graph.as_default():
        # Training dataset
        train_attr = tf.constant(np.array(train_attr).astype(np.float32))
        train_alphas = tf.constant(train_alphas)
        train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_attr, train_alphas))
        train_dataset = train_dataset.shuffle(buffer_size=len(train_split))
        batched_train_dataset = train_dataset.batch(int(config.dom2alpha_batch_size))
        # Val dataset
        val_attr = tf.constant(np.array(val_attr).astype(np.float32))
        val_alphas = tf.constant(val_alphas)
        val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_attr, val_alphas))
        val_dataset = val_dataset.shuffle(buffer_size=len(val_split))
        batched_val_dataset = val_dataset.batch(int(config.dom2alpha_batch_size))
        # Define iterator that operates on either of the splits
        iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
        text, alphas = iterator.get_next()
        train_init_op = iterator.make_initializer(batched_train_dataset)
        val_init_op = iterator.make_initializer(batched_val_dataset)
        # Define the global step to be some tf.Variable
        global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
        step_initializer = tf.variables_initializer([global_step_tensor])
        if config.dom2alpha_model == "linear":
            print("----------------------------------------------------------------Creating a linear model, att to alpha--------------------------------------------------------------------")
            num_input = int(config.n_attr)
            num_output = int(config.n_alphas)
            weights = {'out': tf.Variable(tf.random_normal([num_input, num_output]))}
            biases = {'out': tf.Variable(tf.random_normal([num_output]))}
            adam_vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name]
            def neural_net(x):
                # Single affine layer: x @ W + b.
                out_layer = tf.add(tf.matmul(x, weights['out']), biases['out'])
                return out_layer
            out = neural_net(text)
        elif config.dom2alpha_model == "multilayer":
            print("------------------------------------------------------------Creating a multilayer (3 layer) model, att to alpha-----------------------------------------------------------")
            n_input = int(config.n_attr)
            n_hidden_1 = int(config.n_hidden_1)# 400
            n_hidden_2 = int(config.n_hidden_2)# 450
            # n_hidden_2 = 450
            n_output = int(config.n_alphas)
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
            # Create model
            def multilayer_perceptron(x):
                # Hidden fully connected layer with 256 neurons
                layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=n_hidden_1, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                # Hidden fully connected layer with 256 neurons
                layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_hidden_2, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                # Output fully connected layer with a neuron for each class
                out_layer = tf.contrib.layers.fully_connected(layer_2, num_outputs=n_output, activation_fn=None, weights_regularizer=regularizer)
                return out_layer
            # Construct model
            out = multilayer_perceptron(text)
        elif config.dom2alpha_model == "2layer":
            print("------------------------------------------------------------Creating a multilayer (3 layer) model, att to alpha-----------------------------------------------------------")
            n_input = int(config.n_attr)
            n_hidden = int(config.n_hidden)# 400
            # n_hidden_2 = 450
            n_output = int(config.n_alphas)
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
            # Create model
            def multilayer_perceptron(x):
                # Hidden fully connected layer with 256 neurons
                layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=n_hidden, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                # Hidden fully connected layer with 256 neurons
                #layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_hidden_2, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                # Output fully connected layer with a neuron for each class
                out_layer = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_output, activation_fn=None, weights_regularizer=regularizer)
                return out_layer
            # Construct model
            out = multilayer_perceptron(text)
        #define loss
        # Normalize the gt and predicted alphas (required for before feeding to cosine distance loss function)
        out_normalized = tf.nn.l2_normalize(out, 1)
        alphas_normalized = tf.nn.l2_normalize(alphas, 1)
        if config.alpha_loss_type == "cd":
            alpha_loss = tf.reduce_mean(tf.losses.cosine_distance(alphas_normalized, out_normalized, dim=1, reduction = tf.losses.Reduction.NONE))
        elif config.alpha_loss_type == "l1":
            alpha_loss = tf.reduce_mean(tf.abs(alphas - out))
        elif config.alpha_loss_type == "cdandl1":
            alpha_loss = tf.reduce_mean(tf.abs(alphas - out)) + float(config.cdl1_reg)* tf.reduce_mean(tf.losses.cosine_distance(alphas_normalized, out_normalized, dim=1, reduction = tf.losses.Reduction.NONE))
        # regularization term: Not sure if this is necessary. It doesn't matter if the alphas scale is matched. Only the weights for the final classifier need to be of the right scale.
        reg_loss = float(config.dom2alpha_lambda_reg) * tf.abs(tf.nn.l2_loss(out) - tf.nn.l2_loss(alphas))
        loss = alpha_loss + reg_loss
        # Training Op
        optimizer = tf.train.AdamOptimizer(learning_rate=float(config.learning_rate))
        train_op = optimizer.minimize(loss)
        adam_vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name]
        adam_initializer = tf.variables_initializer(adam_vars)
        # NOTE(review): tf.initialize_all_variables() is deprecated in favor of
        # tf.global_variables_initializer().
        init_op = tf.initialize_all_variables()
        # Define saver
        saver = tf.train.Saver()
        tf.get_default_graph().finalize()
    # Start a session to learn the modality to alpha mapping
    with tf.Session(graph=graph) as sess:
        val_loss_best = 1e20
        val_corr_best = -1
        sess.run(adam_initializer)
        sess.run(step_initializer)
        sess.run(init_op)
        tf.train.global_step(sess, global_step_tensor)
        # Start by evaluating on val class data
        sess.run(val_init_op)
        val_loss = []
        while True:
            try:
                l = sess.run(loss)
                val_loss.append(l)
            except tf.errors.OutOfRangeError:
                break
        Initial_valLoss = np.array(val_loss).mean()
        perf = []
        print('Initial Val Loss: {} '.format(Initial_valLoss))
        iteration = 1
        for epoch in range(int(config.num_epochs)):
            print('Epoch {}/{}'.format(epoch+1, int(config.num_epochs)))
            sess.run(train_init_op)
            while True:
                try:
                    sess.run(train_op)
                    iteration = iteration + 1
                    if (iteration-2)%100==0:
                        # NOTE(review): ``l`` here is the STALE loss from the
                        # previous validation pass, not the current training
                        # loss — the print is misleading.
                        print('Iteration: {} Training Loss: {} '.format(iteration, l))
                except tf.errors.OutOfRangeError:
                    break
            print("Validating on the val set (images of val classes)")
            # Load val class info
            sess.run(val_init_op)
            val_loss = []
            val_alpha_loss = []
            val_reg_loss = []
            val_rank_corr = []
            while True:
                try:
                    l, out_val = sess.run([loss, out])
                    val_loss.append(l)
                except tf.errors.OutOfRangeError:
                    break
            valLoss = np.array(val_loss).mean()
            print("Epoch {}, average_val_loss: {}".format(epoch, valLoss))
            if valLoss < val_loss_best:
                val_loss_best = valLoss
                checkpoint_dir = config.dom2alpha_ckpt_dir + 'mod_{}_2alpha_dset_{}_baseNcls_{}_basemodel_{}_layername_{}_d2a_model_{}_n_train_{}_alphaloss_{}_epoch_{}_loss_{:0.2f}.ckpt'.format(config.modality, config.dataset, config.n_seen_train, config.base_model, config.alpha_layer_name, config.dom2alpha_model, config.n_train_class, config.alpha_loss_type, epoch, valLoss)
                print("Saving model parameters to: ", checkpoint_dir)
                saver.save(sess, checkpoint_dir)
            else:
                print("Val loss went up ")
            iteration += 1
        print("Optimization Finished! ")
        # NOTE(review): checkpoint_dir is unbound (NameError) if num_epochs is
        # 0 or the validation loss never improves.
        print("Best Checkpoint dir: ", checkpoint_dir)
        print("Initial Validation loss was: {}".format(Initial_valLoss))
        print("Best Validation loss achieved: {}".format(val_loss_best))
if __name__ == '__main__':
    # Entry point: parse CLI args (expects --config_json) and run training.
    args = parser.parse_args()
    main(args)
| 18,735 | 41.103371 | 377 | py |
neuron-importance-zsl | neuron-importance-zsl-master/alpha2w.py | # Finetune a network in tensorflow on the CUB dataset
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import ntpath
import json
import pdb
import random
import torchfile
import importlib
from scipy.stats import spearmanr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from tensorflow.python.ops import array_ops
from tensorflow.python import pywrap_tensorflow
from pprint import pprint
import pickle
from dotmap import DotMap
import glob
import itertools
from itertools import groupby
from random import shuffle
from tqdm import tqdm
sys.path.insert(0, '/nethome/rrs6/models/research/slim')
# Seed TF and Python RNGs for reproducible splits and initialization.
SEED = 1111
tf.set_random_seed(SEED)
random.seed(SEED)
"""
Some classes in Scott Reed's captions
are named differently than the
original CUB_200_2011 folder
"""
# Keys are CUB_200_2011 folder names, values are the caption-dataset names.
CUB_FNAME_FIX = {'093.Clark_Nutcracker': '093.Clarks_Nutcracker',
                 '124.Le_Conte_Sparrow': '124.Le_Contes_Sparrow',
                 '180.Wilson_Warbler': '180.Wilsons_Warbler',
                 '125.Lincoln_Sparrow': '125.Lincolns_Sparrow',
                 '023.Brandt_Cormorant': '023.Brandts_Cormorant',
                 '178.Swainson_Warbler': '178.Swainsons_Warbler',
                 '122.Harris_Sparrow': '122.Harriss_Sparrow',
                 '113.Baird_Sparrow': '113.Bairds_Sparrow',
                 '123.Henslow_Sparrow': '123.Henslows_Sparrow',
                 '098.Scott_Oriole': '098.Scotts_Oriole',
                 '061.Heermann_Gull': '061.Heermanns_Gull',
                 '022.Chuck_will_Widow': '022.Chuck_wills_Widow',
                 '193.Bewick_Wren': '193.Bewicks_Wren',
                 '067.Anna_Hummingbird': '067.Annas_Hummingbird',
                 '126.Nelson_Sharp_tailed_Sparrow': '126.Nelsons_Sparrow',
                 '115.Brewer_Sparrow': '115.Brewers_Sparrow',
                 '009.Brewer_Blackbird': '009.Brewers_Blackbird'}
# CLI: path to the JSON config that drives this script.
parser = argparse.ArgumentParser()
parser.add_argument('--config_json', default='./arg_configs/vgg16_config_AWA_full.json')
# Per-channel RGB means used for VGG-style image preprocessing.
VGG_MEAN = [123.68, 116.78, 103.94]
def uhead_plotter(l1, l2, l3, l4, l5, l6, directory, mode):
    """Plot the six accuracy curves on one axis and save to <directory><mode>.png."""
    curves = [(l1, 'seen_seen'),
              (l2, 'unseen_unseen'),
              (l3, 'seen_unseen_seen_unseen'),
              (l4, 'seen_seen_unseen'),
              (l5, 'unseen_seen_unseen'),
              (l6, 'harmonic')]
    fig = plt.figure()
    axis = fig.add_subplot(111)
    for series, series_label in curves:
        axis.plot(range(len(series)), series, label=series_label)
    plt.legend()
    plt.title(mode)
    fig.savefig(directory + mode + '.png')
    plt.close(fig)
def uhead_plotter_loss(l1, directory, mode):
    """Plot a single validation-loss curve and save to <directory><mode>.png."""
    fig = plt.figure()
    axis = fig.add_subplot(111)
    axis.plot(range(len(l1)), l1, label=' val set')
    plt.legend()
    plt.title(mode)
    fig.savefig(directory + mode + '.png')
    plt.close(fig)
def parse_json(json_file):
    """Read a JSON file and return the deserialized object."""
    with open(json_file, 'r') as handle:
        return json.load(handle)
def norm1(a):
    """Return the L1 norm (sum of absolute values) of *a*."""
    return np.abs(a).sum()
def entropy(ls):
    """Return the Shannon entropy (base 2) of the label list *ls*.

    Fixes: removed a leftover debug ``print(ls)`` and replaced the quadratic
    ``ls.count(x)`` inside the comprehension with a single Counter pass.
    """
    from collections import Counter
    total = len(ls)
    p = np.array([count / total for count in Counter(ls).values()])
    return -p.dot(np.log2(p))
def encode_attributes_class(config, imlabelist):
    """Parse the class-attribute matrix file at ``config.classattrdir``.

    Args:
        config: needs ``supervision`` ('class' enables parsing) and
            ``classattrdir`` (path to the attribute matrix text file).
        imlabelist: unused here; kept so the caller interface stays unchanged.

    Returns:
        (im_attr, cls_attr): ``im_attr`` is always empty (see fix note below);
        ``cls_attr`` maps class index -> list of float attribute values, with
        the missing-value marker -1.00 replaced by 0.
    """
    im_attr = {}
    cls_attr = {}
    # Use class level supervision
    if config.supervision == 'class':
        with open(config.classattrdir) as f:
            for n, line in enumerate(f):
                tokens = [x for x in line.rstrip().split(" ") if x]
                cls_attr[n] = [float(x) if float(x) != -1.00 else 0 for x in tokens]
    # Fix: the original also looped over im_attr to build per-image attribute
    # entries, but im_attr was always empty at that point, so that loop could
    # never execute (dead code). It has been removed; im_attr is still
    # returned empty so the (im_attr, cls_attr) interface is unchanged.
    return im_attr, cls_attr
def encode_tfidf(tf_file, imlabelist, config):
    """Load per-class TF-IDF vectors and assign one to every image.

    Returns a dict mapping image index (1-based, as a string) to
    {'att': <TF-IDF row for that image's class>}.
    """
    # Function to encode the TF-IDF features from wikipedia articles
    # Make this compatible with Ram's attribute encoding function
    # NOTE(review): the ``tf_file`` parameter is ignored — the path below is
    # hard-coded; confirm this is intentional.
    attrdir = './data/CUB/11083D_TFIDF.mat'
    tf_idf = scio.loadmat(attrdir)['PredicateMatrix']
    im_attr = {}
    print('Encoding TF-IDF....')
    for i in tqdm(range(len(imlabelist))):
        #print(tf_idf[imlabelist[i]-1].tolist())
        # NOTE(review): callers pass labels that were already shifted to
        # 0-based; the extra ``-1`` here looks like a double decrement — verify.
        im_attr[str(i+1)] = {}
        im_attr[str(i+1)]['att'] = tf_idf[imlabelist[i]-1].tolist()
    return im_attr
def encode_captions(cap_dir, imlist_new, imlabelist, config):
    """Encode caption features per image and averaged per class.

    Returns:
        (im_attr, cls_attr): ``im_attr`` maps image index (1-based string) to
        the image's mean caption embedding; ``cls_attr`` maps 0-based class id
        to the mean embedding over that class's images.
    """
    # config.attrdir has to be 2 directories joined as strings default argument to use
    # In interest of time, we're only doing w2v captions
    # Get caption text dir and feature dir
    # NOTE(review): the ``cap_dir`` parameter is immediately overwritten by
    # this hard-coded pair of paths — confirm intentional.
    attrdir = './data/CUB/text_c10,./data/CUB/w2v_c10'
    cap_dir = attrdir.split(',')[0]
    feat_dir = attrdir.split(',')[1]
    # Load appropriate mapping: caption .txt paths, normalized so they line up
    # with the image paths in imlist_new.
    all_f = glob.glob(cap_dir + '/*/*.txt')
    all_f = sorted(all_f)
    all_f = [x.replace('./','').replace('.txt', '.jpg').replace(cap_dir + '/', '') for x in all_f]
    # Load all class t7 files (one Torch7 feature file per class; class names
    # that differ in Reed's dataset are remapped via CUB_FNAME_FIX).
    t7_dict = {}
    class_names = list(set([x.split('/')[0] for x in imlist_new]))
    print('Loading caption feature files..')
    for i in class_names:
        fname = i
        if fname in list(CUB_FNAME_FIX.keys()):
            fname = CUB_FNAME_FIX[fname]
        t7_dict[i] = torchfile.load(feat_dir + '/' + fname + '.t7')
    im_attr = {}
    cls_attr = {}
    # Do this iteratively
    print('Encoding captions...')
    for i in tqdm(range(len(imlist_new))):
        imname = imlist_new[i]
        # 0-based class id parsed from the 'NNN.Class_Name/...' path prefix.
        class_id = int(imlist_new[i].split('.')[0])-1
        # Image name to class-t7 file
        class_name = imname.split('/')[0]
        data = t7_dict[class_name]
        # Position of this image within its class (t7 rows are per-class).
        imind = all_f.index(imname)
        indlist = sorted([all_f.index(x) for x in all_f if class_name in x])
        pos = indlist.index(imind)
        feat = data[pos].T
        #im_attr[str(i+1)] = {}
        im_attr[str(i+1)] = np.mean(feat, axis=0).tolist()
        # Accumulate per-image means so the class mean can be taken below.
        if class_id in cls_attr:
            cls_attr[class_id].append(np.mean(feat, axis=0))
        else:
            cls_attr[class_id] = []
            cls_attr[class_id].append(np.mean(feat, axis=0))
    for id in cls_attr:
        cls_attr[id] = np.array(cls_attr[id]).mean(axis=0)
    return im_attr, cls_attr
def im_imid_map(imagelist):
    """Build both directions of the image-basename <-> image-ID mapping.

    Returns:
        (im_imid, imid_im): basename -> id and id -> basename dicts, read from
        an '<id> <path>' list file.
    """
    name_to_id = {}
    id_to_name = {}
    with open(imagelist) as handle:
        for row in handle:
            fields = row.rstrip('\n').split(" ")
            base = ntpath.basename(fields[1])
            name_to_id[base] = fields[0]
            id_to_name[fields[0]] = base
    return name_to_id, id_to_name
def get_alphas(config, imattrlist, checkpoint_path):
    """Predict alpha vectors for a batch of attribute/text embeddings.

    Rebuilds the dom2alpha regressor ('linear', 'multilayer' or '2layer'),
    restores its weights from ``checkpoint_path``, and runs one forward pass
    on ``imattrlist``. Returns the predicted alphas as a numpy array.
    """
    # NOTE(review): ``graph`` is created but never entered via as_default(),
    # so every op below lands in the process-wide default graph — confirm
    # this function is only called once per process.
    graph = tf.Graph()
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
    num_input = len(imattrlist[0])
    # load dom2alpha model
    if config.dom2alpha_model =='linear':
        n_alphas = int(config.n_alphas)
        weights = {'out': tf.Variable(tf.random_normal([num_input, n_alphas]))}
        biases = {'out': tf.Variable(tf.random_normal([n_alphas]))}
        adam_vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name]
        def neural_net(x):
            # Single affine layer: x @ W + b.
            out_layer = tf.add(tf.matmul(x, weights['out']), biases['out'])
            return out_layer
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = neural_net(text)
    elif config.dom2alpha_model=='multilayer':
        n_alphas = int(config.n_alphas)
        def multilayer_perceptron(x):
            # Two ReLU hidden layers followed by a linear output layer.
            layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=int(config.n_hidden_1), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=int(config.n_hidden_2), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            out_layer = tf.contrib.layers.fully_connected(layer_2, num_outputs=n_alphas, activation_fn=None, weights_regularizer=regularizer)
            return out_layer
        # Construct model
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = multilayer_perceptron(text)
    elif config.dom2alpha_model=='2layer':
        n_alphas = int(config.n_alphas)
        def multilayer_perceptron(x):
            # One ReLU hidden layer followed by a linear output layer.
            layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=int(config.n_hidden), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            #layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=int(config.n_hidden_2), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            out_layer = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_alphas, activation_fn=None, weights_regularizer=regularizer)
            return out_layer
        # Construct model
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = multilayer_perceptron(text)
    saver = tf.train.Saver()
    sess = tf.Session()
    # NOTE(review): init_op is built but never run; saver.restore() below
    # assigns every model variable, so this line is dead (and deprecated).
    init_op = tf.initialize_all_variables()
    saver.restore(sess, checkpoint_path)
    alpha_val = sess.run(out, feed_dict={text:imattrlist})
    return alpha_val
def load_class_splits(split_file, class_listf):
    """Load seen/unseen class splits and build id remapping tables.

    Args:
        split_file: .mat file holding 1-based 'train_cid' and 'test_cid' arrays.
        class_listf: text file with '<classID> <class_name>' per line.

    Returns:
        (test_unseen_class, idmaps, idmaps_all, idmaps_seen,
         idmaps_inv, idmaps_all_inv, idmaps_seen_inv) where each idmaps* maps
        original 0-based class ids to compact indices (and *_inv the reverse).
    """
    class_split = scio.loadmat(split_file)
    train_cid = class_split['train_cid'][0].tolist()
    test_cid = class_split['test_cid'][0].tolist()
    # Collect (classID-1, name) for every unseen (test) class in the list.
    test_unseen_class = []
    with open(class_listf, 'r') as handle:
        for line in handle:
            fields = line.strip('\n').split(' ')
            cid = int(fields[0])
            if cid in test_cid:
                test_unseen_class.append((cid - 1, fields[1]))
    # Compact mapping over unseen classes only.
    ids = sorted(cid for cid, _ in test_unseen_class)
    idmaps = {cid: pos for pos, cid in enumerate(ids)}
    idmaps_inv = {pos: cid for pos, cid in enumerate(ids)}
    # Compact mapping over seen classes only.
    idmaps_seen = {cid - 1: pos for pos, cid in enumerate(train_cid)}
    idmaps_seen_inv = {pos: cid - 1 for pos, cid in enumerate(train_cid)}
    # Joint mapping: seen classes first, then unseen classes.
    idmaps_all = {}
    idmaps_all_inv = {}
    n_seen = len(train_cid)
    for i in range(n_seen + len(test_cid)):
        orig = train_cid[i] - 1 if i < n_seen else ids[i - n_seen]
        idmaps_all[orig] = i
        idmaps_all_inv[i] = orig
    return test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv
def load_data(config, fname, test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv, imagedir, imagelist, imagelabellist, train_test_split_list):
    """Assemble train/val/test splits pairing images with predicted alphas.

    Encodes the class-level modality embedding, maps it to alphas via the
    trained dom2alpha checkpoint, then builds:
      - train/val splits over UNSEEN-class test images (each paired with a
        random seen image from the sampling pickle),
      - test splits over seen, unseen, and combined classes (generalized ZSL;
        unseen class ids are offset by config.n_seen).
    """
    # Check if files exist or not
    im_imid, imid_im = im_imid_map(config.imagelist)
    # Create proper train, val and test splits from the CUB dataset
    imlist = [x.strip('\n').split(' ')[1] for x in open(imagelist, 'r').readlines()]
    imidlist = [x.strip('\n').split(' ')[0] for x in open(imagelist, 'r').readlines()]
    imlabelist = [int(y.strip('\n').split(' ')[1])-1 for y in open(imagelabellist, 'r').readlines()]
    #############################################################################################################################
    # Load New imagelist
    if config.modality == 'attributes':
        im_attr, cls_attr = encode_attributes_class(config, imlabelist)
    elif config.modality == 'wikipedia':
        im_attr = encode_tfidf(config.attrdir, imlabelist, config)
    elif config.modality == 'captions':
        imlist_new = [x.strip('\n').split(' ')[1] for x in open(config.imagelist, 'r').readlines()]
        im_attr, cls_attr = encode_captions(config.attrdir, imlist_new, imlabelist, config)
    else:
        print("Modality not supported")
    # print(im_attr.keys())
    #############################################################################################################################
    imattrlist = [cls_attr[x] for x in imlabelist]
    # Map every class embedding to its predicted alpha vector.
    clsattralphas = {}
    clsattralphas_list = get_alphas(config, list(cls_attr.values()), config.text2alpha_ckpt).tolist()
    for h in cls_attr:
        clsattralphas[h] = clsattralphas_list[h]
    # Remove all instances of unseen classes from the dataset
    req_im, req_im_seen, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], [], []
    # Pool of seen-class images to pair with each unseen-class image.
    with open (config.sampling_images, 'rb') as fp:
        files = pickle.load(fp)
    shuffle(files)
    seen_im_list = files[:3000]
    unseen_test_split_file = config.unseen_test_split_file
    seen_test_split_file = config.seen_test_split_file
    # Train/val over unseen-class images; entries are (path, class) pairs.
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        req_imclass.append(int(d[1]))
        req_imattr.append(cls_attr[int(d[1])])
        req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
        req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
    req_dataset = list(zip(req_im, req_im_seen, req_imclass, req_imattr, req_attralpha))
    shuffle(req_dataset)
    train_split = req_dataset
    print("size of train dataset", len(train_split))
    # NOTE(review): val_split is rebuilt from the SAME images as train_split
    # (only the ordering differs) — confirm this overlap is intentional.
    req_dataset = list(zip(req_im,req_im_seen, req_imclass, req_imattr, req_attralpha))
    val_split = req_dataset
    train_split = [(x[0], x[1],x[2], x[3], x[4]) for x in train_split]
    val_split = [(x[0], x[1],x[2], x[3], x[4]) for x in val_split]
    print("size of val dataset", len(val_split))
    # Create test split seen classes --> for debugging
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        req_imclass.append(int(d[1]))
        req_imattr.append(cls_attr[int(d[1])])
        req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
        #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
        req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    train_split_seen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Create test split for all classes --> generalized zsl
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        req_imclass.append(int(d[1]))
        req_imattr.append(cls_attr[int(d[1])])
        req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
        #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
        req_split.append(0)
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        # Offset unseen class ids past the seen ones for generalized ZSL.
        req_imclass.append(int(d[1])+int(config.n_seen))
        req_imattr.append(cls_attr[int(d[1])])
        req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
        #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
        req_split.append(0)
    print("test dataset: number of images from both seen and unseen classes: ", len(req_im))
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Seen-classes-only test split.
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        req_imclass.append(int(d[1]))
        req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
        #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
        req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split_seen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Unseen-classes-only test split (ids offset by n_seen).
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
    for d in data:
        req_im.append(d[0])
        req_imclass.append(int(d[1])+int(config.n_seen))
        req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
        #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
        req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split_unseen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    print("length of whole test dataset: ", len(test_split))
    print("length of whole test_seen dataset: ", len(test_split_seen))
    print("length of whole test_unseen dataset: ", len(test_split_unseen))
    return train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen
def check_accuracy(sess, prediction, imclass, accuracy, is_training, dataset_init_op, verbose=False):
    """Run one full pass over a dataset and return the mean per-batch accuracy.

    Re-initializes the dataset iterator, then evaluates the accuracy op batch
    by batch until the iterator is exhausted.
    """
    sess.run(dataset_init_op)
    batch_accs = []
    while True:
        try:
            acc, pred_val, cls_val = sess.run([accuracy, prediction, imclass], {is_training: False})
        except tf.errors.OutOfRangeError:
            break
        if verbose:
            print("pred: ", pred_val)
            print("gt cls:", cls_val)
        batch_accs.append(acc)
    return np.mean(np.array(batch_accs))
def check_accuracy_normalized(sess, prediction, imclass, accuracy, is_training, dataset_init_op, verbose=False):
    """Run one pass over a dataset and return the per-CLASS mean accuracy.

    Unlike check_accuracy (mean over batches), this accumulates all
    predictions and labels, computes the accuracy separately for each ground
    truth class, and returns the unweighted mean across classes.
    """
    # Check accuracy on train or val
    # Initialize the dataset
    sess.run(dataset_init_op)
    acc_list = []
    whole_pred_list = []
    whole_label_list = []
    while True:
        try:
            acc, prediction_val, imclass_val= sess.run([accuracy, prediction, imclass], {is_training: False})
            if verbose:
                print("pred: ", prediction_val)
                print("gt cls:", imclass_val)
            acc_list.append(acc)
            # Get of unique predictions
            # Wrap scalar outputs so .tolist() below works uniformly.
            if isinstance(prediction_val,int):
                prediction_val = np.array([prediction_val])
            if isinstance(imclass_val,int):
                imclass_val = np.array([imclass_val])
            try:
                whole_pred_list += prediction_val.tolist()
                whole_label_list += imclass_val.tolist()
            except TypeError:
                # NOTE(review): a TypeError here ends accumulation early and
                # silently — confirm this best-effort bail-out is intended.
                break
        except tf.errors.OutOfRangeError:
            break
    final_acc = np.mean(np.array(acc_list))
    # Get unique classes
    unique_cls = list(set(whole_label_list))
    # Find incices corresponding to a class
    all_cls_acc = []
    for y in unique_cls:
        gt_indices = [i for i,x in enumerate(whole_label_list) if x == y]
        acc_clas = float([whole_pred_list[i] for i in gt_indices].count(y))/len(gt_indices)
        all_cls_acc.append(acc_clas)
    # print("pred list:", whole_pred_list)
    # print("label list:", whole_label_list)
    return np.mean(all_cls_acc)
    # return final_acc
# return final_acc
def main(args):
    """Train a zero-shot-learning 'unseen' head on top of a pretrained CNN.

    Pipeline: load the JSON config and seen/unseen class splits, build
    tf.data (tf.contrib.data) pipelines for all splits, restore a pretrained
    backbone (VGG-16 or ResNet-101), attach new seen/unseen logits heads,
    and optimize only the new head(s) with a Grad-CAM-style alpha loss plus
    an L2 regularizer toward the mean seen-class weights. Checkpoints are
    saved whenever the mean training loss improves.

    Args:
        args: argparse namespace; only ``args.config_json`` is read here.
    """
    # Load config JSON and use the arguments
    config = parse_json(args.config_json)
    pprint(config)
    config = DotMap(config)
    print("loading class splits ..")
    test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv = load_class_splits(config.split_file, config.class_listf)
    #test_unseen_class, idmaps, idmaps_all, idmaps_seen = load_class_splits(config.split_file, config.class_listf)
    # Load the dataset splits
    print('Loading data...')
    # train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen = load_data(config, config.save_path.split('/')[0], test_unseen_class, idmaps, idmaps_all, idmaps_seen, config.imagedir, config.imagelist, config.imagelabellist, config.train_test_split_list, float(config.train_prop), float(config.val_prop), float(config.test_prop))
    train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen = load_data(config, config.save_path.split('/')[0], test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv, config.imagedir, config.imagelist, config.imagelabellist, config.train_test_split_list)
    # Unzip each split into parallel lists (filenames, labels, attribute alphas).
    train_files,seen_train_files, train_imclass, train_imattr, train_attralpha= map(list, zip(*train_split))
    val_files,seen_val_files, val_imclass, val_imattr, val_attralpha = map(list, zip(*val_split))
    test_files, test_labels, test_attralpha = map(list, zip(*test_split))
    test_files_seen, test_labels_seen, test_attralpha_seen = map(list, zip(*test_split_seen))
    train_files_seen, train_labels_seen, train_attralpha_seen = map(list, zip(*train_split_seen))
    test_files_unseen, test_labels_unseen, test_attralpha_unseen = map(list, zip(*test_split_unseen))
    # Labels must be int32 to match the tf.one_hot / tf.equal ops below.
    train_imclass = np.array(train_imclass).astype('int32')
    val_imclass = np.array(val_imclass).astype('int32')
    test_labels = np.array(test_labels).astype('int32')
    test_labels_seen = np.array(test_labels_seen).astype('int32')
    test_labels_unseen = np.array(test_labels_unseen).astype('int32')
    # Write graph definition based on model name
    # Define the computation graph with necessary functions
    graph = tf.Graph()
    with graph.as_default():
        # Preprocessing function and module import
        preprocess_module_name = 'preprocessing.' + config.preprocess_fn
        preprocess_module = importlib.import_module(preprocess_module_name)
        # Get image size
        mc = getattr(nets, config.model_class, None)
        m = getattr(mc, config.model_name, None)
        #im_size = getattr(m, 'default_image_size', None)
        im_size = int(config.image_size)
        # Parsing an pre-processing function
        def _parse_function_seen(seen_filename, imclass, attralpha):
            image_f = tf.read_file(seen_filename)
            image_dec = tf.image.decode_jpeg(image_f, channels=3)
            image = tf.cast(image_dec, tf.float32)
            # Resize image
            res_img = tf.image.resize_images(image, [im_size, im_size])
            # attralpha_noise = tf.random_normal([int(config.n_alphas)])
            return res_img, imclass, attralpha
        # Substitute for parse_function seen+ unseen uncluding preprocessing
        def parse_fn_train(filename, imclass, attralpha):
            image_file = tf.read_file(filename)
            image = tf.image.decode_jpeg(image_file, channels=3)
            processed_image = preprocess_module.preprocess_image(image, im_size, im_size, is_training=False)
            return processed_image, imclass, attralpha
        def _parse_function_noise(seen_filename, imclass, attralpha):
            # Decodes the file but replaces the image with uniform-scaled
            # Gaussian noise of the same spatial size (noise-sampling mode).
            image_f = tf.read_file(seen_filename)
            image_dec = tf.image.decode_jpeg(image_f, channels=3)
            image = tf.cast(image_dec, tf.float32)
            # Resize image
            res_img = tf.image.resize_images(image, [im_size, im_size])
            img = tf.random_normal([im_size, im_size, 3], mean=0, stddev=1.0)
            img = tf.div(tf.subtract(img, tf.reduce_min(img)), tf.subtract(tf.reduce_max(img), tf.reduce_min(img)))
            img = tf.cast(img*255.0, tf.int32)
            res_img_noise = tf.cast(img, tf.float32)
            res_img = res_img_noise
            return res_img, imclass, attralpha
        # Parsing an pre-processing function
        def _parse_function_val(filename, imclass, attralpha):
            image_f = tf.read_file(filename)
            image_dec = tf.image.decode_jpeg(image_f, channels=3)
            image = tf.cast(image_dec, tf.float32)
            # Resize image
            res_img = tf.image.resize_images(image, [im_size, im_size])
            return res_img, imclass, attralpha
        # preprocessing function
        def prepro(image, imclass, attralpha):
            # VGG-style mean subtraction (per-channel means broadcast over HxW).
            means = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3])
            proc_img = image - means
            return proc_img, imclass, attralpha
        # Dataset creation
        # Training dataset
        print("Creating datasets..")
        train_files = tf.constant(train_files)
        seen_train_files = tf.constant(seen_train_files)
        train_imclass = tf.constant(train_imclass)
        train_attralpha = tf.constant(train_attralpha)
        train_dataset = tf.contrib.data.Dataset.from_tensor_slices((seen_train_files, train_imclass, train_attralpha))
        # 'unified' uses the model's own preprocessing module; otherwise
        # resize + mean-subtraction (optionally with noise sampling).
        if config.prepro == 'unified':
            train_dataset = train_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size = int(config.batch_size))
        else:
            if config.sampling_mode =='noise':
                train_dataset = train_dataset.map(_parse_function_noise, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
                train_dataset = train_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            else:
                train_dataset = train_dataset.map(_parse_function_seen, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
                train_dataset = train_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        train_dataset = train_dataset.shuffle(buffer_size=len(train_split))
        batched_train_dataset = train_dataset.batch(int(config.batch_size))
        # Validation dataset
        val_files = tf.constant(val_files)
        val_imclass = tf.constant(val_imclass)
        val_attralpha = tf.constant(val_attralpha)
        val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_files, val_imclass, val_attralpha))
        if config.prepro == 'unified':
            val_dataset = val_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        else:
            val_dataset = val_dataset.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            val_dataset = val_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        batched_val_dataset = val_dataset.batch(int(config.batch_size))
        # Test dataset
        test_files = tf.constant(test_files)
        test_labels = tf.constant(test_labels)
        test_attralpha = tf.constant(test_attralpha)
        test_dataset = tf.contrib.data.Dataset.from_tensor_slices((test_files, test_labels, test_attralpha))
        if config.prepro == 'unified':
            test_dataset = test_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        else:
            test_dataset = test_dataset.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            test_dataset = test_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        batched_test_dataset = test_dataset.batch(int(config.batch_size))
        # Test dataset restricted to seen classes.
        test_files_seen = tf.constant(test_files_seen)
        test_labels_seen = tf.constant(test_labels_seen)
        test_attralpha_seen = tf.constant(test_attralpha_seen)
        test_dataset_seen = tf.contrib.data.Dataset.from_tensor_slices((test_files_seen, test_labels_seen, test_attralpha_seen))
        if config.prepro =='unified':
            test_dataset_seen = test_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        else:
            test_dataset_seen = test_dataset_seen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            test_dataset_seen = test_dataset_seen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        batched_test_dataset_seen = test_dataset_seen.batch(int(config.batch_size))
        # Test dataset unseen
        test_files_unseen = tf.constant(test_files_unseen)
        test_labels_unseen = tf.constant(test_labels_unseen)
        test_attralpha_unseen = tf.constant(test_attralpha_unseen)
        test_dataset_unseen = tf.contrib.data.Dataset.from_tensor_slices((test_files_unseen, test_labels_unseen, test_attralpha_unseen))
        # test_dataset_unseen = test_dataset_unseen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        if config.prepro =='unified':
            test_dataset_unseen = test_dataset_unseen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        else:
            test_dataset_unseen = test_dataset_unseen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            test_dataset_unseen = test_dataset_unseen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        batched_test_dataset_unseen = test_dataset_unseen.batch(int(config.batch_size))
        # Train dataset seen
        train_files_seen = tf.constant(train_files_seen)
        train_labels_seen = tf.constant(train_labels_seen)
        train_attralpha_seen = tf.constant(train_attralpha_seen)
        train_dataset_seen = tf.contrib.data.Dataset.from_tensor_slices((train_files_seen, train_labels_seen, train_attralpha_seen))
        #train_dataset_seen = train_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        if config.prepro =='unified':
            train_dataset_seen = train_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        else:
            train_dataset_seen = train_dataset_seen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
            train_dataset_seen = train_dataset_seen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
        batched_train_dataset_seen = train_dataset_seen.batch(int(config.batch_size))
        # Define iterator that operates on either of the splits
        iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
        images, imclass, attralpha = iterator.get_next()
        # One initializer op per split; running one re-points the shared
        # iterator at that split.
        train_init_op = iterator.make_initializer(batched_train_dataset)
        val_init_op = iterator.make_initializer(batched_val_dataset)
        test_init_op = iterator.make_initializer(batched_test_dataset)
        test_seen_init_op = iterator.make_initializer(batched_test_dataset_seen)
        train_seen_init_op = iterator.make_initializer(batched_train_dataset_seen)
        test_unseen_init_op = iterator.make_initializer(batched_test_dataset_unseen)
        # Boolean variable for train-vs-test
        is_training = tf.placeholder(tf.bool)
        # Define the global step to be some tf.Variable
        global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
        # Resolve the backbone constructor and its slim arg scope from config.
        model_c = getattr(nets, config.model_class, None)
        model = getattr(model_c, config.model_name, None)
        arg_scope = getattr(model_c, config.scope, None)
        # Get number of classes in train and test
        n_seen = int(config.n_seen)
        n_unseen = int(config.n_unseen)
        print("Defining model from .. ", model_c )
        with slim.arg_scope(arg_scope(weight_decay=float(0))):
            print("--------------------------Using original network---------------------------------------------------------------")
            # with slim.arg_scope(arg_scope()):
            if config.base_model == 'resnet':
                logits, endpoints = model(images, num_classes=n_seen, is_training=is_training)
            else:
                logits, endpoints = model(images, num_classes=n_seen, is_training=is_training, dropout_keep_prob=bool(config.dropout))
            # Grab the pretrained classifier (fc8/logits) weights so the new
            # heads can be initialized from them below.
            if config.base_model == 'resnet':
                fc8_seen_weights = tf.contrib.framework.get_variables(config.penultimate_seen_weights)
                fc8_seen_biases = tf.contrib.framework.get_variables('resnet_v1_101/logits/biases:0')
            else:
                fc8_seen_weights = tf.contrib.framework.get_variables('vgg_16/fc8/weights:0')
                fc8_seen_biases = tf.contrib.framework.get_variables('vgg_16/fc8/biases:0')
            # Check for model path
            # assert(os.path.isfile(config.ckpt_path))
            if config.base_model =='resnet':
                if config.ckpt == 'old':
                    # Mixed restore: variables present in the new checkpoint come
                    # from it; the rest are borrowed from the original checkpoint,
                    # and anything left over is freshly initialized.
                    orig_ckpt = config.orig_ckpt_path
                    orig_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(orig_ckpt)
                    new_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(config.ckpt_path)
                    new_var_to_shape_map = new_ckpt_reader.get_variable_to_shape_map()
                    orig_var_to_shape_map = orig_ckpt_reader.get_variable_to_shape_map()
                    vars_in_orig_ckpt = [key for key in sorted(orig_var_to_shape_map)]
                    vars_in_new_ckpt = [key for key in sorted(new_var_to_shape_map)]
                    vars_in_graph = [x.name.split(':')[0] for x in tf.contrib.framework.get_variables()]
                    # Variables to borrow from old ckpt
                    vars_to_borrow = list(set(list(set(vars_in_graph) - set(vars_in_new_ckpt))) & set(vars_in_orig_ckpt))
                    # Variables to initialize
                    # vars_to_init = list(set(vars_in_graph) - set(vars_to_borrow))
                    vars_to_init = list(set(vars_in_graph) - set(vars_to_borrow + vars_in_new_ckpt))
                    # Old ckpt init function
                    old_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(orig_ckpt, [x for x in tf.contrib.framework.get_variables() if (x.name.split(':')[0] in vars_to_borrow) and ('global_step' not in x.name)])
                    # New ckpt init function
                    new_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, [x for x in tf.contrib.framework.get_variables() if x.name.split(':')[0] in vars_in_new_ckpt])
                else:
                    new_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, tf.contrib.framework.get_variables_to_restore(exclude=['global_step']))
                var_init = tf.variables_initializer([global_step_tensor])
                # get seen weights and initialize new layer with mean of them
                # NOTE(review): this temporary session runs the restore just to
                # read the seen-head weight values as numpy arrays.
                sess1 = tf.Session()
                new_ckpt_init_fn(sess1)
                fc8_seen_weights_value = sess1.run(fc8_seen_weights)[0]
                fc8_seen_biases_value = sess1.run(fc8_seen_biases)[0]
                fc8_seen_weights_mean = fc8_seen_weights_value.mean(axis=3)
                fc8_seen_biases_mean = fc8_seen_biases_value.mean(axis=0)
                fc8_seen_weights_init = np.repeat(fc8_seen_weights_mean, n_unseen, axis=2)
                fc8_seen_biases_init = np.repeat(fc8_seen_biases_mean, n_unseen)
                logits = tf.squeeze(logits)
                # Add a new head
                if config.unseen_w_init == 'seen_centered':
                    # Initialize by a gaussian centered on the seen class weights
                    mean_seen_wt = tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1, keep_dims=True)
                    std_seen_wt = tf.sqrt(tf.reduce_mean(tf.square(tf.squeeze(fc8_seen_weights) - mean_seen_wt), axis=1))
                    mean_wt = tf.tile(mean_seen_wt, [1, n_unseen])
                    std_wt = tf.tile(tf.expand_dims(std_seen_wt, axis=1), [1, n_unseen])
                    w_init = tf.random_normal_initializer(mean_wt, std_wt)
                    logits_unseen = slim.conv2d(endpoints['global_pool'], n_unseen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_unseen', weights_initializer = w_init)
                else:
                    logits_unseen = slim.conv2d(endpoints['global_pool'], n_unseen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_unseen', weights_initializer = tf.constant_initializer(fc8_seen_weights_init))
                logits_seen = slim.conv2d(endpoints['global_pool'], n_seen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_seen', weights_initializer = tf.constant_initializer(fc8_seen_weights_value))
                logits = array_ops.squeeze(logits_seen, [1,2])
                logits_unseen = array_ops.squeeze(logits_unseen, [1,2])
            else:
                var_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=['global_step'])
                print("Using base model checkpoint from: ", config.ckpt_path)
                init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, var_to_restore)
                if config.unseen_w_init == 'seen_centered':
                    print("Seen centered initializaton of unseen weights")
                    # Initialize by a gaussian centered on the seen class weights
                    mean_seen_wt = tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1, keep_dims=True)
                    std_seen_wt = tf.sqrt(tf.reduce_mean(tf.square(tf.squeeze(fc8_seen_weights) - mean_seen_wt), axis=1))
                    mean_wt = tf.tile(mean_seen_wt, [1, n_unseen])
                    std_wt = tf.tile(tf.expand_dims(std_seen_wt, axis=1), [1, n_unseen])
                    w_init = tf.random_normal_initializer(mean_wt, std_wt)
                    logits_unseen = array_ops.squeeze(tf.contrib.layers.fully_connected(inputs=endpoints['vgg_16/fc7'], num_outputs=n_unseen, activation_fn=None, weights_initializer = w_init), [1,2], name = 'fc8_unseen')
                else:
                    logits_unseen = array_ops.squeeze(tf.contrib.layers.fully_connected(inputs=endpoints['vgg_16/fc7'], num_outputs=n_unseen, activation_fn=None), [1,2], name = 'fc8_unseen')
            # Evaluation Metrics for seen classes
            prediction_seen = tf.to_int32(tf.argmax(logits, -1))
            prediction_seen = tf.squeeze(prediction_seen )
            imclass = tf.squeeze(imclass)
            correct_prediction_seen = tf.equal(prediction_seen , imclass)
            accuracy_seen = tf.reduce_mean(tf.cast(correct_prediction_seen , tf.float32))
            logits_seen_unseen = tf.concat([logits, logits_unseen],1)
            sys.stdout.flush()
            # Evaluation Metrics
            prediction = tf.to_int32(tf.argmax(logits_unseen, -1))
            prediction = tf.squeeze(prediction)
            imclass = tf.squeeze(imclass)
            correct_prediction = tf.equal(prediction, imclass)
            accuracy_unseen = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            saver = tf.train.Saver()
            # Generalized ZSL performance
            prediction_seen_unseen = tf.to_int32(tf.argmax(logits_seen_unseen,-1))
            correct_prediction_seen_unseen = tf.equal(prediction_seen_unseen, imclass)
            accuracy_seen_unseen = tf.reduce_mean(tf.cast(correct_prediction_seen_unseen, tf.float32))
            # ----------------------------------Optimization starts here---------------------------------------
            # Define one-hot for seen class
            one_hot_seen = tf.one_hot(imclass, n_seen, 1.0)
            signal_seen = tf.multiply(logits, one_hot_seen)
            # Define how to get alphas
            layer_name = config.model_name + '/' + config.layer_name
            grads_seen = tf.gradients(signal_seen, endpoints[layer_name])
            # Get alphas
            # Grad-CAM style: spatially sum the gradients of the class signal
            # w.r.t. the chosen feature layer.
            alphas_seen = tf.reduce_sum(grads_seen[0], [1,2])
            # Define one-hot for unseen class
            one_hot_unseen = tf.one_hot(imclass, n_unseen, 1.0)
            signal_unseen = tf.multiply(logits_unseen, one_hot_unseen)
            # Define how to get alphas
            layer_name = config.model_name + '/' + config.layer_name
            grads_unseen = tf.gradients(signal_unseen, endpoints[layer_name])[0]
            # Get alphas
            alphas_unseen = tf.reduce_sum(grads_unseen, [1,2])
            # Regularization coefficient
            lambda_loss = float(config.reg_lambda)
            attr_alpha_normalized = tf.nn.l2_normalize(attralpha, 1)
            alphas_unseen_normalized = tf.nn.l2_normalize(alphas_unseen, 1)
            # Cosine distance loss, assumes that both inputs are normalized
            def binary_activation(x):
                cond = tf.less(x, tf.zeros(tf.shape(x)))
                out = tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))
                return out
            # Loss between network alphas and the predicted importances from domain expert --> alpha model
            if config.alpha_loss_type =="cd":
                zsl_alpha_loss = tf.reduce_mean(tf.losses.cosine_distance(attr_alpha_normalized, alphas_unseen_normalized, dim=1, reduction=tf.losses.Reduction.NONE))
            # Define the optimizers
            if config.optimizer =='adam':
                optimizer = tf.train.AdamOptimizer(float(config.learning_rate))
            if config.optimizer =='sgd':
                optimizer = tf.train.GradientDescentOptimizer(float(config.learning_rate))
            if config.optimizer =='sgd_momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate = float(config.learning_rate) , momentum = 0.9)
            # get the newly initialized vars (for the unseen head)
            if config.base_model =='resnet':
                new_var = [v for v in tf.trainable_variables() if v.name == 'logits_unseen/weights:0' or v.name =='logits_unseen/biases:0' or v.name == 'logits_seen/weights:0' or v.name =='logits_seen/biases:0']
            else:
                new_var = [v for v in tf.trainable_variables() if v.name == 'fully_connected/weights:0' or v.name =='fully_connected/biases:0']#tf.contrib.framework.get_variables('logits_unseen')
            # Regularizer term
            if config.reg_loss == 'dimn_wise_l2':
                zsl_reg_loss = tf.nn.l2_loss(tf.squeeze(new_var[0]) - tf.expand_dims(tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1), axis=1))
            # Total loss is sum of loss and lambda times reg term
            zsl_loss = zsl_alpha_loss + lambda_loss * zsl_reg_loss
            weights_unseen_grad = tf.gradients(alphas_unseen, new_var[0])[0]
            # define training op with the parameters to be optimized (unseen head params and global step)
            new_train_op = tf.contrib.slim.learning.create_train_op(zsl_loss, optimizer, variables_to_train=new_var)
            # Also initialize optimizer slot variables (Adam/Momentum) and step.
            new_vars_with_adam = new_var + [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name or 'Adam' in x.name or 'global_step' in x.name]
            new_vars_with_adam_momentum = new_var + [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'Momentum' in x.name ]
            # Define the global step to be some tf.Variable
            global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
            new_init = tf.variables_initializer(new_vars_with_adam + new_vars_with_adam_momentum + [global_step_tensor])
    print("Graph finalized")
    # save accuracies as trainig proceeds
    seen_seen_head_ls, unseen_unseen_head_ls, seen_seen_unseen_head_ls, seen_unseen_seen_unseen_head_ls, unseen_seen_unseen_head_ls, hm_ls = [],[],[],[],[], []
    vLoss = []
    print("Running simple forward pass for getting initial performance ..")
    with tf.Session(graph=graph) as sess:
        # Restore the backbone and initialize the new head + optimizer state.
        if config.base_model=='resnet':
            if config.ckpt=='old':
                new_ckpt_init_fn(sess)
                old_ckpt_init_fn(sess)
            else:
                new_ckpt_init_fn(sess)
                sess.run(var_init)
        else:
            init_fn(sess)
            sess.run(new_init)
        val_loss_best = 1e20
        iteration = 1
        # Calculate seen head accuracy on the test set
        seen_seen_head = check_accuracy_normalized(sess, prediction_seen,imclass, accuracy_seen, is_training, train_seen_init_op, verbose=False)
        print("seen head test accuracy (argmax {}): {}".format(config.n_seen, seen_seen_head))
        test_accuracy = check_accuracy_normalized(sess, prediction_seen_unseen,imclass, accuracy_seen_unseen, is_training, test_init_op, verbose=False)
        print("Normalized Initial seen + unseen head full test accuracy: {}".format(test_accuracy))
        # Define criterion for early stopping (loss doesn't improve by 1% in 40 iterations)
        zsl_loss_monitor = 1e20
        zsl_loss_ctr = 1
        zsl_loss_window = int(config.zsl_loss_estop_window)
        # Start training
        print("Starting Optimization .....")
        epoch_flag = False
        for epoch in range(1, int(config.num_epochs)):
            if epoch_flag:
                break
            m = 0
            sys.stdout.flush()
            sess.run(train_init_op)
            loss_list = []
            loss_alpha_list = []
            loss_reg_list = []
            while True:
                try:
                    l, zsl_alpha, zsl_reg, zsl_total = sess.run([new_train_op, zsl_alpha_loss, zsl_reg_loss, zsl_loss], {is_training:False})
                    # Early stopping: stop when the loss has not improved by
                    # eps_perc for zsl_loss_window consecutive iterations.
                    if zsl_loss_ctr >= zsl_loss_window:
                        epoch_flag = True
                        print('Breaking out of optimization split\n\n')
                        break
                    if iteration == 1:
                        zsl_loss_monitor = zsl_total
                    else:
                        if (1 - (zsl_total/zsl_loss_monitor)) > float(config.eps_perc):
                            zsl_loss_ctr = 0
                            zsl_loss_monitor = zsl_total
                        else:
                            zsl_loss_ctr += 1
                    loss_list.append(l)
                    loss_alpha_list.append(zsl_alpha)
                    loss_reg_list.append(zsl_reg)
                    iteration +=1
                except tf.errors.OutOfRangeError:
                    break
            valLoss = np.mean(np.array(loss_list))
            print("Epoch {}, average_training_loss_alpha: {}".format(epoch, np.mean(np.array(loss_alpha_list))))
            print("Epoch {}, average_training_loss_reg : {}".format(epoch, np.mean(np.array(loss_reg_list))))
            print("Epoch {}, average_training_loss: {}".format(epoch, valLoss))
            # Compute accuracy
            seen_seen_head_ls.append(seen_seen_head)
            unseen_unseen_head = check_accuracy_normalized(sess, prediction,imclass, accuracy_unseen, is_training, val_init_op)
            print("unseen head test accuracy (argmax {}): {}".format(config.n_unseen, unseen_unseen_head))
            unseen_unseen_head_ls.append(unseen_unseen_head)
            seen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_seen_init_op)
            print("seen head full test accuracy: (argmax {}): {}".format(config.n_class, seen_seen_unseen_head))
            seen_seen_unseen_head_ls.append(seen_seen_unseen_head)
            seen_unseen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_init_op)
            print("seen + unseen head full test accuracy: (argmax {}): {}".format(config.n_class, seen_unseen_seen_unseen_head))
            seen_unseen_seen_unseen_head_ls.append(seen_unseen_seen_unseen_head)
            unseen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_unseen_init_op, verbose=False)
            print("unseen head full test accuracy: (argmax {}): {}".format(config.n_class, unseen_seen_unseen_head))
            unseen_seen_unseen_head_ls.append(unseen_seen_unseen_head)
            # Compute Harmonic Mean of seen accuracies and unseen accuracies.
            H = 2*seen_seen_unseen_head * unseen_seen_unseen_head/(seen_seen_unseen_head+ unseen_seen_unseen_head)
            print("Harmonic mean", H)
            hm_ls.append(H)
            if valLoss <= val_loss_best:
                # Loss improved: checkpoint with metrics embedded in the name.
                val_loss_best = valLoss
                checkpoint_dir = config.ckpt_dir + '{}_{}_{}_cnn_seen_val{}_alpha_loss_{}_d2a_model_{}_bs_{}_lr_{}_lambda_{}_epoch_{}_ssu_{:0.2f}_usu_{:0.3f}_h_{:0.3f}.ckpt'.format(config.dataset, config.model_name, config.modality, config.n_unseen, config.alpha_loss_type, config.dom2alpha_model, config.batch_size, config.learning_rate, config.reg_lambda, epoch, seen_seen_unseen_head, unseen_seen_unseen_head, H)
                saver.save(sess, checkpoint_dir)
                print("saved_checkpoint to {}".format(checkpoint_dir))
            uhead_plotter(seen_seen_head_ls, unseen_unseen_head_ls, seen_unseen_seen_unseen_head_ls, seen_seen_unseen_head_ls, unseen_seen_unseen_head_ls, hm_ls, config.ckpt_dir, 'Normalized_Accuracy_logs')
            sys.stdout.flush()
        print("Optimization Done")
        print("Best Checkpoint: ", checkpoint_dir)
# Script entry point: parse command-line arguments and launch training.
if __name__ == '__main__':
    args = parser.parse_args()
    print(args)
    main(args)
| 49,803 | 47.589268 | 419 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/layers.py | from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .inits import zeros
# DISCLAIMER:
# This file is forked from
# https://github.com/tkipf/gcn
# which is also under the MIT license
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}


def get_layer_uid(layer_name=''):
    """Return the next unique integer ID for the given layer name (1-based)."""
    uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = uid
    return uid
class Layer(object):
    """Base class for all layer objects, with a minimal Keras-like API.

    Properties
        name: String used as the layer's name scope.
        logging: Boolean; when True, TensorFlow histogram summaries are
            written for inputs, outputs and variables.

    Methods
        _call(inputs): The layer's computation (identity by default);
            subclasses override this.
        __call__(inputs): Wraps _call() and emits optional histogram logs.
        _log_vars(): Write a histogram summary for every layer variable.
    """

    def __init__(self, **kwargs):
        # Reject unknown constructor keywords early.
        allowed_kwargs = {'name', 'logging', 'model_size'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        # Auto-generate a unique name (e.g. "dense_2") when none is given.
        name = kwargs.get('name')
        if not name:
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    def _call(self, inputs):
        # Default computation: identity. Subclasses override.
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            # Input histograms are skipped for sparse inputs.
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            result = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', result)
            return result

    def _log_vars(self):
        # One histogram summary per layer variable.
        for key, value in self.vars.items():
            tf.summary.histogram(self.name + '/vars/' + key, value)
class Dense(Layer):
    """Fully-connected layer: output = act(dropout(x) @ W [+ b]).

    Weights are created under a '<name>_vars' variable scope with Xavier
    initialization and L2 regularization scaled by `weight_decay`.
    """

    def __init__(self, input_dim, output_dim, dropout=0., weight_decay=0.,
                 act=tf.nn.relu, placeholders=None, bias=True, featureless=False,
                 sparse_inputs=False, **kwargs):
        super(Dense, self).__init__(**kwargs)

        # Layer configuration.
        self.dropout = dropout
        self.weight_decay = weight_decay
        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim

        # Helper variable for sparse dropout.
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            self.num_features_nonzero = placeholders['num_features_nonzero']

        # Create the trainable parameters under this layer's scope.
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.get_variable(
                'weights', shape=(input_dim, output_dim),
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer(),
                regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        # Dropout with keep probability 1 - dropout.
        dropped = tf.nn.dropout(inputs, 1 - self.dropout)
        # Linear transform.
        out = tf.matmul(dropped, self.vars['weights'])
        # Optional bias.
        if self.bias:
            out = out + self.vars['bias']
        return self.act(out)
| 3,731 | 31.172414 | 105 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/test.py | import argparse
import os
import random
import shutil
import time
import warnings
import sys
import cv2
import numpy as np
import scipy.misc
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
#import torchvision.models as models
from datasets import get_dataset
from models import get_classification_model
from sr_models.model import RDN, Vgg19
# Command-line interface shared by main()/main_worker() below.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# --- Data and dataset options ---
parser.add_argument('--data-root-pos', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--data-root-neg', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--dataset', type=str, default='cityscapes',
                    help='dataset name (default: pascal12)')
parser.add_argument('-a', '--arch', type=str, default='resnet50',
                    help='model architecture')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
# --- Optimization hyper-parameters ---
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--input-channel', default=3, type=int,
                    help='number of input channel')
# --- Checkpointing / evaluation ---
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
# --- Distributed-training options (consumed by main()/main_worker()) ---
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--save-every-epoch', type=int, default=10,
                    help='how many epochs to save a model.')
parser.add_argument('--output-path', default='./output_models', type=str, metavar='PATH',
                    help='path to output models')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--dataset_type', type=str, default='image',
                    help='which dataset to load.')
# --- Detector / defense options ---
parser.add_argument('--carlibration', default=1.0, type=float,
                    help='carlibration factor for posterior')
parser.add_argument('--defense', default=1.0, type=float,
                    help='defense factor')
parser.add_argument('--save_path', type=str, default='./score.npy', help='save models')
parser.add_argument('--no_dilation', action='store_true', help='do not use dilated convolutions in attackers')
# --- Super-resolution (RDN) model options ---
parser.add_argument('--sr-num-features', type=int, default=64)
parser.add_argument('--sr-growth-rate', type=int, default=64)
parser.add_argument('--sr-num-blocks', type=int, default=16)
parser.add_argument('--sr-num-layers', type=int, default=8)
parser.add_argument('--sr-scale', type=int, default=4)
parser.add_argument('--sr-weights-file', type=str, required=True)
parser.add_argument('--idx-stages', type=int, default=0)
# Best top-1 accuracy seen so far; updated by the training workers.
best_acc1 = 0
def main():
    """Parse CLI arguments and dispatch evaluation to one or many workers."""
    args = parser.parse_args()

    if args.seed is not None:
        # Deterministic run: fix RNGs and force deterministic cuDNN kernels.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    gpus_on_node = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single process: run the worker inline on the requested GPU.
        main_worker(args.gpu, gpus_on_node, args)
        return

    # One process per GPU; world_size becomes the total process count.
    args.world_size = gpus_on_node * args.world_size
    mp.spawn(main_worker, nprocs=gpus_on_node, args=(gpus_on_node, args))
def main_worker(gpu, ngpus_per_node, args):
    """Evaluation worker for one (possibly distributed) process.

    Builds the residual classifier and the pretrained super-resolution
    network, optionally restores a checkpoint, runs `test` over the dataset
    and saves the per-sample scores to ``args.save_path``.

    Args:
        gpu: GPU index assigned to this process (None -> any/all devices).
        ngpus_per_node: GPU count on this node, used to derive the global rank.
        args: parsed command-line namespace.
    """
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
    # Binary classifier (real vs. fake) over reconstruction residuals.
    model = get_classification_model(arch=args.arch, pretrained = args.pretrained,
        input_channel=args.input_channel, num_classes=2, dilated=(not args.no_dilation))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:%d'%(args.gpu))
            # strict=False: tolerate key mismatches (e.g. DataParallel prefixes).
            model.load_state_dict(checkpoint['state_dict'],strict=False)
            print("=> loaded checkpoint '{}'"
                  .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    test_dataset = get_dataset(name=args.dataset_type, root_pos=args.data_root_pos, root_neg=args.data_root_neg, flip=False)
    if args.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    else:
        test_sampler = None
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=test_sampler)
    # Super-resolution network; built with requires_grad=False and its weights
    # are restored from --sr-weights-file below (never trained here).
    sr_model = RDN(scale_factor=args.sr_scale,
                   num_channels=3,
                   num_features=args.sr_num_features,
                   growth_rate=args.sr_growth_rate,
                   num_blocks=args.sr_num_blocks,
                   num_layers=args.sr_num_layers,
                   requires_grad=False).cuda(args.gpu)
    checkpoint = torch.load(args.sr_weights_file, map_location='cuda:%d'%(args.gpu))
    # The checkpoint may be a raw state dict or wrapped in {'state_dict': ...}.
    if 'state_dict' in checkpoint.keys():
        sr_model.load_state_dict(checkpoint['state_dict'])
    else:
        sr_model.load_state_dict(checkpoint)
    # VGG19 feature extractor for feature-space residuals (args.idx_stages > 0).
    perception_net = Vgg19().cuda(args.gpu)
    Precision, Recall, Score = test(test_loader, model, sr_model, perception_net, args)
    np.save(args.save_path, Score)
def test(test_loader, model, sr_model, perception_net, args):
    """Evaluate the detector over `test_loader`; return (precision, recall, scores).

    For each batch the image (pixel values in [0, 255]) is average-pooled down
    by ``args.sr_scale``, re-upsampled with ``sr_model``, and the residual
    |reconstruction - input| (pixel space, or VGG feature space at stage
    ``args.idx_stages`` when it is > 0) is fed to `model`, which emits 2-class
    logits: column 1 > column 0 means "predicted positive".

    Args:
        test_loader: yields (image_batch, target_batch, path).
        model: classifier returning (logits, last_feature_map).
        sr_model: super-resolution network matching ``args.sr_scale``.
        perception_net: VGG feature extractor (used only when idx_stages > 0).
        args: needs ``.gpu``, ``.sr_scale`` and ``.idx_stages``.

    Returns:
        (precision, recall, scores); scores is an ndarray of raw logits,
        one row per sample.
    """
    TP = 0
    FP = 0
    FN = 0
    ACC = 0
    seen = 0  # samples processed so far (denominator for running accuracy)
    # switch to eval mode
    model.eval()
    sr_model.eval()
    score = []
    # Inference only: no autograd graph needed.
    with torch.no_grad():
        for i, (input, target, post_path) in enumerate(test_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # Low-resolution image = mean over each sr_scale x sr_scale cell.
            lr = 0
            for ii in range(args.sr_scale):
                for jj in range(args.sr_scale):
                    lr = lr + input[:, :, ii::args.sr_scale, jj::args.sr_scale] / (args.sr_scale * args.sr_scale)
            lr = lr / 255.0
            input = input / 255.0
            preds_input = sr_model(lr)
            if args.idx_stages > 0:
                # Residual in VGG feature space at the requested stage.
                per_rec = perception_net(preds_input)
                per_gt = perception_net(input)
                rec_features = abs( per_rec[args.idx_stages - 1] - per_gt[args.idx_stages - 1] )
                output, layer4 = model( rec_features )
            else:
                # Residual in pixel space.
                rec_features0 = abs( preds_input - input )
                output, layer4 = model( rec_features0 )
            pred = (output[:, 0] < output[:, 1]).cpu().numpy()
            target = target.cpu().numpy()
            output = output.detach().cpu().numpy()
            # Keep logits of EVERY sample (previously only output[0], which
            # silently dropped data whenever batch_size > 1).
            score.extend(output)
            TP += sum((target == pred) * (1 == pred))
            FP += sum((target != pred) * (1 == pred))
            FN += sum((target != pred) * (0 == pred))
            ACC += sum(target == pred)
            seen += len(target)
            # Guard: TP+FP / TP+FN can legitimately be 0 in early batches,
            # which previously raised ZeroDivisionError in this print.
            precision = 1.0 * TP / (TP + FP) if (TP + FP) > 0 else 0.0
            recall = 1.0 * TP / (TP + FN) if (TP + FN) > 0 else 0.0
            # Accuracy over samples seen (was ACC/(i+1): per-batch denominator,
            # wrong for batch_size > 1; matches train.py's CORRECT/TOTAL now).
            print('%08d : Precision=%.4f , Recall = %.4f, Acc = %.4f' % (i + 1, precision, recall, 1.0 * ACC / seen))
    precision = 1.0 * TP / (TP + FP) if (TP + FP) > 0 else 0.0
    recall = 1.0 * TP / (TP + FN) if (TP + FN) > 0 else 0.0
    return precision, recall, np.array(score)
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series.

    Exposes `val` (last value), `sum`, `count` and `avg` (= sum / count).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget everything seen so far."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in `val`, weighted as `n` observations, and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
# Script entry point: run the evaluation when executed directly.
if __name__ == '__main__':
    main()
| 13,359 | 39.731707 | 124 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/train_sr.py | import argparse
import os
import copy
import torch
from torch import nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from sr_models.model import RDN, VGGLoss
from sr_models.datasets import TrainDataset, EvalDataset
from sr_models.utils import AverageMeter, calc_psnr, convert_rgb_to_y, denormalize
if __name__ == '__main__':
    # ---- CLI ----------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-file', type=str, required=True)
    parser.add_argument('--eval-file', type=str, required=True)
    parser.add_argument('--outputs-dir', type=str, required=True)
    parser.add_argument('--weights-file', type=str)
    parser.add_argument('--num-features', type=int, default=64)
    parser.add_argument('--growth-rate', type=int, default=64)
    parser.add_argument('--num-blocks', type=int, default=16)
    parser.add_argument('--num-layers', type=int, default=8)
    parser.add_argument('--scale', type=int, default=4)
    parser.add_argument('--patch-size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr-decay', type=float, default=0.5)
    parser.add_argument('--lr-decay-epoch', type=int, default=200)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--num-epochs', type=int, default=800)
    parser.add_argument('--num-save', type=int, default=100)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--gpu-id',type=int, default=0)
    parser.add_argument('--seed', type=int, default=123)
    parser.add_argument('--vgg-lambda', type=float, default=0.2)
    parser.add_argument('--augment', action='store_true', help='whether applying jpeg and gaussian noising augmentation in training a sr model')
    parser.add_argument('--completion', action='store_true', help='completion')
    parser.add_argument('--colorization', action='store_true', help='colorization')
    args = parser.parse_args()
    # Checkpoints are written under <outputs-dir>/x<scale>/.
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))
    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)
    cudnn.benchmark = True
    device = torch.device('cuda:%d'%args.gpu_id if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(args.seed)
    # ---- Model ----------------------------------------------------------------
    model = RDN(scale_factor=args.scale,
                num_channels=3,
                num_features=args.num_features,
                growth_rate=args.growth_rate,
                num_blocks=args.num_blocks,
                num_layers=args.num_layers).to(device)
    if args.weights_file is not None:
        # copy_ writes through the tensors returned by state_dict(), which
        # share storage with the live parameters, so this restores the
        # checkpoint in place (no explicit load_state_dict call needed).
        state_dict = model.state_dict()
        for n, p in torch.load(args.weights_file, map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)
    # L1 reconstruction loss plus a VGG perceptual term weighted by --vgg-lambda.
    criterion = nn.L1Loss()
    criterion_vgg = VGGLoss(args.gpu_id)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    train_dataset = TrainDataset(args.train_file, patch_size=args.patch_size, scale=args.scale, aug=args.augment, colorization=args.colorization, completion=args.completion)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    #eval_dataset = EvalDataset(args.eval_file, scale=args.scale)
    #eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)
    # NOTE(review): best_weights/best_epoch/best_psnr are never updated below
    # (evaluation is commented out above), so the best-model tracking is dead.
    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0
    for epoch in range(args.num_epochs):
        # Step-decay schedule: lr * lr_decay^(epoch // lr_decay_epoch).
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * (args.lr_decay ** (epoch // args.lr_decay_epoch))
        model.train()
        epoch_losses = AverageMeter()
        with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size), ncols=80) as t:
            t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))
            for data in train_dataloader:
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
                preds = model(inputs)
                loss = criterion(preds, labels) + criterion_vgg(preds, labels) * args.vgg_lambda
                epoch_losses.update(loss.item(), len(inputs))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))
        # Periodic snapshot of the SR weights.
        if (epoch + 1) % args.num_save == 0:
            torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
| 4,950 | 41.316239 | 173 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/train.py | import argparse
import os
import random
import shutil
import time
import warnings
import sys
import cv2
import numpy as np
import scipy.misc
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from datasets import get_dataset
from models import get_classification_model
from sr_models.model import RDN, Vgg19
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# ---- data -------------------------------------------------------------------
parser.add_argument('--data-root-pos', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--data-root-neg', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--dataset', type=str, default='cityscapes',
                    help='dataset name (default: pascal12)')
# ---- model / optimization ---------------------------------------------------
parser.add_argument('-a', '--arch', type=str, default='resnet50',
                    help='model architecture')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--input-channel', default=3, type=int,
                    help='number of input channel')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
# ---- distributed training ---------------------------------------------------
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--save-every-epoch', type=int, default=2,
                    help='how many epochs to save a model.')
parser.add_argument('--output-path', default='./output_models', type=str, metavar='PATH',
                    help='path to output models')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# ---- dataset / detector -----------------------------------------------------
parser.add_argument('--dataset_type', type=str, default='image',
                    help='which dataset to load.')
parser.add_argument('--carlibration', default=1.0, type=float,
                    help='carlibration factor for posterior')
parser.add_argument('--defense', default=1.0, type=float,
                    help='defense factor')
parser.add_argument('--save_path', type=str, default='./score.npy', help='save models')
parser.add_argument('--no_dilation', action='store_true', help='do not use dilated convolutions in attackers')
# ---- super-resolution network (RDN) -----------------------------------------
parser.add_argument('--sr-num-features', type=int, default=64)
parser.add_argument('--sr-growth-rate', type=int, default=64)
parser.add_argument('--sr-num-blocks', type=int, default=16)
parser.add_argument('--sr-num-layers', type=int, default=8)
parser.add_argument('--sr-scale', type=int, default=4)
parser.add_argument('--sr-weights-file', type=str, required=True)
parser.add_argument('--idx-stages', type=int, default=0)
parser.add_argument('--lr-sr', default=0.02, type=float, help=' learning rate for resolution')
parser.add_argument('--lw-sr', default=1, type=float, help='loss weight for reconstruction')
parser.add_argument('--mode-sr', default='none', type=str, help='can be none, colorization or denoising.')
parser.add_argument('--fixed-sr', dest='fixed_sr', action='store_true',
                    help='use fixed super resolution model')
def main():
    """Entry point: parse arguments, then run one worker per GPU (spawned)
    or a single worker inline."""
    args = parser.parse_args()

    seeded = args.seed is not None
    if seeded:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # env:// rendezvous: the launcher supplies WORLD_SIZE in the environment.
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    n_gpus = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # world_size becomes the total process count (one process per GPU).
        args.world_size = n_gpus * args.world_size
        mp.spawn(main_worker, nprocs=n_gpus, args=(n_gpus, args))
    else:
        main_worker(args.gpu, n_gpus, args)
def main_worker(gpu, ngpus_per_node, args):
    """Training worker for one (possibly distributed) process.

    Builds the residual classifier and the pretrained super-resolution
    network, then runs `train` for args.epochs epochs under a cosine
    learning-rate schedule, checkpointing both models periodically.

    Args:
        gpu: GPU index assigned to this process (None -> any/all devices).
        ngpus_per_node: GPU count on this node, used to derive the global rank.
        args: parsed command-line namespace.
    """
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
    # Binary classifier (real vs. fake) over reconstruction residuals.
    model = get_classification_model(arch=args.arch, pretrained = args.pretrained,
        input_channel=args.input_channel, num_classes=2, dilated=(not args.no_dilation))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    criterion2 = nn.L1Loss().cuda(args.gpu)
    # NOTE(review): this optimizer is replaced below once fixed_sr is known;
    # only the later assignment is used for training.
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:%d'%(args.gpu))
            args.start_epoch = checkpoint['epoch']
            # strict=False: tolerate key mismatches (e.g. DataParallel prefixes).
            model.load_state_dict(checkpoint['state_dict'],strict=False)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    test_dataset = get_dataset(name=args.dataset_type, root_pos=args.data_root_pos, root_neg=args.data_root_neg, flip=True)
    if args.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    else:
        test_sampler = None
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=test_sampler)
    # Super-resolution network; weights restored from --sr-weights-file below.
    sr_model = RDN(scale_factor=args.sr_scale,
                   num_channels=3,
                   num_features=args.sr_num_features,
                   growth_rate=args.sr_growth_rate,
                   num_blocks=args.sr_num_blocks,
                   num_layers=args.sr_num_layers,
                   requires_grad=False).cuda(args.gpu)
    checkpoint = torch.load(args.sr_weights_file, map_location='cuda:%d'%(args.gpu))
    # The checkpoint may be a raw state dict or wrapped in {'state_dict': ...}.
    if 'state_dict' in checkpoint.keys():
        sr_model.load_state_dict(checkpoint['state_dict'])
    else:
        sr_model.load_state_dict(checkpoint)
    # VGG19 feature extractor for perceptual losses / feature residuals.
    perception_net = Vgg19().cuda(args.gpu)
    # The effective optimizer: optionally include the SR parameters.
    if args.fixed_sr:
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(list(model.parameters()) + list(sr_model.parameters()), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    if not os.path.exists(args.output_path):
        os.mkdir(args.output_path)
    # Cosine learning-rate multiplier: ~1.0 at epoch 0 down to 0.0001 at the end.
    lr_epoch = 0.9999 * np.cos(np.pi / 2 * np.arange(args.epochs) / (args.epochs-1) ) + 0.0001
    for epoch in range( args.epochs ):
        # train for one epoch
        Precision, Recall, Score = train(epoch, test_loader, model, sr_model, perception_net, optimizer, criterion, criterion2, args)
        adjust_learning_rate(optimizer, lr_epoch[epoch] * args.lr )
        if epoch % args.save_every_epoch == 0 or epoch == args.epochs-1:
            # Two files per checkpoint: classifier (+optimizer) and SR weights.
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer' : optimizer.state_dict(),
            }, is_best=False, filename='%s/%04d.pth.tar'%(args.output_path , epoch+1))
            save_checkpoint({
                'state_dict': sr_model.state_dict(),
            }, is_best=False, filename='%s/%04d_sr.pth.tar'%(args.output_path , epoch+1))
def train(epoch, test_loader, model, sr_model, perception_net, optimizer, criterion, criterion2, args):
    """Run one joint training epoch of the detector and (unless args.fixed_sr)
    the super-resolution network.

    The classifier is trained on the reconstruction residual (pixel space, or
    VGG feature space when args.idx_stages > 0). The SR network is trained to
    reconstruct only the real samples (target == 1), optionally under random
    denoising / colorization corruptions of the low-resolution input.

    Args:
        epoch: current epoch index (for logging only).
        test_loader: yields (image_batch, target_batch, path); pixels in [0, 255].
        model: classifier returning (logits, last_feature_map).
        sr_model: super-resolution network matching args.sr_scale.
        perception_net: VGG feature extractor (5 stages).
        optimizer: optimizer over model (and sr_model unless fixed_sr).
        criterion: classification loss (cross-entropy).
        criterion2: reconstruction loss (L1).
        args: parsed command-line namespace.

    Returns:
        (precision, recall, scores) for the epoch; `scores` is always empty
        here (kept for signature parity with the evaluation script).
    """
    TP = 0
    FP = 0
    FN = 0
    TOTAL = 0
    CORRECT = 0
    # both nets in train mode (batch-norm / dropout active)
    model.train()
    sr_model.train()
    score = []  # never filled during training; returned empty for API parity
    for i, (input, target, post_path) in enumerate(test_loader):
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # Low-resolution input = mean over each sr_scale x sr_scale cell.
        lr = 0
        for ii in range(args.sr_scale):
            for jj in range(args.sr_scale):
                lr = lr + input[:, :, ii::args.sr_scale, jj::args.sr_scale] / (args.sr_scale * args.sr_scale)
        lr = lr / 255.0
        input = input / 255.0
        optimizer.zero_grad()
        preds_input = sr_model(lr)
        if args.idx_stages > 0:
            # Residual in VGG feature space at the requested stage.
            per_rec = perception_net(preds_input)
            per_gt = perception_net(input)
            rec_features = abs( per_rec[args.idx_stages - 1] - per_gt[args.idx_stages - 1] )
            output, aa = model( rec_features )
        else:
            # Residual in pixel space.
            rec_features0 = abs( preds_input - input )
            output, aa = model( rec_features0 )
        loss = criterion(output , target)
        # SR reconstruction loss, computed on real samples only.
        loss2 = 0
        if sum(target==1) > 1 and args.fixed_sr == False:
            if np.random.uniform(0,1) > 0.5 and args.mode_sr == 'denoising':
                # additive Gaussian noise on the LR input
                dims = lr.shape
                lr += torch.empty(lr.shape).normal_(mean=0,std=4.0 / 255).cuda()
            elif np.random.uniform(0,1) > 0.5 and args.mode_sr == 'colorization':
                # grey out a random 10-25% of the pixels per image
                dims = lr.shape
                mask = np.random.uniform(0,1,(dims[0],dims[2],dims[3]))
                mask = mask < np.random.uniform(0.1,0.25)
                tmp = lr.mean(dim=1)
                # BUGFIX: was `for i in ...`, which clobbered the batch
                # counter `i` used in the progress print below.
                for bi in range( dims[0] ):
                    lr[bi,:,mask[bi]] = tmp[bi,mask[bi]]
            preds_input = sr_model(lr)
            per_rec = perception_net(preds_input)
            per_gt = perception_net(input)
            loss_rec_real = criterion2(preds_input[target==1], input[target==1])
            loss_rec_real_feat = criterion2(per_rec[0][target==1], per_gt[0][target==1])
            loss_rec_real_feat2 = criterion2(per_rec[1][target==1], per_gt[1][target==1])
            loss_rec_real_feat3 = criterion2(per_rec[2][target==1], per_gt[2][target==1])
            loss_rec_real_feat4 = criterion2(per_rec[3][target==1], per_gt[3][target==1])
            loss_rec_real_feat5 = criterion2(per_rec[4][target==1], per_gt[4][target==1])
            # Feature terms weighted 1/32 ... 1 from shallow to deep stage.
            loss2 = loss_rec_real * args.lw_sr
            loss2 += (1.0 / 32) * (loss_rec_real_feat) * args.lw_sr
            loss2 += (1.0 / 16) * (loss_rec_real_feat2 ) * args.lw_sr
            loss2 += (1.0 / 8) * (loss_rec_real_feat3 ) * args.lw_sr
            loss2 += (1.0 / 4) * (loss_rec_real_feat4 ) * args.lw_sr
            loss2 += 1.0 * (loss_rec_real_feat5 ) * args.lw_sr
            loss_total = loss + loss2
        else:
            loss_total = loss
        loss_total.backward()
        # Rescale the classifier gradients by args.lr_sr; skip parameters
        # that received no gradient this step (p.grad would be None).
        for p in model.parameters():
            if p.grad is not None:
                p.grad.mul_( args.lr_sr )
        optimizer.step()
        pred = (output[:,0] < output[:,1])
        CORRECT += pred.eq(target.view_as(pred)).sum().item()
        TOTAL += output.shape[0]
        TP = TP + sum((target==pred)*(1==pred))
        FP = FP + sum((target!=pred)*(1==pred))
        FN = FN + sum((target!=pred)*(0==pred))
        # Guard: TP+FP / TP+FN can be 0 in early iterations, which previously
        # raised ZeroDivisionError in this print.
        precision = 1.0*TP/(TP+FP) if (TP + FP) > 0 else 0.0
        recall = 1.0*TP/(TP+FN) if (TP + FN) > 0 else 0.0
        print('%d (%d/%d) : Precision=%.4f, Recall = %.4f, Accuracy,%.4f, Loss=%.4f, Loss_Rec=%.4f' % (epoch, i, len(test_loader), precision, recall, 1.0*CORRECT/TOTAL,loss, loss2 ))
    precision = 1.0*TP/(TP+FP) if (TP + FP) > 0 else 0.0
    recall = 1.0*TP/(TP+FN) if (TP + FN) > 0 else 0.0
    return precision, recall, np.array(score)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; when `is_best`, also mirror the file
    to 'model_best.pth.tar' in the current working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Accumulates a stream of values, exposing the last value (`val`), the
    total (`sum`), the number of observations (`count`) and the mean (`avg`)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all accumulated statistics."""
        for attr in ('val', 'avg', 'sum', 'count'):
            setattr(self, attr, 0)

    def update(self, val, n=1):
        """Add `n` observations of `val` and recompute the running mean."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, lr):
    """Overwrite the learning rate of every parameter group with `lr`.

    (The schedule itself is computed by the caller; this just applies it.)
    """
    for group in optimizer.param_groups:
        group['lr'] = lr
# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
| 17,834 | 40.866197 | 195 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/customize.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Custermized NN Module"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
import logging
# Short "major.minor" torch version string, e.g. '1.9'.
torch_ver = torch.__version__[:3]
# Public API of this module.
__all__ = ['FrozenBatchNorm2d', 'GlobalAvgPool2d', 'GramMatrix',
           'View', 'Sum', 'Mean', 'Normalize', 'ConcurrentModule',
           'PyramidPooling']
class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.
    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.
    Other pre-trained backbone models may contain all 4 parameters.
    The forward is implemented by `F.batch_norm(..., training=False)`.
    """
    # state_dict format version; bumped when buffer semantics change (see
    # _load_from_state_dict for the migrations).
    _version = 3

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        # Buffers (not parameters): excluded from the optimizer but saved in
        # state_dict. Defaults implement the identity transform; running_var
        # stores (var - eps) so that (running_var + eps) round-trips exactly.
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features) - eps)

    def forward(self, x):
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            # Fold the frozen stats into a per-channel scale/bias instead.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provide more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            )

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        """Migrate old checkpoints: backfill buffers missing from version < 2
        and convert version < 3 running_var values (stored without the -eps)."""
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # No running_mean/var in early versions
            # This will silent the warnings
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
        if version is not None and version < 3:
            logger = logging.getLogger(__name__)
            logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
            # In version < 3, running_var are used without +eps.
            state_dict[prefix + "running_var"] -= self.eps
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)

    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """
        Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
        Args:
            module (torch.nn.Module):
        Returns:
            If module is BatchNorm/SyncBatchNorm, returns a new module.
            Otherwise, in-place convert module and return it.
        Similar to convert_sync_batchnorm in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            # Copy the live BN statistics/affine params into a frozen layer.
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            # Recurse into children, swapping converted ones in place.
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res
def get_norm(norm, out_channels):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
Returns:
nn.Module or None: the normalization layer
"""
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
# Fixed in https://github.com/pytorch/pytorch/pull/36382
"SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
# for debugging:
"nnSyncBN": nn.SyncBatchNorm,
"naiveSyncBN": NaiveSyncBatchNorm,
}[norm]
return norm(out_channels)
class GlobalAvgPool2d(nn.Module):
    """Average each feature map down to one value: (N, C, H, W) -> (N, C)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        pooled = F.adaptive_avg_pool2d(inputs, 1)
        return pooled.view(inputs.size(0), -1)
class GramMatrix(nn.Module):
    r""" Gram Matrix for a 4D convolutional featuremaps as a mini-batch

    .. math::
        \mathcal{G} = \sum_{h=1}^{H_i}\sum_{w=1}^{W_i} \mathcal{F}_{h,w}\mathcal{F}_{h,w}^T
    """

    def forward(self, y):
        batch, channels, height, width = y.size()
        # Flatten the spatial dims, then take channel-by-channel inner products.
        flat = y.reshape(batch, channels, height * width)
        return flat.bmm(flat.transpose(1, 2)) / (channels * height * width)
class View(nn.Module):
    """Reshape the input into different size, an inplace operator, support
    SelfParallel mode.
    """

    def __init__(self, *args):
        super(View, self).__init__()
        # Accept either a ready-made torch.Size or a sequence of ints.
        if len(args) == 1 and isinstance(args[0], torch.Size):
            self.size = args[0]
            return
        self.size = torch.Size(args)

    def forward(self, input):
        return input.view(self.size)
class Sum(nn.Module):
    """Module wrapper around a sum-reduction over a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super(Sum, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return torch.sum(input, self.dim, keepdim=self.keep_dim)
class Mean(nn.Module):
    """Module wrapper around a mean-reduction over a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return torch.mean(input, self.dim, keepdim=self.keep_dim)
class Normalize(nn.Module):
    r"""L_p normalization over a chosen dimension.

    Computes, for each subtensor ``v`` along ``dim``:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}

    Each subtensor is flattened into a vector, i.e. this is not a matrix norm.
    Defaults normalize the second dimension with the Euclidean norm.

    Args:
        p (float): exponent of the norm. Default: 2
        dim (int): dimension to reduce. Default: 1
    """
    def __init__(self, p=2, dim=1):
        super(Normalize, self).__init__()
        self.p = p
        self.dim = dim

    def forward(self, x):
        return F.normalize(x, p=self.p, dim=self.dim, eps=1e-8)
class ConcurrentModule(nn.ModuleList):
    r"""Feed the same input to every child module in parallel.

    The child outputs are concatenated along the channel dimension.

    Args:
        modules (iterable, optional): modules to register
    """
    def __init__(self, modules=None):
        super(ConcurrentModule, self).__init__(modules)

    def forward(self, x):
        return torch.cat([branch(x) for branch in self], 1)
class PyramidPooling(nn.Module):
    """Pyramid pooling module (PSPNet).

    Pools the input at four bin sizes (1, 2, 3, 6), squeezes each pooled map to
    ``in_channels // 4`` channels with a 1x1 conv + norm + ReLU, upsamples each
    back to the input resolution, and concatenates all four with the input —
    so the output has ``2 * in_channels`` channels.

    Reference:
        Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
    """
    def __init__(self, in_channels, norm_layer, up_kwargs):
        super(PyramidPooling, self).__init__()
        # Four parallel pooling branches at increasing bin counts.
        self.pool1 = nn.AdaptiveAvgPool2d(1)
        self.pool2 = nn.AdaptiveAvgPool2d(2)
        self.pool3 = nn.AdaptiveAvgPool2d(3)
        self.pool4 = nn.AdaptiveAvgPool2d(6)
        # Each branch reduces the channel count to a quarter of the input.
        out_channels = int(in_channels/4)
        self.conv1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels),
                                nn.ReLU(True))
        self.conv2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels),
                                nn.ReLU(True))
        self.conv3 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels),
                                nn.ReLU(True))
        self.conv4 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                norm_layer(out_channels),
                                nn.ReLU(True))
        # bilinear interpolate options
        self._up_kwargs = up_kwargs
    def forward(self, x):
        _, _, h, w = x.size()
        # Upsample every pooled branch back to the input's spatial size.
        feat1 = F.interpolate(self.conv1(self.pool1(x)), (h, w), **self._up_kwargs)
        feat2 = F.interpolate(self.conv2(self.pool2(x)), (h, w), **self._up_kwargs)
        feat3 = F.interpolate(self.conv3(self.pool3(x)), (h, w), **self._up_kwargs)
        feat4 = F.interpolate(self.conv4(self.pool4(x)), (h, w), **self._up_kwargs)
        # Concatenate the original input with all four context branches.
        return torch.cat((x, feat1, feat2, feat3, feat4), 1)
| 10,973 | 36.71134 | 99 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/resnet.py | """Dilated ResNet"""
import math
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
from .customize import GlobalAvgPool2d
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'BasicBlock', 'Bottleneck', 'get_resnet']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs with optional dilation.

    ``norm_layer`` must be a callable producing a normalization module; it has
    no usable default and must be supplied by the caller.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1,
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        # First conv carries this block's stride and dilation.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second conv keeps the dilation of the preceding stage.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch; downsample projects when shape changes.
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    # pylint: disable=unused-argument
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, previous_dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        # 3x3 spatial conv carries the stride and dilation.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        # 1x1 expansion by a factor of 4.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def _sum_each(self, x, y):
        # Element-wise sum of two equally long lists of tensors.
        assert(len(x) == len(y))
        return [a + b for a, b in zip(x, y)]

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class ResNet(nn.Module):
    """Dilated pre-trained ResNet model, which produces stride-8 feature maps at conv5.

    forward() returns a tuple ``(logits, conv5_features)``.

    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """
    # pylint: disable=unused-variable
    def __init__(self, block, layers, input_channel=3, num_classes=1000, dilated=True, multi_grid=False,
                 deep_base=True, norm_layer=nn.BatchNorm2d):
        self.inplanes = 128 if deep_base else 64
        super(ResNet, self).__init__()
        if deep_base:
            # Deep stem: three 3x3 convs (64 -> 64 -> 128) instead of one 7x7.
            self.conv1 = nn.Sequential(
                nn.Conv2d(input_channel, 64, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # NOTE(review): layer1 uses stride 2 here (standard torchvision ResNet
        # uses stride 1 for layer1) — confirm this is intentional.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2, norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        if dilated:
            # Replace striding with dilation in layer3/layer4 to keep resolution.
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           dilation=2, norm_layer=norm_layer)
            if multi_grid:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer,
                                               multi_grid=True)
            else:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, #2
                                           norm_layer=norm_layer)
        self.avgpool = GlobalAvgPool2d()
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialization for convs; unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, norm_layer):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None, multi_grid=False):
        """Build one residual stage of `blocks` blocks; only the first block
        strides/projects, the rest keep the (possibly dilated) resolution."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the shape changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        multi_dilations = [4, 8, 16]
        if multi_grid:
            layers.append(block(self.inplanes, planes, stride, dilation=multi_dilations[0],
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, stride, dilation=1,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            # First block of a dilation-4 stage uses dilation 2 (gradual growth).
            layers.append(block(self.inplanes, planes, stride, dilation=2,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if multi_grid:
                layers.append(block(self.inplanes, planes, dilation=multi_dilations[i],
                                    previous_dilation=dilation, norm_layer=norm_layer))
            else:
                layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation,
                                    norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x2 = self.layer4(x)
        x = self.avgpool(x2)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Returns both the classification logits and the layer4 feature map.
        return x, x2
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): if True, load ImageNet weights from the model zoo.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['resnet18'])
    model.load_state_dict(state)
    return model
def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): if True, load ImageNet weights from the model zoo.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['resnet34'])
    model.load_state_dict(state)
    return model
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): if True, load ImageNet weights from the model zoo.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['resnet50'])
    model.load_state_dict(state)
    return model
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): if True, load ImageNet weights from the model zoo.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['resnet101'])
    model.load_state_dict(state)
    return model
def resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): if True, load ImageNet weights from the model zoo.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return model
    state = model_zoo.load_url(model_urls['resnet152'])
    model.load_state_dict(state)
    return model
def get_resnet(arch, pretrained, **kwargs):
    """Build a (optionally pretrained) dilated ResNet by name.

    Args:
        arch (str): one of 'resnet18', 'resnet34', 'resnet50',
            'resnet101', 'resnet152'.
        pretrained (bool): forwarded to the builder; loads ImageNet weights.
        **kwargs: forwarded to the ``ResNet`` constructor.

    Returns:
        ResNet: the constructed network.

    Raises:
        ValueError: if ``arch`` is not a known architecture. (Previously an
            unknown arch fell through every branch and crashed with
            UnboundLocalError on the return statement.)
    """
    builders = {
        "resnet18": resnet18,
        "resnet34": resnet34,
        "resnet50": resnet50,
        "resnet101": resnet101,
        "resnet152": resnet152,
    }
    if arch not in builders:
        raise ValueError("unknown resnet architecture: {}".format(arch))
    return builders[arch](pretrained, **kwargs)
| 11,165 | 35.135922 | 162 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/resnet_cifar.py | """Dilated ResNet"""
import torch.nn as nn
from .customize import FrozenBatchNorm2d
# Torchvision checkpoint URLs used by get_cifar_resnet's pretrained path.
# Fix: the 'resnet50' entry was missing even though get_cifar_resnet references
# model_urls['resnet50'], which raised KeyError.
# NOTE(review): these are ImageNet ResNet weights; their parameter names/shapes
# almost certainly do not match the custom CIFAR architectures defined below
# (FrozenBatchNorm2d, different stem) — confirm before relying on pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
def conv_1_3x3(input_channel):
    """Stem block: 3x3 conv (stride 1) -> frozen BN -> ReLU, mapping
    ``input_channel`` -> 64 channels at full resolution."""
    stem = nn.Sequential(
        nn.Conv2d(input_channel, 64, kernel_size=3, stride=1, padding=1, bias=False),
        FrozenBatchNorm2d(64),
        nn.ReLU(inplace=True),
    )
    return stem
class bottleneck(nn.Module):
    """Strided bottleneck block: 1x1 -> kxk -> 1x1 main path plus a strided 1x1
    projection shortcut; all normalization uses FrozenBatchNorm2d."""
    def __init__(self, inplanes, planes, kernel_size, strides=(2, 2)):
        super(bottleneck, self).__init__()
        plane1, plane2, plane3 = planes
        self.outchannels = plane3
        # Main path: reduce, spatial conv (carries the stride), expand.
        self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = FrozenBatchNorm2d(plane1)
        self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=strides, padding=int((kernel_size - 1) / 2), bias=False)
        self.bn2 = FrozenBatchNorm2d(plane2)
        self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(plane3)
        # Projection shortcut matches channels and stride.
        self.conv4 = nn.Conv2d(inplanes, plane3, kernel_size=1, stride=strides, padding=0, bias=False)
        self.bn4 = FrozenBatchNorm2d(plane3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor):
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        skip = self.bn4(self.conv4(input_tensor))
        return self.relu(main + skip)
class basic_block(nn.Module):
    """Strided basic block: two kxk convs on the main path plus a strided 1x1
    projection shortcut; all normalization uses FrozenBatchNorm2d."""
    def __init__(self, inplanes, outplanes, kernel_size, strides=(2, 2)):
        super(basic_block, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=kernel_size, stride=strides, padding=pad, bias=False)
        self.bn1 = FrozenBatchNorm2d(outplanes)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=kernel_size, stride=1, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(outplanes)
        # Projection shortcut matches channels and stride.
        self.conv3 = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=strides, padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor):
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.bn2(self.conv2(main))
        skip = self.bn3(self.conv3(input_tensor))
        return self.relu(main + skip)
class identity_block3(nn.Module):
    """Identity-shortcut bottleneck block (1x1 -> kxk -> 1x1); input and output
    shapes are identical, so the residual add needs no projection."""
    def __init__(self, inplanes, planes, kernel_size):
        super(identity_block3, self).__init__()
        plane1, plane2, plane3 = planes
        self.outchannels = plane3
        self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = FrozenBatchNorm2d(plane1)
        self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size, stride=1, padding=int((kernel_size - 1) / 2), bias=False)
        self.bn2 = FrozenBatchNorm2d(plane2)
        self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(plane3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor, return_conv3_out=False):
        # return_conv3_out is accepted for interface compatibility; unused here.
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        return self.relu(main + input_tensor)
class identity_block2(nn.Module):
    """Identity-shortcut basic block (two kxk convs); input and output shapes
    are identical, so the residual add needs no projection."""
    def __init__(self, inplanes, outplanes, kernel_size):
        super(identity_block2, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=kernel_size, stride=1, padding=pad, bias=False)
        self.bn1 = FrozenBatchNorm2d(outplanes)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=kernel_size, stride=1, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor, return_conv3_out=False):
        # return_conv3_out is accepted for interface compatibility; unused here.
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.bn2(self.conv2(main))
        return self.relu(main + input_tensor)
class Resnet50(nn.Module):
    """CIFAR-style ResNet-50 built from bottleneck / identity blocks with
    frozen batch normalization.

    Channel widths are the base CIFAR widths multiplied by ``block_ex = 4``.
    With ``include_top=True`` forward() returns class logits; otherwise it
    returns the globally pooled (N, C, 1, 1) feature map.
    """
    def __init__(self, input_channel, num_classes, include_top=True):
        print('CIFAR Resnet50 is used')
        super(Resnet50, self).__init__()
        self.num_classes = num_classes
        self.input_channel = input_channel
        self.include_top = include_top
        block_ex = 4  # width multiplier applied to every stage
        # Define the building blocks
        self.conv_3x3 = conv_1_3x3( self.input_channel )
        # Stage 1: 64*4 channels, no spatial downsampling.
        self.bottleneck_1 = bottleneck(16 * block_ex, [16 * block_ex, 16 * block_ex, 64 * block_ex], kernel_size=3, strides=(1, 1))
        self.identity_block_1_1 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
        self.identity_block_1_2 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
        # Stage 2: 128*4 channels, stride 2.
        self.bottleneck_2 = bottleneck(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2))
        self.identity_block_2_1 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        self.identity_block_2_2 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        self.identity_block_2_3 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        # Stage 3: 256*4 channels, no spatial downsampling.
        self.bottleneck_3 = bottleneck(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(1, 1))
        self.identity_block_3_1 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_2 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_3 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_4 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_5 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        # Stage 4: 512*4 channels, stride 2.
        self.bottleneck_4 = bottleneck(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2))
        self.identity_block_4_1 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
        self.identity_block_4_2 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(512*block_ex, num_classes)
        # Initialize the weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, FrozenBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, input_x):
        # ret1..ret5 capture per-stage features; currently unused — they appear
        # intended as taps for a feature-extraction variant.
        x = self.conv_3x3(input_x)
        ret1 = x
        x = self.bottleneck_1(x)
        x = self.identity_block_1_1(x)
        x = self.identity_block_1_2(x)
        ret2 = x
        x = self.bottleneck_2(x)
        x = self.identity_block_2_1(x)
        x = self.identity_block_2_2(x)
        x = self.identity_block_2_3(x)
        ret3 = x
        x = self.bottleneck_3(x)
        x = self.identity_block_3_1(x)
        x = self.identity_block_3_2(x)
        x = self.identity_block_3_3(x)
        x = self.identity_block_3_4(x)
        x = self.identity_block_3_5(x)
        ret4 = x
        x = self.bottleneck_4(x)
        x = self.identity_block_4_1(x)
        x = self.identity_block_4_2(x)
        ret5 = x
        x = self.avgpool(x)
        if self.include_top:
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        return x
class Resnet18(nn.Module):
    """CIFAR-style ResNet-18 built from identity / basic blocks with frozen
    batch normalization.

    Reported top-1 accuracy when trained on fractions of CIFAR10:
        100%: 95.08%   80%: 94.07%   60%: 93.08%   40%: 91.52%
        20%: 86.49%    10%: 77.84%    5%: 62.15%    1%: 38.8%   0.5%: 17.46%
    """
    def __init__(self, input_channel, num_classes):
        print('CIFAR Resnet18 is used')
        super(Resnet18, self).__init__()
        self.num_classes = num_classes
        self.input_channel = input_channel
        # Define the building blocks
        self.conv_3x3 = conv_1_3x3( self.input_channel )
        # Stage 1: 64 channels, full resolution.
        self.identity_block_1_0 = identity_block2(64, 64, kernel_size=3)
        self.identity_block_1_1 = identity_block2(64, 64, kernel_size=3)
        # Stage 2: 128 channels, stride 2.
        self.basic_block_2 = basic_block(64, 128, kernel_size=3, strides=(2, 2))
        self.identity_block_2_1 = identity_block2(128, 128, kernel_size=3)
        # Stage 3: 256 channels, no spatial downsampling.
        self.basic_block_3 = basic_block(128, 256, kernel_size=3, strides=(1, 1))
        self.identity_block_3_1 = identity_block2(256, 256, kernel_size=3)
        # Stage 4: 512 channels, stride 2.
        self.basic_block_4 = basic_block(256, 512, kernel_size=3, strides=(2, 2))
        self.identity_block_4_1 = identity_block2(512, 512, kernel_size=3)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(512, num_classes)
        # Initialize the weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, FrozenBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, input_x):
        # ret1..ret5 capture per-stage features; currently unused — they appear
        # intended as taps for a feature-extraction variant.
        x = self.conv_3x3(input_x)
        ret1 = x
        x = self.identity_block_1_0(x)
        x = self.identity_block_1_1(x)
        ret2 = x
        x = self.basic_block_2(x)
        x = self.identity_block_2_1(x)
        ret3 = x
        x = self.basic_block_3(x)
        x = self.identity_block_3_1(x)
        ret4 = x
        x = self.basic_block_4(x)
        x = self.identity_block_4_1(x)
        ret5 = x
        # Fix: removed leftover debug `print(x.shape)` that spammed stdout on
        # every forward pass.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
def get_cifar_resnet(arch, pretrained, **kwargs):
    """Build a CIFAR-style ResNet by name.

    Args:
        arch (str): 'resnet18' or 'resnet50'.
        pretrained (bool): if True, download and load weights from model_urls.
        **kwargs: forwarded to the model constructor
            (``input_channel``, ``num_classes``, ...).

    Returns:
        nn.Module: the constructed network.

    Raises:
        ValueError: if ``arch`` is not a known architecture (previously this
            fell through and crashed with UnboundLocalError).
    """
    if arch == "resnet18":
        model = Resnet18(**kwargs)
    elif arch == "resnet50":
        model = Resnet50(**kwargs)
    else:
        raise ValueError("unknown cifar resnet architecture: {}".format(arch))
    if pretrained:
        # Fix: this module never imported model_zoo at the top, so the original
        # pretrained path crashed with NameError before reaching load_url.
        import torch.utils.model_zoo as model_zoo
        # NOTE(review): torchvision's ImageNet checkpoints are unlikely to match
        # these custom CIFAR architectures — confirm the intended weight source.
        model.load_state_dict(model_zoo.load_url(model_urls[arch]))
    return model
| 11,254 | 37.412969 | 140 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/utils.py | import torch
import numpy as np
def convert_rgb_to_y(img, dim_order='hwc'):
    """Return the BT.601 luma (Y) channel of an RGB image in [0, 255] range.

    Args:
        img: RGB array, HWC layout by default, CHW otherwise.
        dim_order (str): 'hwc' for channel-last input, anything else for
            channel-first.
    """
    if dim_order == 'hwc':
        r, g, b = img[..., 0], img[..., 1], img[..., 2]
    else:
        r, g, b = img[0], img[1], img[2]
    return 16. + (64.738 * r + 129.057 * g + 25.064 * b) / 256.
def denormalize(img):
    """Map a [0, 1]-normalized tensor back to pixel range, clamped to [0, 255]."""
    scaled = img.mul(255.0)
    return scaled.clamp(0.0, 255.0)
def _convert_rgb_to_ycbcr(img):
    """BT.601 RGB -> YCbCr conversion for an HWC float array in [0, 255].

    Uses the same luminance coefficients as convert_rgb_to_y; the chroma rows
    each sum to zero so neutral grays map to Cb = Cr = 128.
    """
    r, g, b = img[..., 0], img[..., 1], img[..., 2]
    y = 16. + (64.738 * r + 129.057 * g + 25.064 * b) / 256.
    cb = 128. + (-37.945 * r - 74.494 * g + 112.439 * b) / 256.
    cr = 128. + (112.439 * r - 94.154 * g - 18.285 * b) / 256.
    return np.stack([y, cb, cr], axis=-1)
def preprocess(img, device):
    """Convert an RGB image to a normalized Y-channel tensor plus its YCbCr array.

    Fix: the original called ``convert_rgb_to_ycbcr``, which was never defined
    anywhere in this module, so every call raised NameError; the conversion is
    now implemented by the private helper above.

    Args:
        img: PIL image or HWC array, RGB, values in [0, 255].
        device: torch device for the returned tensor.

    Returns:
        tuple: ``(x, ycbcr)`` where ``x`` is a (1, 1, H, W) float tensor with
        the Y channel scaled to [0, 1], and ``ycbcr`` is the full HWC YCbCr
        array in [0, 255] range.
    """
    img = np.array(img).astype(np.float32)
    ycbcr = _convert_rgb_to_ycbcr(img)
    # Divide into a fresh array (not in place) so the returned ycbcr keeps its
    # original [0, 255] scale.
    x = ycbcr[..., 0] / 255.
    x = torch.from_numpy(x).to(device)
    x = x.unsqueeze(0).unsqueeze(0)
    return x, ycbcr
def calc_psnr(img1, img2, max=255.0):
    """Peak signal-to-noise ratio between two tensors, in dB.

    The parameter name ``max`` shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.
    """
    mse = ((img1 - img2) ** 2).mean()
    return 10. * ((max ** 2) / mse).log10()
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and mean."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
| 1,061 | 22.086957 | 97 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/model.py | import torch
from torch import nn
class DenseLayer(nn.Module):
    """3x3 conv + ReLU whose output is concatenated onto its input
    (dense connectivity), growing the channel count by ``out_channels``."""
    def __init__(self, in_channels, out_channels):
        super(DenseLayer, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        new_features = self.relu(self.conv(x))
        return torch.cat((x, new_features), 1)
class RDB(nn.Module):
    """Residual dense block: stacked DenseLayers followed by a 1x1 local
    feature fusion conv, wrapped in a local residual connection.

    The residual add requires ``in_channels == growth_rate``.
    """
    def __init__(self, in_channels, growth_rate, num_layers):
        super(RDB, self).__init__()
        stages = []
        for i in range(num_layers):
            stages.append(DenseLayer(in_channels + growth_rate * i, growth_rate))
        self.layers = nn.Sequential(*stages)
        # local feature fusion: squeeze the concatenated features back down
        self.lff = nn.Conv2d(in_channels + growth_rate * num_layers, growth_rate, kernel_size=1)

    def forward(self, x):
        fused = self.lff(self.layers(x))
        # local residual learning
        return x + fused
class RDN(nn.Module):
    """Residual Dense Network for single-image super-resolution.

    Args:
        scale_factor (int): upscaling factor; must satisfy 2 <= scale <= 4.
        num_channels (int): image channel count (input and output).
        num_features (int): G0, width of the shallow feature extractors.
        growth_rate (int): G, channels produced by each dense layer / RDB.
        num_blocks (int): D, number of residual dense blocks.
        num_layers (int): C, dense layers per RDB.
        requires_grad (bool): set False to freeze every parameter.
    """
    def __init__(self, scale_factor, num_channels, num_features, growth_rate, num_blocks, num_layers, requires_grad=True):
        super(RDN, self).__init__()
        self.G0 = num_features
        self.G = growth_rate
        self.D = num_blocks
        self.C = num_layers
        # shallow feature extraction
        self.sfe1 = nn.Conv2d(num_channels, num_features, kernel_size=3, padding=3 // 2)
        self.sfe2 = nn.Conv2d(num_features, num_features, kernel_size=3, padding=3 // 2)
        # residual dense blocks: the first takes G0 input channels, the rest G
        self.rdbs = nn.ModuleList([RDB(self.G0, self.G, self.C)])
        for _ in range(self.D - 1):
            self.rdbs.append(RDB(self.G, self.G, self.C))
        # global feature fusion: 1x1 squeeze of all D block outputs, then 3x3
        self.gff = nn.Sequential(
            nn.Conv2d(self.G * self.D, self.G0, kernel_size=1),
            nn.Conv2d(self.G0, self.G0, kernel_size=3, padding=3 // 2)
        )
        # up-sampling via sub-pixel (PixelShuffle) convolution
        assert 2 <= scale_factor <= 4
        if scale_factor == 2 or scale_factor == 4:
            # x2 or x4: one or two successive x2 shuffle stages
            self.upscale = []
            for _ in range(scale_factor // 2):
                self.upscale.extend([nn.Conv2d(self.G0, self.G0 * (2 ** 2), kernel_size=3, padding=3 // 2),
                                     nn.PixelShuffle(2)])
            self.upscale = nn.Sequential(*self.upscale)
        else:
            # x3: a single shuffle stage
            self.upscale = nn.Sequential(
                nn.Conv2d(self.G0, self.G0 * (scale_factor ** 2), kernel_size=3, padding=3 // 2),
                nn.PixelShuffle(scale_factor)
            )
        self.output = nn.Conv2d(self.G0, num_channels, kernel_size=3, padding=3 // 2)
        if not requires_grad:
            # Freeze the whole network (e.g. when used as a fixed component).
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, x):
        sfe1 = self.sfe1(x)
        sfe2 = self.sfe2(sfe1)
        x = sfe2
        # Run the RDB chain, collecting every block's output for fusion.
        local_features = []
        for i in range(self.D):
            x = self.rdbs[i](x)
            local_features.append(x)
        x = self.gff(torch.cat(local_features, 1)) + sfe1  # global residual learning
        x = self.upscale(x)
        x = self.output(x)
        return x
class VGGLoss(nn.Module):
    """Perceptual loss: weighted MSE between VGG19 features of two images.

    Five feature depths are compared; deeper features receive larger weights
    (1/32 up to 1). Target features are detached so gradients flow only
    through the first argument.
    """
    def __init__(self, gpu_ids):
        super(VGGLoss, self).__init__()
        # gpu_ids is used as a single CUDA device index here.
        self.vgg = Vgg19().cuda(device = gpu_ids)
        #self.criterion = nn.L1Loss()
        self.criterion = nn.MSELoss()
        # One weight per VGG feature level, increasing with depth.
        self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
    def forward(self, x, y):
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        for i in range(len(x_vgg)):
            # detach(): treat target features as constants (no grad through y).
            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss
from torchvision import models
class Vgg19(torch.nn.Module):
    """ImageNet-pretrained VGG19 feature extractor, frozen by default.

    torchvision's vgg19 ``features`` stack is split into five slices at layer
    indices 0-2, 2-7, 7-12, 12-21, 21-30; each slice ends just after a ReLU
    (relu1_1 ... relu5_1 in the usual naming), so forward() returns five
    progressively deeper feature maps.
    """
    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        # Downloads the pretrained weights on first use.
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Copy the pretrained layers into the five sequential slices.
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Frozen: used as a fixed perceptual feature extractor.
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        # Each slice consumes the previous slice's output.
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
| 4,943 | 36.172932 | 122 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/datasets.py | import random
import h5py
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from scipy.ndimage import gaussian_filter
from scipy.ndimage.filters import convolve
from io import BytesIO
import copy
class TrainDataset(Dataset):
    """Patch-based super-resolution training dataset.

    Reads image paths from a list file, builds the LR image by box-filter
    (pixel-average) downsampling of the HR image, crops an aligned random
    patch pair, and applies flip/rotate plus optional degradations.

    Args:
        file_path (str): text file with one image path per line.
        patch_size (int): LR patch side length (HR patch is patch_size * scale).
        scale (int): downsampling factor.
        aug (bool): enable JPEG / Gaussian-noise augmentation.
        colorization (bool): randomly gray out a fraction of LR pixels.
        completion (bool): randomly zero out a fraction of LR pixels.
    """
    def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
        super(TrainDataset, self).__init__()
        self.files = ParseFile(file_path)
        self.patch_size = patch_size
        self.scale = scale
        self.aug = aug
        self.colorization = colorization
        self.completion = completion
    @staticmethod
    def random_crop(lr, hr, size, scale):
        # Crop an aligned pair: `size` px in LR space, size*scale px in HR space.
        lr_left = random.randint(0, lr.shape[1] - size)
        lr_right = lr_left + size
        lr_top = random.randint(0, lr.shape[0] - size)
        lr_bottom = lr_top + size
        hr_left = lr_left * scale
        hr_right = lr_right * scale
        hr_top = lr_top * scale
        hr_bottom = lr_bottom * scale
        lr = lr[lr_top:lr_bottom, lr_left:lr_right]
        hr = hr[hr_top:hr_bottom, hr_left:hr_right]
        return lr, hr
    @staticmethod
    def random_horizontal_flip(lr, hr):
        # 50% chance: flip both images left-right.
        if random.random() < 0.5:
            lr = lr[:, ::-1, :].copy()
            hr = hr[:, ::-1, :].copy()
        return lr, hr
    @staticmethod
    def random_vertical_flip(lr, hr):
        # 50% chance: flip both images top-bottom.
        if random.random() < 0.5:
            lr = lr[::-1, :, :].copy()
            hr = hr[::-1, :, :].copy()
        return lr, hr
    # im is an numpy float/double array
    @staticmethod
    def add_gaussian_noise(im, std):
        # Additive zero-mean Gaussian noise with the given std.
        noise = np.random.normal(0,std,im.shape)
        im = im + noise
        return im
    # im is read from PIL.Image.open
    @staticmethod
    def jpeg(im, jpeg_quality):
        # Round-trip the image through in-memory JPEG compression.
        buffer = BytesIO()
        im.save(buffer, 'jpeg', quality = jpeg_quality)
        im = Image.open(buffer)
        return im
    @staticmethod
    def random_rotate_90(lr, hr):
        # 50% chance: rotate both images by 90 degrees.
        if random.random() < 0.5:
            lr = np.rot90(lr, axes=(1, 0)).copy()
            hr = np.rot90(hr, axes=(1, 0)).copy()
        return lr, hr
    def __getitem__(self, idx):
        img = Image.open(self.files[idx])
        img2 = img.copy()
        hr = np.array(img).astype('float')
        # With prob ~0.29 (1 - 0.7071), JPEG-compress the copy at quality in [25, 75).
        if self.aug and np.random.uniform(0,1) > 0.7071:
            img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
            #print('agument jpeg')
        # NOTE(review): hr2 (a 15x15 box-blurred version of the possibly-JPEG'd
        # copy) is computed but never used below, so the JPEG augmentation has
        # no effect on the returned pair — confirm whether lr was meant to be
        # built from hr2 instead of hr.
        hr2 = np.array(img2).astype('float')
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # LR = scale x scale box-filter (pixel-average) downsampling of HR.
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
        lr, hr = self.random_horizontal_flip(lr, hr)
        lr, hr = self.random_vertical_flip(lr, hr)
        lr, hr = self.random_rotate_90(lr, hr)
        # With prob ~0.29, add Gaussian noise (std in [0, 10)) to the LR patch only.
        if self.aug and np.random.uniform(0,1) > 0.7071:
            lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
            #print('augment noising')
        # HWC [0, 255] -> CHW float32 [0, 1].
        lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        # Inpainting-style corruption: zero out a random 5-15% of LR pixels.
        if self.completion and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            lr[0,mask] = 0
            lr[1,mask] = 0
            lr[2,mask] = 0
        # Colorization-style corruption: gray out a random 5-15% of LR pixels.
        if self.colorization and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            tmp = lr.mean(axis=0)
            for i_dim in range(dims[0]):
                lr[i_dim,mask] = tmp[mask]
        return lr, hr
    def __len__(self):
        return len(self.files)
class TrainDataset256(Dataset):
def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
super(TrainDataset256, self).__init__()
self.files = ParseFile(file_path)
self.patch_size = patch_size
self.scale = scale
self.aug = aug
self.colorization = colorization
self.completion = completion
@staticmethod
def random_crop(lr, hr, size, scale):
lr_left = random.randint(0, lr.shape[1] - size)
lr_right = lr_left + size
lr_top = random.randint(0, lr.shape[0] - size)
lr_bottom = lr_top + size
hr_left = lr_left * scale
hr_right = lr_right * scale
hr_top = lr_top * scale
hr_bottom = lr_bottom * scale
lr = lr[lr_top:lr_bottom, lr_left:lr_right]
hr = hr[hr_top:hr_bottom, hr_left:hr_right]
return lr, hr
@staticmethod
def random_horizontal_flip(lr, hr):
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
@staticmethod
def random_vertical_flip(lr, hr):
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
return lr, hr
# im is an numpy float/double array
@staticmethod
def add_gaussian_noise(im, std):
noise = np.random.normal(0,std,im.shape)
im = im + noise
return im
# im is read from PIL.Image.open
@staticmethod
def jpeg(im, jpeg_quality):
buffer = BytesIO()
im.save(buffer, 'jpeg', quality = jpeg_quality)
im = Image.open(buffer)
return im
@staticmethod
def random_rotate_90(lr, hr):
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
return lr, hr
    def __getitem__(self, idx):
        """Load image `idx`, synthesize an LR/HR training pair, and apply the
        configured augmentations (jpeg, flips, rotation, noise, completion
        masking, colorization masking).  Returns (lr, hr) as float32 CHW
        arrays scaled to [0, 1]."""
        img = Image.open(self.files[idx])
        img = img.resize((256 , 256), resample=Image.BICUBIC)
        img2 = img.copy()
        hr = np.array(img).astype('float')
        # ~29% of the time (0.7071 threshold), degrade a copy with JPEG
        # compression at a random quality in [25, 75).
        if self.aug and np.random.uniform(0,1) > 0.7071:
            img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
            #print('agument jpeg')
        # 15x15 box blur (kernel sums to 1: 225 = 15*15) applied per channel.
        # NOTE(review): hr2 appears unused below -- confirm whether lr was
        # meant to be built from the blurred copy instead of hr.
        hr2 = np.array(img2).astype('float')
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # Average-pool hr over non-overlapping scale x scale windows to get lr.
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        # Paired geometric augmentations keep lr and hr aligned.
        lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
        lr, hr = self.random_horizontal_flip(lr, hr)
        lr, hr = self.random_vertical_flip(lr, hr)
        lr, hr = self.random_rotate_90(lr, hr)
        if self.aug and np.random.uniform(0,1) > 0.7071:
            lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
            #print('augment noising')
        # HWC -> CHW, normalize 8-bit values into [0, 1].
        lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        # Completion task: zero out a random 5-15% of pixels in all channels.
        if self.completion and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            lr[0,mask] = 0
            lr[1,mask] = 0
            lr[2,mask] = 0
        # Colorization task: replace a random 5-15% of pixels by their
        # channel mean (grayscale them).
        if self.colorization and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            tmp = lr.mean(axis=0)
            for i_dim in range(dims[0]):
                lr[i_dim,mask] = tmp[mask]
        return lr, hr
    def __len__(self):
        # Dataset size equals the number of files listed for this split.
        return len(self.files)
class EvalDataset(Dataset):
    """Evaluation dataset yielding (lr, hr) pairs.

    `lr` is an average-pooled (scale x scale) downsample of the full image;
    `hr` is the untouched full-resolution image as a float HWC array.

    Args:
        file_path: text file with one image path per line (see ParseFile).
        scale: integer downsampling factor.

    Fix: the original computed a 15x15 box blur into a copy (`hr2`) that was
    never used afterwards -- three full-image convolutions of wasted work per
    item.  The dead computation is removed; returned values are unchanged.
    """
    def __init__(self, file_path, scale):
        super(EvalDataset, self).__init__()
        self.files = ParseFile(file_path)
        self.scale = scale

    def __getitem__(self, idx):
        hr = np.array(Image.open(self.files[idx])).astype('float')
        # Average-pool hr over non-overlapping scale x scale windows.
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        return lr, hr

    def __len__(self):
        return len(self.files)
def ParseFile(filepath):
    """Read a listing file and return its lines (one path per line).

    Fix: the original used line[:-1], which removes the last *character* of
    the final entry whenever the file does not end with a newline.  Stripping
    only the trailing newline keeps every entry intact.
    """
    output = []
    with open(filepath) as fp:
        for line in fp:
            output.append(line.rstrip('\n'))
    return output
class EvalDataset256(Dataset):
    """Evaluation dataset that resizes every image to 256x256 before pairing.

    Args:
        file_path: text file with one image path per line (see ParseFile).
        scale: integer downsampling factor.

    Fixes:
    - The original converted to a numpy array *before* calling
      resize((256, 256), resample=Image.BICUBIC).  numpy's ndarray.resize
      does not resample and takes no `resample` keyword, so __getitem__
      raised TypeError on every call.  The PIL image must be resized first
      and converted afterwards.
    - Removed the unused 15x15 box blur (`hr2`) -- dead work per item.
    """
    def __init__(self, file_path, scale):
        super(EvalDataset256, self).__init__()
        self.files = ParseFile(file_path)
        self.scale = scale

    def __getitem__(self, idx):
        img = Image.open(self.files[idx]).resize((256, 256), resample=Image.BICUBIC)
        hr = np.array(img).astype('float')
        # Average-pool hr over non-overlapping scale x scale windows.
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        return lr, hr

    def __len__(self):
        return len(self.files)
def ParseFile(filepath):
    """Read a listing file and return its lines (one path per line).

    Fix: the original used line[:-1], which removes the last *character* of
    the final entry whenever the file does not end with a newline.  Stripping
    only the trailing newline keeps every entry intact.
    """
    output = []
    with open(filepath) as fp:
        for line in fp:
            output.append(line.rstrip('\n'))
    return output
| 10,057 | 32.415282 | 102 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/base.py | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2017
###########################################################################
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
__all__ = ['BaseDataset']
class BaseDataset(data.Dataset):
    """Common base for binary (positive/negative folder) image datasets.

    Subclasses must implement __getitem__ and the `pred_offset` property,
    and define a NUM_CLASS class attribute for `num_class`.

    Fix: the original used `raise NotImplemented`.  NotImplemented is the
    binary-operator singleton, not an exception class; raising it is a
    TypeError in Python 3.  NotImplementedError is the correct sentinel.
    """
    def __init__(self, root_pos, root_neg, flip=True):
        self.root_pos = root_pos  # directory of positive examples
        self.root_neg = root_neg  # directory of negative examples
        self.flip = flip          # whether random horizontal flips are enabled

    def __getitem__(self, index):
        raise NotImplementedError

    @property
    def num_class(self):
        return self.NUM_CLASS

    @property
    def pred_offset(self):
        raise NotImplementedError

    def make_pred(self, x):
        # Shift raw predictions by the subclass-defined label offset.
        return x + self.pred_offset
| 835 | 22.222222 | 75 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/image_dataset.py | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2018
###########################################################################
import os
import sys
import random
import numpy as np
from tqdm import tqdm, trange
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
from os import listdir
from os.path import isfile, join
from scipy.interpolate import interp2d
class BinaryImageDataset(BaseDataset):
    """Binary real/fake image dataset built from two folders of images.

    files[0] holds image paths (positives then negatives); files[1] holds
    the matching labels (1 for positive, 0 for negative).

    Fix: the empty-dataset error message interpolated `self.root`, an
    attribute that does not exist on this class, so the RuntimeError was
    masked by an AttributeError.  It now names the real source folders.
    """
    def __init__(self, root_pos=os.path.expanduser('/BS/work/data_pos'), root_neg=os.path.expanduser('/BS/work/data_neg'), flip=True, **kwargs):
        super(BinaryImageDataset, self).__init__(
            root_pos, root_neg, flip, **kwargs)
        self.files = get_data_pairs(self.root_pos, self.root_neg)
        assert (len(self.files[0]) == len(self.files[1]))
        if len(self.files) == 3:
            assert (len(self.files[0]) == len(self.files[2]))
        if len(self.files[0]) == 0:
            raise RuntimeError("Found 0 images in subfolders of: "
                               + self.root_pos + " and " + self.root_neg + "\n")
        print("Found %d examples" % len(self.files[0]))

    def __getitem__(self, index):
        tmp = Image.open(self.files[0][index][:])
        data = np.array(tmp)
        data = data.transpose(2, 0, 1)  # HWC -> CHW
        if self.flip:
            # flip_step is -1 (mirror along width) or +1 (no-op), each p=0.5.
            flip_step = np.random.randint(0, 2) * 2 - 1
            data = data[:, :, ::flip_step]
        label = self.files[1][index]
        data = torch.from_numpy(data.copy()).float()
        label = torch.tensor(label).long()
        return data, label, self.files[0][index]

    def __len__(self):
        return len(self.files[0])
def get_data_pairs(pos_folder, neg_folder):
    """Collect sorted file paths from both folders plus binary labels.

    Returns [paths, labels]: paths lists positives then negatives (each
    group sorted by full path); labels is 1 per positive, 0 per negative.
    """
    def _files_in(folder):
        entries = (os.path.join(folder, name) for name in os.listdir(folder))
        return sorted(p for p in entries if os.path.isfile(p))

    pos_data = _files_in(pos_folder)
    neg_data = _files_in(neg_folder)
    return [pos_data + neg_data, [1] * len(pos_data) + [0] * len(neg_data)]
| 2,300 | 31.871429 | 144 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/__init__.py | import warnings
from torchvision.datasets import *
from .base import *
from .image_dataset import BinaryImageDataset
# Registry mapping dataset keys to their implementing classes.
datasets = {
    'image': BinaryImageDataset,
}


def get_dataset(name, **kwargs):
    """Look up `name` case-insensitively in the registry and build it."""
    dataset_cls = datasets[name.lower()]
    return dataset_cls(**kwargs)
| 246 | 16.642857 | 45 | py |
ps-lite | ps-lite-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# ps-lite documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 20 20:12:23 2016.
#
# Mu: additional changes
# - add breathe into extensions
# - change html theme into sphinx_rtd_theme
# - add sphnix_util.py
# - add .md into source_suffix
# - add setup() at the end
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.md']
# source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ps-lite'
copyright = u'2016, ps-lite developers'
author = u'ps-lite developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ps-litedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ps-lite.tex', u'ps-lite Documentation',
u'ps-lite developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ps-lite', u'ps-lite Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ps-lite', u'ps-lite Documentation',
author, 'ps-lite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, curr_path)
from sphinx_util import MarkdownParser, AutoStructify, generate_doxygen_xml
MarkdownParser.github_doc_root = 'https://github.com/dmlc/ps-lite/tree/master/'
source_parsers = {
'.md': MarkdownParser,
'.Rmd': MarkdownParser,
}
breathe_projects = {'ps-lite' : 'xml/'}
breathe_default_project = 'ps-lite'
doc_root = 'http://dmlc.ml'
def setup(app):
    # Sphinx extension hook: regenerate the Doxygen XML (consumed by breathe)
    # before each build, and configure recommonmark so relative URLs in the
    # markdown sources resolve against doc_root.
    app.connect("builder-inited", generate_doxygen_xml)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: doc_root + url,
    }, True)
    app.add_transform(AutoStructify)
| 10,101 | 30.968354 | 80 | py |
minkasi | minkasi-master/examples/tsBowl_map_maker.py | import minkasi
import numpy as np
from matplotlib import pyplot as plt
import glob
import time
import minkasi_jax.presets_by_source as pbs
import os
from astropy.coordinates import Angle
from astropy import units as u
dir = '/scratch/r/rbond/jorlo/MS0735//TS_EaCMS0f0_51_5_Oct_2021/'
tod_names=glob.glob(dir+'Sig*.fits')
bad_tod, addtag = pbs.get_bad_tods("MS0735", ndo=False, odo=False)
#bad_tod.append('Signal_TOD-AGBT21A_123_03-s20.fits')
tod_names = minkasi.cut_blacklist(tod_names, bad_tod)
print("nproc: ", minkasi.nproc)
tod_names = tod_names[minkasi.myrank::minkasi.nproc]
todvec=minkasi.TodVec()
flatten = True
#loop over each file, and read it.
for i, fname in enumerate(tod_names):
if fname == '/scratch/r/rbond/jorlo/MS0735//TS_EaCMS0f0_51_5_Oct_2021/Signal_TOD-AGBT21A_123_03-s20.fits': continue
t1=time.time()
dat=minkasi.read_tod_from_fits(fname)
t2=time.time()
minkasi.truncate_tod(dat) #truncate_tod chops samples from the end to make
#the length happy for ffts
minkasi.downsample_tod(dat) #sometimes we have faster sampled data than we need.
#this fixes that. You don't need to, though.
minkasi.truncate_tod(dat) #since our length changed, make sure we have a happy length
#figure out a guess at common mode #and (assumed) linear detector drifts/offset
#drifts/offsets are removed, which is important for mode finding. CM is *not* removed.
dd, pred2, cm = minkasi.fit_cm_plus_poly(dat["dat_calib"], cm_ord=3, full_out=True)
dat['dat_calib']=dd
if flatten:
dat['dat_calib'] -= pred2
t3=time.time()
tod=minkasi.Tod(dat)
todvec.add_tod(tod)
print('took ',t2-t1,' ',t3-t2,' seconds to read and downsample file ',fname)
minkasi.barrier()
lims=todvec.lims()
pixsize=2.0/3600*np.pi/180
map=minkasi.SkyMap(lims,pixsize)
mapset = minkasi.Mapset()
for i, tod in enumerate(todvec.tods):
#print(i)
#print(tod.info['fname'])
ipix=map.get_pix(tod)
tod.info['ipix']=ipix
try:
tod.set_noise(minkasi.NoiseSmoothedSVD)
except:
print(i, tod.info['fname'])
tsVec = minkasi.tsModel(todvec = todvec, modelclass = minkasi.tsBowl)
#We add two things for pcg to do simulatenously here: make the maps from the tods
#and fit the polynomials to tsVec
mapset.add_map(map)
mapset.add_map(tsVec)
hits=minkasi.make_hits(todvec,map)
rhs=mapset.copy()
todvec.make_rhs(rhs)
x0=rhs.copy()
x0.clear()
#preconditioner is 1/ hit count map. helps a lot for
#convergence.
precon=mapset.copy()
tmp=hits.map.copy()
ii=tmp>0
tmp[ii]=1.0/tmp[ii]
precon.maps[0].map[:]=tmp[:]
#tsBowl precon is 1/todlength
if len(mapset.maps) > 1:
for key in precon.maps[1].data.keys():
temp = np.ones(precon.maps[1].data[key].params.shape)
temp /= precon.maps[1].data[key].vecs.shape[1]
precon.maps[1].data[key].params = temp
todvec_copy = minkasi.TodVec()
for tod in todvec.tods:
todvec_copy.add_tod(tod.copy())
#run PCG!
plot_info={}
plot_info['vmin']=-6e-4
plot_info['vmax']=6e-4
plot_iters=[1,2,3,5,10,15,20,25,30,35,40,45,49]
mapset_out=minkasi.run_pcg(rhs,x0,todvec,precon,maxiter=200)
if minkasi.myrank==0:
if len(mapset.maps)==1:
mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_noBowl_map_precon_mpi_py3.fits'.format(flatten)) #and write out the map as a FITS file
else:
mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_tsBowl_map_precon_mpi_py3.fits'.format(flatten))
else:
print('not writing map on process ',minkasi.myrank)
#if you wanted to run another round of PCG starting from the previous solution,
#you could, replacing x0 with mapset_out.
#mapset_out2=minkasi.run_pcg(rhs,mapset_out,todvec,mapset,maxiter=50)
#mapset_out2.maps[0].write('second_map.fits')
d2r=np.pi/180
sig=9/2.35/3600*d2r
theta0 = np.deg2rad(97)
x0 = Angle('07 41 44.5 hours').to(u.radian).value
y0 = Angle('74:14:38.7 degrees').to(u.radian).value
beta_pars=np.asarray([x0,y0,theta0,0.98,-8.2e-1])
x0_src = Angle('07 41 44.5 hours').to(u.radian).value
y0_src = Angle('74:14:38.7 degrees').to(u.radian).value
src1_pars=np.asarray([x0_src, y0_src,1.37e-5,1.7e-4])
#src2_pars=np.asarray([155.90447*d2r,4.1516*d2r,2.6e-5,5.1e-4])
pars=np.hstack([beta_pars,src1_pars]) #we need to combine parameters into a single vector
npar=np.hstack([len(beta_pars),len(src1_pars)]) #and the fitter needs to know how many per function
#this array of functions needs to return the model timestreams and derivatives w.r.t. parameters
#of the timestreams.
funs=[minkasi.derivs_from_isobeta_c,minkasi.derivs_from_gauss_c]
#we can keep some parameters fixed at their input values if so desired.
to_fit=np.ones(len(pars),dtype='bool')
to_fit[[0,1,2,5,6]]=False #Let's keep beta pegged to 0.7
'''
t1=time.time()
pars_fit,chisq,curve,errs=minkasi.fit_timestreams_with_derivs_manyfun(funs,pars,npar,todvec_copy,to_fit)
t2=time.time()
if minkasi.myrank==0:
print('No Sub: ')
print('took ',t2-t1,' seconds to fit timestreams')
for i in range(len(pars_fit)):
print('parameter ',i,' is ',pars_fit[i],' with error ',errs[i])
'''
minkasi.comm.barrier()
if minkasi.myrank==0:
for tod in todvec.tods:
todname = tod.info['fname']
temp = minkasi.map2todbowl(mapset_out.maps[1].data[todname].vecs, mapset_out.maps[1].data[todname].params)
tod.info['dat_calib'] -= temp
'''
t1=time.time()
pars_fit,chisq,curve,errs=minkasi.fit_timestreams_with_derivs_manyfun(funs,pars,npar,todvec,to_fit)
t2=time.time()
if minkasi.myrank==0:
print('Sub: ')
print('took ',t2-t1,' seconds to fit timestreams')
for i in range(len(pars_fit)):
print('parameter ',i,' is ',pars_fit[i],' with error ',errs[i])
'''
lims=todvec.lims()
pixsize=2.0/3600*np.pi/180
map=minkasi.SkyMap(lims,pixsize)
mapset = minkasi.Mapset()
mapset.add_map(map)
hits=minkasi.make_hits(todvec,map)
rhs=mapset.copy()
todvec.make_rhs(rhs)
x0=rhs.copy()
x0.clear()
#preconditioner is 1/ hit count map. helps a lot for
#convergence.
precon=mapset.copy()
tmp=hits.map.copy()
ii=tmp>0
tmp[ii]=1.0/tmp[ii]
precon.maps[0].map[:]=tmp[:]
mapset_out=minkasi.run_pcg(rhs,x0,todvec,precon,maxiter=50)
if minkasi.myrank==0:
mapset_out.maps[0].write('/scratch/r/rbond/jorlo/{}_sub_tsBowl_map_precon_mpi_py3.fits'.format(flatten)) #and write out the map as a FITS file
else:
print('not writing map on process ',minkasi.myrank)
| 6,463 | 28.788018 | 146 | py |
tbsm | tbsm-main/tbsm_pytorch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
### import packages ###
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import time
import os
from os import path
import random
# numpy and scikit-learn
import numpy as np
from sklearn.metrics import roc_auc_score
# pytorch
import torch
import torch.nn as nn
import torch.nn.functional as Functional
from torch.nn.parameter import Parameter
from torch.utils.tensorboard import SummaryWriter
# tbsm data
import tbsm_data_pytorch as tp
# Seed every RNG used by training so runs are reproducible.
def set_seed(seed, use_gpu):
    """Seed python, numpy and (when use_gpu) torch/cuDNN RNGs."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    if not use_gpu:
        return
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    # Deterministic cuDNN kernels (disables autotuning for reproducibility).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
### define time series layer (TSL) ###
class TSL_Net(nn.Module):
    """Time series layer: attends the current item's embedding `x` over the
    history matrix `H` and returns a context vector.

    model_type selects the mechanism: "tsl" (learned inner product via A),
    or "mha" (torch MultiheadAttention with learned Q/K/V projections).
    """
    def __init__(
        self,
        arch_interaction_op='dot',
        arch_attention_mechanism='mlp',
        ln=None,
        model_type="tsl",
        tsl_inner="def",
        mha_num_heads=8,
        ln_top=""
    ):
        super(TSL_Net, self).__init__()
        # save arguments
        self.arch_interaction_op = arch_interaction_op
        self.arch_attention_mechanism = arch_attention_mechanism
        self.model_type = model_type
        self.tsl_inner = tsl_inner
        # setup for mechanism type
        if self.arch_attention_mechanism == 'mlp':
            self.mlp = dlrm.DLRM_Net().create_mlp(ln, len(ln) - 2)
        # setup extra parameters for some of the models
        if self.model_type == "tsl" and self.tsl_inner in ["def", "ind"]:
            m = ln_top[-1] # dim of dlrm output
            mean = 0.0
            std_dev = np.sqrt(2 / (m + m))
            # A defines the learned inner product used in forward().
            W = np.random.normal(mean, std_dev, size=(1, m, m)).astype(np.float32)
            self.A = Parameter(torch.tensor(W), requires_grad=True)
        elif self.model_type == "mha":
            m = ln_top[-1] # dlrm output dim
            self.nheads = mha_num_heads
            self.emb_m = self.nheads * m # mha emb dim
            mean = 0.0
            std_dev = np.sqrt(2 / (m + m)) # np.sqrt(1 / m) # np.sqrt(1 / n)
            # Learned projections for query (current item), key and value
            # (history) used by the multi-head attention in forward().
            qm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.Q = Parameter(torch.tensor(qm), requires_grad=True)
            km = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.K = Parameter(torch.tensor(km), requires_grad=True)
            vm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                .astype(np.float32)
            self.V = Parameter(torch.tensor(vm), requires_grad=True)

    def forward(self, x=None, H=None):
        """Return the attention context of x (batch, m) against H (batch, ts, m)."""
        # adjust input shape
        (batchSize, vector_dim) = x.shape
        x = torch.reshape(x, (batchSize, 1, -1))
        x = torch.transpose(x, 1, 2)
        # debug prints
        # print("shapes: ", self.A.shape, x.shape)
        # perform mode operation
        if self.model_type == "tsl":
            if self.tsl_inner == "def":
                # x <- A^T A x: positive semi-definite learned inner product.
                ax = torch.matmul(self.A, x)
                x = torch.matmul(self.A.permute(0, 2, 1), ax)
                # debug prints
                # print("shapes: ", H.shape, ax.shape, x.shape)
            elif self.tsl_inner == "ind":
                x = torch.matmul(self.A, x)
        # perform interaction operation
        if self.arch_interaction_op == 'dot':
            if self.arch_attention_mechanism == 'mul':
                # coefficients
                a = torch.transpose(torch.bmm(H, x), 1, 2)
                # context
                c = torch.bmm(a, H)
            elif self.arch_attention_mechanism == 'mlp':
                # coefficients
                a = torch.transpose(torch.bmm(H, x), 1, 2)
                # MLP first/last layer dims are automatically adjusted to ts_length
                y = dlrm.DLRM_Net().apply_mlp(a, self.mlp)
                # context, y = mlp(a)
                c = torch.bmm(torch.reshape(y, (batchSize, 1, -1)), H)
            else:
                sys.exit('ERROR: --arch-attention-mechanism='
                         + self.arch_attention_mechanism + ' is not supported')
        else:
            sys.exit('ERROR: --arch-interaction-op=' + self.arch_interaction_op
                     + ' is not supported')
        elif self.model_type == "mha":
            x = torch.transpose(x, 1, 2)
            Qx = torch.transpose(torch.matmul(x, self.Q), 0, 1)
            HK = torch.transpose(torch.matmul(H, self.K), 0, 1)
            HV = torch.transpose(torch.matmul(H, self.V), 0, 1)
            # multi-head attention (mha)
            # NOTE(review): the attention module is constructed fresh on every
            # forward call, so its internal weights are untrained -- confirm
            # this is intended.
            multihead_attn = nn.MultiheadAttention(self.emb_m, self.nheads).to(x.device)
            attn_output, _ = multihead_attn(Qx, HK, HV)
            # context
            c = torch.squeeze(attn_output, dim=0)
            # debug prints
            # print("shapes:", c.shape, Qx.shape)
        return c
### define Time-based Sequence Model (TBSM) ###
class TBSM_Net(nn.Module):
    """Time-based sequence model: a DLRM embedding tower encodes each item in
    a click history; one or more TSL/MHA components (or an LSTM) summarize
    the history into a context that is combined with the current item to
    predict a click probability.

    NOTE(review): this class reads the module-level global `args`
    (args.run_fast) in both __init__ and forward -- it must be defined
    before the model is constructed.
    """
    def __init__(
        self,
        m_spa,
        ln_emb,
        ln_bot,
        ln_top,
        arch_interaction_op,
        arch_interaction_itself,
        ln_mlp,
        ln_tsl,
        tsl_interaction_op,
        tsl_mechanism,
        ts_length,
        ndevices=-1,
        model_type="",
        tsl_seq=False,
        tsl_proj=True,
        tsl_inner="def",
        tsl_num_heads=1,
        mha_num_heads=8,
        rnn_num_layers=5,
        debug_mode=False,
    ):
        super(TBSM_Net, self).__init__()
        # save arguments
        self.ndevices = ndevices
        self.debug_mode = debug_mode
        self.ln_bot = ln_bot
        self.ln_top = ln_top
        self.ln_tsl = ln_tsl
        self.ts_length = ts_length
        self.tsl_interaction_op = tsl_interaction_op
        self.tsl_mechanism = tsl_mechanism
        self.model_type = model_type
        self.tsl_seq = tsl_seq
        self.tsl_proj = tsl_proj
        self.tsl_inner = tsl_inner
        self.tsl_num_heads = tsl_num_heads
        self.mha_num_heads = mha_num_heads
        self.rnn_num_layers = rnn_num_layers
        self.ams = nn.ModuleList()
        self.mlps = nn.ModuleList()
        if self.model_type == "tsl":
            self.num_mlps = int(self.tsl_num_heads) # number of tsl components
        else:
            self.num_mlps = 1
        #debug prints
        if self.debug_mode:
            print(self.model_type)
            print(ln_bot)
            print(ln_top)
            print(ln_emb)
        # embedding layer (implemented through dlrm tower, without last layer sigmoid)
        if "qr" in model_type:
            self.dlrm = dlrm.DLRM_Net(
                m_spa, ln_emb, ln_bot, ln_top,
                arch_interaction_op, arch_interaction_itself,
                qr_flag=True, qr_operation="add", qr_collisions=4, qr_threshold=100000
            )
            print("Using QR embedding method.")
        else:
            self.dlrm = dlrm.DLRM_Net(
                m_spa, ln_emb, ln_bot, ln_top,
                arch_interaction_op, arch_interaction_itself,
            )
        # prepare data needed for tsl layer construction
        if self.model_type == "tsl":
            if not self.tsl_seq:
                # every head attends over the full history
                self.ts_array = [self.ts_length] * self.num_mlps
            else:
                # heads attend over progressively shorter history suffixes
                self.ts_array = []
                m = self.ts_length / self.tsl_num_heads
                for j in range(self.tsl_num_heads, 0, -1):
                    t = min(self.ts_length, round(m * j))
                    self.ts_array.append(t)
        elif self.model_type == "mha":
            self.ts_array = [self.ts_length]
        else:
            self.ts_array = []
        # construction of one or more tsl components
        for ts in self.ts_array:
            # attention MLP first/last layers are sized to this head's window
            ln_tsl = np.concatenate((np.array([ts]), self.ln_tsl))
            ln_tsl = np.append(ln_tsl, ts)
            # create tsl mechanism
            am = TSL_Net(
                arch_interaction_op=self.tsl_interaction_op,
                arch_attention_mechanism=self.tsl_mechanism,
                ln=ln_tsl, model_type=self.model_type,
                tsl_inner=self.tsl_inner,
                mha_num_heads=self.mha_num_heads, ln_top=self.ln_top,
            )
            self.ams.append(am)
        # tsl MLPs (with sigmoid on last layer)
        for _ in range(self.num_mlps):
            mlp_tsl = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
            self.mlps.append(mlp_tsl)
        # top mlp if needed
        if self.num_mlps > 1:
            f_mlp = np.array([self.num_mlps, self.num_mlps + 4, 1])
            self.final_mlp = dlrm.DLRM_Net().create_mlp(f_mlp, f_mlp.size - 2)
        # Offsets need to be stored beforehand if args.run_fast.
        if args.run_fast:
            # Constant offsets tensor - resize if needed.
            self.max_offset = 10000000
            self.offsets = torch.tensor(list(range(self.max_offset)))
            self.offsets_moved = False

    def forward(self, x, lS_o, lS_i):
        """Run the model on a batch: x/lS_o/lS_i are per-time-step lists of
        dense inputs, sparse offsets and sparse indices; the last entry is
        the current item.  Returns the click probability per example."""
        # Move offsets to device if needed and not already done.
        if args.run_fast and not self.offsets_moved:
            self.offsets = self.offsets.to(x[0].device)
            self.offsets_moved = True
        # data point is history H and last entry w
        n = x[0].shape[0] # batch_size
        ts = len(x)
        H = torch.zeros(n, self.ts_length, self.ln_top[-1]).to(x[0].device)
        # Compute H using either fast or original approach depending on args.run_fast.
        if args.run_fast:
            # Fast path: one batched DLRM call over all history steps instead
            # of a Python loop of ts_length separate calls.
            # j determines access indices of input; first, determine j bounds and get all inputs.
            j_lower = (ts - self.ts_length - 1)
            j_upper = (ts - 1)
            # Concatenate x[j]s using j bounds.
            concatenated_x = torch.cat(x[j_lower : j_upper])
            # Set offsets and increase size if needed.
            curr_max_offset = (j_upper - j_lower) * n
            if curr_max_offset > self.max_offset + 1:
                # Resize offsets to 2x required size.
                self.offsets = torch.tensor(list(range(curr_max_offset * 2))).to(self.offsets.device)
                self.max_offset = curr_max_offset * 2
            concatenated_lS_o = [self.offsets[: curr_max_offset] for j in range(len(lS_o[0]))]
            # Concatenate lS_i[0, 1, 2]s.
            concatenated_lS_i = [torch.cat([lS_i[i][j] for i in range(j_lower, j_upper)]) for j in range(len(lS_i[0]))]
            # oj determines access indices of output; determine oj bounds to assign output values in H. oj is just j indices adjusted to start at 0.
            oj_lower = 0 - (ts - self.ts_length - 1)
            oj_upper = (ts - 1) - (ts - self.ts_length - 1)
            # After fetching all inputs, run through DLRM.
            concatenated_dlrm_output = self.dlrm(concatenated_x, concatenated_lS_o, concatenated_lS_i)
            # Reshape output with new TS dimension and transpose to get H output.
            transposed_concatenated_dlrm_output = torch.transpose(concatenated_dlrm_output.reshape((j_upper - j_lower), n, self.ln_top[-1]), 0, 1)
            if self.model_type == "tsl" and self.tsl_proj:
                dlrm_output = Functional.normalize(transposed_concatenated_dlrm_output, p=2, dim=2)
            else:
                dlrm_output = transposed_concatenated_dlrm_output
            # Assign the output to H with correct output bounds.
            H[:, oj_lower : oj_upper, :] = dlrm_output
        else:
            # split point into first part (history)
            # and last item
            for j in range(ts - self.ts_length - 1, ts - 1):
                oj = j - (ts - self.ts_length - 1)
                v = self.dlrm(x[j], lS_o[j], lS_i[j])
                if self.model_type == "tsl" and self.tsl_proj:
                    v = Functional.normalize(v, p=2, dim=1)
                H[:, oj, :] = v
        # w encodes the current (last) item.
        w = self.dlrm(x[-1], lS_o[-1], lS_i[-1])
        # project onto sphere
        if self.model_type == "tsl" and self.tsl_proj:
            w = Functional.normalize(w, p=2, dim=1)
        # print("data: ", x[-1], lS_o[-1], lS_i[-1])
        (mini_batch_size, _) = w.shape
        # for cases when model is tsl or mha
        if self.model_type != "rnn":
            # create MLP for each TSL component
            # each ams[] element is one component
            for j in range(self.num_mlps):
                # each head sees only the last ts_array[j] history steps
                ts = self.ts_length - self.ts_array[j]
                c = self.ams[j](w, H[:, ts:, :])
                c = torch.reshape(c, (mini_batch_size, -1))
                # concat context and w
                z = torch.cat([c, w], dim=1)
                # obtain probability of a click as a result of MLP
                p = dlrm.DLRM_Net().apply_mlp(z, self.mlps[j])
                if j == 0:
                    ps = p
                else:
                    ps = torch.cat((ps, p), dim=1)
            # multiple heads are combined by the final MLP
            if ps.shape[1] > 1:
                p_out = dlrm.DLRM_Net().apply_mlp(ps, self.final_mlp)
            else:
                p_out = ps
        # RNN based on LSTM cells case, context is final hidden state
        else:
            # NOTE(review): the LSTM and its random initial states are built
            # anew on each forward call, so they are untrained -- confirm.
            hidden_dim = w.shape[1] # equal to dim(w) = dim(c)
            level = self.rnn_num_layers # num stacks of rnns
            Ht = H.permute(1, 0, 2)
            rnn = nn.LSTM(int(self.ln_top[-1]), int(hidden_dim),
                          int(level)).to(x[0].device)
            h0 = torch.randn(level, n, hidden_dim).to(x[0].device)
            c0 = torch.randn(level, n, hidden_dim).to(x[0].device)
            output, (hn, cn) = rnn(Ht, (h0, c0))
            hn, cn = torch.squeeze(hn[level - 1, :, :]), \
                torch.squeeze(cn[level - 1, :, :])
            if self.debug_mode:
                print(w.shape, output.shape, hn.shape)
            # concat context and w
            z = torch.cat([hn, w], dim=1)
            p_out = dlrm.DLRM_Net().apply_mlp(z, self.mlps[0])
        return p_out
# construct tbsm model or read it from the file specified
# by args.save_model
def get_tbsm(args, use_gpu):
    """Build the TBSM model and its target device from parsed CLI args.

    Derives all layer-size arrays from the dash-separated size strings in
    ``args``, instantiates ``TBSM_Net``, optionally moves it to GPU, and —
    when running in a test mode and ``args.save_model`` exists — restores
    the saved weights.

    Returns:
        (tbsm, device): the model and the torch.device it lives on.
    """
    # train, test, or train-test
    modes = args.mode.split("-")
    model_file = args.save_model
    if args.debug_mode:
        print("model_file: ", model_file)
        print("model_type: ", args.model_type)
    if use_gpu:
        ngpus = torch.cuda.device_count() # 1
        # device index wraps around the number of visible GPUs
        devicenum = "cuda:" + str(args.device_num % ngpus)
        print("device:", devicenum)
        device = torch.device(devicenum)
        print("Using {} GPU(s)...".format(ngpus))
    else:
        device = torch.device("cpu")
        print("Using CPU...")
    # prepare dlrm arch
    m_spa = args.arch_sparse_feature_size
    # this is an array of sizes of cat features
    # NOTE(review): np.fromstring(..., sep=...) is deprecated in newer NumPy
    # releases; consider np.array(s.split("-"), dtype=int) — confirm before changing.
    ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
    num_fea = ln_emb.size + 1 # user: num sparse + bot_mlp(all dense)
    ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
    # m_den = ln_bot[0]
    ln_bot[ln_bot.size - 1] = m_spa # enforcing
    m_den_out = ln_bot[ln_bot.size - 1] # must be == m_spa (embed dim)
    if args.arch_interaction_op == "dot":
        # approach 1: all
        # num_int = num_fea * num_fea + m_den_out
        # approach 2: unique
        if args.arch_interaction_itself:
            num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
        else:
            num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
    elif args.arch_interaction_op == "cat":
        num_int = num_fea * m_den_out
    else:
        sys.exit(
            "ERROR: --arch-interaction-op="
            + args.arch_interaction_op
            + " is not supported"
        )
    # the first layer of the top MLP is sized by the interaction output
    arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
    ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
    # sigmoid_top = len(ln_top) - 2 # used only if length_ts == 1
    # attention mlp (will be automatically adjusted so that first and last
    # layer correspond to number of vectors (ts_length) used in attention)
    ln_atn = np.fromstring(args.tsl_mlp, dtype=int, sep="-")
    # context MLP (with automatically adjusted first layer)
    if args.model_type == "mha":
        num_cat = (int(args.mha_num_heads) + 1) * ln_top[-1] # mha with k heads + w
    else: # tsl or rnn
        num_cat = 2 * ln_top[-1] # [c,w]
    arch_mlp_adjusted = str(num_cat) + "-" + args.arch_mlp
    ln_mlp = np.fromstring(arch_mlp_adjusted, dtype=int, sep="-")
    # construct TBSM
    tbsm = TBSM_Net(
        m_spa,
        ln_emb,
        ln_bot,
        ln_top,
        args.arch_interaction_op,
        args.arch_interaction_itself,
        ln_mlp,
        ln_atn,
        args.tsl_interaction_op,
        args.tsl_mechanism,
        args.ts_length,
        -1,
        args.model_type,
        args.tsl_seq,
        args.tsl_proj,
        args.tsl_inner,
        args.tsl_num_heads,
        args.mha_num_heads,
        args.rnn_num_layers,
        args.debug_mode,
    )
    # move model to gpu
    if use_gpu:
        tbsm = tbsm.to(device) # .cuda()
    # load existing pre-trained model if needed
    if path.exists(model_file):
        if modes[0] == "test" or (len(modes) > 1 and modes[1] == "test"):
            if use_gpu:
                ld_model = torch.load(
                    model_file,
                    map_location=torch.device('cuda')
                )
            else:
                # when targeting inference on CPU
                ld_model = torch.load(model_file, map_location=torch.device('cpu'))
            tbsm.load_state_dict(ld_model['model_state_dict'])
    return tbsm, device
def data_wrap(X, lS_o, lS_i, use_gpu, device):
    """Move one mini-batch (dense inputs, sparse offsets, sparse indices)
    onto *device* when running on GPU; otherwise return the inputs as-is.
    """
    if not use_gpu:
        return X, lS_o, lS_i
    dense_on_dev = [dense.to(device) for dense in X]
    offsets_on_dev = [[off.to(device) for off in row] for row in lS_o]
    indices_on_dev = [[idx.to(device) for idx in row] for row in lS_i]
    return dense_on_dev, offsets_on_dev, indices_on_dev
def time_wrap(use_gpu, device):
    """Wall-clock timestamp; synchronizes CUDA first so pending GPU work
    is included in timing measurements.
    """
    if use_gpu:
        torch.cuda.synchronize(device)
    now = time.time()
    return now
# mean binary cross-entropy over the batch
loss_fn = torch.nn.BCELoss(reduction="mean")
def loss_fn_wrap(Z, T, use_gpu, device):
    """BCE loss between predictions Z and targets T; moves the targets to
    *device* first when running on GPU.
    """
    target = T.to(device) if use_gpu else T
    return loss_fn(Z, target)
# iterate through validation data, which can be used to determine the best seed and
# during main training for deciding to save the current model
def iterate_val_data(val_ld, tbsm, use_gpu, device):
    """Run one full pass over the validation loader.

    Returns (correct_count, sample_count, summed_loss) so the caller can
    compute accuracy and mean loss.
    NOTE: call to tbsm.eval() not needed here, see
    https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615
    """
    loss_sum = 0
    n_correct = 0
    n_samples = 0
    for X, lS_o, lS_i, T_val in val_ld:
        batch = X[0].shape[0]
        Z_val = tbsm(*data_wrap(X, lS_o, lS_i, use_gpu, device))
        # accuracy: round predictions to {0, 1} and compare to targets
        preds = Z_val.detach().cpu().numpy()
        labels = T_val.detach().cpu().numpy()
        n_correct += np.sum((np.round(preds, 0) == labels).astype(np.uint8))
        n_samples += batch
        # loss is reported as a per-batch mean, so re-weight by batch size
        E_val = loss_fn_wrap(Z_val, T_val, use_gpu, device)
        loss_sum += E_val.detach().cpu().numpy() * batch
    return n_correct, n_samples, loss_sum
# iterate through training data, which is called once every epoch. It updates weights,
# computes loss, accuracy, saves model if needed and calls iterate_val_data() function.
# isMainTraining is True for main training and False for fast seed selection
def iterate_train_data(args, train_ld, val_ld, tbsm, k, use_gpu, device, writer, losses, accuracies, isMainTraining):
    """Train for one epoch (epoch index *k*) over *train_ld*.

    In main-training mode: prints running stats, periodically validates,
    logs to tensorboard, and saves the model when validation accuracy
    improves. In seed-selection mode (isMainTraining False): trains up to a
    fixed early test point and returns the validation accuracy gA_test.
    """
    # select number of batches
    if isMainTraining:
        nbatches = len(train_ld) if args.num_batches == 0 else args.num_batches
    else:
        nbatches = len(train_ld)
    # specify the optimizer algorithm
    optimizer = torch.optim.Adagrad(tbsm.parameters(), lr=args.learning_rate)
    total_time = 0
    total_loss = 0
    total_accu = 0
    total_iter = 0
    total_samp = 0
    # NOTE(review): max_gA_test is local, so "best so far" resets every time
    # this function is called (i.e. per epoch) — confirm this is intended.
    max_gA_test = 0
    for j, (X, lS_o, lS_i, T) in enumerate(train_ld):
        if j >= nbatches:
            break
        t1 = time_wrap(use_gpu, device)
        batchSize = X[0].shape[0]
        # forward pass
        Z = tbsm(*data_wrap(X,
                            lS_o,
                            lS_i,
                            use_gpu,
                            device
                            ))
        # loss
        E = loss_fn_wrap(Z, T, use_gpu, device)
        # compute loss and accuracy
        L = E.detach().cpu().numpy() # numpy array
        z = Z.detach().cpu().numpy() # numpy array
        t = T.detach().cpu().numpy() # numpy array
        # rounding t
        A = np.sum((np.round(z, 0) == np.round(t, 0)).astype(np.uint8))
        optimizer.zero_grad()
        # backward pass
        E.backward(retain_graph=True)
        # weights update
        optimizer.step()
        t2 = time_wrap(use_gpu, device)
        total_time += t2 - t1
        total_loss += (L * batchSize)
        total_accu += A
        total_iter += 1
        total_samp += batchSize
        print_tl = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches)
        # print time, loss and accuracy
        if print_tl and isMainTraining:
            gT = 1000.0 * total_time / total_iter if args.print_time else -1
            total_time = 0
            gL = total_loss / total_samp
            total_loss = 0
            gA = total_accu / total_samp
            total_accu = 0
            str_run_type = "inference" if args.inference_only else "training"
            print(
                "Finished {} it {}/{} of epoch {}, ".format(
                    str_run_type, j + 1, nbatches, k
                )
                + "{:.2f} ms/it, loss {:.8f}, accuracy {:3.3f} %".format(
                    gT, gL, gA * 100
                )
            )
            total_iter = 0
            total_samp = 0
        if isMainTraining:
            should_test = (
                (args.test_freq > 0
                 and (j + 1) % args.test_freq == 0) or j + 1 == nbatches
            )
        else:
            # seed selection: validate once, early in the epoch
            should_test = (j == min(int(0.05 * len(train_ld)), len(train_ld) - 1))
        # validation run
        if should_test:
            total_accu_test, total_samp_test, total_loss_val = iterate_val_data(val_ld, tbsm, use_gpu, device)
            gA_test = total_accu_test / total_samp_test
            if not isMainTraining:
                break
            gL_test = total_loss_val / total_samp_test
            print("At epoch {:d} validation accuracy is {:3.3f} %".
                  format(k, gA_test * 100))
            # NOTE(review): gL/gA below are only defined once the print_tl
            # branch above has fired at least once; with the default
            # print_freq=1 that always holds — confirm for other settings.
            if args.enable_summary and isMainTraining:
                writer.add_scalars('train and val loss',
                                   {'train_loss': gL,
                                    'val_loss': gL_test},
                                   k * len(train_ld) + j)
                writer.add_scalars('train and val accuracy',
                                   {'train_acc': gA * 100,
                                    'val_acc': gA_test * 100},
                                   k * len(train_ld) + j)
                # np.append returns a new array; the caller's arrays are not
                # mutated in place
                losses = np.append(losses, np.array([[j, gL, gL_test]]),
                                   axis=0)
                accuracies = np.append(accuracies, np.array([[j, gA * 100,
                                       gA_test * 100]]), axis=0)
            # save model if best so far
            if gA_test > max_gA_test and isMainTraining:
                print("Saving current model...")
                max_gA_test = gA_test
                model_ = tbsm
                torch.save(
                    {
                        "model_state_dict": model_.state_dict(),
                        # "opt_state_dict": optimizer.state_dict(),
                    },
                    args.save_model,
                )
    if not isMainTraining:
        return gA_test
# selects best seed, and does main model training
def train_tbsm(args, use_gpu):
    """Top-level training driver: optionally picks the best of 5 random
    seeds, then trains the TBSM for args.nepochs epochs, with optional
    tensorboard summaries and autograd profiling.
    """
    # prepare the data
    train_ld, _ = tp.make_tbsm_data_and_loader(args, "train")
    val_ld, _ = tp.make_tbsm_data_and_loader(args, "val")
    # setup initial values
    isMainTraining = False
    writer = SummaryWriter()
    losses = np.empty((0,3), np.float32)
    accuracies = np.empty((0,3), np.float32)
    # selects best seed out of 5. Sometimes Adagrad gets stuck early, this
    # seems to occur randomly depending on initial weight values and
    # is independent of chosen model: N-inner, dot etc.
    # this procedure is used to reduce the probability of this happening.
    def select(args):
        # closure reads isMainTraining (False here), so iterate_train_data
        # runs in fast seed-selection mode and returns gA_test
        seeds = np.random.randint(2, 10000, size=5)
        if args.debug_mode:
            print(seeds)
        best_index = 0
        max_val_accuracy = 0.0
        testpoint = min(int(0.05 * len(train_ld)), len(train_ld) - 1)
        print("testpoint, total batches: ", testpoint, len(train_ld))
        for i, seed in enumerate(seeds):
            set_seed(seed, use_gpu)
            tbsm, device = get_tbsm(args, use_gpu)
            gA_test = iterate_train_data(args, train_ld, val_ld, tbsm, 0, use_gpu,
                                         device, writer, losses, accuracies,
                                         isMainTraining)
            if args.debug_mode:
                print("select: ", i, seed, gA_test, max_val_accuracy)
            if gA_test > max_val_accuracy:
                best_index = i
                max_val_accuracy = gA_test
        return seeds[best_index]
    # select best seed if needed
    if args.no_select_seed or path.exists(args.save_model):
        seed = args.numpy_rand_seed
    else:
        print("Choosing best seed...")
        seed = select(args)
    set_seed(seed, use_gpu)
    print("selected seed:", seed)
    # create or load TBSM
    tbsm, device = get_tbsm(args, use_gpu)
    if args.debug_mode:
        print("initial parameters (weights and bias):")
        for name, param in tbsm.named_parameters():
            print(name)
            print(param.detach().cpu().numpy())
    # main training loop
    isMainTraining = True
    print("time/loss/accuracy (if enabled):")
    with torch.autograd.profiler.profile(args.enable_profiling, use_gpu) as prof:
        for k in range(args.nepochs):
            iterate_train_data(args, train_ld, val_ld, tbsm, k, use_gpu, device,
                               writer, losses, accuracies, isMainTraining)
    # collect metrics and other statistics about the run
    if args.enable_summary:
        # NOTE(review): iterate_train_data rebinds its local losses/accuracies
        # (np.append is not in-place), so the arrays saved here appear to stay
        # empty — verify before relying on summary.npy.
        with open('summary.npy', 'wb') as acc_loss:
            np.save(acc_loss, losses)
            np.save(acc_loss, accuracies)
        writer.close()
    # debug prints
    if args.debug_mode:
        print("final parameters (weights and bias):")
        for name, param in tbsm.named_parameters():
            print(name)
            print(param.detach().cpu().numpy())
    # profiling
    if args.enable_profiling:
        with open("tbsm_pytorch.prof", "w") as prof_f:
            prof_f.write(
                prof.key_averages(group_by_input_shape=True).table(
                    sort_by="self_cpu_time_total"
                )
            )
            prof.export_chrome_trace("./tbsm_pytorch.json")
    return
# evaluates model on test data and computes AUC metric
def test_tbsm(args, use_gpu):
    """Evaluate a saved TBSM model on the test set and report the AUC metric.

    Loads the test data, restores the model from args.save_model (exits if
    the file is missing), runs inference over all batches, and computes
    roc_auc_score over the collected predictions and labels.
    """
    # prepare data
    test_ld, N_test = tp.make_tbsm_data_and_loader(args, "test")
    # setup initial values
    # fix: np.float was removed in NumPy 1.24; builtin float (== float64)
    # is the drop-in replacement.
    z_test = np.zeros((N_test, ), dtype=float)
    t_test = np.zeros((N_test, ), dtype=float)
    # check saved model exists
    if not path.exists(args.save_model):
        sys.exit("Can't find saved model. Exiting...")
    # create or load TBSM
    tbsm, device = get_tbsm(args, use_gpu)
    print(args.save_model)
    # main eval loop
    # NOTE: call to tbsm.eval() not needed here, see
    # https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615
    offset = 0
    for _, (X, lS_o, lS_i, T) in enumerate(test_ld):
        batchSize = X[0].shape[0]
        # forward pass; data_wrap moves the batch to the target device
        Z = tbsm(*data_wrap(X,
                            lS_o,
                            lS_i,
                            use_gpu,
                            device
                            ))
        # collect predictions/labels contiguously into the result vectors
        z_test[offset: offset + batchSize] = np.squeeze(Z.detach().cpu().numpy(),
                                                        axis=1)
        t_test[offset: offset + batchSize] = np.squeeze(T.detach().cpu().numpy(),
                                                        axis=1)
        offset += batchSize
    if args.quality_metric == "auc":
        # compute AUC metric
        auc_score = 100.0 * roc_auc_score(t_test.astype(int), z_test)
        print("auc score: ", auc_score)
    else:
        sys.exit("Metric not supported.")
if __name__ == "__main__":
    # Script entry point: parse CLI args, wire in the external DLRM package
    # from --dlrm-path, then dispatch to train and/or test.
    ### import packages ###
    import sys
    import argparse
    ### parse arguments ###
    parser = argparse.ArgumentParser(description="Time Based Sequence Model (TBSM)")
    # path to dlrm
    parser.add_argument("--dlrm-path", type=str, default="")
    # data type: taobao or synthetic (generic)
    parser.add_argument("--datatype", type=str, default="synthetic")
    # mode: train or inference or both
    parser.add_argument("--mode", type=str, default="train") # train, test, train-test
    # data locations
    parser.add_argument("--raw-train-file", type=str, default="./input/train.txt")
    parser.add_argument("--pro-train-file", type=str, default="./output/train.npz")
    parser.add_argument("--raw-test-file", type=str, default="./input/test.txt")
    parser.add_argument("--pro-test-file", type=str, default="./output/test.npz")
    parser.add_argument("--pro-val-file", type=str, default="./output/val.npz")
    parser.add_argument("--num-train-pts", type=int, default=100)
    parser.add_argument("--num-val-pts", type=int, default=20)
    # time series length for train/val and test
    parser.add_argument("--ts-length", type=int, default=20)
    # model_type = "tsl", "mha", "rnn"
    parser.add_argument("--model-type", type=str, default="tsl") # tsl, mha, rnn
    parser.add_argument("--tsl-seq", action="store_true", default=False) # k-seq method
    parser.add_argument("--tsl-proj", action="store_true", default=True) # sphere proj
    parser.add_argument("--tsl-inner", type=str, default="def") # ind, def, dot
    parser.add_argument("--tsl-num-heads", type=int, default=1) # num tsl components
    parser.add_argument("--mha-num-heads", type=int, default=8) # num mha heads
    parser.add_argument("--rnn-num-layers", type=int, default=5) # num rnn layers
    # num positive (and negative) points per user
    parser.add_argument("--points-per-user", type=int, default=10)
    # model arch related parameters
    # embedding dim for all sparse features (same for all features)
    parser.add_argument("--arch-sparse-feature-size", type=int, default=4) # emb_dim
    # number of distinct values for each sparse feature
    parser.add_argument("--arch-embedding-size", type=str, default="4-3-2") # vectors
    # for taobao use "987994-4162024-9439")
    # MLP 1: num dense fea --> embedding dim for sparse fea (out_dim enforced)
    parser.add_argument("--arch-mlp-bot", type=str, default="1-4")
    # MLP 2: num_interactions + bot[-1] --> top[-1]
    # (in_dim adjusted, out_dim can be any)
    parser.add_argument("--arch-mlp-top", type=str, default="2-2")
    # MLP 3: attention: ts_length --> ts_length (both adjusted)
    parser.add_argument("--tsl-mlp", type=str, default="2-2")
    # MLP 4: final prob. of click: 2 * top[-1] --> [0,1] (in_dim adjusted)
    parser.add_argument("--arch-mlp", type=str, default="4-1")
    # interactions
    parser.add_argument("--arch-interaction-op", type=str, default="dot")
    parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
    parser.add_argument("--tsl-interaction-op", type=str, default="dot")
    parser.add_argument("--tsl-mechanism", type=str, default="mlp") # mul or MLP
    # data
    parser.add_argument("--num-batches", type=int, default=0)
    # training
    parser.add_argument("--mini-batch-size", type=int, default=1)
    parser.add_argument("--nepochs", type=int, default=1)
    parser.add_argument("--learning-rate", type=float, default=0.05)
    parser.add_argument("--print-precision", type=int, default=5)
    parser.add_argument("--numpy-rand-seed", type=int, default=123)
    parser.add_argument("--no-select-seed", action="store_true", default=False)
    # inference
    parser.add_argument("--quality-metric", type=str, default="auc")
    parser.add_argument("--test-freq", type=int, default=0)
    # NOTE(review): argparse type=bool is a known pitfall — any non-empty
    # string value parses as True; action="store_true" would be the usual
    # fix, but that changes the CLI shape, so it is left unchanged here.
    parser.add_argument("--inference-only", type=bool, default=False)
    # saving model
    parser.add_argument("--save-model", type=str, default="./output/model.pt")
    # gpu
    parser.add_argument("--use-gpu", action="store_true", default=False)
    parser.add_argument("--device-num", type=int, default=0)
    # debugging and profiling
    parser.add_argument("--debug-mode", action="store_true", default=False)
    parser.add_argument("--print-freq", type=int, default=1)
    parser.add_argument("--print-time", action="store_true", default=False)
    parser.add_argument("--enable-summary", action="store_true", default=False)
    parser.add_argument("--enable-profiling", action="store_true", default=False)
    parser.add_argument("--run-fast", action="store_true", default=False)
    args = parser.parse_args()
    # the code requires access to dlrm model
    if not path.exists(str(args.dlrm_path)):
        sys.exit("Please provide path to DLRM as --dlrm-path")
    # make the external DLRM checkout importable, then bind it module-wide
    sys.path.insert(1, args.dlrm_path)
    import dlrm_s_pytorch as dlrm
    if args.datatype == "taobao" and args.arch_embedding_size != "987994-4162024-9439":
        sys.exit(
            "ERROR: arch-embedding-size for taobao "
            + " needs to be 987994-4162024-9439"
        )
    if args.tsl_inner not in ["def", "ind"] and int(args.tsl_num_heads) > 1:
        sys.exit(
            "ERROR: dot product "
            + " assumes one tsl component (due to redundancy)"
        )
    # model_type = "tsl", "mha", "rnn"
    print("dlrm path: ", args.dlrm_path)
    print("model_type: ", args.model_type)
    print("time series length: ", args.ts_length)
    print("seed: ", args.numpy_rand_seed)
    print("model_file:", args.save_model)
    ### some basic setup ###
    use_gpu = args.use_gpu and torch.cuda.is_available()
    set_seed(args.numpy_rand_seed, use_gpu)
    np.set_printoptions(precision=args.print_precision)
    torch.set_printoptions(precision=args.print_precision)
    print("use-gpu:", use_gpu)
    # possible modes:
    # "train-test" for both training and metric computation on test data,
    # "train" for training model
    # "test" for metric computation on test data using saved trained model
    modes = args.mode.split("-")
    if modes[0] == "train":
        train_tbsm(args, use_gpu)
    if modes[0] == "test" or (len(modes) > 1 and modes[1] == "test"):
        test_tbsm(args, use_gpu)
tbsm | tbsm-main/tbsm_synthetic.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
from sklearn.metrics import roc_auc_score
# pytorch
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
# In synthetic experiment we generate the output vectors z of the embedding
# layer directly, therefore we create a custom TBSM, rather than instantiate
# an existing general model.
# Synthetic experiment code
# It generates time series data in D dimensions
# with the property that binary label has some dependency
# on coupling between time series components in pairs of dimensions.
def synthetic_experiment():
    """Synthetic-data study of TBSM attention variants.

    Generates N train / Nt test time series in D dimensions with binary
    labels that depend on couplings between dimension pairs (controlled by
    K permutation terms), then trains and AUC-scores five sub-networks
    (dot, def x {1,4,8} inner products, mha) for each K in {0, 10, 20, 30}.

    Fixes applied: dtype=np.int and dtype=np.float were removed in
    NumPy 1.24; replaced by the builtin int/float equivalents.
    """
    N, Nt, D, T = 50000, 5000, 5, 10
    auc_results = np.empty((0, 5), np.float32)
    def generate_data(N, high):
        # iid uniform history H (N, T, D) and candidate vector w (N, 1, D)
        H = np.random.uniform(low=-1.0, high=1.0, size=N * D * T).reshape(N, T, D)
        w = np.random.uniform(low=-1.0, high=1.0, size=N * D).reshape(N, 1, D)
        return H, w
    for K in range(0, 31, 10):
        print("num q terms: ", K)
        # ----- train set ------
        H, w = generate_data(N, 1.0)
        wt = np.transpose(w, (0, 2, 1))
        # K random dimension permutations couple the components of w
        p = np.zeros(D * K, dtype=int).reshape(K, D)
        for j in range(K):
            p[j, :] = np.random.permutation(D)
            wt2 = wt[:, p[j], :]
            wt = wt + wt2
        Q = np.matmul(H[:, :, :], wt[:, :, :]) # similarity coefs
        Q = np.squeeze(Q, axis=2)
        R = np.mean(Q, axis=1)
        R = np.sign(R)
        # s1 = np.count_nonzero(R > 0)
        # print(Q.shape)
        # print("num pos, total: ", s1, N)
        # labels in {0, 2} before reshape; BCE targets come from np.round later
        R = R + 1
        t_train = R.reshape(N, 1)
        z_train = np.concatenate((H, w), axis=1)
        # ----- test set ------
        H, w = generate_data(Nt, 1.0)
        wt = np.transpose(w, (0, 2, 1))
        for j in range(K):
            wt2 = wt[:, p[j], :]
            wt = wt + wt2
        Q = np.matmul(H[:, :, :], wt[:, :, :]) # dot product
        Q = np.squeeze(Q, axis=2)
        R = np.mean(Q, axis=1)
        R = np.sign(R) + 1
        t_test = R.reshape(Nt, 1)
        z_test = np.concatenate((H, w), axis=1)
        # debug prints
        # print(z_train.shape, t_train.shape)
        class SyntheticDataset:
            """Indexable (features, label) container for the DataLoader."""
            def __init__(self, F, y):
                self.F = F
                self.y = y
            def __getitem__(self, index):
                if isinstance(index, slice):
                    return [
                        self[idx] for idx in range(
                            index.start or 0, index.stop or len(self), index.step or 1
                        )
                    ]
                return self.F[index], self.y[index]
            def __len__(self):
                return len(self.y)
        ztraind = SyntheticDataset(z_train, t_train)
        ztestd = SyntheticDataset(z_test, t_test)
        def collate_zfn(list_of_tuples):
            # batch list of (F, y) pairs into two float tensors
            data = list(zip(*list_of_tuples))
            F = torch.tensor(data[0], dtype=torch.float)
            y = torch.tensor(data[1], dtype=torch.float)
            # y = torch.unsqueeze(y, 1)
            return F, y
        ztrain_ld = torch.utils.data.DataLoader(
            ztraind,
            batch_size=128,
            num_workers=0,
            collate_fn=collate_zfn,
            shuffle=True
        )
        ztest_ld = torch.utils.data.DataLoader(
            ztestd,
            batch_size=Nt,
            num_workers=0,
            collate_fn=collate_zfn,
        )
        ### define TBSM in PyTorch ###
        class TBSM_SubNet(nn.Module):
            """One attention variant: dot / def / ind inner products, mha,
            or plain concatenation, followed by a probability MLP."""
            def __init__(
                self,
                mode,
                num_inner,
                D,
                T,
            ):
                super(TBSM_SubNet, self).__init__()
                self.mode = mode
                self.num_inner = num_inner
                if self.mode in ["def", "ind", "dot"]:
                    if self.mode in ["def", "ind"]:
                        # NOTE(review): self.A is a plain list, so these
                        # Parameters are not registered with the Module and
                        # do not appear in znet.parameters() — verify whether
                        # the A matrices are meant to be trained.
                        self.A = []
                        mean = 0.0
                        std_dev = np.sqrt(2 / (D + D))
                        for _ in range(self.num_inner):
                            E = np.eye(D, dtype=np.float32)
                            W = np.random.normal(mean, std_dev, size=(1, D, D)) \
                                .astype(np.float32)
                            self.A.append(Parameter(torch.tensor(E + W),
                                                    requires_grad=True))
                    d = self.num_inner * T
                    # d = self.num_inner * D + D
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
                elif self.mode == "mha":
                    m = D # dim
                    self.nheads = 8
                    self.emb_m = self.nheads * m # mha emb dim
                    mean = 0.0
                    std_dev = np.sqrt(2 / (m + m)) # np.sqrt(1 / m) # np.sqrt(1 / n)
                    qm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.Q = Parameter(torch.tensor(qm), requires_grad=True)
                    km = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.K = Parameter(torch.tensor(km), requires_grad=True)
                    vm = np.random.normal(mean, std_dev, size=(1, m, self.emb_m)) \
                        .astype(np.float32)
                    self.V = Parameter(torch.tensor(vm), requires_grad=True)
                    d = self.nheads * m
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
                else:
                    d = D * (T + 1)
                    ln_mlp = np.array([d, 2 * d, 1])
                    self.mlp = dlrm.DLRM_Net().create_mlp(ln_mlp, ln_mlp.size - 2)
            def forward(self, x):
                # H * w
                H = x[:, :-1, :]
                w = torch.unsqueeze(x[:, -1, :], dim=1)
                w = torch.transpose(w, 1, 2)
                # inner products
                if self.mode in ["def", "ind"]:
                    for j in range(self.num_inner):
                        aw = torch.matmul(self.A[j], w)
                        if self.mode == "def":
                            aw = torch.matmul(self.A[j].permute(0, 2, 1), aw)
                        a1 = torch.bmm(H, aw)
                        if j == 0:
                            z = a1
                        else:
                            z = torch.cat([z, a1], dim=1)
                    z = torch.squeeze(z, dim=2)
                elif self.mode == "dot":
                    z = torch.bmm(H, w)
                    z = torch.squeeze(z, dim=2)
                elif self.mode == "mha":
                    w = torch.transpose(w, 1, 2)
                    # print("mha shapes: ", w.shape, self.Q.shape)
                    Qx = torch.transpose(torch.matmul(w, self.Q), 0, 1)
                    HK = torch.transpose(torch.matmul(H, self.K), 0, 1)
                    HV = torch.transpose(torch.matmul(H, self.V), 0, 1)
                    # NOTE(review): a fresh (randomly initialized, untrained)
                    # MultiheadAttention is created on every forward call —
                    # confirm this is intentional.
                    multihead_attn = nn.MultiheadAttention(self.emb_m, self.nheads)
                    attn_output, _ = multihead_attn(Qx, HK, HV)
                    # print("attn shape: ", attn_output.shape)
                    z = torch.squeeze(attn_output, dim=0)
                else: # concat
                    H = torch.flatten(H, start_dim=1, end_dim=2)
                    w = torch.flatten(w, start_dim=1, end_dim=2)
                    z = torch.cat([H, w], dim=1)
                # obtain probability of a click as a result of MLP
                p = dlrm.DLRM_Net().apply_mlp(z, self.mlp)
                return p
        def train_inner(znet):
            # train one sub-net for one epoch and return its test AUC
            loss_fn = torch.nn.BCELoss(reduction="mean")
            # loss_fn = torch.nn.L1Loss(reduction="mean")
            optimizer = torch.optim.Adagrad(znet.parameters(), lr=0.05)
            # optimizer = torch.optim.SGD(znet.parameters(), lr=0.05)
            znet.train()
            nepochs = 1
            for _ in range(nepochs):
                TA = 0
                TS = 0
                for _, (X, y) in enumerate(ztrain_ld):
                    batchSize = X.shape[0]
                    # forward pass
                    Z = znet(X)
                    # loss
                    # print("Z, y: ", Z.shape, y.shape)
                    E = loss_fn(Z, y)
                    # compute loss and accuracy
                    # L = E.detach().cpu().numpy() # numpy array
                    z = Z.detach().cpu().numpy() # numpy array
                    t = y.detach().cpu().numpy() # numpy array
                    # rounding t: smooth labels case
                    A = np.sum((np.round(z, 0) == np.round(t, 0)).astype(np.uint16))
                    TA += A
                    TS += batchSize
                    optimizer.zero_grad()
                    # backward pass
                    E.backward(retain_graph=True)
                    # optimizer
                    optimizer.step()
                    # if j % 500 == 0:
                    # acc = 100.0 * TA / TS
                    # print("j, acc: ", j, acc)
                    # TA = 0
                    # TS = 0
            # fix: np.float removed in NumPy 1.24 — use builtin float
            z_final = np.zeros(Nt, dtype=float)
            offset = 0
            znet.eval()
            for _, (X, _) in enumerate(ztest_ld):
                batchSize = X.shape[0]
                Z = znet(X)
                z_final[offset: offset + batchSize] = \
                    np.squeeze(Z.detach().cpu().numpy(), axis=1)
                offset += batchSize
            # E = loss_fn(Z, y)
            # L = E.detach().cpu().numpy() # numpy array
            # loss_net = L
            # print(znet.num_inner, znet.mode, ": ", loss_net)
            auc_net = 100.0 * roc_auc_score(t_test.astype(int), z_final)
            print(znet.num_inner, znet.mode, ": ", auc_net)
            return auc_net
        dim = T
        znet = TBSM_SubNet("dot", 1, D, dim) # c or c,w
        res1 = train_inner(znet)
        znet = TBSM_SubNet("def", 1, D, dim) # c or c,w
        res2 = train_inner(znet)
        znet = TBSM_SubNet("def", 4, D, dim) # c or c,w
        res3 = train_inner(znet)
        znet = TBSM_SubNet("def", 8, D, dim) # c or c,w
        res4 = train_inner(znet)
        znet = TBSM_SubNet("mha", 1, D, dim) # c or c,w
        res5 = train_inner(znet)
        auc_results = np.append(auc_results, np.array([[res1, res2, res3, res4, res5]]),
                                axis=0)
    print(auc_results)
    # np.savez_compressed(
    # 'auc_synthetic.npz',
    # auc_results=auc_results,
    # )
if __name__ == "__main__":
    # Entry point: requires a path to a DLRM checkout, which is imported at
    # run time and bound to the module-global name `dlrm` used by
    # synthetic_experiment().
    import argparse
    parser = argparse.ArgumentParser(description="Synthetic data experiments (TBSM)")
    # path to dlrm
    parser.add_argument("--dlrm-path", type=str, default="")
    args = parser.parse_args()
    if not path.exists(str(args.dlrm_path)):
        sys.exit("Please provide path to DLRM as --dlrm-path")
    sys.path.insert(1, args.dlrm_path)
    import dlrm_s_pytorch as dlrm
    synthetic_experiment()
| 11,465 | 36.106796 | 88 | py |
tbsm | tbsm-main/tbsm_data_pytorch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
### import packages ###
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
# pytorch
import torch
# dataset (either synthetic or Taobao)
class TBSMDataset():
    def __init__(
            self,
            datatype,
            mode,
            ts_length=20,
            points_per_user=4,
            numpy_rand_seed=7,
            raw_path="",
            pro_data="",
            spa_fea_sizes="",
            num_pts=1, # pts to train or test
    ):
        """Load (or first build) the pre-processed dataset.

        datatype: "taobao" or "synthetic"; mode: "train", "val" or "test".
        If the .npz file at *pro_data* does not exist it is produced from
        *raw_path* by the matching build_* method, then loaded into
        self.X_cat / self.X_int / self.y.
        """
        # save arguments
        # val/test get a shifted seed so they draw different points than train
        if mode == "train":
            self.numpy_rand_seed = numpy_rand_seed
        else:
            self.numpy_rand_seed = numpy_rand_seed + 31
        self.mode = mode
        # save dataset parameters
        self.total = num_pts # number of lines in txt to process
        self.ts_length = ts_length
        self.points_per_user = points_per_user # pos and neg points per user
        self.spa_fea_sizes = spa_fea_sizes
        self.M = 200 # max history length
        # split the datafile into path and filename
        lstr = raw_path.split("/")
        self.d_path = "/".join(lstr[0:-1]) + "/"
        self.d_file = lstr[-1]
        # preprocess data if needed
        if path.exists(str(pro_data)):
            print("Reading pre-processed data=%s" % (str(pro_data)))
            file = str(pro_data)
        else:
            file = str(pro_data)
            # dash-separated cardinalities of the sparse features
            levels = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
            if datatype == "taobao":
                self.Unum = levels[0] # 987994 num of users
                self.Inum = levels[1] # 4162024 num of items
                self.Cnum = levels[2] # 9439 num of categories
                print("Reading raw data=%s" % (str(raw_path)))
                if self.mode == "test":
                    self.build_taobao_test(
                        raw_path,
                        file,
                    )
                else:
                    self.build_taobao_train_or_val(
                        raw_path,
                        file,
                    )
            elif datatype == "synthetic":
                self.build_synthetic_train_or_val(
                    file,
                )
        # load data
        with np.load(file) as data:
            self.X_cat = data["X_cat"]
            self.X_int = data["X_int"]
            self.y = data["y"]
# common part between train/val and test generation
# truncates (if needed) and shuffles data points
def truncate_and_save(self, out_file, do_shuffle, t, users, items, cats, times, y):
# truncate. If for some users we didn't generate had too short history
# we truncate the unused portion of the pre-allocated matrix.
if t < self.total_out:
users = users[:t, :]
items = items[:t, :]
cats = cats[:t, :]
times = times[:t, :]
y = y[:t]
# shuffle
if do_shuffle:
indices = np.arange(len(y))
indices = np.random.permutation(indices)
users = users[indices]
items = items[indices]
cats = cats[indices]
times = times[indices]
y = y[indices]
N = len(y)
X_cat = np.zeros((3, N, self.ts_length + 1), dtype="i4") # 4 byte int
X_int = np.zeros((1, N, self.ts_length + 1), dtype=np.float)
X_cat[0, :, :] = users
X_cat[1, :, :] = items
X_cat[2, :, :] = cats
X_int[0, :, :] = times
# saving to compressed numpy file
if not path.exists(out_file):
np.savez_compressed(
out_file,
X_cat=X_cat,
X_int=X_int,
y=y,
)
return
# processes raw train or validation into npz format required by training
# for train data out of each line in raw datafile produces several randomly chosen
# datapoints, max number of datapoints per user is specified by points_per_user
# argument, for validation data produces one datapoint per user.
def build_taobao_train_or_val(self, raw_path, out_file):
with open(str(raw_path)) as f:
for i, _ in enumerate(f):
if i % 50000 == 0:
print("pre-processing line: ", i)
self.total = min(self.total, i + 1)
print("total lines: ", self.total)
self.total_out = self.total * self.points_per_user * 2 # pos + neg points
print("Total number of points in raw datafile: ", self.total)
print("Total number of points in output will be at most: ", self.total_out)
np.random.seed(self.numpy_rand_seed)
r_target = np.arange(0, self.M - 1)
time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
# time = np.ones(self.ts_length + 1, dtype=np.int32)
users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
times = np.zeros((self.total_out, self.ts_length + 1), dtype=np.float)
y = np.zeros(self.total_out, dtype="i4") # 4 byte int
# determine how many datapoints to take from each user based on the length of
# user behavior sequence
# ind=0, 1, 2, 3,... t < 10, 20, 30, 40, 50, 60, ...
k = 20
regime = np.zeros(k, dtype=np.int)
regime[1], regime[2], regime[3] = 1, 3, 6
for j in range(4, k):
regime[j] = self.points_per_user
if self.mode == "val":
self.points_per_user = 1
for j in range(k):
regime[j] = np.min([regime[j], self.points_per_user])
last = self.M - 1 # max index of last item
# try to generate the desired number of points (time series) per each user.
# if history is short it may not succeed to generate sufficiently different
# time series for a particular user.
t, t_pos, t_neg, t_short = 0, 0, 0, 0
with open(str(raw_path)) as f:
for i, line in enumerate(f):
if i % 1000 == 0:
print("processing line: ", i, t, t_pos, t_neg, t_short)
if i >= self.total:
break
units = line.strip().split("\t")
item_hist_list = units[4].split(",")
cate_hist_list = units[5].split(",")
neg_item_hist_list = units[6].split(",")
neg_cate_hist_list = units[7].split(",")
user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
dtype=np.int32)
# y[i] = np.int32(units[3])
items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
dtype=np.int32
)
cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), cate_hist_list)), dtype=np.int32
)
neg_items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), neg_item_hist_list)),
dtype=np.int32
)
neg_cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), neg_cate_hist_list)),
dtype=np.int32
)
# select datapoints
first = np.argmax(items_ > 0)
ind = int((last - first) // 10) # index into regime array
# pos
for _ in range(regime[ind]):
a1 = min(first + self.ts_length, last - 1)
end = np.random.randint(a1, last)
indices = np.arange(end - self.ts_length, end + 1)
if items_[indices[0]] == 0:
t_short += 1
items[t] = items_[indices]
cats[t] = cats_[indices]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
y[t] = 1
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
t += 1
t_pos += 1
# neg
for _ in range(regime[ind]):
a1 = min(first + self.ts_length - 1, last - 1)
end = np.random.randint(a1, last)
indices = np.arange(end - self.ts_length + 1, end + 1)
if items_[indices[0]] == 0:
t_short += 1
items[t, :-1] = items_[indices]
cats[t, :-1] = cats_[indices]
neg_indices = np.random.choice(r_target, 1,
replace=False) # random final item
items[t, -1] = neg_items_[neg_indices]
cats[t, -1] = neg_cats_[neg_indices]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
y[t] = 0
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
t += 1
t_neg += 1
print("total points, pos points, neg points: ", t, t_pos, t_neg)
self.truncate_and_save(out_file, True, t, users, items, cats, times, y)
return
# processes raw test datafile into npz format required to be used by
# inference step, produces one datapoint per user by taking last ts-length items
def build_taobao_test(self, raw_path, out_file):
with open(str(raw_path)) as f:
for i, _ in enumerate(f):
if i % 50000 == 0:
print("pre-processing line: ", i)
self.total = i + 1
self.total_out = self.total # pos + neg points
print("ts_length: ", self.ts_length)
print("Total number of points in raw datafile: ", self.total)
print("Total number of points in output will be at most: ", self.total_out)
time = np.arange(self.ts_length + 1, dtype=np.int32) / (self.ts_length + 1)
users = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
items = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
cats = np.zeros((self.total_out, self.ts_length + 1), dtype="i4") # 4 byte int
times = np.zeros((self.total_out, self.ts_length + 1), dtype=np.float)
y = np.zeros(self.total_out, dtype="i4") # 4 byte int
# try to generate the desired number of points (time series) per each user.
# if history is short it may not succeed to generate sufficiently different
# time series for a particular user.
t, t_pos, t_neg = 0, 0, 0
with open(str(raw_path)) as f:
for i, line in enumerate(f):
if i % 1000 == 0:
print("processing line: ", i, t, t_pos, t_neg)
if i >= self.total:
break
units = line.strip().split("\t")
item_hist_list = units[4].split(",")
cate_hist_list = units[5].split(",")
user = np.array(np.maximum(np.int32(units[0]) - self.Inum, 0),
dtype=np.int32)
y[t] = np.int32(units[3])
items_ = np.array(
list(map(lambda x: np.maximum(np.int32(x), 0), item_hist_list)),
dtype=np.int32
)
cats_ = np.array(
list(map(lambda x: np.maximum(np.int32(x)
- self.Inum - self.Unum, 0), cate_hist_list)), dtype=np.int32
)
# get pts
items[t] = items_[-(self.ts_length + 1):]
cats[t] = cats_[-(self.ts_length + 1):]
users[t] = np.full(self.ts_length + 1, user)
times[t] = time
# check
if np.any(users[t] < 0) or np.any(items[t] < 0) \
or np.any(cats[t] < 0):
sys.exit("Categorical feature less than zero after \
processing. Aborting...")
if y[t] == 1:
t_pos += 1
else:
t_neg += 1
t += 1
print("total points, pos points, neg points: ", t, t_pos, t_neg)
self.truncate_and_save(out_file, False, t, users, items, cats, times, y)
return
# builds small synthetic data mimicking the structure of taobao data
def build_synthetic_train_or_val(self, out_file):
np.random.seed(123)
fea_sizes = np.fromstring(self.spa_fea_sizes, dtype=int, sep="-")
maxval = np.min(fea_sizes)
num_s = len(fea_sizes)
X_cat = np.random.randint(maxval, size=(num_s, self.total, self.ts_length + 1),
dtype="i4") # 4 byte int
X_int = np.random.uniform(0, 1, size=(1, self.total, self.ts_length + 1))
y = np.random.randint(0, 2, self.total, dtype="i4") # 4 byte int
# saving to compressed numpy file
if not path.exists(out_file):
np.savez_compressed(
out_file,
X_cat=X_cat,
X_int=X_int,
y=y,
)
return
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
return self.X_cat[:, index, :], self.X_int[:, index, :], self.y[index]
    def __len__(self):
        """Dataset size: the number of labels (and hence datapoints)."""
        return len(self.y)
# defines transform to be performed during each call to batch,
# used by loader
def collate_wrapper_tbsm(list_of_tuples):
    """Collate a batch of (X_cat, X_int, y) tuples into TBSM/DLRM inputs.

    Args:
        list_of_tuples: batch items as produced by TBSMDataset.__getitem__,
            each (categorical history, dense history, label).

    Returns:
        X: list (one entry per time step) of dense features, each (batch, 1).
        lS_o: per-step lists of embedding offset tensors (0 .. batch-1).
        lS_i: per-step lists of embedding index tensors, one per sparse feature.
        T: labels as a (batch, 1) float32 tensor.
    """
    # turns tuple into X, S_o, S_i, take last ts_length items
    data = list(zip(*list_of_tuples))
    # Stack with numpy first: torch.tensor on a tuple of ndarrays is very slow
    # (it copies element-by-element and emits a UserWarning).
    all_cat = torch.as_tensor(np.stack(data[0]), dtype=torch.long)
    all_int = torch.as_tensor(np.stack(data[1]), dtype=torch.float)
    # print("shapes:", all_cat.shape, all_int.shape)
    num_den_fea = all_int.shape[1]
    num_cat_fea = all_cat.shape[1]
    batchSize = all_cat.shape[0]
    ts_len = all_cat.shape[2]
    all_int = torch.reshape(all_int, (batchSize, num_den_fea * ts_len))
    # The offsets tensor is identical for every feature at every time step;
    # build it once instead of num_cat_fea * ts_len times.
    offsets = torch.arange(batchSize)
    X = []
    lS_i = []
    lS_o = []
    # transform data into the form used in dlrm nn
    for j in range(ts_len):
        lS_i.append([all_cat[:, i, j] for i in range(num_cat_fea)])
        lS_o.append([offsets for _ in range(num_cat_fea)])
        X.append(all_int[:, j].view(-1, 1))
    T = torch.tensor(data[2], dtype=torch.float32).view(-1, 1)
    return X, lS_o, lS_i, T
# creates a loader (train, val or test data) to be used in the main training loop
# or during inference step
def make_tbsm_data_and_loader(args, mode):
    """Build a TBSMDataset and its DataLoader for the requested mode.

    Args:
        args: parsed command-line arguments holding file paths and sizes.
        mode: "train", "val", or anything else (treated as test/inference).

    Returns:
        Tuple of (DataLoader, number of datapoints in the dataset).
    """
    # Per-mode configuration: source file, processed file, point count,
    # batch size and whether batches are shuffled.
    if mode == "train":
        raw, proc = args.raw_train_file, args.pro_train_file
        numpts = args.num_train_pts
        batchsize = args.mini_batch_size
        doshuffle = True
    elif mode == "val":
        raw, proc = args.raw_train_file, args.pro_val_file
        numpts = args.num_val_pts
        batchsize = 25000
        doshuffle = True
    else:
        # test / inference: fixed order, single pass
        raw, proc = args.raw_test_file, args.pro_test_file
        numpts = 1
        batchsize = 25000
        doshuffle = False
    data = TBSMDataset(
        args.datatype,
        mode,
        args.ts_length,
        args.points_per_user,
        args.numpy_rand_seed,
        raw,
        proc,
        args.arch_embedding_size,
        numpts,
    )
    loader = torch.utils.data.DataLoader(
        data,
        batch_size=batchsize,
        num_workers=0,
        collate_fn=collate_wrapper_tbsm,
        shuffle=doshuffle,
    )
    return loader, len(data)
| 16,772 | 36.52349 | 88 | py |
rllab | rllab-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# rllab documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 15 20:07:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rllab'
copyright = '2016, rllab contributors'
author = 'rllab contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Read the Docs sets READTHEDOCS=True in the build environment and supplies
# its own theme, so sphinx_rtd_theme is only imported for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'rllabdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rllab.tex', 'rllab Documentation',
'rllab contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rllab', 'rllab Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rllab', 'rllab Documentation',
author, 'rllab', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,550 | 31.050336 | 79 | py |
wakenet | wakenet-master/Code/example_main.py | from neuralWake import *
from optimisation import *
import synth_and_train as st
def florisPw(u_stream, tis, xs, ys, yws):
    """Run FLORIS for the given farm state and return total power in MW.

    Args:
        u_stream: free-stream wind speed (m/s).
        tis: turbulence intensity.
        xs, ys: turbine x/y coordinates.
        yws: turbine yaw angles.

    Returns:
        Total farm power in MW, rounded to 2 decimals.

    NOTE(review): relies on the module-level FLORIS interface `fi` and the
    `curl` flag brought in by `from neuralWake import *`.
    """
    # Initialise FLORIS for the requested configuration
    if curl == True:
        fi.floris.farm.set_wake_model("curl")
    fi.reinitialize_flow_field(wind_speed=u_stream)
    fi.reinitialize_flow_field(turbulence_intensity=tis)
    fi.reinitialize_flow_field(layout_array=[xs, ys])
    fi.calculate_wake(yaw_angles=yws)
    # Total farm power (W) for this configuration, converted to MW on return
    floris_power_0 = fi.get_farm_power()
    return round(floris_power_0/1e6, 2)
def main():
    """Entry point: train wakeNet or load pre-trained weights, then run one
    of the interactive validation/optimisation test cases.

    NOTE(review): relies on globals (train_net, device, weights_path, D,
    fi, ...) imported via `from neuralWake import *`. The prompt advertises
    test cases 1-4 but only cases 1-3 are implemented below.
    """
    if train_net == True:
        # Start training timer
        t0 = time.time()
        # Create the dataset
        X_train, X_val, X_test, y_train, y_val, y_test = st.create()
        # Set neural model
        model = wakeNet().to(device)
        # Feed domain points to train the model
        print("Training...")
        vloss_plot, tloss_plot, v_plot, t_plot = \
            st.training(X_train, X_val, X_test, y_train, y_val, y_test, model, plot_curves=1, saveas='tcurv')
        # End training timer
        t1 = time.time()
        print("Training took: ", int(t1 - t0), " seconds")
    else:
        # Set neural model: inference only, load pre-trained weights
        model = wakeNet().to(device)
        model.load_state_dict(torch.load(weights_path, map_location=device))
        model.eval().to(device)
    # Sets test case value (interactive prompt)
    test = int(input("Please enter the test case number (1-4): "))
    if test == 1:
        # Single and multiple wake comparisons
        # Single
        xs = np.array([D])
        ys = np.array([D])
        yws = [-30]
        compare(
            plots=True,
            yws=yws,
            ws=11,
            ti=0.05,
            xs=xs,
            ys=ys,
            print_times=True,
            single=False,
        )
        # Multiple 1: 3x3 staggered farm
        xs = np.array([1*D, 1*D, 1*D,
                    4.5*D, 4.5*D, 4.5*D,
                    8*D, 8*D, 8*D])
        ys = np.array([1*D, 3*D, 5*D,
                    2*D, 4*D, 6*D,
                    1*D, 3*D, 5*D])
        yws = [30, -30, 30, -30, 30, -30, 30, -30, 30, -30]
        compare(
            plots=True,
            yws=yws,
            ws=11,
            ti=0.05,
            xs=xs,
            ys=ys,
            print_times=True,
            single=False,
        )
    if test == 2:
        # Case A (yaw) M3: 6-turbine yaw optimisation, FLORIS vs DNN
        xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
        ys = np.array([1*D, 7*D, 1*D, 7*D, 1*D, 7*D])
        florisOptimiser(ws=11, ti=0.05, layout_x=xs, layout_y=ys, plots=True)
        neuralOptimiser(ws=11, ti=0.05, xs=xs, ys=ys, plots=True, floris_gain=True)
        # Yaw power heatmaps
        heatmap(xs, ys, res=3, farm_opt=False)
        # Case B (yaw) M2: 15-turbine yaw optimisation
        xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
                    8*D, 8*D, 8*D, 11.5*D, 11.5*D,
                    15*D, 15*D, 15*D, 18.5*D, 18.5*D])
        ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
                    1*D, 5*D, 9*D, 3*D, 7*D,
                    1*D, 5*D, 9*D, 3*D, 7*D])
        florisOptimiser(ws=11, ti=0.05, layout_x=xs, layout_y=ys, plots=False)
        neuralOptimiser(ws=11, ti=0.05, xs=xs, ys=ys, plots=False, floris_gain=True)
        # Yaw power heatmaps
        heatmap(xs, ys, res=3, farm_opt=False)
    if test == 3:
        # Case C (Layout): layout optimisation, FLORIS vs DNN
        # 6-turb
        xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
        ys = np.array([1*D, 5*D, 1*D, 5*D, 1*D, 5*D])
        # # 15-turb
        # xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
        #             8*D, 8*D, 8*D, 11.5*D, 11.5*D,
        #             15*D, 15*D, 15*D, 18.5*D, 18.5*D])
        # ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
        #             1*D, 5*D, 9*D, 3*D, 7*D,
        #             1*D, 5*D, 9*D, 3*D, 7*D])
        neuralOptimiser(ws=11.0, ti=0.05, xs=xs, ys=ys, plots=True, floris_gain=True, mode='farm')
        florisOptimiser(ws=11.0, ti=0.05, layout_x=xs, layout_y=ys, plots=True, mode='farm')
        # Layout power heatmaps
        heatmap(xs, ys, res=10, farm_opt=True)
        heatmap(xs, ys, res=3, farm_opt=True)
# Run the interactive test-case driver when executed as a script.
if __name__=="__main__":
    main()
| 4,399 | 30.884058 | 105 | py |
wakenet | wakenet-master/Code/packages.py | # Package list
import os
import time
import json
import random
import warnings
import numpy as np
import scipy.stats as stats
from matplotlib import rc
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
import torch
import torch.nn as nn
import torch.optim as optim
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader
import dask
from dask.distributed import Client, progress
from PIL import Image
from scipy import interp
from scipy import ndimage
from scipy import interpolate
from scipy.optimize import minimize
import floris.tools as wfct
import floris.tools.visualization as vis
from floris.tools.optimization.scipy.yaw import YawOptimization
from floris.tools.optimization.scipy.layout import LayoutOptimization
import logging
logging.getLogger("floris").setLevel(logging.WARNING)
| 840 | 21.72973 | 69 | py |
wakenet | wakenet-master/Code/superposition.py | from re import S
from neuralWake import *
from torch import cpu
from CNNWake.FCC_model import *
warnings.filterwarnings("ignore")
# Synth value
# Module-level setup: when not training (train_net == 0), load the
# pre-trained wake model; optionally also load the CNNWake helper networks
# used for local turbulence-intensity and power estimation.
if train_net == 0:
    # Load model
    model = wakeNet().to(device)
    model.load_state_dict(torch.load(weights_path, map_location=device))
    model.eval().to(device)
# Use CNNWake module to calculate local ti values
# Initialise network to local turbulent intensities
nr_input_values = 42 # Number of input values
nr_neurons_ti = 200 # Number of neurons in every layer
nr_neurons = 300 # Number of neurons in every layer
nr_output = 1 # Number of outputs from model
# Use CNNWake module to calculate local power and ti values
if local_ti == True:
    TI_model = FCNN(nr_input_values, nr_neurons_ti, nr_output).to(device)
    # Load trained model and set it to evaluation mode
    TI_model.load_model("CNNWake/FCNN_TI.pt", device=device)
    TI_model.eval()
if local_pw == True:
    pw_model = FCNN(nr_input_values, nr_neurons, nr_output).to(device)
    # Load trained model and set it to evaluation mode
    pw_model.load_model("CNNWake/power_model.pt", device=device)
    pw_model.eval()
def florisPw(u_stream, tis, xs, ys, yws):
    """Run FLORIS for the given farm state and return total power.

    NOTE(review): unlike the same-named function in example_main.py, this
    variant returns power in watts (no /1e6); superposition() converts to MW
    at its call sites — confirm before reusing elsewhere.

    Args:
        u_stream: free-stream wind speed (m/s).
        tis: turbulence intensity.
        xs, ys: turbine x/y coordinates.
        yws: turbine yaw angles.

    Returns:
        Total farm power in W, rounded to 2 decimals.
    """
    # Initialise FLORIS for the requested configuration
    if curl == True:
        fi.floris.farm.set_wake_model("curl")
    fi.reinitialize_flow_field(wind_speed=u_stream)
    fi.reinitialize_flow_field(turbulence_intensity=tis)
    fi.reinitialize_flow_field(layout_array=[xs, ys])
    fi.calculate_wake(yaw_angles=yws)
    # Total farm power (W) for this configuration
    floris_power_0 = fi.get_farm_power()
    return round(floris_power_0, 2)
def superposition(
    inpt1,
    inpt2,
    u_stream,
    tis,
    cp=None,
    wind_speed=None,
    farm_opt=False,
    plots=False,
    power_opt=True,
    print_times=False,
    timings=False,
    floris_gain=False,
    x0=np.zeros(1),
    single=False,
    saveas=None,
):
    """
    Calls the neural model to produce neural wakes and superimposes them on the
    computational domain in order to calculate the total farm power output in MW.

    Args:
        inpt1, inpt2 (numpy arrays) Yaw angles and flattened layout [xs..., ys...].
            Which is which depends on farm_opt (scipy.minimize wants the
            optimised quantity first): farm_opt=True -> inpt1 is the layout,
            inpt2 the yaws; otherwise the roles are swapped.
        u_stream (float) Free stream velocity.
        tis (float) Free-stream turbulence intensity.
        cp (numpy array of floats) Cp values of turbine Cp-wind speed curve.
        wind_speed (numpy array of floats) Wind speed values of turbine Cp-wind speed curve.
        farm_opt (boolean, optional) Selects the argument order, see above.
        plots (boolean, optional) If True, Plots superimposed wakes.
        power_opt (boolean, optional) If True, performs one optimization step.
        print_times (boolean, optional) If True, prints timings.
        timings (boolean, optional) Returns model timings.
        floris_gain (boolean, optional) If True, calculates and returns gained power output
        with the optimised results of the DNN but using Floris for comparison.
        x0 (numpy array, optional) Defined with size > 1 for farm optimisations for storing
        initial turbine coordinates.
            NOTE(review): np.zeros(1) is a mutable default argument; it is
            only read here, but a None sentinel would be safer.
        single (boolean, optional) Treat the case as a single, isolated wake.
        saveas (str, optional) If set, figures are saved under figures/<saveas>.

    Returns:
        floris_time (float) Time required for Floris computation.
        neural_time (float) Time required for a forward solution of the DNN.
        or
        -power_tot (float) Total (negative) farm power output produced by the DNN,
        based on the input turbine yaws and positions.
        floris_power_opt (float) Total farm power output produced by Floris in MW,
        based on the input turine yaws and positions.

    NOTE(review): depends on many module-level names (fi, model, curl, D,
    dimx, dimy, x_bounds, y_bounds, ws_range, yaw_ini, denoise, contours_on,
    local_ti, local_pw, device, ...) brought in by star imports.
    """
    # Local pw
    pw_ar = []
    # Scales final domain
    xscale = 0.7
    if curl == True:
        fi.floris.farm.set_wake_model("curl")
    # Select first and second argument based on the optimisation mode.
    # Scipy's "minimise" prefers the parameter of optimisation to be first.
    if farm_opt == True:
        layout = inpt1
        yws = inpt2
    else:
        layout = inpt2
        yws = inpt1
    # Save initial positions. x0 defined only for farm optimisation.
    if x0.size > 1:
        xs0 = x0[: int(layout.size / 2 + 0.25)]
        ys0 = x0[int(layout.size / 2 + 0.25) :]
        xs0_arg = xs0.argsort()
        xs0 = xs0[xs0_arg]
        ys0 = ys0[xs0_arg]
    # Split layout vector in x and y coordinates
    layout = np.array(layout)
    xs = layout[: int(layout.size / 2 + 0.25)]
    ys = layout[int(layout.size / 2 + 0.25) :]
    # Sort x, y and yaws based on x coordinates to superimpose
    # the turbines from left to right (downstream direction).
    xs_arg = xs.argsort()
    xs = xs[xs_arg]
    ys = ys[xs_arg]
    yws = np.array(yws)
    yws = yws[xs_arg]
    # Initialisations
    n = xs.size # Turbine number
    clean = np.zeros(n)
    if n == 1: single = True
    hbs = 90 # Hub height
    inlet_speed = u_stream # Speed at inlet
    # Domain dimensions
    x_domain = x_bounds[1] - x_bounds[0]
    y_domain = y_bounds[1] - y_bounds[0]
    # Hub speeds and Yaws' initialization
    hub_speeds = np.zeros(n)
    hub_speeds_power = np.zeros(n)
    hub_speeds_mean = np.zeros(n)
    # Define dx, dy
    dx = np.abs(x_domain / dimx)
    dy = np.abs(y_domain / dimy)
    # Domain dimensions
    length = np.max(np.abs(xs)) + x_domain
    domain_cols = int(length / dx + .5)
    height = 2 * np.max(np.abs(ys)) + y_domain
    domain_rows = int(height / dy + .5)
    # Domain shape initialization
    domain = np.ones((domain_rows, domain_cols)) * inlet_speed
    neural_old = np.ones((dimy, dimx)) * inlet_speed
    # Calculate the position of the first wake in the domain.
    rows1 = int(domain_rows / 2 - ys[0] / dy - dimy / 2 + .5)
    rows2 = int(domain_rows / 2 - ys[0] / dy + dimy / 2 + .5)
    cols1 = int(xs[0] / dx + .5)
    cols2 = int(xs[0] / dx + .5) + dimx
    # Start DNN timer
    t0 = time.time()
    # Sweep turbines left-to-right, computing each inflow from the domain
    # as filled in so far, then superimposing the DNN wake onto it.
    for p in range(n):
        # Define start and finish rows of the current turbine's hub
        hub_start = int((rows2 + rows1) / 2 - D / dy / 2 + .5)
        hub_finish = int((rows2 + rows1) / 2 + D / dy / 2 + .5)
        hub_tot = hub_finish - hub_start
        # "clean" marks turbines whose rotor sees undisturbed free stream.
        if np.all(domain[hub_start:hub_finish, cols1] == u_stream):
            clean[p] = 1
        # Method A (mean). Calculate the mean speed on the hub.
        inlet_speed_mean = np.mean(domain[hub_start:hub_finish, cols1])
        # Method B (rings). Numerically integrate over the rotor swept area surface.
        # This gives a better approximation to the 3D domain calculations of Floris.
        inlet_speed = 0
        inlet_speed_pw = 0
        area = np.pi * D * D / 4
        for i in range(int(hub_tot / 2)):
            # Stop calculation if the profile == u_stream
            if clean[p] == 1:
                break
            # Find mean ring speed assuming symmetric flow with respect to the tower.
            mean_hub_speed = np.mean([domain[hub_start + i, cols1], domain[hub_finish - i, cols1]])
            # # Calculate total rotor area.
            # area += 2 * np.pi * int((hub_tot/2-i)*dy) * dy
            # Calculate inlet speed of current turbine based on the current state of the domain.
            inlet_speed += (mean_hub_speed * 2 * np.pi * (int(hub_tot / 2) - i) * dy * dy)
            if local_pw != True:
                # Calculate speed^3 (kinetic energy) term that will go in the calculation of power.
                area_int = 2 * np.pi * (int(hub_tot / 2) - i) * dy * dy
                inlet_speed_pw += (mean_hub_speed * mean_hub_speed * mean_hub_speed * area_int)
        # Divide speeds by total calculated area
        inlet_speed /= area
        inlet_speed_pw /= area
        inlet_speed_pw = (inlet_speed_pw) ** (1 / 3)
        # Profile == u_stream or Single wake condition
        if clean[p] == 1 or single == True:
            inlet_speed = u_stream
            inlet_speed_pw = u_stream
        # Limit the minimum speed at the minimum training speed of the DNN.
        if inlet_speed < ws_range[0]:
            inlet_speed = ws_range[0]
        # Use CNNWake module to calculate local ti values for each turbine
        ti_ar = np.ones(2)*tis
        if local_ti == True or local_pw == True:
            # Sample a velocity transect 50 m upstream of the rotor plane.
            speeds_50m = domain[hub_start:hub_finish, cols1 - int(50 / dx + .5)] # ***
            sss = speeds_50m.size
            ult = np.array([((speeds_50m[i - 1] + speeds_50m[i] + speeds_50m[i + 1])/3)
                for i in np.linspace(1, sss-2, 40, dtype=int)])
            yaw_angle = yws[p]
            turbulent_int = tis
            ult /= 15
            # The array consists of 40 wind speed values, the yaw angle and inflow TI;
            # change the two last values of the array to yaw angle and inflow TI before passing to NN
            ult = np.append(ult, yaw_angle / 35)
            ult = np.append(ult, turbulent_int)
        if local_ti == True and clean[p] != 1:# and curl != 1:
            ti_norm = 0.3
            ti2 = (TI_model((torch.tensor(ult).float().to(device))).detach().cpu().numpy() * ti_norm)
            if ti2 < turbulent_int * 0.7:
                ti2 = turbulent_int * 1.5
            # clip ti values to max and min trained
            ti_ar[1] = np.clip(ti2, 0.01, 0.25).item(0)
            ti_ar[0] = tis
        if local_pw == True:
            pw_norm = 4996386
            pw = (pw_model((torch.tensor(ult).float().to(device))).detach().cpu().numpy() * pw_norm)
            pw_ar.append(pw)
        # Get the DNN result
        # print(u_stream, inlet_speed, ti_ar, yws[p], hbs)
        neural = model.compareContour(
            u_stream, inlet_speed, ti_ar, yws[p], hbs, model, result_plots=False
        )
        # Save the inlet speed terms
        hub_speeds[p] = inlet_speed
        hub_speeds_mean[p] = inlet_speed_mean
        hub_speeds_power[p] = inlet_speed_pw
        # Apply SOS for after the first turbine is placed in the domain
        if p != 0 and p != (xs.size):
            # Apply the SOS (sum-of-squares) deficit superposition model
            def1 = np.square(1 - neural / hub_speeds[p])
            def2 = np.square(1 - neural_old / u_stream)
            neural = (1 - np.sqrt(def1 + def2)) * u_stream
        # Apply denoise filter (mainly for plotting purposes)
        if denoise > 1:
            neural[:, 1:] = ndimage.median_filter(
                neural[:, 1:], denoise
            ) # denoise filter
        # Place the DNN output inside the domain
        domain[rows1:rows2, cols1:cols2] = neural
        # Calculate the rows and columns of the next wake inside the domain
        if p != (xs.size - 1):
            p2 = p + 1
            rows1 = int(domain_rows / 2 - ys[p2] / dy - dimy / 2 + .5)
            rows2 = int(domain_rows / 2 - ys[p2] / dy + dimy / 2 + .5)
            cols1 = int(xs[p2] / dx + .5)
            cols2 = int(xs[p2] / dx + .5) + dimx
            # Store an old image of the domain to be used in the next superposition
            neural_old = np.copy(domain[rows1:rows2, cols1:cols2])
    # End DNN timer
    t1 = time.time()
    # Print DNN time
    neural_time = t1 - t0
    neural_time_rnd = round(t1 - t0, 2)
    if print_times == True:
        print("Total Neural time: ", neural_time_rnd)
    # 2 Modes: Plot contours and/or Return calculation timings.
    if plots == True or timings == True:
        # Start FLORIS timer
        t0 = time.time()
        # Initialise FLORIS
        if curl == True:
            fi.floris.farm.set_wake_model("curl")
        fi.reinitialize_flow_field(wind_speed=u_stream)
        fi.reinitialize_flow_field(turbulence_intensity=tis)
        if single != True:
            fi.reinitialize_flow_field(layout_array=[xs, -ys])
        # Get FLORIS power
        if timings == False:
            fi.calculate_wake(yaw_angles=yaw_ini)
            floris_power_0 = fi.get_farm_power()
            fi.calculate_wake(yaw_angles=yws)
            floris_power_opt = fi.get_farm_power()
        if plots == True:
            nocut=0
        else:
            nocut=1
        if nocut != 1:
            if single == True:
                cut_plane = fi.get_hor_plane(height=hbs,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                    x_resolution=dimx,
                    y_resolution=dimy)
            else:
                cut_plane = fi.get_hor_plane(height=hbs,
                    x_bounds=(0, length+0.5*dx),
                    y_bounds=(-height/2, height/2),
                    x_resolution=domain_cols,
                    y_resolution=domain_rows)
            u_mesh0 = cut_plane.df.u.values.reshape(
                cut_plane.resolution[1], cut_plane.resolution[0]
            )
            if single == True:
                u_mesh = np.ones((domain_rows, domain_cols)) * inlet_speed
                u_mesh[rows1:rows2, cols1:cols2] = u_mesh0
            else:
                u_mesh = u_mesh0
        # End FLORIS timer
        t1 = time.time()
        floris_time = t1 - t0
        floris_time_rnd = round(t1 - t0, 2)
        if print_times == True:
            print("Total Floris time: ", floris_time_rnd)
        if timings == True:
            return floris_time, neural_time
        # Define plot length and height
        fx = cut_plane.df.x1.values
        new_len = np.max(fx) - np.min(fx)
        new_height1 = np.min(ys) - 2 * D
        new_height2 = np.max(ys) + 2 * D
        row_start = int(domain.shape[0] / 2 - np.max(ys) / dy - 2 * D / dy + .0)
        row_finish = int(domain.shape[0] / 2 - np.min(ys) / dy + 2 * D / dy + .5)
        # Keep the FLORIS and DNN domains to be plotted
        u_mesh = u_mesh[row_start:row_finish, :]
        domain_final = domain[row_start:row_finish, :]
        # domain_final = domain
        # Keep min and max velocities of FLORIS domain
        vmin = np.min(u_mesh)
        vmax = np.max(u_mesh)
        if u_mesh.shape != domain_final.shape:
            print("Error: unequal domain shapes!")
        # Set figure properties
        fig, axs = plt.subplots(3, sharex=False)
        cmap = "coolwarm"
        # ----- FLORIS wake plots ----- #
        if contours_on == True:
            X, Y = np.meshgrid(
                np.linspace(0, new_len, u_mesh.shape[1]),
                np.linspace(new_height2, new_height1, u_mesh.shape[0]),
            )
            contours = axs[0].contour(X, Y, u_mesh, 4, alpha=0.5, linewidths=0.5, colors="white")
            axs[0].clabel(contours, inline=False, fontsize=1)
        im1 = axs[0].imshow(
            u_mesh[:, :int(xscale*u_mesh.shape[1])],
            vmin=vmin+1.25,
            vmax=vmax,
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
        )
        fig.colorbar(im1, ax=axs[0])
        axs[0].tick_params(axis="x", direction="in")
        axs[0].tick_params(axis="y", direction="in", length=0)
        # ----- DNN wake plots ----- #
        if contours_on == True:
            X, Y = np.meshgrid(
                np.linspace(0, new_len, domain_final.shape[1]),
                np.linspace(new_height2, new_height1, domain_final.shape[0]),
            )
            contours = axs[1].contour(X, Y, domain_final, 1, colors="white")
            axs[1].clabel(contours, inline=True, fontsize=8)
        im2 = axs[1].imshow(
            domain_final[:, :int(xscale*domain_final.shape[1])],
            vmin=vmin+1.25,
            vmax=vmax,
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
        )
        fig.colorbar(im2, ax=axs[1])
        axs[1].tick_params(axis="x", direction="in")
        axs[1].tick_params(axis="y", direction="in", length=0)
        # ----- ERROR (%) plots ----- #
        max_val = np.max(u_mesh)
        im3 = axs[2].imshow(
            (np.abs(u_mesh - domain_final) / max_val * 100)[:, :int(xscale*domain_final.shape[1])],
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
            vmax=20,
        )
        axs[2].tick_params(axis="x", direction="in")
        axs[2].tick_params(axis="y", direction="in", length=0)
        plt.colorbar(im3, ax=axs[2])
        if saveas != None:
            fig.savefig("figures/"+str(saveas), dpi=1200)
        else:
            plt.show()
        absdifsum = np.sum(np.abs(u_mesh - domain_final))
        error = round(1 / (dimx * dimy) * absdifsum / max_val * 100, 2)
        print("Abs mean error (%): ", error)
        # ----- Y-Transect plots ----- #
        mindx = np.min(xs)/dx+0.5
        mindx = int(mindx)
        tlist = mindx + np.array([3*D/dx, 6.5*D/dx, 10*D/dx]).astype(int)
        transects = tlist.size # defines the number of transects
        fig, axs = plt.subplots(1, transects, sharey=False)
        cnt = 0
        for indx in tlist:
            yy1 = u_mesh[:, indx] # FLORIS transect
            yy2 = domain_final[:, indx] # CNN transect
            axs[cnt].plot(
                np.flip(yy1, axis=0),
                np.arange(u_mesh.shape[0]),
                color="navy",
                linestyle="--",
            )
            axs[cnt].plot(
                np.flip(yy2, axis=0), np.arange(u_mesh.shape[0]), color="crimson"
            )
            axs[cnt].title.set_text(str(int(indx * dx)))
            cnt += 1
        if saveas != None:
            fig.savefig("figures/"+str(saveas)+"yt", dpi=1200)
        else:
            plt.show()
    if power_opt == True:
        # Calculation of total farm power
        if local_pw == True:
            power_tot = pw_ar
        else:
            rho = 1.225 # air density
            hub_speeds_old = np.copy(hub_speeds_power)
            # Interpolate cp values
            cp_interp = np.interp(hub_speeds_old, wind_speed, cp)
            # Multiply by cos(theta) term
            # Default exponent of cos term is 1.0.
            # An exponent of .78 was found to perform best.
            cp_interp *= np.cos(np.pi / 180 * (-yws)) ** (1.3)
            # Calculate powers using the kinetic energy term
            power_tot = 0.5 * rho * cp_interp * hub_speeds_power**3 * area
        # Sum of all turbine power outputs
        power_tot = np.sum(power_tot)
        if floris_gain == True:
            # Calculate power gain as provided by FLORIS (for final assessment of optimisation).
            # Initialise FLORIS for initial configuration
            if curl == True:
                fi.floris.farm.set_wake_model("curl")
            fi.reinitialize_flow_field(wind_speed=u_stream)
            fi.reinitialize_flow_field(turbulence_intensity=tis)
            if x0.size > 1:
                fi.reinitialize_flow_field(layout_array=[xs0, ys0])
            else:
                fi.reinitialize_flow_field(layout_array=[xs, ys])
            fi.calculate_wake(yaw_angles=yaw_ini)
            # Get initial FLORIS power
            floris_power_0 = fi.get_farm_power()
            floris_power_opt = florisPw(u_stream, tis, xs, ys, yws)
            floris_power_gain = round(
                (floris_power_opt - floris_power_0) / floris_power_0 * 100, 2
            )
            if plots == True:
                print("----------------FLORIS for Neural--------------------")
                print("Floris Initial Power", round(floris_power_0 / 1e6, 2), "MW")
                print("Floris Optimal power", round(floris_power_opt / 1e6, 2), "MW")
                print("Floris Power Gain (%)", floris_power_gain)
                print("-----------------------------------------------------")
            return -power_tot, floris_power_opt/1e6, floris_power_0/1e6
        else:
            # Calculate power gain as provided by the DNN (used in optimisation steps).
            return -power_tot
| 19,926 | 37.469112 | 101 | py |
wakenet | wakenet-master/Code/synth_and_train.py | from neuralWake import *
def set_seed(seed):
    """
    Seed every random number generator used by the pipeline (Python,
    NumPy, PyTorch CPU and all CUDA devices) so runs are reproducible,
    and switch cuDNN off to avoid its non-deterministic kernels.

    Args:
        seed (int): Seed value applied to every generator.

    Returns:
        bool: Always True, signalling that seeding completed.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # The cuDNN auto-tuner picks convolution kernels non-deterministically,
    # so it is disabled here (benchmark=False); cuDNN as a whole is also
    # turned off to remove any remaining kernel-level randomness.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    return True
def normalise(x, mode, print_output=False):
    """
    Normalises input data without mutating the caller's array.

    Args:
        x (numpy float array): Data to be normalised.
        mode (int): Normalisation mode:
            1: Min-Max to [0, 1],
            2: Z-Score (zero mean, unit standard deviation),
            3: Min-Max to [-1, 1].
        print_output (bool, optional): Print the normalised data and
            pause for user confirmation.

    Returns:
        numpy float array: Normalised data. The input array is left
        unchanged for every mode.
    """
    if mode == 1:
        x = np.true_divide(x - np.min(x), np.max(x) - np.min(x))
    elif mode == 2:
        # FIX: the original used in-place `x -= ...` / `x /= ...`, which
        # mutated the caller's array (modes 1 and 3 did not). Rebind
        # instead so all modes are side-effect free.
        x = (x - np.mean(x)) / np.std(x)
    elif mode == 3:
        x = (np.true_divide(x - np.min(x), np.max(x) - np.min(x)) - 0.5) * 2
    if print_output:
        print("Normalised speeds:", x)
        input("Press enter to continue...")
    return x
def create(plots=False):
    """
    Generates (or loads back) synthetic wake-deficit data with FLORIS and
    splits it into training, validation and test sets.

    Inlet conditions (wind speed, TI, yaw, hub height) are sampled
    randomly, each wake is computed (or loaded from wake_dataset/), the
    inputs are normalised to roughly [-1.5, 1.5] and the flattened wake
    fields become the targets.

    Args:
        plots (boolean, optional): Plots an indicative sample of the
            synthesised wakes in batches of 9.

    Returns:
        X_train, X_val, X_test (torch tensors): Input inlet conditions.
        y_train, y_val, y_test (torch tensors): Output wake deficits as
            calculated by FLORIS, shape (N, out_piece, rows).

    Relies on module-level globals from initialisations (data_size, fi,
    dimx/dimy, ranges, slices, ...).
    """
    # Random Dataset: speeds/TI come from the correlated sampler; yaw and
    # hub height are drawn uniformly with fixed seeds for reproducibility.
    speeds, ti = wakeNet.tiVsVel(data_size)
    np.random.seed(51)
    # NOTE(review): yaw is centred on 0 with the *width* of yw_range only;
    # the normalisation at the bottom assumes values span
    # [yw_range[0], yw_range[1]] — confirm yw_range is symmetric about 0.
    yw = (np.random.rand(data_size) - 0.5) * (
        yw_range[1] - yw_range[0]
    )  # hub yaw angles
    np.random.seed(256)
    hbs = (
        np.random.rand(data_size) * (hb_range[1] - hb_range[0]) + hb_range[0]
    )  # height slice
    print("Max inlet speed:", round(np.max(speeds), 2), "m/s")
    # Targets: one (out_piece x rows) wake partition matrix per sample.
    speeds_out = np.zeros((data_size, out_piece, rows))
    u_rs = np.zeros((out_piece, rows))
    sample_plots = []
    cnt = 1
    sample_size = 9  # must be perfect square for sample plots
    if save_data == True:
        # Interactive guard: creating a dataset overwrites wake_dataset/.
        print("Are you sure you want to create new dataset? (y/n)")
        if input() == "y":
            os.system("mkdir " + "wake_dataset")
            np.save("wake_dataset/inlets.npy", np.stack((speeds, ti, yw), axis = 1))
    elif curl == True:
        # Curl wakes are precomputed; reload the inlet conditions that
        # were saved alongside them.
        inlets = np.load("wake_dataset/inlets.npy")
        speeds, ti, yw = inlets[:data_size, 0], inlets[:data_size, 1], inlets[:data_size, 2]
    for i in range(data_size):
        if curl == True:
            fi.floris.farm.set_wake_model("curl")
        if make_data == True:
            if i == 0:
                print("Synthesizing data...")
            if i % 100 == 0:
                print("Synthesised", int(i / data_size * 100), "%", "of wakes.")
            # The `inputs` global selects how many inlet features vary:
            # 1: speed, 2: +TI, 3: +yaw, 4: +hub height (variable slice).
            if inputs == 1:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.calculate_wake()
            if inputs == 2:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.calculate_wake()
            if inputs == 3:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.calculate_wake(yaw_angles=yw[i])
            if inputs == 4:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.change_turbine([0], {"yaw_angle": yw[i]})
                # Horizontal slice at the sampled height for 4-input mode.
                cut_plane = fi.get_hor_plane(
                    height=hbs[i],
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            else:
                # Fixed hub-height slice for 1-3 input modes.
                cut_plane = fi.get_hor_plane(
                    height=hh,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            u_mesh = cut_plane.df.u.values.reshape(
                cut_plane.resolution[1], cut_plane.resolution[0]
            )
            if save_data == True:
                # Save velocities as numpy array
                np.save("wake_dataset/" + "wake" + str(i), u_mesh)
                continue
        if save_data == False and curl == True:
            if i == 0:
                print("Loading data...")
            if i % 100 == 0:
                print("Loaded ", int(i / data_size * 100), "%", "of wakes.")
            # Read back into different array "r"
            u_mesh = np.load("wake_dataset/" + "wake" + str(i) + ".npy")
        if row_major == 0:
            # Column-major partitioning: transpose before flattening.
            u_mesh = u_mesh.T
        u_mesh = u_mesh.flatten()
        # Split the flattened field into `rows` partitions of out_piece.
        for kapa in range(rows):
            u_rs[:, kapa] = u_mesh[kapa * out_piece : (kapa + 1) * out_piece]
        if cubes == 1:
            # Re-tile the field into (dim1 x dim2) blocks, one per column.
            jj = 0
            ii = 0
            alpha = np.zeros((dim1 * dim2, int(u_rs.size / (out_piece))))
            for k in range(int(u_rs.size / (dim1 * dim2))):
                alpha[:, k] = u_rs[ii : ii + dim1, jj : jj + dim2].flatten("C")
                jj += dim2
                if jj >= u_rs.shape[1]:
                    jj = 0
                    ii += dim1
            speeds_out[i] = alpha
        else:
            speeds_out[i] = u_rs
        # Store synthesized data for plotting
        if plots == True:
            sample_plots.append(cut_plane)
        # Plot synthesized data (batches of sample_size)
        if plots == True and np.mod(i + 1, sample_size) == 0:
            fig, axarr = plt.subplots(
                int(np.sqrt(sample_size)),
                int(np.sqrt(sample_size)),
                sharex=True,
                sharey=True,
                figsize=(12, 5),
            )
            axarr = axarr.flatten()
            # Shared colour scale across the current batch of samples.
            minspeed = np.min(speeds[(cnt - 1) * sample_size : cnt * sample_size])
            maxspeed = np.max(speeds[(cnt - 1) * sample_size : cnt * sample_size])
            for ii in range(sample_size):
                ax = axarr[ii]
                # Title shows the sampled (speed, TI, yaw) triple.
                title = (
                    "("
                    + str(np.round(speeds[(cnt - 1) * sample_size + ii], 1))
                    + ", "
                    + str(np.round(ti[(cnt - 1) * sample_size + ii], 2))
                    + ", "
                    + str(np.round(yw[(cnt - 1) * sample_size + ii], 1))
                    + ")"
                )
                hor_plane = sample_plots[ii]
                wfct.visualization.visualize_cut_plane(
                    hor_plane, ax=ax, minSpeed=minspeed, maxSpeed=maxspeed
                )
                ax.set_title(title)
                ax.set_yticklabels(ax.get_yticks().astype(int))
                ax.set_xticklabels(ax.get_xticks().astype(int))
            plt.show()
            sample_plots = []
            cnt += 1
    # Normalisation of the inputs to roughly [-1.5, 1.5].
    speeds = ((speeds - ws_range[0]) / (ws_range[1] - ws_range[0]) - 0.5) * 3
    ti = ((ti - ti_range[0]) / (ti_range[1] - ti_range[0]) - 0.5) * 3
    yw = ((yw - yw_range[0]) / (yw_range[1] - yw_range[0]) - 0.5) * 3
    hbs = ((hbs - hb_range[0]) / (hb_range[1] - hb_range[0]) - 0.5) * 3
    # Make X and y
    X_input = np.zeros((data_size, inputs))
    if inputs == 1:
        X_input[:, 0] = speeds
    if inputs == 2:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
    if inputs == 3:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
        X_input[:, 2] = yw
    if inputs == 4:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
        X_input[:, 2] = yw
        X_input[:, 3] = hbs
    X = torch.tensor(X_input, dtype=torch.float)
    y = torch.tensor(speeds_out, dtype=torch.float)
    X = X.view(data_size, -1)
    print("X shape:", X.shape)
    print("y shape:", y.shape)
    # Train, Validation, Test slices (contiguous, in sample order).
    c1 = int(data_size * (train_slice))
    c2 = int(data_size * (train_slice + val_slice))
    c3 = int(data_size * (train_slice + val_slice + test_slice))
    X_train = X[:c1]
    y_train = y[:c1]
    X_val = X[c1:c2]
    y_val = y[c1:c2]
    X_test = X[c2:c3]
    y_test = y[c2:c3]
    return X_train, X_val, X_test, y_train, y_val, y_test
def dif_central(u, dx, eflag=0):
    """
    First derivative of `u` along its last (x) axis: central differences
    in the interior, one-sided differences at the two boundaries.

    Args:
        u (torch.Tensor): Field of shape (batch, ny, nx).
        dx (float): Grid spacing along the last axis.
        eflag (int, optional): Debug flag; -1 zeroes the first 10 columns
            and shows diagnostic plots of u and du/dx.

    Returns:
        torch.Tensor: du/dx with the same shape and dtype as `u`.
    """
    u_x = torch.empty_like(u)
    # Interior: central difference, vectorised over batch and rows.
    # (The original looped in Python and read the width from the global
    # `dimx`; using u's own shape keeps the function self-contained.)
    u_x[:, :, 1:-1] = (u[:, :, 2:] - u[:, :, :-2]) / (2 * dx)
    # Left boundary: forward difference.
    u_x[:, :, 0] = (u[:, :, 1] - u[:, :, 0]) / dx
    # Right boundary: backward difference. FIX: the original computed
    # (u[-2] - u[-1])/dx, i.e. the negative of the correct derivative.
    u_x[:, :, -1] = (u[:, :, -1] - u[:, :, -2]) / dx
    if eflag == -1:
        # Debug visualisation path (matplotlib), unchanged from original.
        u_x[:, :, :10] = 0
        plt.figure(1)
        plt.imshow(u[0].detach().cpu().numpy())
        plt.figure(2)
        plt.imshow(u_x[0].detach().cpu().numpy())
        plt.show()
    return u_x
def training(X_train, X_val, X_test, y_train, y_val, y_test, model, plot_curves=0,
             multiplots=False, data_size=data_size, batch_size=batch_size, saveas=None):
    """
    Trains the neural model, tracking loss/accuracy per epoch and saving
    the weights with the lowest validation loss to `weights_path`.

    Args:
        X_train, X_val, X_test (torch tensors): Input inlet conditions.
        y_train, y_val, y_test (torch tensors): Target wake deficits;
            only the last partition (index -1 of dim 2) is trained on.
        model (torch model): Network to train (must expose fc1/fc2/fc3).
        plot_curves (int, optional): 1 draws loss/accuracy curves.
        multiplots (boolean, optional): If True, suppress plt.show() so
            several runs can share one figure.
        data_size (int, optional): Number of samples; defaults to global.
        batch_size (int, optional): Training batch size; defaults to global.
        saveas (str, optional): If given, the curves are saved to
            figures/<saveas> instead of shown.

    Returns:
        v_loss_plot, t_loss_plot, v_plot, t_plot (lists): Per-epoch
        validation/training loss and accuracy histories. The last entry
        of the validation lists is replaced by the best value observed.

    NOTE(review): X_test/y_test are accepted but never used here —
    presumably evaluated elsewhere; confirm before removing.
    """
    if batch_size > X_train.shape[0]:
        print('Error: batch_size must be <', X_train.shape[0])
        exit()
    # Define validation and test batch sizes
    val_batch_size = y_val.size()[0]
    # Only the last wake partition ([:, :, -1]) is used as the target.
    train_split = TensorDataset(X_train, y_train[:, :, -1])
    validation_split = TensorDataset(X_val, y_val[:, :, -1])
    train_loader = DataLoader(
        train_split, batch_size=batch_size, shuffle=True, num_workers=workers, drop_last=True
    )
    validation_loader = DataLoader(
        validation_split, batch_size=val_batch_size, shuffle=True, num_workers=workers, drop_last=True
    )
    # Seed, optimiser and criterion
    set_seed(42)
    # Only the three registered linear layers are optimised.
    params = list(model.fc1.parameters()) + \
        list(model.fc2.parameters()) + \
        list(model.fc3.parameters())
    # Optimizers (selected by the `opt_method` global)
    if opt_method == "SGD":
        optimizer = optim.SGD(params, lr=lr, momentum=momentum)
    elif opt_method == "Rprop":
        optimizer = optim.Rprop(params, lr=lr, etas=(0.5, 1.2), step_sizes=(1e-06, 50))
    elif opt_method == "Adam":
        optimizer = optim.Adam(params, lr=lr)
    # Loss criterions
    # NOTE(review): `size_average` is deprecated in modern PyTorch in
    # favour of `reduction=` — confirm the installed torch version
    # accepts it before upgrading.
    criterion = nn.MSELoss(size_average=1)
    criterion = criterion.to(device)
    # Initialise plots
    t_plot = []; v_plot = []
    t_loss_plot = []; v_loss_plot = []
    # Running best validation loss / accuracy used for checkpointing.
    lossmin = 1e16; valmax = 0.5
    # Model Training
    for i_epoch in range(epochs):
        print("Epoch:", i_epoch, "/", epochs)
        t_loss = 0; t_lossc1 = 0; t_lossc1_ = 0; t_lossc2 = 0; t_acc = 0
        v_loss = 0; v_acc = 0; v_lossc1_ = 0; v_min = 0;
        model.train().to(device)
        eflag = i_epoch
        for X, y in train_loader:
            # Get yt_pred
            X, y = X.to(device), y.to(device)
            yt_pred = model(X)
            c1 = criterion(yt_pred, y)
            yy = yt_pred.detach().cpu().numpy()
            yy_ = y.detach().cpu().numpy()
            # c2 is a placeholder for an (unused) second loss term.
            c2 = torch.tensor(0)
            # Losses
            train_loss = c1 + c2
            t_loss += train_loss.item()
            # Relative error term used for the "accuracy" metric below.
            tterm = torch.abs(y - yt_pred)/torch.max(y)
            t_acc += torch.sum(torch.pow(tterm, 2)).detach().cpu().numpy()
            t_lossc1 += c1.item()
            t_lossc1_ += torch.sum(torch.pow(y - yt_pred, 2)).detach().cpu().numpy()
            t_lossc2 += c2.item()
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()
            eflag = 0
        # Training results (averaged over batches / samples).
        t_loss = t_loss/(train_slice*data_size/batch_size)
        t_lossc1 = t_lossc1/(train_slice*data_size/batch_size)
        t_lossc1_ /= train_slice*data_size*out_piece
        t_lossc2 = t_lossc2/(train_slice*data_size/batch_size)
        t_acc /= train_slice*data_size*out_piece
        # Accuracy = 1 - RMS of the relative error.
        t_acc = 1 - np.sqrt(t_acc)
        model.eval().to(device)
        for X, y in validation_loader:
            with torch.no_grad():
                val_batch = y.shape[0]
                X, y = X.to(device), y.to(device)
                y_pred = model(X)
                c1 = criterion(y_pred, y)
                c2 = torch.tensor(0)
                val_loss = c1 + c2
                v_loss += val_loss.item()
                vvterm = torch.abs(y - y_pred)/torch.max(y)
                v_acc += torch.sum(torch.pow(vvterm, 2)).detach().cpu().numpy()
                v_min += torch.min(1 - torch.abs(y - y_pred)).detach().cpu().numpy()
                v_lossc1_ += torch.sum(torch.pow(y - y_pred, 2)).detach().cpu().numpy()
        # # Validation results
        v_loss = v_loss/(val_batch_size/val_batch)
        v_lossc1_ /= val_batch_size*out_piece
        v_acc /= val_batch_size*out_piece
        v_acc = 1 - np.sqrt(v_acc)
        v_min /= val_batch_size*out_piece
        # Append to plots
        t_plot.append(t_acc); v_plot.append(v_acc)
        t_loss_plot.append(t_loss); v_loss_plot.append(v_loss)
        if v_loss < lossmin: # and i_epoch > epochs*0.8:
            lossmin = v_loss
            # Save model weights (checkpoint on best validation loss).
            torch.save(model.state_dict(), weights_path)
            print("Saved weights with", v_loss, "loss")
        if v_acc > valmax: # and i_epoch > epochs*0.8:
            valmax = v_acc
        # Mean sum squared loss
        print(
            "t_acc: " + str(round(t_acc, 4)) + " v_acc: " + str(round(v_acc, 4))
            + " t_loss: " + str(round(t_loss, 2)) + " v_loss: " + str(round(v_loss, 2))
            + " t_lossc1: " + str(round(t_lossc1, 2)) + " t_lossc2: " + str(round(t_lossc2, 2))
        )
    # ------------- Loss and Accuracy Plots -------------#
    if plot_curves == 1 or saveas != None:
        fig, axs = plt.subplots(1, 2)
        del fig
        axs[0].plot(np.arange(epochs), t_loss_plot, color="navy", linestyle="--")
        axs[0].plot(np.arange(epochs), v_loss_plot, color="crimson")
        axs[1].plot(np.arange(epochs), t_plot, color="navy", linestyle="--")
        axs[1].plot(np.arange(epochs), v_plot, color="crimson")
        axs[1].set_ylim(0.5, 1)
        print("Validation loss:", lossmin)
        print("Validation accuracy:", valmax)
        axs[0].tick_params(axis="x", direction="in")
        axs[0].tick_params(axis="y", direction="in")
        axs[0].set_aspect(aspect=1.0 / axs[0].get_data_ratio())
        axs[1].tick_params(axis="x", direction="in")
        axs[1].tick_params(axis="y", direction="in")
        axs[1].set_aspect(aspect=1.0 / axs[1].get_data_ratio())
        if saveas != None:
            plt.savefig("figures/"+str(saveas), dpi=1200)
        elif multiplots == False:
            plt.show()
    # Replace last values with best values
    v_loss_plot[-1] = lossmin
    v_plot[-1] = valmax
    return v_loss_plot, t_loss_plot, v_plot, t_plot
| 15,305 | 31.916129 | 102 | py |
wakenet | wakenet-master/Code/neuralWake.py | from packages import *
from initialisations import *
class wakeNet(nn.Module):
    """
    Fully connected network that predicts a single-turbine wake
    velocity partition from the inlet conditions (wind speed, TI, yaw
    and optionally hub height).

    Also bundles a dataset helper (`tiVsVel`) and an evaluation routine
    (`compareContour`) that compares the neural wake with the analytical
    FLORIS wake.
    """
    def __init__(self, inputs=3, hidden_neurons=[100, 200]):
        """
        Builds the network layers.

        Args:
            inputs (int, optional): Number of inlet features fed to the
                network (1: speed; 2: +TI; 3: +yaw; 4: +hub height).
            hidden_neurons (list of int, optional): Widths of the two
                hidden layers.

        The output size is the global `out_piece` (size of the wake
        partition each forward pass predicts).
        """
        super(wakeNet, self).__init__()
        # Parameters
        self.inputSize = inputs
        self.outputSize = out_piece
        self.hidden_neurons = hidden_neurons
        self.layers = len(self.hidden_neurons) + 1
        # Layer sizes: [inputs, hidden..., out_piece].
        iSize = [self.inputSize] + self.hidden_neurons + [self.outputSize]
        # Initialisation of linear layers
        self.fc = []
        # Append layers
        # NOTE(review): self.fc / self.fcb are plain Python lists, so
        # these layers are NOT registered as submodules; only fc1-fc3 and
        # fcb1/fcb2 below are used by forward() and trained.
        for psi in range(self.layers):
            self.fc.append(nn.Linear(iSize[psi], iSize[psi+1], bias=True).to(device))
        self.fc1 = nn.Linear(iSize[0], iSize[1], bias=True).to(device)
        self.fc2 = nn.Linear(iSize[1], iSize[2], bias=True).to(device)
        self.fc3 = nn.Linear(iSize[2], iSize[3], bias=True).to(device)
        # Initialisation of batchnorm layers
        self.fcb = []
        # Append layers
        for psi in range(self.layers-1):
            self.fcb.append(nn.BatchNorm1d(iSize[psi+1], affine=False).to(device))
        self.fcb1 = nn.BatchNorm1d(iSize[1], affine=False).to(device)
        self.fcb2 = nn.BatchNorm1d(iSize[2], affine=False).to(device)
        # Dropout
        self.drop = nn.Dropout(0.2).to(device)  # 20% probability
        # Activation functions
        self.act = nn.Tanh().to(device)
        # Output activation is the identity (purelin).
        self.act2 = self.purelin

    def tansig(self, s):
        # MATLAB-style tansig; numerically equivalent to tanh(s).
        return 2 / (1 + torch.exp(-2 * s)) - 1

    def purelin(self, s):
        # Identity activation for the output layer.
        return s

    @staticmethod
    def tiVsVel(n, weather=weather, plots=False):
        """
        Samples `n` (wind speed, TI) pairs.

        If `weather` is False both are drawn uniformly and independently;
        otherwise speeds follow a truncated normal and TI is a decaying
        function of speed plus speed-dependent noise (a weather-like
        correlation). With plots=True a scatter of the distribution is
        shown and the process exits.

        NOTE(review): the weather=False branch uses the global
        `data_size` instead of `n` — confirm callers always pass
        n == data_size.
        """
        if plots == True:
            np.random.seed(89)
            xs0 = (np.random.rand(data_size) * (ws_range[1] - ws_range[0]) + ws_range[0])  # ws
            np.random.seed(42)
            ys0 = (np.random.rand(data_size) * (ti_range[1] - ti_range[0]) + ti_range[0])  # ti
            lower, upper = ws_range[0], ws_range[1]
            s = 1e-9
            mu, sigma = 3, 8
            xx = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
            xs = xx.rvs(n)
            yy = 2 ** (1 / (xs + s) / 6) - 0.9
            rs = []
            for _ in range(n):
                rs.append(-0.01 + random.random() * 0.02)
            ys = 2 ** (1 / (xs + s) / 6) - 0.9 + rs * (1 + 60 * (yy - 0.1))
            plt.scatter(xs, ys, s=0.5)
            plt.show()
            exit()
        if weather == False:
            # Uncorrelated uniform sampling (fixed seeds for repeatability).
            np.random.seed(89)
            xs = (np.random.rand(data_size) * (ws_range[1] - ws_range[0]) + ws_range[0])  # ws
            np.random.seed(42)
            ys = (np.random.rand(data_size) * (ti_range[1] - ti_range[0]) + ti_range[0])  # ti
        else:
            # Correlated "weather" sampling: truncated-normal speeds,
            # TI decays with speed plus small speed-scaled noise.
            lower, upper = ws_range[0], ws_range[1]
            s = 1e-9
            mu, sigma = 3, 8
            xx = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
            xs = xx.rvs(n)
            yy = 2 ** (1 / (xs + s) / 6) - 0.9
            rs = []
            for _ in range(n):
                rs.append(-0.01 + random.random() * 0.02)
            ys = 2 ** (1 / (xs + s) / 6) - 0.9 + rs * (1 + 60 * (yy - 0.1))
        return xs, ys

    def forward(self, X):
        """
        Performs a forward pass through fc1 -> fc2 -> fc3 with tanh
        activations and batch normalisation after the hidden layers.

        Args:
            X (torch float array): Input inlet conditions (single sample
                or batch).

        Returns:
            out (torch float array): Predicted wake partition.
        """
        X = X.to(device)
        # In inference mode (train_net == 0) a single sample is reshaped
        # to a batch of one so BatchNorm1d accepts it.
        if train_net == 0:
            X = X.view(1, -1)
        X = self.fc1(X)
        X = self.act(X)
        X = self.fcb1(X)
        if train_net == 0:
            X = X.view(1, -1)
        X = self.fc2(X)
        X = self.act(X)
        X = self.fcb2(X)
        out = self.act2(self.fc3(X))
        return out

    def saveWeights(self, model):
        # Serialises the whole model object to the file "NN".
        torch.save(model, "NN")

    def compareContour(
        self,
        u_stream,
        ws,
        ti_ar,
        yw,
        hb,
        model,
        result_plots=result_plots,
        timings=False,
    ):
        """
        Computes the neural wake for the given inlet conditions and,
        optionally, the analytical FLORIS wake for comparison.

        Args:
            u_stream (float): Farm free-stream speed substituted into the
                wake background (for superimposing wakes).
            ws (float) Wind speed.
            ti_ar (sequence): Two TI values; ti_ar[0] is used for FLORIS,
                ti_ar[1] for the neural input.
            yw (float) Yaw angle.
            hb (float) Hub height.
            model (torch model) Passes the neural model to be used.
            result_plots (boolean, optional): Plot comparison figures.
            timings (boolean, optional) Prints and output timings of both
                Neural and Analytical calculations.

        Returns:
            gauss_time, neural_time, error (floats): Analytical, Neural
            timings and absolute mean error (%) — when timings is True.
            or
            final (2D numpy float array): Wake profile with u_stream
            background velocity.
        """
        # FLORIS expects the yaw as a 1-element array.
        tmp = yw
        yw = np.zeros(1)
        yw[0] = tmp
        hb = np.array(hb)
        ti = ti_ar[0]
        if timings == True or result_plots == True:
            # -------- Analytical (FLORIS) wake, timed --------
            t0 = time.time()
            # Set Floris parameters
            if curl == True:
                fi.floris.farm.set_wake_model("curl")  # curl model
            if inputs == 1:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.calculate_wake()
            if inputs == 2:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.calculate_wake()
            if inputs == 3:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.calculate_wake(yaw_angles=yw)
            if inputs == 4:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.change_turbine([0], {"yaw_angle": yw})
                cut_plane = fi.get_hor_plane(
                    height=hb,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            else:
                cut_plane = fi.get_hor_plane(
                    height=hh,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            u_mesh = cut_plane.df.u.values.reshape(
                dimy, dimx
            )
            t1 = time.time()
            # Get analytical model timing
            gauss_time = t1 - t0
            # Keep min value for plotting
            vmin = np.min(u_mesh)
            vmax = np.max(u_mesh)
        # -------- Neural wake, timed --------
        # Initialise model for evaluation
        model.eval().to(device)
        t0 = time.time()
        # Initialise neural output vector
        neural = np.zeros(dimx * dimy)
        # The second TI entry drives the neural input.
        ti = ti_ar[1]
        # Normalisation (same scaling as in training data creation).
        speed_norm = ((ws - ws_range[0]) / (ws_range[1] - ws_range[0]) - 0.5) * 3
        ti_norm = ((ti - ti_range[0]) / (ti_range[1] - ti_range[0]) - 0.5) * 3
        yw_norm = ((yw - yw_range[0]) / (yw_range[1] - yw_range[0]) - 0.5) * 3
        hbs_norm = ((hb - hb_range[0]) / (hb_range[1] - hb_range[0]) - 0.5) * 3
        # Make input tensor
        if inputs == 1:
            inpt = torch.tensor(([speed_norm]), dtype=torch.float)
        elif inputs == 2:
            inpt = torch.tensor(([speed_norm, ti_norm]), dtype=torch.float)
        elif inputs == 3:
            inpt = torch.tensor(([speed_norm, ti_norm, yw_norm]), dtype=torch.float)
        elif inputs == 4:
            inpt = torch.tensor(
                ([speed_norm, ti_norm, yw_norm, hbs_norm]), dtype=torch.float
            )
        model.eval().to(device)
        neural = model(inpt).detach().cpu().numpy()
        # Apply Filter to replace backround with u_stream (helps with scattering)
        if fltr < 1.0:
            neural[neural > ws * fltr] = ws
        if cubes == 1:
            # Compose 2D velocity deficit made of blocks
            dd = dim1 * dim2
            jj = 0
            ii = 0
            alpha = np.zeros((dimy, dimx))
            for k in range(int(dimx * dimy / (dim1 * dim2))):
                alpha[ii : ii + dim1, jj : jj + dim2] = np.reshape(
                    neural[k * dd : k * dd + dd], (dim1, dim2)
                )
                jj += dim2
                if jj >= dimx:
                    jj = 0
                    ii += dim1
            neural = alpha.T
        else:
            if row_major == 0:
                # Compose 2D velocity deficit column-wise
                neural = np.reshape(neural, (dimx, dimy)).T
            else:
                # Compose 2D velocity deficit row-wise
                neural = np.reshape(neural, (dimy, dimx))
        t1 = time.time()
        # Get neural timing
        neural_time = t1 - t0
        # ----------------- Plot wake deficit results -----------------#
        if timings == True or result_plots == True:
            if result_plots == True:
                cmap = "coolwarm"
                fig, axs = plt.subplots(2)
                fig.suptitle("Velocities(m/s): Analytical (top), Neural (bot)")
                im1 = axs[0].imshow(
                    u_mesh,
                    vmin=vmin,
                    vmax=vmax,
                    cmap=cmap,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                )
                fig.colorbar(im1, ax=axs[0])
                im2 = axs[1].imshow(
                    neural,
                    vmin=vmin,
                    vmax=vmax,
                    interpolation=None,
                    cmap=cmap,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                )
                fig.colorbar(im2, ax=axs[1])
                plt.show()
            max_val = np.max(u_mesh)
            if timings == True:
                # Mean absolute error as a percentage of the peak speed.
                absdifsum = np.sum(np.abs(u_mesh - neural))
                error = round(1 / (dimx * dimy) * absdifsum / max_val * 100, 2)
                if result_plots == True:
                    print("Abs mean error (%): ", error)
            if result_plots == True:
                plt.imshow(
                    (np.abs(u_mesh - neural) / max_val * 100),
                    vmax=20,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                    cmap=cmap,
                )
                plt.colorbar()
                plt.title("Abs difference")
                plt.show()
                # ----- Y-Transect plots ----- #
                # dx is the physical spacing per cell used for transect
                # labels; transects at 3D, 6.5D and 10D downstream.
                dx = 6.048
                tlist = np.array([3*D/dx, 6.5*D/dx, 10*D/dx]).astype(int)
                transects = tlist.size  # defines the number of transects
                fig, axs = plt.subplots(1, transects, sharey=False)
                cnt = 0
                for indx in tlist:
                    yy1 = u_mesh[:, indx]  # FLORIS transect
                    yy2 = neural[:, indx]  # CNN transect
                    axs[cnt].plot(
                        np.flip(yy1, axis=0),
                        np.arange(u_mesh.shape[0]),
                        color="navy",
                        linestyle="--",
                    )
                    axs[cnt].plot(
                        np.flip(yy2, axis=0), np.arange(u_mesh.shape[0]), color="crimson"
                    )
                    axs[cnt].title.set_text(str(int(indx * dx)))
                    cnt += 1
                plt.show()
        final = np.copy(neural)
        # Replace current turbine inlet speed (ws) with farm u_stream (for superimposed wakes)
        final[final == ws] = u_stream
        if timings == True:
            return gauss_time, neural_time, error
        else:
            return final
| 12,814 | 32.372396 | 97 | py |
wakenet | wakenet-master/Code/initialisations.py | import numpy as np
from packages import json
from packages import torch
import floris.tools as wfct
from floris.tools import static_class as sc

# Initialisation of variables #
# =================================================================================================#
# Loads the run configuration from JSON, initialises FLORIS and derives
# the domain-partitioning globals used throughout the package.
# Open JSON file (change based on the wake model)
neural_info = open(
    "example_inputs/inputs_gauss.json",
)
# returns JSON object as a dictionary
data = json.load(neural_info)
# Close JSON file
neural_info.close()

# Turbine parameters
hh = data["turbine"]["cut_plane"]  # hub height
file_path = data["turbine"]["file_path"]

# Data creation parameters
train_net = data["data"]["train_net"]
make_data = data["data"]["make_data"]
save_data = data["data"]["save_data"]
local_ti = data["data"]["local_ti"]
local_pw = data["data"]["local_pw"]
curl = data["data"]["curl"]
weather = data["data"]["weather"]
row_major = data["data"]["row_major"]
x_bounds = data["data"]["x_bounds"]
y_bounds = data["data"]["y_bounds"]
data_size = data["data"]["data_size"]
batch_size = data["data"]["batch_size"]
dimx = data["data"]["dimx"]
dimy = data["data"]["dimy"]
dim1 = data["data"]["dim1"]
dim2 = data["data"]["dim2"]
cubes = data["data"]["cubes"]
norm = data["data"]["norm"]
inputs = data["data"]["inputs"]
plot_curves = data["data"]["plot_curves"]
result_plots = data["data"]["result_plots"]
full_domain = data["data"]["full_domain"]
defo = data["data"]["defo"]

# Data range
ws_range = data["data_range"]["ws_range"]
ti_range = data["data_range"]["ti_range"]
yw_range = data["data_range"]["yw_range"]
hb_range = data["data_range"]["hb_range"]

# Training hyperparameters
# device = data["training"]["device"]
# Training runs on GPU (validated below); inference defaults to CPU.
if train_net == True:
    device = "cuda"
else:
    device = "cpu"
parallel = data["training"]["parallel"]
para_workers = data["training"]["para_workers"]
seed = data["training"]["seed"]
epochs = data["training"]["epochs"]
lr = data["training"]["lr"]
momentum = data["training"]["momentum"]
test_batch_size = data["training"]["test_batch_size"]
weight_decay = data["training"]["weight_decay"]
workers = data["training"]["workers"]
train_slice = data["training"]["train_slice"]
val_slice = data["training"]["val_slice"]
test_slice = data["training"]["test_slice"]
opt_method = data["training"]["opt_method"]

# Results parameters
weights_path = data["results"]["weights_path"]
fltr = data["results"]["fltr"]
denoise = data["results"]["denoise"]
contours_on = data["results"]["contours_on"]

# Optimisation boundaries
opt_xbound = data["optimisation"]["opt_xbound"]
opt_ybound = data["optimisation"]["opt_ybound"]
yaw_ini = data["optimisation"]["yaw_ini"]

# Opening turbine JSON file
f = open(
    file_path,
)
# returns JSON object as a dictionary
data2 = json.load(f)
f.close()

# Set GPU if Available (fall back to CPU when no CUDA device exists)
if device == "cuda":
    if torch.cuda.device_count() > 0 and torch.cuda.is_available():
        print("Cuda installed! Running on GPU!")
        device = "cuda"
    else:
        device = "cpu"
        print("No GPU available! Running on CPU.")

# Get turbine cp curve
cp = np.array(data2["turbine"]["properties"]["power_thrust_table"]["power"])
wind_speed = np.array(
    data2["turbine"]["properties"]["power_thrust_table"]["wind_speed"]
)

# Read turbine json
sc.x_bounds = x_bounds
sc.y_bounds = y_bounds
fi = wfct.floris_interface.FlorisInterface(file_path)
D = fi.floris.farm.turbines[0].rotor_diameter  # turbine rotor diameter
D = float(D)

# Define the size of the partition. If full_domain == False, defaults to row or column size.
if full_domain == True:
    out_piece = dimx * dimy
elif cubes == 0:
    # NOTE(review): dim1*dim2 is the cube size, yet it is selected when
    # cubes == 0 — confirm this condition is not inverted.
    out_piece = dim1 * dim2
else:
    if row_major == True:
        out_piece = dimy
    else:
        out_piece = dimx

# Calculates ref_point
# (the list of all points of the domain that the DNN is going to be trained on).
rows = int(dimx * dimy / out_piece)
ref_point_x = np.linspace(0, dimy - 1, dimy)
ref_point_y = np.linspace(0, dimx - 1, dimx)
ref_point = np.zeros((dimx * dimy, 2))
k = 0
for i in range(dimy):
    for j in range(dimx):
        ref_point[k, 0] = ref_point_x[i]
        ref_point[k, 1] = ref_point_y[j]
        k += 1
# FIX: `np.int` was removed in NumPy 1.20+; the builtin `int` is the
# documented replacement and yields the same platform integer dtype.
ref_point = ref_point.astype(int)

# Wake boundaries definition (None lets FLORIS pick its defaults).
if defo == 1:
    x_bounds = None
    y_bounds = None
else:
    x_bounds = (x_bounds[0], x_bounds[1])
    y_bounds = (y_bounds[0], y_bounds[1])
| 4,417 | 28.065789 | 100 | py |
wakenet | wakenet-master/Code/CNNWake/FCC_model.py | import torch
import torch.nn as nn
import numpy as np
import random
import floris.tools as wfct
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
class FCNN(nn.Module):
"""
The class is the Neural Network that can predicts the power output of
wind turbine and the turbulent intensity (TI) at the turbine. The same
network architecture is used for both TI and power predict which
simplifies the code. The network uses the pytorch framwork and uses fully
connected layers. The methods of this class include the training of
the network, testing of the accuracy and generaton of training data.
The networks can be fine tuned via transfer learing if a specific park
layout is known, this will stongly improve the accuracy.
"""
def __init__(self, in_size, nr_neurons, out_size=1):
"""
init method that generates the network architecture using pytroch.
The number of input varibles can be changed incase more flow data is
available in the line segment upstream the turbine.
The nr_neurons defines the size of the given network. The output size
is set to 1 because the network only predicts either the power or TI.
In theory it should be able to do both the error was the high
therefore two networks are used.
Args:
in_size (int): Nr. of inputs, usually 42, 40 for wind speed
and the global ti and yaw angle of the turbine
nr_neurons (int): Nr. of neurons used in the layers, more
neurons means that
the network will have more parameters
out_size (int): Nr. of outputs in the last layer,
set to one if the NN only predicts a single value.
"""
super(FCNN, self).__init__()
# This defines the model architecture
self.disc = nn.Sequential(
# The linear layer is the fully connected layer
torch.nn.Linear(in_size, nr_neurons),
# LeakyReLU activation function after every fully
# connected layer
torch.nn.LeakyReLU(negative_slope=0.01),
torch.nn.Linear(nr_neurons, nr_neurons),
torch.nn.LeakyReLU(negative_slope=0.01),
torch.nn.Linear(nr_neurons, nr_neurons),
torch.nn.LeakyReLU(negative_slope=0.01),
torch.nn.Linear(nr_neurons, out_size),
)
def forward(self, x):
"""
Functions defines a forward pass though the network. Can be used for
a single input or a batch of inputs
Args:
x (torch.tensor): input tensor, to be passed through the network
Returns:
flow_fields (torch.tensor): Output of network
"""
# Use the architecture defined above for a forward pass
return self.disc(x)
def initialize_weights(self):
"""
Initilize weights using a xavier uniform distribution which has
helped training.
Loop over all modules, if module is a linear layer then
initialize weigths.
For more information about xavier initialization please read:
Understanding the difficulty of training deep feedforward neural
networks.
X. Glorot, und Y. Bengio. AISTATS , Volume 9 JMLR Proceedings,
249-256, 2010
"""
# for ever layer in model
if type(self) == nn.Linear:
# initialize weights using a xavier distribution
torch.nn.init.xavier_uniform(self.weight)
# initialize bias with 0.0001
self.bias.data.fill_(0.0001)
@staticmethod
def power_ti_from_FLORIS(x_position, y_position, yawn_angles,
                         wind_velocity, turbulent_int,
                         type='ti', nr_varabiles=40,
                         florisjason_path='.'):
    """
    This function uses FLORIS to create the dataset to train the FCNN.
    The wind speed along a line just upstream every wind turbine and
    the corresponding TI or power output will be returned as numpy
    arrays.

    Args:
        x_position (list or numpy array): 1d array of the x postions of
            the wind turbines in m.
        y_position (list or numpy array): 1d array of the y postions of
            the wind turbines in m.
        yawn_angles (list or numpy array): 1d array of the yaw angle of
            every wind turbine in degree, from -30 to 30.
        wind_velocity (float): Free stream wind velocity in m/s,
            from 3 m/s to 12 m/s.
        turbulent_int (float): Turbulent intensity in percent,
            from 1.5% to 25%.
        type (str): If set to 'power', the power generated by every
            turbine is returned; anything else returns the local TI.
            NOTE(review): this parameter shadows the builtin `type`.
        nr_varabiles (int): Nr of points sampled along the line upstream
            the turbine; 40 was found to be a good value.
        florisjason_path (string): Location of the FLORIS jason file.
            NOTE(review): currently unused — the file name below is
            hard-coded; confirm before relying on it.

    Returns:
        U_list (3d np.array): len(x_position) x 1 x (nr_varabiles + 2)
            array of normalised wind speeds upstream every turbine plus
            the normalised yaw and the global TI.
        ti_power_list (2d np.array): len(x_position) x 1 array of either
            power or TI values for every turbine.
    """
    # define the x and y length of a single cell in the array
    # This is set by the standard value used in FLROIS wakes
    dx = 18.4049079755
    dy = 2.45398773006
    # Set the maximum length of the array to be 3000m and 400m
    # more than the maximum x and y position of the turbines
    x_max = np.max(x_position) + 3005
    y_max = np.max(y_position) + 400
    # Number of cells in x and y needed to create a 2d array of
    # the maximum size
    Nx = int(x_max / dx)
    Ny = int(y_max / dy)
    # Init FLORIS from the jason file
    wind_farm = wfct.floris_interface.FlorisInterface("FLORIS_input"
                                                      "_gauss.json")
    # Set the x and y postions of the wind turbines
    wind_farm.reinitialize_flow_field(layout_array=[x_position,
                                                    y_position])
    # Set the yaw angle of every turbine
    for _ in range(0, len(x_position)):
        wind_farm.change_turbine([_], {'yaw_angle': yawn_angles[_],
                                       "blade_pitch": 0.0})
    # Set inlet wind speed and TI
    wind_farm.reinitialize_flow_field(wind_speed=wind_velocity,
                                      turbulence_intensity=turbulent_int)
    # Calculate wind field
    wind_farm.calculate_wake()
    # Extract 2d slice from 3d domain at hub height
    # This slice needs to have the same number of cells in x and y
    # and same physical dimensions
    cut_plane = wind_farm.get_hor_plane(
        height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
        y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
    # Calculate power generated by every turbine
    power = wind_farm.get_turbine_power()
    # Calculate local TI at every tribune
    ti = wind_farm.get_turbine_ti()
    # Initialize list to store all all the u values
    # Number of turbines x 1 x number of values used + 2
    U_list = np.zeros((len(x_position), 1, nr_varabiles + 2))
    # Initialise list to store TI or u valurs
    ti_power_list = np.zeros((len(x_position), 1))
    # From the flow field generated by FLORIS, extract the wind speeds
    # from a line 60 meter upstream the turbines
    for i in range(len(x_position)):
        # determine the x and y cells that the tubine center is at
        # NOTE(review): the -200 m row offset and the +45/+110 cell
        # window below assume the park layout convention used by the
        # training examples — confirm before reusing with other layouts.
        turbine_cell = [int((x_position[i]) / dx),
                        int((y_position[i] - 200) / dy)]
        # extract wind speeds along the rotor, 60 meters upstream
        u_upstream_hub = cut_plane[
            turbine_cell[1] + 45: turbine_cell[1] + 110,
            turbine_cell[0] - 3]
        # Do an running average, this is done because CNNwake has slight
        # variations in the u predictions, also normalise the u values
        # (the comprehension's `i` has its own scope and does not clash
        # with the outer loop variable)
        u_average = [((u_upstream_hub[i - 1] +
                       u_upstream_hub[i] +
                       u_upstream_hub[i + 1]) / 3) / 12 for i in
                     np.linspace(1, 63, nr_varabiles, dtype=int)]
        # append yaw which is normalised and ti
        u_average = np.append(u_average, yawn_angles[i] / 30)
        u_input_fcnn = np.append(u_average, turbulent_int)
        U_list[i] = u_input_fcnn
        # If type required is power then use power else
        # use TI
        if type == 'power':
            ti_power_list[i] = power[i]
        else:
            ti_power_list[i] = ti[i]
    # round values to 2 places
    return np.round(U_list, 2), np.round(ti_power_list, 2)
    @staticmethod
    def create_ti_power_dataset(size, u_range, ti_range, yaw_range,
                                nr_varabiles=40, type='power',
                                floris_path='.'):
        """
        Create a training or test set for the power or local turbulence
        intensity (TI) prediction networks.

        FLORIS is run on four fixed example wind parks (six turbines each)
        with randomly sampled inlet speed, TI and per-turbine yaw angles.
        For every turbine the wind speeds along a line just upstream of the
        rotor are stored (plus the turbine yaw and the farm TI) together
        with the corresponding FLORIS power or local-TI value. The targets
        are normalised by the maximum value of the whole set so they lie
        between 0 and 1, which helps training.

        Args:
            size (int): Nr. of FLORIS farm evaluations generated; each
                evaluation yields six turbine samples.
            u_range (list): Bound of u values [u_min, u_max] used
            ti_range (list): Bound of TI values [TI_min, TI_max] used
            yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
            nr_varabiles (int, optional): Nr. of u values sampled along the
                upstream line. Defaults to 40.
            type (str, optional): If set to 'power' the turbine power is
                stored as the target; anything else stores the local TI.
                Defaults to 'power'. (Note: shadows the ``type`` builtin.)
            floris_path (str, optional): Path to the FLORIS json file.

        Returns:
            y (torch tensor): Network inputs, one row of nr_varabiles + 2
                values (u samples, yaw, TI) per turbine. This is the FCNN
                input.
            x (torch tensor): Normalised power or TI target per turbine.
                NOTE(review): the names are swapped w.r.t. the usual
                x=input / y=target convention -- y is the input, x the
                target.
        """
        # Each of the 4 example parks contributes a quarter of the set
        chuck_size = int(size/4)
        # Pre-allocate: 6 turbines per farm evaluation.
        # y holds the network inputs, x holds the power/TI targets.
        y = np.zeros((chuck_size * 4 * 6, 1, nr_varabiles + 2))
        x = np.zeros((chuck_size * 6 * 4, 1))
        # start row of every farm evaluation in the flat arrays
        index = [i for i in range(0, size * 6, 6)]
        # create train examples
        print("generate FLORIS data")
        # WIND PARK 1
        for _ in range(0, chuck_size):
            # sample inlet speed, TI and per-turbine yaw from uniform distros
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            # wind speeds along the upstream line and the matching
            # power/TI targets from FLORIS for this park layout
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 300, 1000, 1300, 2000, 2300],
                [300, 500, 300, 500, 300, 500],
                yawlist, u_list, ti_list, type, nr_varabiles,
                florisjason_path=floris_path)
            # write the six turbine samples of this evaluation
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 2
        for _ in range(chuck_size, chuck_size * 2):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 600, 1000, 1300, 2000, 2900],
                [300, 300, 300, 300, 300, 500],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 3
        for _ in range(chuck_size * 2, chuck_size * 3):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 100, 800, 1600, 1600, 2600],
                [300, 500, 400, 300, 500, 400],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 4
        # NOTE(review): this loop stops one iteration early
        # (chuck_size * 4 - 1) because index[_ + 1] would run off the end
        # of ``index`` on the final iteration; the six rows that are never
        # written are dropped by the [0:-6] slices below.
        for _ in range(chuck_size * 3, chuck_size * 4 - 1):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 300, 500, 1000, 1300, 1600],
                [300, 500, 300, 300, 500, 400],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # drop the last (unwritten) farm evaluation and convert to tensors
        x = torch.tensor(x[0:-6], dtype=torch.float)
        y = torch.tensor(y[0:-6], dtype=torch.float)
        print(f"Normalisation used: {torch.max(x)}")
        # Normalise the power/TI targets by the set maximum so that they
        # are between 0-1
        x = x / torch.max(x)
        return y, x
def epoch_training(self, criterion, optimizer, dataloader, device):
"""
Trains the model for one epoch data provided by dataloader. The model
will be updated after each batch and the function will return the
train loss of the last batch
Args:
criterion (torch.nn.criterion): Loss function used to
train model
optimizer (torch.optim.Optimizer): Optimizer used for
gradient descent
dataloader (torch.utils.data.DataLoader): Dataloader for dataset
device (str): Device on which model and data is stored,
cpu or cuda
Returns:
training loss (float): Loss value of training set defined
by criterion
"""
# For all data in datalaoder
for power_ti, input_u in dataloader:
# one batch at a time, get network prediction
output = self(input_u.to(device))
# compute loss
train_loss = criterion(output.squeeze(), power_ti[:, 0].to(device))
self.zero_grad() # Zero the gradients
train_loss.backward() # Calc gradients
optimizer.step() # Do parameter update
return train_loss.item()
    def learn_wind_park(self, x_postion, y_position, size, eval_size,
                        nr_varabiles=40, type='power',
                        device='cpu', nr_epochs=50,
                        batch_size=100, lr=0.003):
        """
        EXPERIMENTAL FUNCTION; DOES NOT WORK YET, DO NOT USE!!!

        Intended to fine-tune an already trained TI/power model on one
        specific wind-park layout to further reduce prediction error.
        The author reports it currently *increases* the error, so there is
        an unresolved bug somewhere in this pipeline. DO NOT USE!!!!

        NOTE(review): u, TI and yaw sampling ranges are hard-coded here
        (3-12 m/s, 0.015-0.25, +/-30 deg) instead of being parameters like
        in create_ti_power_dataset -- if the base model was trained on
        different ranges this mismatch could be part of the problem.

        Args:
            x_postion (list or numpy array): 1d array of the x positions of
                the wind turbines in m.
            y_position (list or numpy array): 1d array of the y positions of
                the wind turbines in m.
            size (int): Size of the training set (farm evaluations)
            eval_size (int): Size of the test set (farm evaluations)
            nr_varabiles (int): Nr. of u samples taken along the line
                upstream of each turbine.
            type (str): 'power' to fine-tune the power model, anything else
                fine-tunes the TI model.
            device (torch.device): Device to run the training on, cuda or cpu
            nr_epochs (int): Nr. of training epochs
            batch_size (int): Training batch size
            lr (float): Model learning rate

        Returns:
            bool: True if training ran to completion
        """
        # total number of turbine samples (train + eval)
        nr_values = int(((size + eval_size)*len(x_postion)))
        # y holds the network inputs, x holds the power/TI targets
        y = np.zeros((nr_values, 1, nr_varabiles + 2))
        x = np.zeros((nr_values, 1))
        print(nr_values)
        print(len(x_postion))
        print(int(nr_values/len(x_postion)))
        # start row of every farm evaluation; deliberately oversized
        # (nr_values * 2) so index[_ + 1] never runs off the end
        index = [i for i in range(0, nr_values * 2, len(x_postion))]
        # create train examples of the specified wind farm using FLORIS
        print("generate FLORIS data")
        for _ in range(0, int(nr_values/len(x_postion))):
            # hard-coded sampling ranges, see NOTE(review) above
            u_list = round(random.uniform(3, 12), 2)
            ti_list = round(random.uniform(0.015, 0.25), 2)
            yawlist = [round(random.uniform(-30, 30), 2)
                       for _ in range(0, len(x_postion))]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                x_postion, y_position, yawlist, u_list, ti_list, type,
                nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        x = torch.tensor(x, dtype=torch.float)
        y = torch.tensor(y, dtype=torch.float)
        print(f"Normalisation used: {torch.max(x)}")
        # normalise targets to 0-1 by the maximum of train AND eval data
        x = x / torch.max(x)
        # split into train / eval partitions
        x_train = x[0:size * len(x_postion)]
        y_train = y[0:size * len(x_postion)]
        x_eval = x[-eval_size*len(x_postion):]
        y_eval = y[-eval_size*len(x_postion):]
        print(x_eval.size(), x_train.size())
        # dataset order is (target, input) -- matches epoch_training
        dataset = TensorDataset(x_train, y_train.float())
        # generate dataload for training
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
        optimizer = optim.Adam(self.parameters(), lr=lr)
        scheduler_gen = lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                       factor=0.6, patience=4,
                                                       verbose=True)
        # use L2 norm as criterion
        criterion = nn.MSELoss()
        # init to list to store error
        error_list = []
        # Train model on data
        for _ in range(nr_epochs):  # train model
            self.train()  # set model to training mode
            loss = self.epoch_training(criterion, optimizer,
                                       dataloader, device)
            self.eval()  # set model to evaluation
            # evaluation on validation set (error() takes input first,
            # and y_eval holds the inputs here)
            val_error = self.error(y_eval, x_eval, device)
            # if error has not decreased over the past 4 epochs
            # decrease the lr by a factor of 0.6
            scheduler_gen.step(val_error)
            error_list.append(val_error)
            print(f" Epoch: {_:.0f}, Training loss: {loss:.4f},"
                  f" Validation error: {val_error:.2f}")
        # plot the val error over the epochs
        plt.plot(range(nr_epochs), error_list)
        plt.show()
        return True
def error(self, x_eval, y_eval, device='cpu'):
"""
Function to calculate the error between the networks
predictions and the actual output. The x and y values
need to be generated using the create_ti_power_dataset
function. The error will be the mean percentage difference
between all values predicted by the network and the actual
values
Args:
x_eval (torch tensor): Tensor of all flow, ti and yaw values
for different turbines, this the the model input.
y_eval (torch tensor): Tensor of all TI or power outputs as
calculated by floris for the corresponding flow field in x
device (str, optional): Device where the model is stored on.
Defaults to 'cpu'.
Returns:
error (float): percentage error
"""
error_list = []
# Do forward pass of the x data
model_predict = self.forward(x_eval.to(device))
for n in range(0, len(y_eval)):
# sometimes the power prediction is zero, this will give
# an error of inf due to divide by zero in step below.
# Therefore filter out very small power here
if abs(y_eval.detach().cpu().numpy()[n]) < 0.01:
continue
else:
# calculate error
power_error = abs(y_eval.detach().cpu().numpy()[n] -
model_predict[n].detach().cpu().numpy()) / (
y_eval.detach().cpu().numpy()[n] + 1e-8)
error_list.append(power_error * 100)
return np.mean(error_list)
def load_model(self, path='.', device='cpu'):
"""
Function to load model from a pt file into this class.
Args:
path (str): path to saved model.
device (torch.device): Device to load onto, cpu or cuda
"""
# Load a previously trained model
self.load_state_dict(torch.load(path, map_location=device))
def save_model(self, name='generator.pt'):
"""
Function to save current model paramters so that it can
be used again later. Needs to be saved with as .pt file
Args:
name (str): name of .pt file from which to load model
"""
torch.save(self.state_dict(), name)
| 24,254 | 42.467742 | 97 | py |
wakenet | wakenet-master/Code/CNNWake/visualise.py | import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
from .superposition import super_position
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def visualize_turbine(plane, domain_size, nr_points, title="", ax=None):
    """
    Draw the 2d flow field around a single turbine onto a given axis.

    Args:
        plane (2d numpy array): Flow field around the turbine
        domain_size (list or numpy array): Physical extent of the domain as
            [x_min, x_max, y_min, y_max]
        nr_points (list or numpy array): Nr. of grid points in x and y
        title (str, optional): Title of the graph. Defaults to "".
        ax (ax.pcolormesh, optional): Pyplot subplot the image is drawn on.

    Returns:
        ax.pcolormesh: Image of the flow field
    """
    # physical coordinates of every grid cell
    x_coords = np.linspace(domain_size[0], domain_size[1], nr_points[0])
    y_coords = np.linspace(domain_size[2], domain_size[3], nr_points[1])
    grid_x, grid_y = np.meshgrid(x_coords, y_coords)
    # draw the cut-through onto the supplied axis
    image = ax.pcolormesh(grid_x, grid_y, plane, shading='auto',
                          cmap="coolwarm")
    ax.set_title(title)
    ax.set_aspect("equal")  # equal axis scaling
    return image
def visualize_farm(
        plane, nr_points, size_x, size_y, title="", ax=None, vmax=False):
    """
    Draw the flow field of a whole wind farm onto a given axis.

    Args:
        plane (2d numpy array): Flow field of the wind farm
        nr_points (list or np array): Nr. of grid points in x and y
        size_x (int): Physical size of the domain in x direction
        size_y (int): Physical size of the domain in y direction
        title (str, optional): Title of the plot. Defaults to "".
        ax (ax.pcolormesh, optional): Pyplot subplot the image is drawn on.
        vmax (bool, optional): Colour-scale cap; when False the maximum of
            ``plane`` is used instead.

    Returns:
        ax.pcolormesh: Image of the flow field around the wind farm
    """
    # fall back to the field maximum when no colour-scale cap was given
    colour_cap = np.max(plane) if vmax is False else vmax
    # grid spans [0, size_x] x [0, size_y]
    grid_x, grid_y = np.meshgrid(np.linspace(0, size_x, nr_points[0]),
                                 np.linspace(0, size_y, nr_points[1]))
    image = ax.pcolormesh(grid_x, grid_y, plane, shading='auto',
                          cmap="coolwarm", vmax=colour_cap)
    ax.set_title(title)
    ax.set_aspect("equal")  # equal axis scaling
    return image
def Compare_CNN_FLORIS(
        x_position, y_position, yawn_angles, wind_velocity, turbulent_int,
        CNN_generator, Power_model, TI_model, device,
        florisjason_path='', plot=False):
    """
    Generate the wind field around a wind park with the neural networks
    and compare it against FLORIS.

    The individual turbine wakes are produced by the CNN and superimposed
    onto the farm flow field with a super-position model (SOS). Power and
    local TI of every turbine are predicted by the two fully connected
    networks from the flow sampled just upstream of each rotor. The same
    park is also solved with FLORIS; timing and error statistics are
    printed, and with plot=True both solutions are shown side by side.

    Args:
        x_position (list): 1d array of x locations of the turbines in m.
        y_position (list): 1d array of y locations of the turbines in m.
        yawn_angles (list): 1d array of yaw angles of every wind turbine.
        wind_velocity (float): Free stream wind velocity in m/s.
        turbulent_int (float): Turbulent intensity in percent.
        CNN_generator (Generator): Trained CNN producing a single-turbine
            wake field from (hub speed, TI, yaw).
        Power_model (FCNN): Trained network predicting turbine power.
        TI_model (FCNN): Trained network predicting local TI.
        device (torch.device): Device to run the networks on, cpu or cuda
        florisjason_path (string): Location of the FLORIS json file
        plot (bool, optional): If True, the FLORIS and CNN solutions are
            plotted and compared.

    Returns:
        tuple: (farm_array, floris_plane) -- the CNNwake and FLORIS 2d
        flow fields of the wind park.
    """
    # Cell size of the 2d array; these values match the resolution of the
    # FLORIS wakes the CNN was trained on and must not be changed.
    dx = 18.4049079755
    dy = 2.45398773006
    # Domain extends 3000 m / 300 m past the furthest turbine in x / y.
    # If a larger physical domain is needed, adapt these values.
    x_max = np.max(x_position) + 3000
    y_max = np.max(y_position) + 300
    # Number of cells needed to span x_max x y_max at dx, dy resolution
    Nx = int(x_max / dx)
    Ny = int(y_max / dy)
    # Initialise the park flow field with the free-stream velocity
    farm_array = np.ones((Ny, Nx)) * wind_velocity
    # set up FLORIS model with the same layout and inflow
    floris_model = wfct.floris_interface.FlorisInterface(
        florisjason_path + "FLORIS_input_gauss.json")
    floris_model.reinitialize_flow_field(
        layout_array=[x_position, np.array(y_position)])
    for _ in range(0, len(x_position)):
        floris_model.change_turbine([_], {'yaw_angle': yawn_angles[_],
                                          "blade_pitch": 0.0})
    floris_model.reinitialize_flow_field(wind_speed=wind_velocity,
                                         turbulence_intensity=turbulent_int)
    start_t = time.time()
    # Calculate using FLORIS and extract the 2d flow field at hub height
    floris_model.calculate_wake()
    print(f"Time taken for FLORIS to generate"
          f" wind park: {time.time() - start_t:.3f}")
    floris_plane = floris_model.get_hor_plane(
        height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
        y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
    # FLORIS reference power and local TI for every turbine
    floris_power = floris_model.get_turbine_power()
    floris_ti = floris_model.get_turbine_ti()
    # print(floris_power, floris_ti)
    power_CNN = []
    ti_CNN = []
    t = time.time()
    with torch.no_grad():
        # CNNwake prediction, turbine by turbine (upstream to downstream)
        for i in range(len(x_position)):
            # x and y cell indices of the turbine centre; the y offset of
            # 200 m matches the wake-image placement used in training
            turbine_cell = [int((x_position[i]) / dx),
                            int((y_position[i] - 200) / dy)]
            t1 = time.time()
            # extract wind speeds along the rotor, 60 meters upstream
            u_upstream_hub = farm_array[
                turbine_cell[1] + 45: turbine_cell[1] + 110,
                turbine_cell[0] - 3]
            # Running average over neighbouring samples: CNNwake has
            # slight variations in the u predictions. Also normalise by
            # 12 (max wind speed of the training data).
            u_power = [
                ((u_upstream_hub[i - 1] + u_upstream_hub[i] +
                  u_upstream_hub[i + 1]) / 3) / 12 for
                i in np.linspace(5, 55, 40, dtype=int)]
            # append normalised yaw and the farm TI to the feature vector
            u_power = np.append(u_power, yawn_angles[i] / 30)
            u_power = np.append(u_power, turbulent_int)
            # The local TI equals the inlet TI if the turbine is not
            # covered by a wake: all u samples being (nearly) identical
            # means no wake coverage. Local TI also depends on yaw, so
            # the free-stream shortcut is only taken for small yaw
            # (|yaw/30| < 0.4, i.e. less than 12 degrees).
            if np.allclose(u_power[0], u_power[0:-3],
                           rtol=1e-02, atol=1e-02) and abs(u_power[-2]) < 0.4:
                # print("Turbine in free stream, set ti to normal")
                ti = turbulent_int
            else:
                # 0.30000001192092896 un-does the TI normalisation --
                # presumably the max TI of the training set; confirm
                ti = TI_model((torch.tensor(u_power).float().to(device))).detach().cpu().numpy() * 0.30000001192092896
            # regulate TI to ensure it is not too different from free stream
            if ti < turbulent_int * 0.7:
                # print(f"TI REGULATED 1 AT {i}")
                ti = turbulent_int * 1.5
            # clip ti values to the max and min TI seen in training
            ti = np.clip(ti, 0.015, 0.25).item(0)
            ti_CNN.append(ti)
            # replace the inlet TI by the local TI for the power model
            u_power[-1] = ti
            # 4834506 un-does the power normalisation -- presumably the
            # max turbine power (W) of the training set; confirm
            energy = Power_model(torch.tensor(u_power).float().to(device)).detach().cpu().numpy() * 4834506
            power_CNN.append(energy[0])
            # mean upstream speed drives the single-turbine wake CNN
            hub_speed = np.round(np.mean(u_upstream_hub), 2)
            turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
            turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # CNN output is normalised, multiply by 12 (max u of training
            # data) and convert to a numpy array
            turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # superimpose the turbine wake onto the farm flow field
            farm_array = super_position(
                farm_array, turbine_field, turbine_cell, hub_speed,
                wind_velocity, sp_model="SOS")
    # print timing and error statistics
    print(f"Time taken for CNNwake to generate wind park: {time.time() - t:.3f}")
    print(f"CNNwake power prediction error: "
          f"{100 * np.mean(abs(np.array(floris_power) - np.array(power_CNN)) / np.array(floris_power)):.2f} %")
    print(f"CNNwake TI prediction error: {100 * np.mean(abs(np.array(floris_ti) - np.array(ti_CNN)) / np.array(floris_ti)):.2f} %")
    print(f"APWP error: {100 * np.mean(abs(floris_plane - farm_array) / np.max(floris_plane)):.2f}")
    if plot:
        plt.rcParams.update({'font.size': 16})
        # Plot wake fields of both wind farms and the error field
        fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(20, 49))
        im1 = visualize_farm(farm_array, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="CNNwake", ax=axarr[0])
        im2 = visualize_farm(floris_plane, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="FLORIS", ax=axarr[1])
        im3 = visualize_farm(
            (100 * abs(floris_plane - farm_array) / np.max(floris_plane)),
            nr_points=[Nx, Ny], size_x=x_max, size_y=y_max,
            title="Pixel wise percentage error ", ax=axarr[2], vmax=20)
        col1 = fig.colorbar(im1, ax=axarr[0])
        col1.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col2 = fig.colorbar(im2, ax=axarr[1])
        col2.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col3 = fig.colorbar(im3, ax=axarr[2])
        col3.set_label('%', labelpad=11, y=0.9, rotation=0)
        axarr[2].set_xlabel('m', fontsize=15)
        axarr[0].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[1].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[2].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        # Plot TI and power of every turbine for FLORIS and CNNwake
        fig, axarr = plt.subplots(2, figsize=(9, 9))
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(power_CNN)/1.e06, 'o--', label="CNNwake")
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(floris_power)/1.e06, 'o--', label="FLORIS")
        axarr[1].plot(range(1, len(x_position) + 1),
                      np.array(ti_CNN), 'o--', label="CNNwake")
        axarr[1].plot(range(1, len(x_position) + 1),
                      floris_ti, 'o--', label="FLORIS")
        axarr[0].set_ylabel('Power output [MW]', fontsize=15)
        axarr[1].set_ylabel('Local TI [%]', fontsize=15)
        axarr[1].set_xlabel('Turbine Nr.', rotation=0, fontsize=15)
        axarr[1].legend()
        axarr[0].legend()
        plt.show()
    return farm_array, floris_plane
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .superposition import super_position
    # it needs to be: from superposition import super_position, for all
    # CNNWake imports; also import all NNs.
    from CNN_model import Generator
    from FCC_model import FCNN
    from superposition import super_position
    # NOTE(review): both branches select "cpu", so the GPU is never used
    # even when available -- presumably "cuda" was intended for the first
    # branch; confirm before changing, inference may be kept on CPU
    # deliberately.
    device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
    # Wake CNN: 3 inputs (u, TI, yaw), 30 filters
    CNN_generator = Generator(3, 30).to(device)
    CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
    # NOTE(review): .to() without arguments is a no-op
    CNN_generator = CNN_generator.to()
    CNN_generator.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    example_out = CNN_generator(torch.tensor([[4, 0.1, 20]]).float().to(device))
    assert example_out.size() == torch.Size([1, 1, 163, 163])
    # Power predictor: 42 inputs (40 u samples + yaw + TI)
    Power_model = FCNN(42, 300, 1).to(device)
    Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt', map_location=device))
    Power_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    energy = Power_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert energy.size() == torch.Size([1])
    # Local-TI predictor, same architecture as the power model
    TI_model = FCNN(42, 300, 1).to(device)
    TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt', map_location=device))
    TI_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    TI = TI_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert TI.size() == torch.Size([1])
    # Compare a single wind farm: shows wake, power and local TI for every
    # turbine against FLORIS.
    # NOTE(review): 7 yaw angles are passed for 6 turbines; the extra
    # trailing zero is ignored by Compare_CNN_FLORIS.
    farm, a = Compare_CNN_FLORIS([100, 100, 700, 700, 1200, 1200],
                                 [300, 800, 1300, 550, 1050, 300],
                                 [0, 0, 0, 0, 0, 0, 0], 11.6, 0.06,
                                 CNN_generator, Power_model,
                                 TI_model, device, plot=True)
| 13,979 | 42.6875 | 131 | py |
wakenet | wakenet-master/Code/CNNWake/train_CNN.py | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
from .CNN_model import Generator
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def train_CNN_model(
        nr_filters, nr_epochs, learing_rate, batch_size,
        train_size, val_size, image_size, device, u_range,
        ti_range, yaw_range, model_name, nr_workers=0, floris_path="."):
    """
    Build and train a new wake-prediction CNN from scratch.

    A fresh ``Generator`` is created, training and validation sets are
    produced with FLORIS, and the network is trained for ``nr_epochs``
    epochs with Adam plus a reduce-on-plateau learning-rate schedule.
    The validation-error history is plotted, the model is evaluated on the
    validation set after every epoch, and the final weights are written to
    ``model_name``.

    Args:
        nr_filters (int): Nr. of filters used for the conv layers
        nr_epochs (int): Nr. of training epochs
        learing_rate (float): Model learning rate
        batch_size (int): Training batch size
        train_size (int): Size of the generated training set
        val_size (int): Size of the generated validation set
        image_size (int): Size of the data set images, needs to match the
            model output size; for the current model this is 163 x 163
        device (torch.device): Device to run the training on, cuda or cpu
        u_range (list): Bound of u values [u_min, u_max] used
        ti_range (list): Bound of TI values [TI_min, TI_max] used
        yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
        model_name (str): Name of the trained saved model (needs be .pt)
        nr_workers (int, optional): Nr. of workers to load data. Defaults to 0.
        floris_path (str, optional): Path to FLORIS json file.

    Returns:
        model (Generator): Trained model
        loss (float): training loss of the last batch
        val_error (float): Percentage error on the validation set
    """
    # the model is conditioned on three inputs: u, TI and yaw
    nr_input_var = 3
    model = Generator(nr_input_var, nr_filters).to(device)
    # FLORIS-generated training and validation data
    train_params, train_images = model.create_floris_dataset(
        size=train_size, image_size=image_size, u_range=u_range,
        ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,
        curl=False)
    val_params, val_images = model.create_floris_dataset(
        size=val_size, image_size=image_size, u_range=u_range,
        ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,
        curl=False)
    # dataloader yields (wake image, conditioning parameters) batches
    loader = DataLoader(
        TensorDataset(train_images.unsqueeze(1), train_params.float()),
        batch_size=batch_size, shuffle=True, num_workers=nr_workers)
    model.initialize_weights()
    # Adam + reduce-on-plateau schedule built from the hyper-parameters
    adam = optim.Adam(model.parameters(), lr=learing_rate)
    plateau = lr_scheduler.ReduceLROnPlateau(
        adam, 'min', factor=0.6, patience=4, verbose=True)
    # L2 norm as training criterion
    criterion = nn.MSELoss()
    history = []
    for epoch in range(nr_epochs):
        model.train()
        loss = model.epoch_training(criterion, adam, loader, device)
        model.eval()
        # percentage error on the validation set
        val_error = model.error(val_params, val_images,
                                device, image_size=image_size,
                                normalisation=12)
        # drop the lr by 0.6 when the error plateaus for 4 epochs
        plateau.step(val_error)
        history.append(val_error)
        print(f" Epoch: {epoch:.0f},"
              f" Training loss: {loss:.4f},"
              f" Validation error: {val_error:.2f}")
    print("Finished training")
    model.save_model(model_name)
    # show the validation-error history
    plt.plot(range(nr_epochs), history)
    plt.show()
    return model, loss, val_error
if __name__ == '__main__':
    # When running this file directly, the package imports must be
    # absolute (e.g. ``from CNN_model import Generator``) rather than the
    # relative imports used at the top of the module.
    # Pick the GPU when available, otherwise fall back to the CPU.
    training_device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    # Train a fresh wake CNN with the default hyper-parameters.
    train_CNN_model(
        nr_filters=16, nr_epochs=25, learing_rate=0.003, batch_size=50,
        train_size=200, val_size=30, image_size=163,
        device=training_device,
        u_range=[3, 12], ti_range=[0.015, 0.25], yaw_range=[-30, 30],
        model_name='generator.pt')
| 5,251 | 38.19403 | 78 | py |
wakenet | wakenet-master/Code/CNNWake/train_FCNN.py | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
from FCC_model import FCNN
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def train_FCNN_model(
        nr_neurons, input_size, nr_epochs, learing_rate, batch_size,
        train_size, val_size, u_range, ti_range, yaw_range, model_name,
        type='power', device='cpu', nr_workers=0, floris_path="."):
    """
    Create a new FCNN and train it to predict either turbine power or
    local turbulence intensity from upstream wind-speed samples.

    A fresh model is built, training and validation sets are generated
    with FLORIS, and the network is trained for ``nr_epochs`` epochs with
    Adam and a reduce-on-plateau learning-rate schedule. The validation
    error history is plotted and the final weights are saved to
    ``model_name``.

    Fix over the previous version: the docstring documented parameters
    that do not exist (``nr_filters``, ``image_size``) and omitted
    ``nr_neurons``, ``input_size`` and ``type``; it now matches the actual
    signature. The code is unchanged.

    Args:
        nr_neurons (int): Nr. of neurons in the hidden layers of the FCNN
        input_size (int): Nr. of wind-speed samples taken along the line
            upstream of each rotor; the network input size is
            input_size + 2 because yaw and TI are appended
        nr_epochs (int): Nr. of training epochs
        learing_rate (float): Model learning rate
        batch_size (int): Training batch size
        train_size (int): Size of the generated training set
        val_size (int): Size of the generated validation set
        u_range (list): Bound of u values [u_min, u_max] used
        ti_range (list): Bound of TI values [TI_min, TI_max] used
        yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
        model_name (str): Name of the trained saved model (needs to be .pt)
        type (str, optional): 'power' trains a power predictor; anything
            else trains a local-TI predictor. Defaults to 'power'.
            (Note: shadows the ``type`` builtin.)
        device (torch.device): Device to run the training on, cuda or cpu
        nr_workers (int, optional): Nr. of workers to load data. Defaults to 0.
        floris_path (str, optional): Path to FLORIS json file.

    Returns:
        model (FCNN): Trained model
        loss (float): training loss of the last batch
        val_error (float): Percentage error on the validation set
    """
    # The current inputs are: u samples plus ti and yaw, hence the +2.
    # If more inputs are used please change this.
    model_input_size = input_size + 2
    # create a model of the specified size
    model = FCNN(model_input_size, nr_neurons, 1).to(device)
    # create datasets from the data generated by FLORIS
    # (first return value is the network input, second the target)
    x_train, y_train = model.create_ti_power_dataset(
        size=train_size, u_range=u_range, ti_range=ti_range,
        yaw_range=yaw_range, nr_varabiles=input_size, type=type,
        floris_path=floris_path)
    x_eval, y_eval = model.create_ti_power_dataset(
        size=val_size, u_range=u_range, ti_range=ti_range,
        yaw_range=yaw_range, nr_varabiles=input_size, type=type,
        floris_path=floris_path)
    # dataset order is (target, input) -- matches FCNN.epoch_training
    dataset = TensorDataset(y_train, x_train.float())
    # generate dataloader for training
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=nr_workers)
    # init the weights of the model
    model.initialize_weights()
    # set up an optimizer and learning-rate scheduler from hyperparameters
    optimizer = optim.Adam(model.parameters(), lr=learing_rate)
    scheduler_gen = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.6, patience=4, verbose=True)
    # use L2 norm as criterion
    criterion = nn.MSELoss()
    # init list to store the validation error per epoch
    error_list = []
    for _ in range(nr_epochs):  # train model
        model.train()  # set model to training mode
        loss = model.epoch_training(criterion, optimizer, dataloader, device)
        model.eval()  # set model to evaluation
        # evaluation on validation set
        val_error = model.error(x_eval, y_eval, device)
        # if the error has not decreased over the past 4 epochs decrease
        # the lr by a factor of 0.6
        scheduler_gen.step(val_error)
        error_list.append(val_error)
        print(f" Epoch: {_:.0f},"
              f" Training loss: {loss:.4f},"
              f" Validation error: {val_error:.2f}")
    # save model
    model.save_model(model_name)
    # plot the val error over the epochs
    plt.plot(range(nr_epochs), error_list)
    plt.show()
    return model, loss, val_error
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .FCC_model import FCNN
    # it needs to be: from FCC_model import FCNN, for all CNNWake imports
    # NOTE(review): both branches select "cpu", so the GPU is never used
    # here even when available -- presumably "cuda" was intended, but the
    # FCNN is small enough that CPU training may be deliberate; confirm.
    devices = torch.device("cpu" if torch.cuda.is_available() else "cpu")
    # Train a FCNN to predict turbine power with default hyper-parameters
    train_FCNN_model(
        nr_neurons=20, input_size=20, nr_epochs=150, learing_rate=0.003,
        batch_size=30, train_size=50, val_size=40, u_range=[3, 12],
        ti_range=[0.015, 0.25], yaw_range=[-30, 30],
        model_name='power_model.pt', type='power', device=devices)
| 5,082 | 38.1 | 79 | py |
wakenet | wakenet-master/Code/CNNWake/superposition.py | import torch
from torch.backends import cudnn
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def super_position(farm_array, turbine_array, turbine_postion,
                   hub_speed, wind_velocity, sp_model="SOS"):
    """
    Superimpose a single-turbine wake field onto the global farm field.

    Both fields are first normalised (the turbine wake by the hub-height
    speed, the farm field by the free-stream speed); the chosen
    super-position model then combines them inside the cell window that
    the turbine wake occupies, and the result is written back into
    ``farm_array`` in place. See
    https://doi.org/10.1088/1742-6596/749/1/012003 for background on
    wake super-position models.

    Args:
        farm_array (numpy array): 2d wind field of the whole wind farm,
            modified in place.
        turbine_array (numpy array): 2d wind field around one turbine.
        turbine_postion (sequence): [x_cell, y_cell] of the turbine wake's
            origin inside the global array.
        hub_speed (float): u velocity at the turbine hub in m/s, used to
            normalise the turbine wake.
        wind_velocity (float): free-stream wind speed in m/s, used to
            normalise the farm field and rescale the result.
        sp_model (str, optional): "SOS" (root sum of squares, recommended),
            "linear" or "largest_deficit". Defaults to "SOS".

    Returns:
        numpy array: the (mutated) farm field with the turbine wake
        superimposed.

    Raises:
        Exception: if ``sp_model`` is not one of the supported names.
    """
    # Normalised velocity fields (1.0 == undisturbed flow).
    wake_norm = turbine_array / hub_speed
    farm_norm = farm_array / wind_velocity
    # Cell window occupied by the turbine wake inside the farm array.
    # NOTE: shape[0] sets the x extent and shape[1] the y extent, matching
    # the original convention (wake arrays are square in practice).
    x_lo = turbine_postion[0]
    x_hi = x_lo + wake_norm.shape[0]
    y_lo = turbine_postion[1]
    y_hi = y_lo + wake_norm.shape[1]
    farm_patch = farm_norm[y_lo:y_hi, x_lo:x_hi]
    if sp_model == "SOS":
        # u = 1 - sqrt((1 - u_1)^2 + (1 - u_2)^2), rescaled to m/s
        deficit_sq = np.square(1 - wake_norm) + np.square(1 - farm_patch)
        farm_array[y_lo:y_hi, x_lo:x_hi] = \
            (1 - np.sqrt(deficit_sq)) * wind_velocity
        return farm_array
    if sp_model == "linear":
        # u = 1 - ((1 - u_1) + (1 - u_2)), rescaled to m/s
        total_deficit = (1 - wake_norm) + (1 - farm_patch)
        farm_array[y_lo:y_hi, x_lo:x_hi] = \
            (1 - total_deficit) * wind_velocity
        return farm_array
    if sp_model == "largest_deficit":
        # u = min(u_1, u_2) of the normalised fields, rescaled to m/s
        farm_array[y_lo:y_hi, x_lo:x_hi] = \
            np.minimum(wake_norm, farm_patch) * wind_velocity
        return farm_array
    # Unknown model name: keep the original error text verbatim.
    raise Exception('No super position model selected, please'
                    ' either select: SOS, linear or largest_deficit')
def CNNWake_farm_power(
        yawn_angles, x_position, y_position, wind_velocity, turbulent_int,
        CNN_generator, Power_model, TI_model, device,
        ti_normalisation=0.30000001, power_normalisation=4834506):
    """
    Calculates the power output of the wind farm using the NN.
    The generated power is returned as a negative number for the minimization.
    The individual wakes of the turbines are calculated using the CNN and
    superimposed onto the wind farm flow field using a super-position model.
    The energy produced by the turbines is calculated using another fully
    connected network from the flow data just upstream of the turbine.
    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is done
    to ensure that no wake is lost at the edge of the domain.
    Args:
        yawn_angles (list): 1d array of the yaw angle of every wind turbine
            in degree, from -30° to 30°
        x_position (list): 1d array of the x postions of the wind
            turbines in meters.
        y_position (list): 1d array of the y postions of the wind
            turbines in meters.
        wind_velocity (float): Free stream wind velocity in m/s,
            from 3 m/s to 12 m/s
        turbulent_int (float): Turbulent intensity in percent,
            from 1.5% to 25%
        CNN_generator (Generator): CNN to predict the wake of a single
            turbine, ensure it is trained and set to validation mode
        Power_model (Generator): FCNN to predict the power generated
            by a turbine, ensure it is trained and set to validation mode
        TI_model (Generator): FCNN to predict the local TI of a
            turbine, ensure it is trained and set to validation mode
        device (torch.device): Device to store and run the neural network
            on, either cpu or cuda
        ti_normalisation (float): Normalisation of the TI training set
        power_normalisation (float): Normalisation of the power training set
    Returns:
        power float: negative power output
    """
    # Define the x and y length of a single cell in the array.
    # This is set by the standard value used in FLORIS wakes.
    # NOTE(review): these cell sizes are tied to the 163x163 / 3000m x 400m
    # grid the CNN was trained on -- confirm before changing the domain.
    dx = 18.4049079755
    dy = 2.45398773006
    # Set the maximum length of the array to be 3000m and 400m
    # more than the maximum x and y position of the wind park
    # If a larger physical domain was used change adapt the values
    x_max = np.max(x_position) + 3000
    y_max = np.max(y_position) + 300
    # Number of cells in x and y needed to create a 2d array of
    # that is x_max x y_max using dx, dy values
    Nx = int(x_max/dx)
    Ny = int(y_max/dy)
    # Initialise a 2d array of the wind park with the
    # inlet wind speed
    farm_array = np.ones((Ny, Nx)) * wind_velocity
    # round yaw angle
    yawn_angles = np.round(yawn_angles, 2)
    # Initialise array to store power and TI for every turbine
    power_CNN = []
    ti_CNN = []
    with torch.no_grad():  # Ensure no gradients are calculated
        # For every wind turbine. Because turbines are ordered by ascending
        # x, upstream wakes are already in farm_array when a turbine reads
        # its inflow below.
        for i in range(len(x_position)):
            # determine the x and y cells that the turbine center is at
            turbine_cell = [int((x_position[i])/dx),
                            int((y_position[i] - 200)/dy)]
            # extract wind speeds along the rotor, 60 meters upstream.
            # NOTE(review): the +45/+110 row offsets and the -3 column
            # offset select the rotor span 3 cells upstream on this fixed
            # grid -- TODO confirm if dx/dy ever change.
            u_upstream_hub = farm_array[
                turbine_cell[1] + 45: turbine_cell[1] + 110, turbine_cell[0] - 3]
            # Do a running average, this is done because CNNwake has slight
            # variations in the u predictions, also normalise the u values
            # by 12 (the max trained wind speed); 40 samples are taken.
            u_list_hub = [
                ((u_upstream_hub[i-1] + u_upstream_hub[i] +
                u_upstream_hub[i+1])/3)/12 for i in np.linspace(
                5, len(u_upstream_hub)-5, 40, dtype=int)]
            # append yaw angle (normalised by 30 deg), also append ti
            u_list_hub = np.append(u_list_hub, yawn_angles[i]/30)
            u_list_hub = np.append(u_list_hub, turbulent_int)
            # The local TI does not change from inlet TI if the turbine
            # is not covered by a wake, therefore check if all values
            # in u_list_hub are the same -> means no wake coverage
            # Local TI also depends on yaw, if yaw is less than 12° and
            # turbine is not in wake -> use inlet TI for local TI
            if np.allclose(
                u_list_hub[0], u_list_hub[0:-3], rtol=1e-02, atol=1e-02)\
                    and abs(u_list_hub[-2]) < 0.4:
                ti = turbulent_int
            # If turbine is in wake or yaw angle is larger use FCNN to find
            # local TI
            else:
                # Use FCNN forward pass to predict TI
                ti = TI_model((torch.tensor(u_list_hub).float().to(device))).detach().cpu().numpy() * ti_normalisation
                # regulate TI to ensure it is not too different from free stream
                if ti < turbulent_int*0.7:
                    ti = turbulent_int * 1.5
            # clip ti values to max and min trained
            ti = np.clip(ti, 0.015, 0.25).item(0)
            ti_CNN.append(ti)  # Save ti value
            # Replace global/inlet TI in u_list with local TI
            u_list_hub[-1] = ti
            # Use FCNN to predict power generated by turbine
            turbine_energy = Power_model(torch.tensor(u_list_hub).float().to(device)).detach().cpu().numpy() * power_normalisation
            power_CNN.append(turbine_energy)  # Save power
            # Find the mean wind speed upstream the turbine
            hub_speed = np.round(np.mean(u_upstream_hub), 2)
            # Create Array of array to pass it to CNN (batch of size 1)
            turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
            # Use CNN to calculate wake of individual turbine
            turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # Since CNN output is normalised,
            # multiply by 12 and create a numpy array
            turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # Place wake of individual turbine in the farm_array
            farm_array = super_position(
                farm_array, turbine_field, turbine_cell,
                hub_speed, wind_velocity, sp_model="SOS")
    # Return the negative of the total power generated (for minimisation)
    return -sum(power_CNN).item(0)
def FLORIS_farm_power(
        yawn_angles, x_position, y_position, wind_velocity,
        turbulent_int, floris_park):
    """
    Compute the total power of a wind farm purely with FLORIS.

    The farm is defined by the x/y location and yaw angle of every
    turbine. The total generated power is returned as a NEGATIVE value so
    the function can be fed directly to a minimiser.

    Args:
        yawn_angles (list): Yaw angle of every turbine in the wind park
        x_position (list): All x locations of the turbines
        y_position (list): All y locations of the turbines
        wind_velocity (float): Inlet wind speed
        turbulent_int (float): Inlet turbulent intensity
        floris_park (floris.tools.FlorisInterface): Floris interface
            loaded from a json file

    Returns:
        power (float): negative power generated by the wind park
    """
    # Round the requested yaw settings before applying them
    yawn_angles = np.round(yawn_angles, 2)
    # Define the turbine layout (y positions passed as a numpy array)
    floris_park.reinitialize_flow_field(layout_array=[x_position,
                                                      np.array(y_position)])
    # Apply the rounded yaw angle to every turbine; blade pitch stays 0
    for idx in range(len(x_position)):
        floris_park.change_turbine([idx],
                                   {'yaw_angle': yawn_angles[idx],
                                    "blade_pitch": 0.0})
    # Set the inflow conditions
    floris_park.reinitialize_flow_field(wind_speed=wind_velocity,
                                        turbulence_intensity=turbulent_int)
    # Solve for the farm flow field
    floris_park.calculate_wake()
    # Sum per-turbine power and negate it for the optimiser
    return -sum(floris_park.get_turbine_power())
| 12,291 | 43.375451 | 130 | py |
wakenet | wakenet-master/Code/CNNWake/optimisation.py | from scipy.optimize import minimize
import numpy as np
import torch
import time
import floris.tools as wfct
from .superposition import CNNWake_farm_power, FLORIS_farm_power
from .CNN_model import Generator
from .FCC_model import FCNN
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def CNNwake_wake_steering(x_position, y_position, initial_yaw, wind_velocity,
                          turbulent_int, CNN_generator, Power_model, TI_model,
                          device, bounds, tolerance):
    """
    Function will optimise the yaw angle of a specific wind farm for
    a given inlet wind speed and TI using CNNwake's wind farm function.
    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is done
    to ensure that no wake is lost at the edge of the domain.
    Args:
        x_position (list or numpy array): 1d array of the x postions of
            the wind turbines in m.
        y_position (list or numpy array): 1d array of the y postions of
            the wind turbines in m.
        initial_yaw (list or numpy array): 1d array of inital yaw angle
            of every wind turbine in degree, set to 0
        wind_velocity (float): Free stream wind velocity in m/s,
            ensure NNs are trained on this wind speed
        turbulent_int (float): Turbulent intensity in percent,
            ensure NNs are trained on this TI
        CNN_generator (Generator): CNN to predict the wake of a single
            turbine, ensure it is trained and set to validation mode
        Power_model (Generator): FCNN to predict the power generated
            by a turbine, ensure it is trained and set to validation mode
        TI_model (Generator): FCNN to predict the local TI of a
            turbine, ensure it is trained and set to validation mode
        device (torch.device): Device to store and run the neural network
            on, either cpu or cuda
        bounds (list): Yaw angle bounds for optimisation [min_yaw, max_yaw]
        tolerance (float): Relative solver tolerance
    Returns:
        opt_yaw.x (np.array): Optimal yaw angle
        opt_yaw.func (float): Optimal power output
        time_taken (float): Time taken for optimisation
    """
    # Set all NNs to evaluation mode
    CNN_generator.eval()
    Power_model.eval()
    TI_model.eval()
    # Run a few checks to ensure that optimisation will work
    # Check if there are the same number of turbines defined in
    # x, y and yaw angle arrays
    assert len(x_position) == len(y_position)
    assert len(y_position) == len(initial_yaw)
    # check if x_list in ascending order, if this assert fails
    # ensure that x goes from smallest to largest
    if len(x_position) > 1:
        assert np.any(np.diff(np.array(x_position)) > 0)
    # Check if all the NNs work as expected: the CNN must produce a
    # 163x163 wake image and both FCNNs a scalar from a 42-value input
    assert CNN_generator(torch.tensor([[
        4, 0.1, 20]]).float().to(device)).size() == \
        torch.Size([1, 1, 163, 163])
    assert TI_model(torch.tensor([
        i for i in range(0, 42)]).float().to(device)).size() == \
        torch.Size([1])
    assert Power_model(torch.tensor([
        i for i in range(0, 42)]).float().to(device)).size() == \
        torch.Size([1])
    # create a list of tuples of bounds for the optimizer
    bounds_list = [(bounds[0], bounds[1]) for _ in range(0, len(x_position))]
    init_t = time.time()  # start timer
    # Using scipy.optimize function to find the optimal yaw setting by calling
    # CNNWake_farm_power many times with different yaw angles. Ensure that all
    # arguments are given in the correct order
    opt_yaw = minimize(
        CNNWake_farm_power, initial_yaw,
        args=(x_position, y_position, wind_velocity, turbulent_int,
              CNN_generator, Power_model, TI_model, device), method='SLSQP',
        bounds=bounds_list, options={'ftol': tolerance, 'eps': 0.1,
                                     'disp': False})
    # find time taken for optimisation
    time_taken = time.time() - init_t
    return np.round(opt_yaw.x, 2), abs(opt_yaw.fun), time_taken
def FLORIS_wake_steering(x_position, y_position, initial_yaw, wind_velocity,
                         turbulent_int, bounds, tolerance, floris_path='./'):
    """
    Function will optimise the yaw angle of a specific wind farm for
    a given inlet wind speed and TI using FLORIS.
    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is done
    to ensure that no wake is lost at the edge of the domain.
    Args:
        x_position (list or numpy array): 1d array of the x postions of
            the wind turbines in m.
        y_position (list or numpy array): 1d array of the y postions of
            the wind turbines in m.
        initial_yaw (list or numpy array): 1d array of inital yaw angle
            of every wind turbine in degree, set to 0
        wind_velocity (float): Free stream wind velocity in m/s
        turbulent_int (float): Turbulent intensity in percent
        bounds (list): Yaw angle bounds for optimisation [min, max]
        tolerance (float): Relative solver tolerance
        floris_path (str): Path to FLORIS json file
    Returns:
        floris_opti.x (np.array): Optimal yaw angle
        floris_opti.func (float): Optimal power output
        time_taken (float): Time taken for optimisation
    Raises:
        FileNotFoundError: If no FLORIS_input_gauss.json file exists
            at floris_path.
    """
    # Check if there are the same number of turbines defined in
    # x, y and yaw angle arrays
    assert len(x_position) == len(y_position)
    assert len(y_position) == len(initial_yaw)
    # create a list of tuples of bounds for the optimizer
    bounds = [(bounds[0], bounds[1]) for _ in range(0, len(x_position))]
    # Load the FLORIS model definition from the json file.
    # BUGFIX: the original caught FileNotFoundError, printed a message and
    # carried on with an integer placeholder (floris_park = 0), which then
    # crashed inside the optimiser with a confusing AttributeError.
    # Re-raise with a clear message instead so the failure is explicit.
    try:
        floris_park = wfct.floris_interface.FlorisInterface(
            floris_path + "FLORIS_input_gauss.json")
    except FileNotFoundError:
        raise FileNotFoundError(
            'No FLORIS_input_gauss.json file found at this location, '
            'please specify the path to this file')
    init_t = time.time()  # Start timer
    # Using scipy.optimize function to find the optimal yaw setting by calling
    # FLORIS_farm_power many times with different yaw angles. Ensure that all
    # arguments are given in the correct order
    floris_opti = minimize(
        FLORIS_farm_power, initial_yaw,
        args=(x_position, y_position, wind_velocity,
              turbulent_int, floris_park),
        method='SLSQP', bounds=bounds,
        options={'ftol': tolerance, 'eps': 0.1,
                 'disp': False})
    time_taken = time.time() - init_t
    return np.round(floris_opti.x, 2), abs(floris_opti.fun), time_taken
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .CNN_model import Generator
    # it needs to be: from CNN_model import Generator for all CNNWake imports
    # select device to run model on
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load and set up all NNs from the pre-trained weight files:
    # CNN for single-turbine wakes, FCNNs for power and local TI
    CNN_generator = Generator(3, 30).to(device)
    CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
    Power_model = FCNN(42, 300, 1).to(device)
    Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt',
                                           map_location=device))
    TI_model = FCNN(42, 300, 1).to(device)
    TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt',
                                        map_location=device))
    # Use CNNwake optimisation to find the best yaw angles for a
    # 2x2 farm at u = 10.6 m/s, TI = 0.09
    yaw1, power1, timing1 = CNNwake_wake_steering(
        [100, 100, 1000, 1000],
        [300, 800, 300, 800],
        [0, 0, 0, 0], 10.6, 0.09, CNN_generator, Power_model, TI_model,
        device, [-30, 30], 1e-07)
    print(f"CNNwake optimized yaw abgle: {yaw1}")
    # Find FLORIS best yaw angle for the same farm to compare against
    yaw, power, timing = FLORIS_wake_steering(
        [100, 100, 1000, 1000],
        [300, 800, 300, 800],
        [0, 0, 0, 0], 10.6, 0.09, [-30, 30], 1e-07)
    print(f"FLORIS optimized yaw abgle: {yaw}")
| 8,797 | 42.554455 | 79 | py |
wakenet | wakenet-master/Code/CNNWake/CNN_model.py | import torch
import torch.nn as nn
import numpy as np
import random
import floris.tools as wfct
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
class Generator(nn.Module):
    """
    Neural network that generates the flow field around a wind turbine.

    The network maps three inflow parameters (wind speed, turbulent
    intensity, yaw angle) to a 163 x 163 wake image using a fully
    connected layer followed by transpose-convolutional layers (pytorch).
    The class also bundles training, accuracy testing and FLORIS-based
    training-data generation.
    """

    def __init__(self, nr_input_var, nr_filter):
        """
        Generate the network architecture using pytorch's ConvTranspose2d
        and Sequential layers. The number of input variables and the size
        of the network can be changed; the output size is fixed at
        163 x 163 pixels.

        Args:
            nr_input_var (int): Nr. of inputs, usually 3 for u, ti and yaw
            nr_filter (int): Nr. filters used in deconv layers, more filters
                means that the network will have more parameters
        """
        super(Generator, self).__init__()
        # linear layer: nr_input_var -> 9 neurons, reshaped to 3x3 in forward
        self.FC_Layer = nn.Sequential(nn.Linear(in_features=nr_input_var,
                                                out_features=9))
        # Deconvolutional stack: upsamples the 3x3 input to 163x163
        self.net = nn.Sequential(
            self.layer(1, nr_filter * 16, 4, 2, 1),
            self.layer(nr_filter * 16, nr_filter * 8, 4, 1, 1),
            self.layer(nr_filter * 8, nr_filter * 8, 4, 2, 1),
            self.layer(nr_filter * 8, nr_filter * 4, 4, 2, 1),
            self.layer(nr_filter * 4, nr_filter * 4, 3, 2, 1),
            nn.ConvTranspose2d(nr_filter * 4, 1, kernel_size=3,
                               stride=3, padding=1),
        )

    def layer(self, in_filters, out_filters, kernel_size, stride, padding):
        """
        One layer of the CNN, consisting of ConvTranspose2d, a batchnorm
        and a LeakyReLU activation function.

        Args:
            in_filters (int): Nr. of filters in the previous layer
            out_filters (int): Nr. of output filters
            kernel_size (int): Size of the ConvTranspose2d layer
            stride (int): Stride of the ConvTranspose2d layer
            padding (int): Padding used in this layer

        Returns:
            nn.Sequential: Pytorch Sequential container defining one layer
        """
        # One layer of the network: deconvolution, then batch norm and
        # LeakyReLU activation function
        single_layer = nn.Sequential(nn.ConvTranspose2d(in_filters,
                                                        out_filters,
                                                        kernel_size,
                                                        stride,
                                                        padding,
                                                        bias=False, ),
                                     nn.BatchNorm2d(out_filters),
                                     nn.LeakyReLU(0.2), )
        return single_layer

    def initialize_weights(self):
        """
        Initialize weights using a normal distribution with mean = 0 and
        std = 0.02, which has helped training. Loops over all modules and
        initializes the weights of conv-type and batch-norm layers.
        """
        # for every layer in model
        for m in self.modules():
            # BUGFIX: the original tested isinstance(m, nn.Conv2d), which
            # never matches the nn.ConvTranspose2d layers this network is
            # built from (ConvTranspose2d is not a subclass of Conv2d), so
            # only the BatchNorm weights were ever initialised. Include
            # nn.ConvTranspose2d so the deconv weights get the intended
            # N(0, 0.02) initialisation as well.
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d,
                              nn.BatchNorm2d)):
                # initialize weights using a normal distribution
                nn.init.normal_(m.weight.data, 0.0, 0.02)

    def forward(self, x):
        """
        Forward pass through the network. Can be used for a single input
        or a batch of inputs.

        Args:
            x (torch.tensor): input tensor of shape (batch, nr_input_var)

        Returns:
            torch.tensor: generated flow fields, shape (batch, 1, 163, 163)
        """
        # first the fully connected layer takes in the input, and outputs
        # 9 neurons which are reshaped into a 3x3 array
        x = self.FC_Layer(x).view(len(x), -1, 3, 3)
        # the Conv layers take in the 3x3 array and output a 163x163 array
        return self.net(x)

    @staticmethod
    def create_floris_dataset(
            size, image_size, u_range, ti_range, yaw_range,
            floris_init_path=".", curl=False):
        """
        Generate the dataset needed for training using FLORIS.
        The flow field around a turbine is generated for a large range of
        wind speeds, turbulent intensities and yaw angles. The 2d arrays
        and corresponding inflow conditions are saved for training. The
        data is generated using a Gaussian wake model, please see:
        https://doi.org/10.1016/j.renene.2014.01.002.
        For more information about FLORIS see: https://github.com/NREL/floris.
        Can be used to generate training, validation and test sets.

        Args:
            size (int): Size of the dataset
            image_size (int): Size of the flow field outputs that
                are generated, this depends on the
                Neural network used, should be 163.
            u_range (list): Bound of u values [u_min, u_max] used
            ti_range (list): Bound of TI values [TI_min, TI_max] used
            yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
            floris_init_path (str, optional): Path to the FLORIS json file.
                Defaults to ".".
            curl (bool, optional): If the curl model should be used set
                to True, see: https://doi.org/10.5194/wes-4-127-2019.
                Defaults to False.

        Returns:
            x (torch.tensor): Tensor of size (size, 3) with the flow
                conditions of the corresponding flow fields.
            y (torch.tensor): Tensor of size (size, image_size, image_size)
                with all generated flow fields, normalised by 12 m/s to
                help training.
        """
        # sample u, ti and yaw angle from a uniform distribution
        u_list = [round(random.uniform(u_range[0], u_range[1]), 2) for
                  i in range(0, size)]
        ti_list = [round(random.uniform(ti_range[0], ti_range[1]), 2) for
                   i in range(0, size)]
        yawn_list = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for
                     i in range(0, size)]
        # initialize FLORIS model using the json file
        if curl is False:
            floris_turbine = wfct.floris_interface.FlorisInterface(
                floris_init_path + "/FLORIS_input_gauss.json")
        else:
            floris_turbine = wfct.floris_interface.FlorisInterface(
                floris_init_path + "/FLORIS_input_curl.json")
        # initialize empty numpy arrays to store the 2d flow fields and
        # the corresponding u, ti and yaw values
        y = np.zeros((size, image_size, image_size))
        x = np.zeros((size, 3))
        # create train examples
        print("generate FLORIS data")
        for _ in range(0, size):
            if _ % 500 == 0:
                print(f"{_}/{size}")
            # set wind speed, ti and yaw angle for FLORIS model
            floris_turbine.reinitialize_flow_field(
                wind_speed=u_list[_],
                turbulence_intensity=ti_list[_])
            floris_turbine.change_turbine([0], {'yaw_angle': yawn_list[_]})
            # calculate the wake field
            floris_turbine.calculate_wake()
            # extract horizontal plane at hub height
            cut_plane = floris_turbine.get_hor_plane(
                height=90,
                x_resolution=image_size,
                y_resolution=image_size,
                x_bounds=[0, 3000],
                y_bounds=[-200, 200]).df.u.values.reshape(image_size,
                                                          image_size)
            # save the wind speed values of the plane at hub height and
            # the corresponding turbine stats
            y[_] = cut_plane
            x[_] = u_list[_], ti_list[_], yawn_list[_]
        # turn numpy arrays into pytorch tensors
        x = torch.tensor(x, dtype=torch.float)
        # The wind speeds are normalised by dividing by 12,
        # i.e. every value will be between 0-1 which helps training
        y = torch.tensor(y, dtype=torch.float)/12
        return x, y

    def error(self, x_eval, y_eval, device, image_size=163, normalisation=12):
        r"""
        Calculate the average pixel-wise percentage error of the model on
        an evaluation set. The error function is:
        error = 1/set_size *\sum_{n=0}^{set_size}(1/image_size**2 *
        \sum_{i=0}^{image_size**2}(100*abs(FLORIS_{n,i} - GAN_{n,i})/
        max(FLORIS_{n,i})))
        For a detailed explanation of this function please see the report
        in the ACSE9 repo.
        NOTE: the normalisation argument is accepted for interface
        compatibility but is not used -- the error is scale-invariant
        because each field is divided by its own maximum.
        """
        error_list = []
        # Use model to predict the wakes for the given conditions in x
        model_predict = self.forward(x_eval.to(device))
        for n in range(0, len(x_eval)):
            # Calculate the mean error between CNNwake output and FLORIS
            # for a given flow field using the function given above
            pixel_error = np.sum(abs(
                y_eval.detach().cpu().numpy()[n] -
                model_predict.squeeze(1)[n].detach().cpu().numpy()) /
                (torch.max(y_eval.detach()[n]).cpu().numpy()))
            # divide by number of pixels in array for a mean value
            pixel_error /= image_size * image_size
            error_list.append(pixel_error * 100)
        # return mean error
        return np.mean(error_list)

    def evaluate_model(self, set_size, u_range, ti_range, yaw_range,
                       image_size=163, device='cpu', normalisation=12,
                       florisjason_path="."):
        """
        Calculate the average pixel-wise percentage error of the model on
        a freshly generated, unseen FLORIS test set.

        Args:
            set_size (int): Nr. of samples to be used for testing.
            u_range (list): Bound of u values [u_min, u_max] used
            ti_range (list): Bound of TI values [TI_min, TI_max] used
            yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
            image_size (int, optional): Size of the flow field.
                Defaults to 163.
            device (str): Device to store and run the neural network on,
                either cpu or cuda.
            normalisation (int, optional): Normalisation of the CNN output.
                Defaults to 12.
            florisjason_path (str, optional): Location of the FLORIS json
                file. Defaults to ".".

        Returns:
            error (float): Error of model on test set
        """
        # Create a dataset to test the model on
        x_eval, y_eval = self.create_floris_dataset(
            set_size, image_size, u_range=u_range, ti_range=ti_range,
            yaw_range=yaw_range, floris_init_path=florisjason_path)
        # Use generated data set to calculate the error of CNNwake when
        # compared with the FLORIS output.
        # CONSISTENCY FIX: forward the normalisation argument instead of
        # the hard-coded literal 12 (same value by default).
        test_error = self.error(x_eval, y_eval, device,
                                image_size, normalisation=normalisation)
        return test_error

    def epoch_training(self, criterion, optimizer, dataloader, device):
        """
        Train the model for one epoch on the data provided by dataloader.
        The model is updated after each batch and the function returns the
        training loss of the last batch.

        Args:
            criterion (torch.nn.criterion): Loss function used to train model
            optimizer (torch.optim.Optimizer): Optimizer for gradient descent
            dataloader (torch.utils.DataLoader): Dataloader storing dataset
            device (str): Device on which model/data is stored, cpu or cuda

        Returns:
            training loss (float): Loss of last batch defined by criterion
        """
        # For all training data in epoch
        for real_images, label in dataloader:
            # move data to device
            real_images = real_images.to(device)
            label = label.to(device)
            # compute reconstructions of flow field using the CNN
            outputs = self.forward(label)
            # compute training reconstruction loss using the
            # loss function set
            train_loss = criterion(outputs, real_images)
            optimizer.zero_grad()  # Zero gradients of previous step
            train_loss.backward()  # compute accumulated gradients
            optimizer.step()       # Do optimizer step
        # return training loss of the last batch
        return train_loss.item()

    def load_model(self, path='.', device='cpu'):
        """
        Load model parameters from a .pt file into this instance.

        Args:
            path (str): path to saved model.
            device (torch.device): Device to load onto, cpu or cuda
        """
        # load the pretrained model
        self.load_state_dict(torch.load(path, map_location=device))

    def save_model(self, name='generator.pt'):
        """
        Save the current model parameters so they can be reused later.
        Needs to be saved as a .pt file.

        Args:
            name (str): name of the .pt file to save the model to
        """
        # Save current model parameters
        torch.save(self.state_dict(), name)
| 14,425 | 42.583082 | 79 | py |
Enhancement-Coded-Speech | Enhancement-Coded-Speech-master/CepsDomCNN_Train.py | #####################################################################################
# Training the CNN for cepstral domain approach III.
# Input:
# 1- Training input: Train_inputSet_g711.mat
# 2- Training target: Train_targetSet_g711.mat
# 3- Validation input: Validation_inputSet_g711.mat
# 4- Validation target: Validation_targetSet_g711.mat
# Output:
# 1- Trained CNN weights: cnn_weights_ceps_g711_best_example.h5
#####################################################################################
""" import os
import tensorflow as tf
from keras.engine.topology import Layer
from keras.models import Model
from keras.layers import Input, Add, Multiply, Average, Activation, LeakyReLU
from keras.layers.convolutional import Conv1D, MaxPooling1D, UpSampling1D, AveragePooling1D
from keras import backend as K
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, LearningRateScheduler
import keras.callbacks as cbs
from numpy import random
import scipy.io as sio
from sklearn import preprocessing
import math
import time """
import os
import time
import numpy as np
from numpy import random
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Add, Activation, LeakyReLU, Conv1D, MaxPooling1D, UpSampling1D
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler
def snr(y_true, y_pred):
    """Signal-to-Noise Ratio in dB: 10*log10(||y_true||^2 / ||y_pred - y_true||^2)."""
    signal_power = tf.math.reduce_sum(tf.math.square(y_true))
    noise_power = tf.math.reduce_sum(tf.math.square(y_pred - y_true))
    # tf has no log10, so convert the natural log by dividing by ln(10).
    return 10.0 * tf.math.log(signal_power / noise_power) / tf.math.log(10.0)
#####################################################################################
# 0. Setup
#####################################################################################
# Settings and CNN topology parameters
codec = "g711"  # codec tag used in the .mat data file names
fram_length = 32  # cepstral coefficients per frame fed to the CNN
n1 = 22 # F=22 in paper
n2 = 44 # 2F filters in the second encoder / first decoder stage
n3 = 22 # F filters at the bottleneck
N_cnn = 6 # N=6 in paper
# Training parameters
nb_epochs = 2  # maximum number of epochs (early stopping may end sooner)
batch_size = 16
learning_rate = 5e-4  # initial Adam learning rate
#####################################################################################
# 1. load data
#####################################################################################
print('> Loading data... ')
# Load Input Data (normalized coded-speech features, key 'inputSetNorm')
mat_input = "./data/Train_inputSet_g711.mat"
mat_input = os.path.normcase(mat_input)
x_train_noisy = sio.loadmat(mat_input)
x_train_noisy = x_train_noisy['inputSetNorm']
x_train_noisy = np.array(x_train_noisy)
# Load Input Data for Validation
mat_input_vali = "./data/Validation_inputSet_g711.mat"
mat_input_vali = os.path.normcase(mat_input_vali)
x_train_noisy_vali = sio.loadmat(mat_input_vali)
x_train_noisy_vali = x_train_noisy_vali['inputSetNorm']
x_train_noisy_vali = np.array(x_train_noisy_vali)
# Load Target Data (clean-speech targets, key 'targetSet')
mat_target = "./data/Train_targetSet_g711.mat"
mat_target = os.path.normcase(mat_target)
x_train = sio.loadmat(mat_target)
x_train = x_train['targetSet']
x_train = np.array(x_train)
# Load Target Data for Validation
mat_target_vali = "./data/Validation_targetSet_g711.mat"
mat_target_vali = os.path.normcase(mat_target_vali)
x_train_vali = sio.loadmat(mat_target_vali)
x_train_vali = x_train_vali['targetSet']
x_train_vali = np.array(x_train_vali)
# Randomization of Training Pairs: inputs and targets are stacked side by side
# so one row shuffle keeps each (input, target) pair aligned.
# NOTE: `random` here is numpy.random (see `from numpy import random` above),
# so this seed does control np.random.shuffle below.
random.seed(1024)
train = np.column_stack((x_train_noisy, x_train))
np.random.shuffle(train)
x_train_noisy = train[:, :fram_length]
x_train = train[:, fram_length:]
# Reshape of Training Pairs and validation Pairs to (samples, fram_length, 1)
# as expected by Conv1D.
x_train_noisy = np.reshape(x_train_noisy, (x_train_noisy.shape[0], x_train_noisy.shape[1], 1))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train_noisy_vali = np.reshape(x_train_noisy_vali, (x_train_noisy_vali.shape[0], x_train_noisy_vali.shape[1], 1))
x_train_vali = np.reshape(x_train_vali, (x_train_vali.shape[0], x_train_vali.shape[1], 1))
print('> Data Loaded. Compiling...')
#####################################################################################
# 2. define model
#####################################################################################
# Convolutional encoder-decoder with additive skip connections, operating on
# frames of `fram_length` coefficients with a single channel.
input_vec = Input(shape=(fram_length, 1))
# Encoder stage 1: two conv blocks, then downsample by 2.
c1 = Conv1D(n1, N_cnn, padding='same')(input_vec)
c1 = LeakyReLU(0.2)(c1)
c1 = Conv1D(n1, N_cnn, padding='same')(c1)
c1 = LeakyReLU(0.2)(c1)
x = MaxPooling1D(2)(c1)
# Encoder stage 2.
c2 = Conv1D(n2, N_cnn, padding='same')(x)
c2 = LeakyReLU(0.2)(c2)
c2 = Conv1D(n2, N_cnn, padding='same')(c2)
c2 = LeakyReLU(0.2)(c2)
x = MaxPooling1D(2)(c2)
# Bottleneck, then upsample back.
c3 = Conv1D(n3, N_cnn, padding='same')(x)
c3 = LeakyReLU(0.2)(c3)
x = UpSampling1D(2)(c3)
# Decoder stage 2 with skip connection from encoder stage 2.
c2_2 = Conv1D(n2, N_cnn, padding='same')(x)
c2_2 = LeakyReLU(0.2)(c2_2)
c2_2 = Conv1D(n2, N_cnn, padding='same')(c2_2)
c2_2 = LeakyReLU(0.2)(c2_2)
m1 = Add()([c2, c2_2])
m1 = UpSampling1D(2)(m1)
# Decoder stage 1 with skip connection from encoder stage 1.
c1_2 = Conv1D(n1, N_cnn, padding='same')(m1)
c1_2 = LeakyReLU(0.2)(c1_2)
c1_2 = Conv1D(n1, N_cnn, padding='same')(c1_2)
c1_2 = LeakyReLU(0.2)(c1_2)
m2 = Add()([c1, c1_2])
# Linear output layer: one enhanced coefficient frame.
decoded = Conv1D(1, N_cnn, padding='same', activation='linear')(m2)
model = Model(input_vec, decoded)
model.summary()
# FIX: use 'learning_rate' instead of the deprecated 'lr' kwarg of tf.keras optimizers.
adam = Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=adam, loss='mse', metrics=[snr])
#####################################################################################
# 3. Fit the model
#####################################################################################
# Stop criteria: halt when validation loss has not improved for 16 epochs.
stop_str = EarlyStopping(monitor='val_loss', patience=16, verbose=1, mode='min')
# Reduce learning rate: halve it when validation loss plateaus for 2 epochs.
# FIX: 'min_delta' replaces the deprecated 'epsilon' kwarg of tf.keras.
reduce_LR = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, mode='min', min_delta=0.0001, cooldown=0, min_lr=0)
# Save only best weights (lowest validation loss), checked once per epoch.
# FIX: save_freq='epoch' replaces the deprecated 'period=1' kwarg.
best_weights = "./data/cnn_weights_ceps_g711_example.h5"
best_weights = os.path.normcase(best_weights)
model_save = ModelCheckpoint(best_weights, monitor='val_loss', save_best_only=True, mode='min', save_weights_only=True, save_freq='epoch')
start = time.time()
print("> Training model " + "using Batch-size: " + str(batch_size) + ", Learning_rate: " + str(learning_rate) + "...")
hist = model.fit(x=x_train_noisy, y=x_train, epochs=nb_epochs, batch_size=batch_size, shuffle=True, initial_epoch=0,
                 callbacks=[reduce_LR, stop_str, model_save],
                 validation_data=(x_train_noisy_vali, x_train_vali)
                 )
print("> Saving Completed, Time : ", time.time() - start)
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
| 6,716 | 35.112903 | 151 | py |
Enhancement-Coded-Speech | Enhancement-Coded-Speech-master/CepsDomCNN_Test.py | #####################################################################################
# Use the trained CNN for cepstral domain approach III.
# Input:
# 1- CNN input: type_3_cnn_input_ceps.mat
# 2- Trained CNN weights: cnn_weights_ceps_g711_best.h5
# Output:
# 1- CNN output: type_3_cnn_output_ceps.mat
#####################################################################################
""" import os
from keras.models import Model
from keras.engine.topology import Layer
from keras.layers import Input, Add, Multiply, Average, Activation, LeakyReLU
from keras.layers import merge, Input, Dense, Flatten, BatchNormalization, Activation, LeakyReLU
from keras.layers.convolutional import Conv1D, MaxPooling1D, UpSampling1D, AveragePooling1D
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, TensorBoard, LearningRateScheduler
import keras.optimizers as optimizers
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
from sklearn import preprocessing
from weightnorm import AdamWithWeightnorm
from tensorflow.python.framework import ops
import math
import time """
import os
import numpy as np
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Add, Activation, LeakyReLU, Conv1D, MaxPooling1D, UpSampling1D
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler
def snr(y_true, y_pred):
    """Signal-to-Noise Ratio in dB: 10*log10(||y_true||^2 / ||y_pred - y_true||^2)."""
    signal_power = tf.math.reduce_sum(tf.math.square(y_true))
    noise_power = tf.math.reduce_sum(tf.math.square(y_pred - y_true))
    # tf has no log10, so convert the natural log by dividing by ln(10).
    return 10.0 * tf.math.log(signal_power / noise_power) / tf.math.log(10.0)
#####################################################################################
# 0. Setup
#####################################################################################
# Settings and CNN topology parameters -- must match the training script
# (CepsDomCNN_Train.py) so the saved weights fit this architecture.
fram_length = 32  # cepstral coefficients per frame fed to the CNN
n1 = 22 # F=22 in paper
n2 = 44 # 2F filters in the second encoder / first decoder stage
n3 = 22 # F filters at the bottleneck
N_cnn = 6 # N=6 in paper
#####################################################################################
# 2. define model
#####################################################################################
# Same encoder-decoder topology as the training script; weights are loaded below.
input_vec = Input(shape=(fram_length, 1))
# Encoder stage 1.
c1 = Conv1D(n1, N_cnn, padding='same')(input_vec)
c1 = LeakyReLU(0.2)(c1)
c1 = Conv1D(n1, N_cnn, padding='same')(c1)
c1 = LeakyReLU(0.2)(c1)
x = MaxPooling1D(2)(c1)
# Encoder stage 2.
c2 = Conv1D(n2, N_cnn, padding='same')(x)
c2 = LeakyReLU(0.2)(c2)
c2 = Conv1D(n2, N_cnn, padding='same')(c2)
c2 = LeakyReLU(0.2)(c2)
x = MaxPooling1D(2)(c2)
# Bottleneck, then upsample back.
c3 = Conv1D(n3, N_cnn, padding='same')(x)
c3 = LeakyReLU(0.2)(c3)
x = UpSampling1D(2)(c3)
# Decoder stage 2 with skip connection from encoder stage 2.
c2_2 = Conv1D(n2, N_cnn, padding='same')(x)
c2_2 = LeakyReLU(0.2)(c2_2)
c2_2 = Conv1D(n2, N_cnn, padding='same')(c2_2)
c2_2 = LeakyReLU(0.2)(c2_2)
m1 = Add()([c2, c2_2])
m1 = UpSampling1D(2)(m1)
# Decoder stage 1 with skip connection from encoder stage 1.
c1_2 = Conv1D(n1, N_cnn, padding='same')(m1)
c1_2 = LeakyReLU(0.2)(c1_2)
c1_2 = Conv1D(n1, N_cnn, padding='same')(c1_2)
c1_2 = LeakyReLU(0.2)(c1_2)
m2 = Add()([c1, c1_2])
# Linear output layer: one enhanced coefficient frame.
decoded = Conv1D(1, N_cnn, padding='same', activation='linear')(m2)
model = Model(input_vec, decoded)
model.summary()
# Load the best weights produced by training.
model.load_weights("./data/cnn_weights_ceps_g711_best.h5")
#####################################################################################
# 4. Test
#####################################################################################
print('> Loading Test data ... ')
# Normalized cepstral input features for the test set (key 'inputTestNorm').
mat_input = "./data/type_3_cnn_input_ceps.mat"
mat_input = os.path.normcase(mat_input)
x_test_noisy = sio.loadmat(mat_input)
x_test_noisy = x_test_noisy['inputTestNorm']
x_test_noisy = np.array(x_test_noisy)
# Add the channel dimension expected by Conv1D: (samples, fram_length, 1).
x_test_noisy = np.reshape(x_test_noisy,(x_test_noisy.shape[0], x_test_noisy.shape[1], 1))
predicted = model.predict(x_test_noisy)
# Save the enhanced frames for the MATLAB post-processing stage.
preOutput = "./data/type_3_cnn_output_ceps.mat"
preOutput = os.path.normcase(preOutput)
sio.savemat(preOutput, {'predictions': predicted})
| 4,173 | 31.866142 | 151 | py |
kl_sample | kl_sample-master/kl_sample/cosmo.py | """
Module containing all the relevant functions
to compute and manipulate cosmology.
Functions:
- get_cosmo_mask(params)
- get_cosmo_ccl(params)
- get_cls_ccl(params, cosmo, pz, ell_max)
- get_xipm_ccl(cosmo, cls, theta)
"""
import numpy as np
import pyccl as ccl
import kl_sample.reshape as rsh
import kl_sample.likelihood as lkl
import kl_sample.settings as set
# ------------------- Masks --------------------------------------------------#
def get_cosmo_mask(params):
    """ Infer from the cosmological parameters
        array which are the varying parameters.
    Args:
        params: array of (lower, fiducial, upper) triples per parameter.
    Returns:
        mask: boolean array, True where the parameter varies.
    """
    def _varies(bounds):
        lower, fiducial, upper = bounds[0], bounds[1], bounds[2]
        # Missing bounds mean the parameter is unconstrained, i.e. varying.
        if lower is None or upper is None:
            return True
        # A strict gap on either side of the fiducial value means it varies.
        return lower < fiducial or fiducial < upper
    return np.array([_varies(p) for p in params])
# ------------------- CCL related --------------------------------------------#
def get_cosmo_ccl(params):
    """ Get cosmo object.
    Args:
        params: array with cosmological parameters
            (h, omega_c h^2, omega_b h^2, ln(10^10 A_s), n_s, w0, wa).
    Returns:
        cosmo object from CCL.
    """
    hubble = params[0]
    # Convert physical densities (omega h^2) to density parameters (Omega).
    cosmo_kwargs = dict(
        h=hubble,
        Omega_c=params[1]/hubble**2.,
        Omega_b=params[2]/hubble**2.,
        A_s=(10.**(-10.))*np.exp(params[3]),
        n_s=params[4],
        w0=params[5],
        wa=params[6],
        transfer_function='boltzmann_class',
    )
    return ccl.Cosmology(**cosmo_kwargs)
def get_cls_ccl(params, cosmo, pz, ell_max, add_ia=False):
    """ Get theory Cl's.
    Args:
        params: array with cosmological parameters; params[7]/params[8]
            are the intrinsic-alignment amplitude/slope when add_ia.
        cosmo: cosmo object from CCL.
        pz: probability distribution for each redshift bin
            (row 0 is the z grid, rows 1.. are the per-bin dn/dz).
        ell_max: maximum multipole.
        add_ia: include an intrinsic-alignment bias in the tracers.
    Returns:
        array with Cl's, shape (ell_max+1, n_bins, n_bins).
    """
    # Local variables
    n_bins = len(pz)-1
    n_ells = ell_max+1
    # z and pz: first row is the redshift grid, the rest are the dn/dz.
    z = pz[0].astype(np.float64)
    prob_z = pz[1:].astype(np.float64)
    # If add_ia
    if add_ia:
        # Aligned ("red") fraction set to 1 at all redshifts.
        f_z = np.ones(len(z))
        # Bias
        # NOTE(review): looks like an NLA-style amplitude using set.C_1,
        # set.RHO_CRIT and set.L_I_OVER_L_0 -- confirm against settings.py.
        Omega_m = (params[1]+params[2])/params[0]**2.
        D_z = ccl.background.growth_factor(cosmo, 1./(1.+z))
        b_z = -params[7]*set.C_1*set.RHO_CRIT*Omega_m/D_z
        b_z = np.outer(set.L_I_OVER_L_0**params[8], b_z)
        # Tracers
        lens = np.array([
            ccl.WeakLensingTracer(
                cosmo,
                dndz=(z, prob_z[x]),
                ia_bias=(z, b_z[x]),
                red_frac=(z, f_z),
            ) for x in range(n_bins)])
    else:
        # Tracers
        lens = np.array([
            ccl.WeakLensingTracer(
                cosmo,
                dndz=(z, prob_z[x])
            ) for x in range(n_bins)])
    # Cl's: only the upper triangle is computed, the rest filled by symmetry.
    ell = np.arange(n_ells)
    cls = np.zeros((n_bins, n_bins, n_ells))
    for count1 in range(n_bins):
        for count2 in range(count1, n_bins):
            cls[count1, count2] = \
                ccl.angular_cl(cosmo, lens[count1], lens[count2], ell)
            cls[count2, count1] = cls[count1, count2]
    # Move the ell axis first: (n_ells, n_bins, n_bins).
    cls = np.transpose(cls, axes=[2, 0, 1])
    return cls
def get_xipm_ccl(cosmo, cls, theta):
    """ Get theory correlation function.
    Args:
        cosmo: cosmo object from CCL.
        cls: array of cls for each pair of bins, shape (n_ell, n_bins, n_bins).
        theta: array with angles for the correlation function.
    Returns:
        correlation function, shape (2, n_theta, n_bins, n_bins); the first
        axis is (xi_plus, xi_minus).
    """
    n_bins = cls.shape[-1]
    ells = np.arange(len(cls))
    # Evaluate xi_+ ('L+') and xi_- ('L-') for every bin pair and angle.
    xi_th = np.array([
        [[[ccl.correlation(cosmo, ells, cls[:, bin_a, bin_b], angle,
                           corr_type=corr_type, method='FFTLog')
           for angle in theta]
          for bin_b in range(n_bins)]
         for bin_a in range(n_bins)]
        for corr_type in ('L+', 'L-')])
    # Reorder axes to (pm, theta, bin1, bin2).
    return np.transpose(xi_th, axes=[0, 3, 1, 2])
# ------------------- KL related ---------------------------------------------#
def get_theory(var, full, mask, data, settings):
    """ Get theory correlation function or Cl's.
    Args:
        var: array containing the varying cosmo parameters.
        full: array containing all the cosmo parameters
            ((lower, fiducial, upper) triples; the fiducial is used
            for the non-varying ones).
        mask: array containing the mask for the cosmo parameters.
        data: dictionary with all the data used
        settings: dictionary with all the settings used
    Returns:
        flattened array with correlation function or Cl's, ready to be
        compared with the data vector.
    """
    # Local variables
    pz = data['photo_z']
    theta = data['theta_ell']
    ell_max = settings['ell_max']
    bp = settings['bp_ell']
    ell = np.arange(bp[-1, -1] + 1)
    nf = settings['n_fields']
    nb = settings['n_bins']
    # Merge in a single array varying and fixed parameters: fixed slots take
    # their fiducial value full[i][1], varying slots consume var in order.
    pars = np.empty(len(mask))
    count1 = 0
    for count2 in range(len(pars)):
        if not mask[count2]:
            pars[count2] = full[count2][1]
        else:
            pars[count2] = var[count1]
            count1 = count1+1
    # Get corr
    cosmo = get_cosmo_ccl(pars)
    if set.THEORY == 'CCL':
        corr = get_cls_ccl(pars, cosmo, pz, ell_max, add_ia=settings['add_ia'])
        if settings['space'] == 'real':
            corr = get_xipm_ccl(cosmo, corr, theta)
    elif set.THEORY == 'Camera':
        # Scale a precomputed Cl template by D(z)*Omega_m*sigma_8 per bin.
        corr = settings['cls_template']
        Om = (pars[1] + pars[2])/pars[0]**2.
        s8 = get_sigma_8(var, full, mask)
        Oms8 = np.zeros(len(set.Z_BINS))
        for nbin, bin in enumerate(set.Z_BINS):
            # Evaluate the growth factor at the bin's central redshift.
            z = (bin[0] + bin[1])/2.
            D = cosmo.growth_factor(1./(1. + z))
            Oms8[nbin] = D*Om*s8
        # Multiply twice the template by Oms8 array along the last two axes
        corr = mult_elementwiselastaxis(corr, Oms8)
        corr = np.moveaxis(corr, [-2], [-1])
        corr = mult_elementwiselastaxis(corr, Oms8)
    # Keep cls coupled or not (mode-coupling matrix from the mask).
    if set.KEEP_CELLS_COUPLED:
        corr = rsh.couple_cl(ell, corr, settings['mcm'], nf, nb, len(bp))
    else:
        corr = rsh.couple_decouple_cl(ell, corr, settings['mcm'], nf, nb,
                                      len(bp))
    # Apply KL (Karhunen-Loeve) compression if requested by the method.
    if settings['method'] in ['kl_off_diag', 'kl_diag']:
        corr = lkl.apply_kl(data['kl_t'], corr, settings)
    if settings['method'] == 'kl_diag':
        is_diag = True
    else:
        is_diag = False
    # Reshape corr to match the data-vector layout.
    if settings['space'] == 'real':
        corr = rsh.flatten_xipm(corr, settings)
        corr = rsh.mask_xipm(corr, data['mask_theta_ell'], settings)
    else:
        corr = rsh.mask_cl(corr, is_diag=is_diag)
        corr = rsh.unify_fields_cl(corr, data['cov_pf'], is_diag=is_diag,
                                   pinv=set.PINV)
        # Apply BNT if required
        if set.BNT:
            corr = apply_bnt(corr, data['bnt_mat'])
        corr = rsh.flatten_cl(corr, is_diag=is_diag)
    return corr
def get_sigma_8(var, full, mask):
    """Return sigma_8 for the cosmology built from the varying (var) and
    fixed (full, fiducial column) parameters selected through mask."""
    # Interleave: fixed slots take their fiducial value full[i][1],
    # varying slots consume the next element of var in order.
    var_iter = iter(var)
    pars = np.array([next(var_iter) if varying else fixed[1]
                     for fixed, varying in zip(full, mask)],
                    dtype=np.float64)
    # Cosmology
    cosmo = get_cosmo_ccl(pars)
    return ccl.sigma8(cosmo)
def mult_elementwiselastaxis(A, B):
    """Outer-multiply A and B, then keep only the diagonal of the last two
    axes; for 1-d B this is an elementwise product along A's last axis."""
    # np.multiply.outer already returns shape A.shape + B.shape.
    full_outer = np.multiply.outer(A, B)
    return np.diagonal(full_outer, axis1=-2, axis2=-1)
class BNT(object):
    """Build the BNT (nulling) transformation matrix from a cosmology and
    the per-bin redshift distributions."""

    def __init__(self, params, photo_z):
        cosmology = get_cosmo_ccl(params[:, 1])
        # Redshift grid and the matching comoving radial distances.
        self.z = photo_z[0]
        self.chi = cosmology.comoving_radial_distance(1./(1.+photo_z[0]))
        # One dn/dz per tomographic bin.
        self.n_i_list = photo_z[1:]
        self.nbins = len(self.n_i_list)

    def get_matrix(self):
        """Return the (nbins x nbins) lower-triangular BNT matrix."""
        # Per-bin normalization integrals and mean inverse distances.
        norms = [np.trapz(self.n_i_list[i], self.z)
                 for i in range(self.nbins)]
        inv_chi = [np.trapz(self.n_i_list[i] / self.chi, self.z)
                   for i in range(self.nbins)]
        bnt_matrix = np.eye(self.nbins)
        bnt_matrix[1, 0] = -1.
        # Each row i >= 2 mixes bins i-1 and i-2 so that both the norm and
        # the mean 1/chi of the combination cancel those of bin i.
        for i in range(2, self.nbins):
            system = np.array([[norms[i-1], norms[i-2]],
                               [inv_chi[i-1], inv_chi[i-2]]])
            rhs = -1. * np.array([norms[i], inv_chi[i]])
            coeffs = np.dot(np.linalg.inv(system), rhs)
            bnt_matrix[i, i-1] = coeffs[0]
            bnt_matrix[i, i-2] = coeffs[1]
        return bnt_matrix
def apply_bnt(cl, bnt):
    """Apply the BNT matrix to both trailing bin axes of cl."""
    # Contract the last axis with bnt, swap the bin axes, contract again.
    transformed = np.dot(cl, bnt)
    transformed = np.swapaxes(transformed, -1, -2)
    return np.dot(transformed, bnt)
| 8,962 | 27.453968 | 79 | py |
ilmart | ilmart-main/experiments/nrgam/nrgam_evaluate.py | #!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import pickle
import argparse
from tqdm import tqdm
from rankeval.metrics.ndcg import NDCG
from collections import defaultdict
import yahoo_dataset
import numpy as np
# Maps the short dataset names used as result keys to TFDS identifiers.
DATASET_DICT = {
    "web30k": "mslr_web/30k_fold1",
    "istella": "istella/s",
    "yahoo": "yahoo"
}
# Whether log1p feature normalization is applied per dataset (istella only);
# must match the normalization used at training time.
LOG_NORMALIZATION = {
    "web30k": False,
    "istella": True,
    "yahoo": False
}
BATCH_SIZE = 128
# Offset added before log1p; see the matching comment in the training script.
NORMALIZATION_CONSTANT = 10
def ds_transform(ds, log=False):
    """Prepare a TFDS ranking dataset: clip features, pad into batches and
    split into (features, labels) with -1 marking padded documents.

    Args:
        ds: tf.data.Dataset of per-query feature maps containing a "label" key.
        log: if True, shift features by NORMALIZATION_CONSTANT and apply log1p.
    Returns:
        Batched tf.data.Dataset of (feature_map, label) pairs.
    """
    # Clip pathological feature values at 10^6.
    ds = ds.map(
        lambda feature_map: {key: tf.where(value < 10 ** 6, value, 10 ** 6) for key, value in feature_map.items()})
    # Add a boolean mask marking real (non-padded) documents.
    ds = ds.map(lambda feature_map: {
        "_mask": tf.ones_like(feature_map["label"], dtype=tf.bool),
        **feature_map
    })
    ds = ds.padded_batch(batch_size=BATCH_SIZE)
    # Pull the label out; padded positions get label -1.
    ds = ds.map(lambda feature_map: (feature_map, tf.where(feature_map["_mask"], feature_map.pop("label"), -1.)))
    if log:
        # Shift then log1p (the shift keeps slightly-negative features valid).
        ds = ds.map(
            lambda feature_map, label: (
                {key: value + NORMALIZATION_CONSTANT for key, value in feature_map.items() if key != "_mask"}, label))
        ds = ds.map(
            lambda feature_map, label: (
                {key: tf.math.log1p(value) for key, value in feature_map.items() if key != "_mask"}, label))
    else:
        # Drop the helper mask, keep the features unchanged.
        ds = ds.map(
            lambda feature_map, label: ({key: value for key, value in feature_map.items() if key != "_mask"}, label))
    return ds
def compute_ndcg_results(batch_results, ds_test_y, cutoffs):
    """Compute per-query NDCG@k for every cutoff.

    Args:
        batch_results: list (one entry per batch) of per-query score arrays,
            aligned with the batches of ds_test_y.
        ds_test_y: batched dataset of padded relevance labels (-1 = padding).
        cutoffs: iterable of NDCG cutoffs (e.g. [1, 5, 10]).
    Returns:
        dict mapping cutoff -> list of per-query NDCG values.
    """
    ndcg_results = defaultdict(list)
    for batch_id, batch_y_true in tqdm(enumerate(ds_test_y)):
        for query_in_batch, y_true_padded in enumerate(batch_y_true):
            pad_mask = np.asarray(y_true_padded == -1)
            # FIX: np.argmax returns 0 when no element is -1 (a query that
            # fills the whole padded length), which previously truncated
            # such queries to an empty label array.
            if pad_mask.any():
                start_padding_index = int(np.argmax(pad_mask))
            else:
                start_padding_index = len(pad_mask)
            y_true = y_true_padded[:start_padding_index].numpy()
            y_pred = np.array(batch_results[batch_id][query_in_batch][:start_padding_index])
            for cutoff in cutoffs:
                ndcg = NDCG(cutoff=cutoff, no_relevant_results=1, implementation="exp")
                ndcg_results[cutoff].append(ndcg.eval_per_query(y_true, y_pred))
    return ndcg_results
def main():
    """Evaluate the saved NRGAM models on each dataset's test split and
    pickle the per-query NDCG results."""
    parser = argparse.ArgumentParser(
        description="Evaluate the accuracy of Neural Rank GAM for the three dataset: istella, web30k, yahoo")
    parser.add_argument("-base_dir", default="../best_models/nrgam", type=str,
                        help="Base path where the models are saved")
    # FIX: help text was copy-pasted from the training script and described
    # the wrong option.
    parser.add_argument("-output_file", default="../results/ndcg/nrgam.pickle", type=str,
                        help="Path of the output pickle file storing the per-query NDCG results")
    args = parser.parse_args()
    base_path = args.base_dir
    model_paths = {
        "istella": f"{base_path}/istella_model",
        "web30k": f"{base_path}/web30k_model",
        "yahoo": f"{base_path}/yahoo_model",
    }
    # Load every saved model once.
    best_tf_models = {}
    for name, path in model_paths.items():
        best_tf_models[name] = tf.keras.models.load_model(path)
    # Build the test split for each dataset with the training-time normalization.
    test_datasets = {}
    for name in model_paths.keys():
        test_datasets[name] = ds_transform(tfds.load(DATASET_DICT[name], split="test"), log=LOG_NORMALIZATION[name])
    ndcgs_nrgam = {}
    cutoffs = [1, 5, 10]
    for name, model in best_tf_models.items():
        ds_test_y = test_datasets[name].map(lambda feature_map, label: label)
        ds_test_X = test_datasets[name].map(lambda feature_map, label: feature_map)
        batch_results = [model.predict(batch_sample) for batch_sample in tqdm(ds_test_X)]
        ndcgs_nrgam[name] = compute_ndcg_results(batch_results, ds_test_y, cutoffs)
    with open(args.output_file, "wb") as f:
        pickle.dump(ndcgs_nrgam, f)
if __name__ == '__main__':
    main()
| 3,771 | 34.92381 | 118 | py |
ilmart | ilmart-main/experiments/nrgam/nrgam_train.py | import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import argparse
import pickle
from pathlib import Path
# Cap TF's inter/intra-op thread pools (sized for the training machine).
tf.config.threading.set_inter_op_parallelism_threads(40)
tf.config.threading.set_intra_op_parallelism_threads(40)
# Maps TFDS dataset identifiers to the short names used for output files.
# NOTE(review): the name is misspelled (DATSET) but used consistently.
DATSET_DICT = {
    "mslr_web/30k_fold1": "web30k",
    "istella/s": "istella",
    "yahoo": "yahoo_dataset"
}
# Whether log1p feature normalization is applied per dataset (istella only).
LOG_NORMALIZATION = {
    "mslr_web/30k_fold1": False,
    "istella/s": True,
    "yahoo": False
}
LEARNING_RATE = 0.05  # Adagrad learning rate
HIDDEN_LAYERS = [16, 8]  # per-feature sub-network hidden layer sizes
BATCH_SIZE = 128
# The minimum value in istella for 4 features (50, 134, 148, 176) could be slightly less than 0,
# and to avoid numerical issue with the log1p transformation we added a constant value to each feature.
NORMALIZATION_CONSTANT = 10
LOSS = "approx_ndcg_loss"  # differentiable NDCG surrogate from tf-ranking
def ds_transform(ds, log=False):
    """Prepare a TFDS ranking dataset: clip features, pad into batches and
    split into (features, labels) with -1 marking padded documents.
    Duplicated in nrgam_evaluate.py -- keep the two copies in sync.

    Args:
        ds: tf.data.Dataset of per-query feature maps containing a "label" key.
        log: if True, shift features by NORMALIZATION_CONSTANT and apply log1p.
    Returns:
        Batched tf.data.Dataset of (feature_map, label) pairs.
    """
    # Clip pathological feature values at 10^6.
    ds = ds.map(
        lambda feature_map: {key: tf.where(value < 10 ** 6, value, 10 ** 6) for key, value in feature_map.items()})
    # Add a boolean mask marking real (non-padded) documents.
    ds = ds.map(lambda feature_map: {
        "_mask": tf.ones_like(feature_map["label"], dtype=tf.bool),
        **feature_map
    })
    ds = ds.padded_batch(batch_size=BATCH_SIZE)
    # Pull the label out; padded positions get label -1.
    ds = ds.map(lambda feature_map: (feature_map, tf.where(feature_map["_mask"], feature_map.pop("label"), -1.)))
    if log:
        # Shift then log1p (the shift keeps slightly-negative features valid).
        ds = ds.map(
            lambda feature_map, label: (
                {key: value + NORMALIZATION_CONSTANT for key, value in feature_map.items() if key != "_mask"}, label))
        ds = ds.map(
            lambda feature_map, label: (
                {key: tf.math.log1p(value) for key, value in feature_map.items() if key != "_mask"}, label))
    else:
        # Drop the helper mask, keep the features unchanged.
        ds = ds.map(
            lambda feature_map, label: ({key: value for key, value in feature_map.items() if key != "_mask"}, label))
    return ds
def init_model(feat_names, initial_model):
    """Load a saved model to continue training, or build a fresh GAM ranker.

    Args:
        feat_names: list of feature-column names present in the dataset.
        initial_model: path of a saved model to resume from, or None.
    Returns:
        A compiled tf.keras ranking model.
    """
    if initial_model is not None:
        model = tf.keras.models.load_model(initial_model)
        print(f"Model correctly loaded from {initial_model}")
    else:
        # One numeric column per feature; missing values default to 0.
        feat_cols = {name: tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)
                     for name in feat_names}
        # Generalized Additive Model ranking network: one small sub-network
        # per feature, combined additively.
        network = tfr.keras.canned.GAMRankingNetwork(
            context_feature_columns=None,
            example_feature_columns=feat_cols,
            example_hidden_layer_dims=HIDDEN_LAYERS,
            activation=tf.nn.relu,
            use_batch_norm=True)
        loss = tfr.keras.losses.get(LOSS)
        metrics = tfr.keras.metrics.default_keras_metrics()
        optimizer = tf.keras.optimizers.Adagrad(learning_rate=LEARNING_RATE)
        model = tfr.keras.model.create_keras_model(network=network,
                                                   loss=loss,
                                                   metrics=metrics,
                                                   optimizer=optimizer,
                                                   size_feature_name=None)
    return model
def train_eval(ds_train, ds_vali, ds_test, name, initial_model, epochs=2000, patience=100, base_dir="."):
    """Train the model with early stopping, save it, then evaluate on the test set.

    Args:
        ds_train, ds_vali, ds_test: batched (features, label) datasets.
        name: short dataset name used to build output file names.
        initial_model: saved-model path to resume from, or None.
        epochs: maximum number of training epochs.
        patience: epochs without val NDCG@10 improvement before stopping.
        base_dir: directory where model, history and eval results are written.
    """
    # NOTE(review): checkpoints go to the current working directory,
    # not base_dir -- presumably intentional, verify.
    CHECKPOINTS_FOLDER = f"{name}_checkpoints"
    # Infer the feature names from the first batch of the training set.
    feat_names = list(list(ds_train.take(1))[0][0].keys())
    model = init_model(feat_names, initial_model)
    # Early stopping on validation NDCG@10 (maximize), restoring best weights.
    early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_metric/ndcg_10',
                                                               patience=patience,
                                                               mode="max",
                                                               restore_best_weights=True)
    Path(CHECKPOINTS_FOLDER).mkdir(parents=True, exist_ok=True)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINTS_FOLDER)
    history = model.fit(ds_train, epochs=epochs, validation_data=ds_vali,
                        callbacks=[early_stopping_callback, checkpoint_callback])
    # Persist the final model, the training history and the test metrics.
    model.save(f"{base_dir}/{name}_model")
    with open(f"{base_dir}/{name}_history.pickle", "wb") as f:
        pickle.dump(history.history, f)
    eval_dict = model.evaluate(ds_test, return_dict=True)
    with open(f"{base_dir}/{name}_eval_dict.pickle", "wb") as f:
        pickle.dump(eval_dict, f)
def main():
    """Parse CLI arguments, load the dataset splits and run train_eval."""
    parser = argparse.ArgumentParser(description="Train results of Neural Rank Gam.")
    parser.add_argument("dataset", metavar="dataset", type=str,
                        choices=["mslr_web/30k_fold1", "istella/s", "yahoo"],
                        help="""
                        Dataset to be used during training.
                        Possible choice to replicate the results:
                        - mslr_web/30k_fold1
                        - istella/s
                        - yahoo
                        """)
    parser.add_argument("-keep_training", default=None, type=str, help="Path of the model to continue to train")
    # FIX: help text was copy-pasted from -keep_training and described the
    # wrong option.
    parser.add_argument("-base_dir", default="../best_models/nrgam", type=str,
                        help="Base directory where the trained model, history and evaluation results are saved")
    args = parser.parse_args()
    Path(args.base_dir).mkdir(parents=True, exist_ok=True)
    # Load the three splits with the dataset's required normalization.
    ds_train = ds_transform(tfds.load(args.dataset, split="train"), log=LOG_NORMALIZATION[args.dataset])
    ds_vali = ds_transform(tfds.load(args.dataset, split="vali"), log=LOG_NORMALIZATION[args.dataset])
    ds_test = ds_transform(tfds.load(args.dataset, split="test"), log=LOG_NORMALIZATION[args.dataset])
    train_eval(ds_train, ds_vali, ds_test, DATSET_DICT[args.dataset], args.keep_training, base_dir=args.base_dir)
if __name__ == '__main__':
    main()
| 5,534 | 39.698529 | 118 | py |
pytorch-darknet19 | pytorch-darknet19-master/demo/darknet19_demo.py | import numpy as np
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from model import darknet
def main():
    """Run pretrained Darknet-19 on the sample images and show the top-1 class."""
    # Human-readable ImageNet class names, index-aligned with the 1000 outputs.
    # FIX: use a context manager so the file handle is closed (was leaked).
    with open("demo/imagenet.shortnames.list", 'r') as f:
        imageNet_label = [line.strip() for line in f]
    dataset = dset.ImageFolder(root="demo/samples/",
                               transform=transforms.Compose([
                                   transforms.Resize((448, 448)),
                                   transforms.ToTensor()
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
    darknet19 = darknet.Darknet19(pretrained=True)
    darknet19.eval()
    # Inference only: disable gradient tracking, and call the module itself
    # (FIX: calling .forward() directly bypasses nn.Module hooks).
    with torch.no_grad():
        for data, _ in dataloader:
            output = darknet19(data)
            answer = int(torch.argmax(output))
            print("Class: {}({})".format(imageNet_label[answer],answer))
            # Show the image in HWC order for matplotlib.
            plt.imshow(np.array(np.transpose(data[0], (1, 2, 0))))
            plt.show()
if __name__ == "__main__":
    main()
| 995 | 31.129032 | 90 | py |
pytorch-darknet19 | pytorch-darknet19-master/base/base_model.py | import logging
import torch.nn as nn
import numpy as np
class BaseModel(nn.Module):
    """
    Base class for all models
    """
    def __init__(self):
        super(BaseModel, self).__init__()
        # Per-class logger so each subclass logs under its own name.
        self.logger = logging.getLogger(self.__class__.__name__)

    def forward(self, *input):
        """
        Forward pass logic

        :return: Model output
        """
        raise NotImplementedError

    def _count_trainable(self):
        """Number of trainable (requires_grad) parameters in the model."""
        trainable = (p for p in self.parameters() if p.requires_grad)
        return sum(np.prod(p.size()) for p in trainable)

    def summary(self):
        """
        Model summary
        """
        self.logger.info('Trainable parameters: {}'.format(self._count_trainable()))
        self.logger.info(self)

    def __str__(self):
        """
        Model prints with number of trainable parameters
        """
        return super(BaseModel, self).__str__() + '\nTrainable parameters: {}'.format(self._count_trainable())
| 1,076 | 26.615385 | 93 | py |
pytorch-darknet19 | pytorch-darknet19-master/model/darknet.py | from collections import OrderedDict
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from base import BaseModel
# URL of the pretrained Darknet-19 ImageNet weights loaded via model_zoo.
model_paths = {
    'darknet19': 'https://s3.ap-northeast-2.amazonaws.com/deepbaksuvision/darknet19-deepBakSu-e1b3ec1e.pth'
}
class GlobalAvgPool2d(nn.Module):
    """Global average pooling: (N, C, H, W) -> (N, C) by averaging over H and W."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        # FIX: read sizes from the tensor itself instead of the deprecated
        # pre-0.4 `.data` attribute (which detaches from autograd).
        n, c, h, w = x.size()
        # Pool over the full HxW window, then drop the singleton spatial dims.
        x = F.avg_pool2d(x, (h, w))
        return x.view(n, c)
class Darknet19(BaseModel):
    """Darknet-19 classifier (YOLOv2 backbone): conv/bn/leaky-ReLU blocks with
    five 2x2 max-pools, followed by a 1x1 conv to 1000 classes, global average
    pooling and softmax. The OrderedDict layer names must stay unchanged --
    they are the keys of the pretrained state_dict loaded below."""
    def __init__(self, pretrained=True):
        # pretrained: if True, download and load the ImageNet weights
        # from model_paths['darknet19'].
        super(Darknet19, self).__init__()
        self.features = nn.Sequential(OrderedDict([
            # Stage 1: 3 -> 32 channels, downsample x2.
            ('layer1', nn.Sequential(OrderedDict([
                ('conv1_1', nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn1_1', nn.BatchNorm2d(32)),
                ('leaky1_1', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool1', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 2: 32 -> 64 channels, downsample x2.
            ('layer2', nn.Sequential(OrderedDict([
                ('conv2_1', nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn2_1', nn.BatchNorm2d(64)),
                ('leaky2_1', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool2', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 3: 3x3 / 1x1 / 3x3 bottleneck pattern, downsample x2.
            ('layer3', nn.Sequential(OrderedDict([
                ('conv3_1', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn3_1', nn.BatchNorm2d(128)),
                ('leaky3_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv3_2', nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn3_2', nn.BatchNorm2d(64)),
                ('leaky3_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv3_3', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn3_3', nn.BatchNorm2d(128)),
                ('leaky3_3', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool3', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 4: same bottleneck pattern at 256 channels.
            ('layer4', nn.Sequential(OrderedDict([
                ('conv4_1', nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn4_1', nn.BatchNorm2d(256)),
                ('leaky4_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv4_2', nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn4_2', nn.BatchNorm2d(128)),
                ('leaky4_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv4_3', nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn4_3', nn.BatchNorm2d(256)),
                ('leaky4_3', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool4', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 5: five-conv bottleneck block at 512 channels.
            ('layer5', nn.Sequential(OrderedDict([
                ('conv5_1', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_1', nn.BatchNorm2d(512)),
                ('leaky5_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_2', nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn5_2', nn.BatchNorm2d(256)),
                ('leaky5_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_3', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_3', nn.BatchNorm2d(512)),
                ('leaky5_3', nn.LeakyReLU(0.1, inplace=True)),
                # NOTE(review): padding=1 on a 1x1 conv grows the feature map
                # by 2 in each spatial dim; canonical Darknet-19 uses padding=0
                # here. The pretrained weights were trained with this layout,
                # so it is kept as-is -- confirm before "fixing".
                ('conv5_4', nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=1, bias=False)),
                ('bn5_4', nn.BatchNorm2d(256)),
                ('leaky5_4', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_5', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_5', nn.BatchNorm2d(512)),
                ('leaky5_5', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool5', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 6: five-conv bottleneck block at 1024 channels (no pool).
            ('layer6', nn.Sequential(OrderedDict([
                ('conv6_1', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_1', nn.BatchNorm2d(1024)),
                ('leaky6_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_2', nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn6_2', nn.BatchNorm2d(512)),
                ('leaky6_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_3', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_3', nn.BatchNorm2d(1024)),
                ('leaky6_3', nn.LeakyReLU(0.1, inplace=True)),
                # NOTE(review): same padding=1 on a 1x1 conv as conv5_4 above.
                ('conv6_4', nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=1, bias=False)),
                ('bn6_4', nn.BatchNorm2d(512)),
                ('leaky6_4', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_5', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_5', nn.BatchNorm2d(1024)),
                ('leaky6_5', nn.LeakyReLU(0.1, inplace=True))
            ])))
        ]))
        # Head: 1x1 conv to 1000 class maps, global average pool, softmax.
        self.classifier = nn.Sequential(OrderedDict([
            ('conv7_1', nn.Conv2d(1024, 1000, kernel_size=(1, 1), stride=(1, 1))),
            ('globalavgpool', GlobalAvgPool2d()),
            ('softmax', nn.Softmax(dim=1))
        ]))

        if pretrained:
            # Downloads (and caches) the weights from S3 on first use.
            self.load_state_dict(model_zoo.load_url(model_paths['darknet19'], progress=True))
            print('Model is loaded')

    def forward(self, x):
        """Return class probabilities of shape (N, 1000) for input images x."""
        out = self.features(x)
        out = self.classifier(out)
        return out
| 5,509 | 46.094017 | 107 | py |
RBNN | RBNN-master/imagenet/main.py | import argparse
import os
import time
import logging
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import models_cifar
import models_imagenet
import numpy as np
from torch.autograd import Variable
from utils.options import args
from utils.common import *
from modules import *
from datetime import datetime
import dataset
def main():
    """End-to-end training/evaluation driver for RBNN.

    Reads everything from the module-level ``args`` namespace, builds the
    model/dataloaders/optimizer, then either runs a one-off evaluation
    (``args.evaluate``) or the full training loop with checkpointing.
    Mutates the module-level globals ``best_prec1``, ``best_prec5`` and
    ``conv_modules``.
    """
    global args, best_prec1, best_prec5, conv_modules
    best_prec1 = 0
    best_prec5 = 0
    # Log roughly every 500 "reference" iterations of batch size 256,
    # rescaled to the actual batch size.
    args.print_freq=int(256/args.batch_size*500)
    random.seed(args.seed)
    if args.evaluate:
        args.results_dir = '/tmp'
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not args.resume:
        # Fresh run: dump the full argument namespace for reproducibility.
        with open(os.path.join(save_path,'config.txt'), 'w') as args_file:
            args_file.write(str(datetime.now())+'\n\n')
            for args_n,args_v in args.__dict__.items():
                args_v = '' if not args_v and not isinstance(args_v,int) else args_v
                args_file.write(str(args_n)+': '+str(args_v)+'\n')
        setup_logging(os.path.join(save_path, 'logger.log'))
        logging.info("saving to %s", save_path)
        logging.debug("run arguments: %s", args)
    else:
        # Resumed run: append to the existing log file.
        setup_logging(os.path.join(save_path, 'logger.log'), filemode='a')
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    if 'cuda' in args.type:
        # args.gpus is re-bound from a comma-separated string to a list of ints.
        args.gpus = [int(i) for i in args.gpus.split(',')]
        cudnn.benchmark = True
    else:
        args.gpus = None
    # Pick dataset-specific class count and model namespace.
    if args.dataset=='tinyimagenet':
        num_classes=200
        model_zoo = 'models_imagenet.'
    elif args.dataset=='imagenet':
        num_classes=1000
        model_zoo = 'models_imagenet.'
    elif args.dataset=='cifar10':
        num_classes=10
        model_zoo = 'models_cifar.'
    elif args.dataset=='cifar100':
        num_classes=100
        model_zoo = 'models_cifar.'
    # NOTE(review): on the CPU path args.gpus is None, so len(args.gpus)
    # below would raise — this code effectively assumes a CUDA run.
    if len(args.gpus)==1:
        model = eval(model_zoo+args.model)(num_classes=num_classes).cuda()
    else:
        model = nn.DataParallel(eval(model_zoo+args.model)(num_classes=num_classes))
    if not args.resume:
        logging.info("creating model %s", args.model)
        logging.info("model structure: %s", model)
        num_parameters = sum([l.nelement() for l in model.parameters()])
        logging.info("number of parameters: %d", num_parameters)
    # evaluate
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            logging.error('invalid checkpoint: {}'.format(args.evaluate))
        else:
            checkpoint = torch.load(args.evaluate)
            if len(args.gpus)>1:
                # DataParallel expects 'module.'-prefixed keys.
                checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         args.evaluate, checkpoint['epoch'])
    elif args.resume:
        checkpoint_file = os.path.join(save_path,'checkpoint.pth.tar')
        if os.path.isdir(checkpoint_file):
            checkpoint_file = os.path.join(
                checkpoint_file, 'model_best.pth.tar')
        if os.path.isfile(checkpoint_file):
            checkpoint = torch.load(checkpoint_file)
            if len(args.gpus)>1:
                checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch'] - 1
            best_prec1 = checkpoint['best_prec1']
            best_prec5 = checkpoint['best_prec5']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         checkpoint_file, checkpoint['epoch'])
        else:
            logging.error("no checkpoint found at '%s'", args.resume)
    criterion = nn.CrossEntropyLoss().cuda()
    criterion = criterion.type(args.type)
    model = model.type(args.type)
    # Evaluation-only path: build the val loader, validate once, and return.
    if args.evaluate:
        if args.use_dali:
            val_loader = dataset.get_imagenet(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
        else:
            val_loader = dataset.get_imagenet_torch(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                device_id='cuda:0'
            )
        with torch.no_grad():
            val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, 0)
        logging.info('\n Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(val_loss=val_loss, val_prec1=val_prec1, val_prec5=val_prec5))
        return
    # Training path: build train/val loaders (DALI or torchvision for
    # ImageNet, generic load_data for the CIFAR/MNIST-style datasets).
    if args.dataset=='imagenet':
        if args.use_dali:
            train_loader = dataset.get_imagenet(
                type='train',
                image_dir=args.data_path,
                batch_size=args.batch_size,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
            val_loader = dataset.get_imagenet(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
        else:
            train_loader = dataset.get_imagenet_torch(
                type='train',
                image_dir=args.data_path,
                batch_size=args.batch_size,
                num_threads=args.workers,
                device_id='cuda:0',
            )
            val_loader = dataset.get_imagenet_torch(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                device_id='cuda:0'
            )
    else:
        train_loader, val_loader = dataset.load_data(
            dataset=args.dataset,
            data_path=args.data_path,
            batch_size=args.batch_size,
            batch_size_test=args.batch_size_test,
            num_workers=args.workers)
    optimizer = torch.optim.SGD([{'params':model.parameters(),'initial_lr':args.lr}], args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    def cosin(i,T,emin=0,emax=0.01):
        "customized cos-lr"
        # Cosine interpolation from emax down to emin over T steps.
        return emin+(emax-emin)/2 * (1+np.cos(i*np.pi/T))
    if args.resume:
        # Re-derive the lr the cosine schedule would have reached at start_epoch.
        for param_group in optimizer.param_groups:
            param_group['lr'] = cosin(args.start_epoch-args.warm_up*4, args.epochs-args.warm_up*4,0, args.lr)
    if args.lr_type == 'cos':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs-args.warm_up*4, eta_min = 0, last_epoch=args.start_epoch-args.warm_up*4)
    elif args.lr_type == 'step':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_decay_step, gamma=0.1, last_epoch=-1)
    if not args.resume:
        logging.info("criterion: %s", criterion)
        logging.info('scheduler: %s', lr_scheduler)
    def cpt_tk(epoch):
        "compute t&k in back-propagation"
        # t grows log-linearly from Tmin to Tmax over the run; k = max(1/t, 1)
        # scales the surrogate gradient in the binarization Functions.
        T_min, T_max = torch.tensor(args.Tmin).float(), torch.tensor(args.Tmax).float()
        Tmin, Tmax = torch.log10(T_min), torch.log10(T_max)
        t = torch.tensor([torch.pow(torch.tensor(10.), Tmin + (Tmax - Tmin) / args.epochs * epoch)]).float()
        k = max(1/t,torch.tensor(1.)).float()
        return t, k
    #* setup conv_modules.epoch
    conv_modules=[]
    for name,module in model.named_modules():
        if isinstance(module,nn.Conv2d):
            conv_modules.append(module)
    for epoch in range(args.start_epoch+1, args.epochs):
        time_start = datetime.now()
        #*warm up
        if args.warm_up and epoch <5:
            # Linear lr warm-up over the first 5 epochs.
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr * (epoch+1) / 5
        for param_group in optimizer.param_groups:
            logging.info('lr: %s', param_group['lr'])
        #* compute t/k in back-propagation
        t,k = cpt_tk(epoch)
        for name,module in model.named_modules():
            if isinstance(module,nn.Conv2d):
                module.k = k.cuda()
                module.t = t.cuda()
        # Telling each conv its epoch enables the rotation update in
        # BinarizeConv2d.forward (epoch == -1 disables it).
        for module in conv_modules:
            module.epoch = epoch
        # train
        train_loss, train_prec1, train_prec5 = train(
            train_loader, model, criterion, epoch, optimizer)
        #* adjust Lr
        if epoch >= 4 * args.warm_up:
            lr_scheduler.step()
        # evaluate
        with torch.no_grad():
            for module in conv_modules:
                module.epoch = -1
            val_loss, val_prec1, val_prec5 = validate(
                val_loader, model, criterion, epoch)
        # remember best prec
        is_best = val_prec1 > best_prec1
        if is_best:
            best_prec1 = max(val_prec1, best_prec1)
            best_prec5 = max(val_prec5, best_prec5)
            best_epoch = epoch
            best_loss = val_loss
        # save model
        if epoch % 1 == 0:
            # Unwrap DataParallel before saving so keys are prefix-free.
            model_state_dict = model.module.state_dict() if len(args.gpus) > 1 else model.state_dict()
            model_parameters = model.module.parameters() if len(args.gpus) > 1 else model.parameters()
            save_checkpoint({
                'epoch': epoch + 1,
                'model': args.model,
                'state_dict': model_state_dict,
                'best_prec1': best_prec1,
                'best_prec5': best_prec5,
                'parameters': list(model_parameters),
            }, is_best, path=save_path)
        if args.time_estimate > 0 and epoch % args.time_estimate==0:
            time_end = datetime.now()
            cost_time,finish_time = get_time(time_end-time_start,epoch,args.epochs)
            logging.info('Time cost: '+cost_time+'\t'
                         'Time of Finish: '+finish_time)
        logging.info('\n Epoch: {0}\t'
                     'Training Loss {train_loss:.4f} \t'
                     'Training Prec@1 {train_prec1:.3f} \t'
                     'Training Prec@5 {train_prec5:.3f} \t'
                     'Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(epoch + 1, train_loss=train_loss, val_loss=val_loss,
                             train_prec1=train_prec1, val_prec1=val_prec1,
                             train_prec5=train_prec5, val_prec5=val_prec5))
    logging.info('*'*50+'DONE'+'*'*50)
    # NOTE(review): best_epoch/best_loss are only bound if some epoch improved
    # best_prec1; a run with no improvement would raise NameError here.
    logging.info('\n Best_Epoch: {0}\t'
                 'Best_Prec1 {prec1:.4f} \t'
                 'Best_Prec5 {prec5:.4f} \t'
                 'Best_Loss {loss:.3f} \t'
                 .format(best_epoch+1, prec1=best_prec1, prec5=best_prec5, loss=best_loss))
def forward(data_loader, model, criterion, epoch=0, training=True, optimizer=None):
    """Run one epoch over *data_loader*, training if *training* is True.

    Uses the module-level ``args`` and ``conv_modules`` globals. Two nearly
    identical branches handle DALI iterators (``args.use_dali``) and plain
    PyTorch dataloaders.

    Returns:
        (average loss, average top-1 accuracy, average top-5 accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    if args.use_dali:
        for i, batch_data in enumerate(data_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            # After the first batch, switch convs out of rotation-update mode.
            if i==1 and training:
                for module in conv_modules:
                    module.epoch=-1
            # DALI yields a list of dicts with 'data' and 'label' keys.
            inputs = batch_data[0]['data']
            target = batch_data[0]['label'].squeeze().long()
            batchsize = args.batch_size if training else args.batch_size_test
            len_dataloader = int(np.ceil(data_loader._size/batchsize))
            if args.gpus is not None:
                inputs = inputs.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            input_var = Variable(inputs.type(args.type))
            target_var = Variable(target)
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            if type(output) is list:
                output = output[0]
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            if training:
                # compute gradient
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # NOTE(review): freq test is on samples seen (i*batchsize), unlike
            # the torch branch below which tests the raw iteration index.
            if i*batchsize % args.print_freq == 0:
                logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                             'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                                 epoch, i*batchsize, data_loader._size,
                                 phase='TRAINING' if training else 'EVALUATING',
                                 batch_time=batch_time,
                                 data_time=data_time, loss=losses,
                                 top1=top1, top5=top5))
    else:
        for i, (inputs, target) in enumerate(data_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            if i==1 and training:
                for module in conv_modules:
                    module.epoch=-1
            if args.gpus is not None:
                inputs = inputs.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            input_var = Variable(inputs.type(args.type))
            target_var = Variable(target)
            batchsize = args.batch_size if training else args.batch_size_test
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            if type(output) is list:
                output = output[0]
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            if training:
                # compute gradient
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                             'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                                 epoch, i*batchsize, len(data_loader)*batchsize,
                                 phase='TRAINING' if training else 'EVALUATING',
                                 batch_time=batch_time,
                                 data_time=data_time, loss=losses,
                                 top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def train(data_loader, model, criterion, epoch, optimizer):
    """Run one training epoch: switch *model* to train mode and delegate
    to `forward` with gradient updates enabled."""
    model.train()
    result = forward(
        data_loader, model, criterion, epoch, training=True, optimizer=optimizer)
    return result
def validate(data_loader, model, criterion, epoch):
    """Run one evaluation epoch: switch *model* to eval mode and delegate
    to `forward` with no optimizer."""
    model.eval()
    result = forward(
        data_loader, model, criterion, epoch, training=False, optimizer=None)
    return result
# Standard script entry point.
if __name__ == '__main__':
    main()
| 17,019 | 40.111111 | 161 | py |
RBNN | RBNN-master/imagenet/modules/binarized_modules.py | import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd import Function, Variable
from scipy.stats import ortho_group
from utils.options import args
class BinarizeConv2d(nn.Conv2d):
    """1-bit convolution with rotated binarization (RBNN).

    Weights are standardized, rotated toward their binarized counterpart via
    two learned-by-alternation orthogonal matrices (R1, R2), then binarized
    with a sign function whose backward pass is shaped by (k, t). Activations
    are standardized and binarized too, unless ``args.a32`` keeps them at
    full precision. Note: construction calls ``.cuda()``, so a GPU is
    required.
    """
    def __init__(self, *kargs, **kwargs):
        super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
        # Surrogate-gradient parameters; overwritten each epoch by main().
        self.k = torch.tensor([10.]).float()
        self.t = torch.tensor([0.1]).float()
        # epoch == -1 disables the rotation update in forward().
        self.epoch = -1
        w = self.weight
        # Factor the per-filter weight size into an (a, b) matrix shape.
        self.a, self.b = get_ab(np.prod(w.shape[1:]))
        # Random orthogonal initializations for the two rotation matrices.
        R1 = torch.tensor(ortho_group.rvs(dim=self.a)).float().cuda()
        R2 = torch.tensor(ortho_group.rvs(dim=self.b)).float().cuda()
        self.register_buffer('R1', R1)
        self.register_buffer('R2', R2)
        self.Rweight = torch.ones_like(w)
        # Per-output-channel scaling factor, initialized to mean |w|.
        sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1).detach()
        self.alpha = nn.Parameter(sw.cuda(), requires_grad=True)
        # Learnable per-channel rotation angle, initialized to pi/2
        # (sin == 1, i.e. full rotation toward the binarized weight).
        self.rotate = nn.Parameter(torch.ones(w.size(0), 1, 1, 1).cuda()*np.pi/2, requires_grad=True)
        self.Rotate = torch.zeros(1)
    def forward(self, input):
        a0 = input
        w = self.weight
        # Standardize weights per output channel (zero mean, unit std).
        w1 = w - w.mean([1,2,3], keepdim=True)
        w2 = w1 / w1.std([1,2,3], keepdim=True)
        # Standardize activations per sample.
        a1 = a0 - a0.mean([1,2,3], keepdim=True)
        a2 = a1 / a1.std([1,2,3], keepdim=True)
        a, b = self.a, self.b
        X = w2.view(w.shape[0], a, b)
        # Periodically refine R1/R2 by alternating minimization (3 rounds of
        # closed-form orthogonal Procrustes updates via SVD).
        if self.epoch > -1 and self.epoch % args.rotation_update == 0:
            for _ in range(3):
                #* update B
                V = self.R1.t() @ X.detach() @ self.R2
                B = torch.sign(V)
                #* update R1
                D1 = sum([Bi@(self.R2.t())@(Xi.t()) for (Bi,Xi) in zip(B,X.detach())])
                U1, S1, V1 = torch.svd(D1)
                self.R1 = (V1@(U1.t()))
                #* update R2
                D2 = sum([(Xi.t())@self.R1@Bi for (Xi,Bi) in zip(X.detach(),B)])
                U2, S2, V2 = torch.svd(D2)
                self.R2 = (U2@(V2.t()))
        # Rotated weights; blend toward them by |sin(rotate)| per channel.
        self.Rweight = ((self.R1.t())@X@(self.R2)).view_as(w)
        delta = self.Rweight.detach() - w2
        w3 = w2 + torch.abs(torch.sin(self.rotate)) * delta
        #* binarize
        bw = BinaryQuantize().apply(w3, self.k.to(w.device), self.t.to(w.device))
        if args.a32:
            ba = a2
        else:
            ba = BinaryQuantize_a().apply(a2, self.k.to(w.device), self.t.to(w.device))
        #* 1bit conv
        output = F.conv2d(ba, bw, self.bias, self.stride, self.padding,
                          self.dilation, self.groups)
        #* scaling factor
        output = output * self.alpha
        return output
class BinaryQuantize(Function):
    """Sign binarization for weights with a training-aware surrogate gradient.

    Forward emits ``sign(input)``; backward scales the incoming gradient by a
    clipped window ``k * (2*sqrt(t^2/2) - |t^2 * x|)`` floored at zero, so the
    straight-through estimate fades out for large-magnitude inputs.
    """

    @staticmethod
    def forward(ctx, input, k, t):
        # Stash the operands for the custom backward pass.
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Surrogate window, clipped below at zero.
        window = k * (2 * torch.sqrt(t**2 / 2) - torch.abs(t**2 * input))
        window = window.clamp(min=0)
        # k and t receive no gradient.
        return window * grad_output.clone(), None, None
class BinaryQuantize_a(Function):
    """Sign binarization for activations.

    Same forward as `BinaryQuantize`, but backward always uses k = 1 and
    caps t below at 1 before applying the clipped surrogate window.
    """

    @staticmethod
    def forward(ctx, input, k, t):
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Activations ignore the scheduled k and floor t at 1.
        k = torch.tensor(1.).to(input.device)
        t = max(t, torch.tensor(1.).to(input.device))
        window = k * (2 * torch.sqrt(t**2 / 2) - torch.abs(t**2 * input))
        window = window.clamp(min=0)
        return window * grad_output.clone(), None, None
def get_ab(N):
    """Factor *N* into a pair ``(a, b)`` with ``a * b == N`` and ``a <= b``,
    choosing ``a`` as close to ``sqrt(N)`` as possible.

    Used to reshape a flattened weight of size N into an (a, b) matrix for
    the orthogonal rotation matrices in ``BinarizeConv2d``.

    Args:
        N: positive integer to factor.

    Returns:
        Tuple ``(a, b)`` of ints with ``a <= b`` and ``a * b == N``.

    Raises:
        ValueError: if N < 1 (the original code silently returned None).
    """
    if N < 1:
        raise ValueError('N must be a positive integer, got %r' % (N,))
    # int(np.sqrt(N)) can be off by one for large N due to float rounding;
    # nudge the candidate so it is the exact integer square root.
    a = int(np.sqrt(N))
    while (a + 1) * (a + 1) <= N:
        a += 1
    while a * a > N:
        a -= 1
    # Walk down to the largest divisor not exceeding sqrt(N).
    for i in range(a, 0, -1):
        if N % i == 0:
            return i, N // i
| 3,835 | 34.518519 | 101 | py |
RBNN | RBNN-master/imagenet/dataset/dataset.py | from datetime import datetime
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
def load_data(type='both',dataset='cifar10',data_path='/data',batch_size = 256,batch_size_test=256,num_workers=0):
    """Build train/test DataLoaders for cifar10/cifar100/mnist/tinyimagenet.

    Args:
        type: 'both' -> (train, test); 'train' or 'val' -> a single loader.
              NOTE(review): ignored on the tinyimagenet branch, which always
              returns both loaders (as dict values).
        dataset: one of the keys of ``param`` below.
        data_path: dataset root directory.
        batch_size / batch_size_test: train / eval batch sizes.
        num_workers: DataLoader worker count.
    """
    # load data
    # Per-dataset constructor, input size and normalization statistics.
    param = {'cifar10':{'name':datasets.CIFAR10,'size':32,'normalize':[[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]},
            'cifar100':{'name':datasets.CIFAR100,'size':32,'normalize':[(0.507, 0.487, 0.441), (0.267, 0.256, 0.276)]},
            'mnist':{'name':datasets.MNIST,'size':32,'normalize':[(0.5,0.5,0.5),(0.5,0.5,0.5)]},
            'tinyimagenet':{'name':datasets.ImageFolder,'size':224,'normalize':[(0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262)]}}
    data = param[dataset]
    if data['name']==datasets.ImageFolder:
        # tinyimagenet path: folder-per-class layout with heavier train-time
        # augmentation ('test' transform is built but never used below).
        data_transforms = {
            'train': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.RandomRotation(20),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'val': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'test': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ])
        }
        data_dir = os.path.join(data_path,'tiny-imagenet-200')
        image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                        for x in ['train', 'val']}
        dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=(x=='train'), num_workers=num_workers)
                        for x in ['train', 'val']}
        # Returns dict_values([train_loader, val_loader]); callers unpack it.
        return dataloaders.values()
    else:
        # torchvision-dataset path (CIFAR/MNIST): crop+flip for train,
        # plain tensor+normalize for test.
        transform1 = transforms.Compose([
            transforms.RandomCrop(data['size'],padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
            ])
        transform2 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
            ])
        trainset = data['name'](root=data_path,
                            train=True,
                            download=False,
                            transform=transform1);
        trainloader = DataLoader(
            trainset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True)
        testset = data['name'](root=data_path,
                            train=False,
                            download=False,
                            transform=transform2);
        testloader = DataLoader(
            testset,
            batch_size=batch_size_test,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True)
        if type=='both':
            return trainloader, testloader
        elif type=='train':
            return trainloader
        elif type=='val':
            return testloader
def delete_module_fromdict(statedict):
    """Return a copy of *statedict* with the ``nn.DataParallel`` ``'module.'``
    prefix stripped from every key.

    Fix: the original blindly dropped the first 7 characters of every key
    (``k[7:]``), corrupting keys that did not carry the prefix; now only a
    genuine leading ``'module.'`` is removed.
    """
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in statedict.items():
        # Strip exactly one leading 'module.' when present.
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict
def add_module_fromdict(statedict):
    """Return a copy of *statedict* with ``'module.'`` prepended to every key,
    matching the key naming used by ``nn.DataParallel`` wrappers."""
    from collections import OrderedDict
    renamed = OrderedDict(
        ('module.' + key, value) for key, value in statedict.items())
    return renamed
| 3,858 | 37.59 | 134 | py |
RBNN | RBNN-master/imagenet/dataset/__init__.py | from .dataset import load_data, add_module_fromdict
from .imagenet import get_imagenet_iter_dali as get_imagenet
from .imagenet import get_imagenet_iter_torch as get_imagenet_torch | 180 | 59.333333 | 67 | py |
RBNN | RBNN-master/imagenet/dataset/imagenet.py | import time
import torch.utils.data
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import torchvision.datasets as datasets
from nvidia.dali.pipeline import Pipeline
import torchvision.transforms as transforms
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, DALIGenericIterator
class HybridTrainPipe(Pipeline):
    """NVIDIA DALI pipeline for ImageNet training.

    GPU-decoded images are random-resized-cropped, randomly mirrored and
    normalized with ImageNet statistics (scaled to 0-255 pixel range).
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
        # Per-shard deterministic seed.
        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        # NOTE(review): dali_cpu is accepted but never used; the pipeline is
        # hard-wired to the GPU variant.
        dali_device = "gpu"
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
        # 'mixed' = host JPEG parsing + device decoding.
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        # Random horizontal flip decision, 50/50.
        self.coin = ops.CoinFlip(probability=0.5)
        print('DALI "{0}" variant'.format(dali_device))
    def define_graph(self):
        """Wire the DALI graph: read -> decode -> crop -> mirror+normalize."""
        rng = self.coin()
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images, mirror=rng)
        return [output, self.labels]
class HybridValPipe(Pipeline):
    """NVIDIA DALI pipeline for ImageNet validation.

    Deterministic: shorter-side resize to *size*, center crop to *crop*,
    and ImageNet normalization; no shuffling and no mirroring.
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, local_rank=0, world_size=1):
        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size,
                                    random_shuffle=False)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        # Resize the shorter image side to *size*, preserving aspect ratio.
        self.res = ops.Resize(device="gpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    def define_graph(self):
        """Wire the DALI graph: read -> decode -> resize -> crop+normalize."""
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images)
        return [output, self.labels]
def get_imagenet_iter_dali(type, image_dir, batch_size, num_threads, device_id, num_gpus=1, crop=224, val_size=256,
                           world_size=1, local_rank=0):
    """Build a DALI classification iterator over ImageNet.

    Args:
        type: 'train' or 'val'; NOTE(review): any other value silently
            returns None.
        image_dir: root containing 'ILSVRC2012_img_train' and 'val' subdirs.

    Returns:
        A DALIClassificationIterator sized to this shard's share of the data.
    """
    if type == 'train':
        pip_train = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                    data_dir=image_dir + '/ILSVRC2012_img_train',
                                    crop=crop, world_size=world_size, local_rank=local_rank)
        pip_train.build()
        dali_iter_train = DALIClassificationIterator(pip_train, size=pip_train.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_train
    elif type == 'val':
        pip_val = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                data_dir=image_dir + '/val',
                                crop=crop, size=val_size, world_size=world_size, local_rank=local_rank)
        pip_val.build()
        dali_iter_val = DALIClassificationIterator(pip_val, size=pip_val.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_val
def get_imagenet_iter_torch(type, image_dir, batch_size, num_threads, device_id, num_gpus=1, crop=224, val_size=256,
                            world_size=1, local_rank=0):
    """Build a plain torchvision ImageNet DataLoader.

    'train' gets random-resized-crop + flip with shuffling; anything else is
    treated as validation (resize + center crop, no shuffling). Extra
    parameters (device_id, num_gpus, world_size, local_rank) mirror the DALI
    variant's signature but are unused here.
    """
    is_train = type == 'train'
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if is_train:
        augmentations = [
            transforms.RandomResizedCrop(crop, scale=(0.08, 1.25)),
            transforms.RandomHorizontalFlip(),
        ]
        split_dir = image_dir + '/train'
    else:
        augmentations = [
            transforms.Resize(val_size),
            transforms.CenterCrop(crop),
        ]
        split_dir = image_dir + '/val'
    transform = transforms.Compose(augmentations + [transforms.ToTensor(), normalize])
    dataset = datasets.ImageFolder(split_dir, transform)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=is_train,
        num_workers=num_threads, pin_memory=True)
    return dataloader
# Quick benchmark comparing DALI vs. torchvision ImageNet loader throughput.
if __name__ == '__main__':
    train_loader = get_imagenet_iter_dali(type='train', image_dir='/userhome/memory_data/imagenet', batch_size=256,
                                          num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    # Drain the DALI iterator once, moving each batch to the GPU.
    for i, data in enumerate(train_loader):
        images = data[0]["data"].cuda(non_blocking=True)
        labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('dali iterate time: %fs' % (end - start))
    train_loader = get_imagenet_iter_torch(type='train', image_dir='/userhome/data/imagenet', batch_size=256,
                                           num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    # Same drain for the plain torchvision loader, which yields (img, label).
    for i, data in enumerate(train_loader):
        images = data[0].cuda(non_blocking=True)
        labels = data[1].cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('torch iterate time: %fs' % (end - start))
| 6,531 | 51.677419 | 131 | py |
RBNN | RBNN-master/imagenet/models_imagenet/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from modules import *
BN = None
__all__ = ['resnet18_1w1a', 'resnet34_1w1a']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3Binary(in_planes, out_planes, stride=1):
    """Build a binarized 3x3 convolution with padding 1 and no bias."""
    return BinarizeConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Binary ResNet basic block with a shortcut around *each* binary conv
    (Bi-Real-Net style double residual), using Hardtanh as nonlinearity.

    Uses the module-level ``BN`` alias set up by ``ResNet.__init__``.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3Binary(inplanes, planes, stride)
        self.bn1 = BN(planes)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = conv3x3Binary(planes, planes)
        self.bn2 = BN(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        # First binary conv + shortcut from the block input.
        out = self.conv1(x)
        out = self.bn1(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.nonlinear(out)
        # Second binary conv + shortcut from the intermediate activation.
        residual = out
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.nonlinear(out)
        return out
class ResNet(nn.Module):
    """Binary ResNet backbone for ImageNet (used by resnet18/34_1w1a).

    The stem and downsample convolutions stay full-precision; residual
    blocks use binary 3x3 convs. ``__init__`` rebinds the module-level
    ``BN`` global that ``BasicBlock`` reads.
    """
    def __init__(self, block, layers, num_classes=1000, deep_stem=False,
                 avg_down=False, bypass_last_bn=False,
                 bn_group_size=1,
                 bn_group=None,
                 bn_sync_stats=False,
                 use_sync_bn=True):
        # NOTE(review): the bn_group*/use_sync_bn arguments are accepted but
        # unused; BN is always plain nn.BatchNorm2d here.
        global BN, bypass_bn_weight_list
        BN = nn.BatchNorm2d
        bypass_bn_weight_list = []
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        if self.deep_stem:
            # Three stacked 3x3 convs in place of the single 7x7 stem.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BN(64)
        self.nonlinear1 = nn.Hardtanh(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.bn2 = nn.BatchNorm1d(512 * block.expansion)
        # NOTE(review): nonlinear2 and bn3 are constructed but never called
        # in forward() below.
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                # BN scale initialized near zero (1e-8) rather than 1.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
        if bypass_last_bn:
            # NOTE(review): bypass_bn_weight_list is never populated, so this
            # loop is a no-op as written.
            for param in bypass_bn_weight_list:
                param.data.zero_()
            print('bypass {} bn.weight in BottleneckBlocks'.format(len(bypass_bn_weight_list)))
    def _make_layer(self, block, planes, blocks, stride=1, avg_down=False):
        """Stack *blocks* instances of *block*, downsampling on the first one
        when the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if self.avg_down:
                # Avg-pool then 1x1 conv (keeps the conv at stride 1).
                downsample = nn.Sequential(
                    nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=1, bias=False),
                    BN(planes * block.expansion),
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    BN(planes * block.expansion),
                )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem -> four residual stages -> pooled features -> linear head.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.nonlinear1(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.fc(x)
        return x
def resnet18_1w1a(pretrained=False, **kwargs):
    """Build a binary (1-bit weight / 1-bit activation) ResNet-18.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def resnet34_1w1a(pretrained=False, **kwargs):
    """Build a binary (1-bit weight / 1-bit activation) ResNet-34.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
| 5,965 | 31.248649 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.