code stringlengths 17 6.64M |
|---|
def _unsqueeze_ft(tensor):
'add new dementions at the front and the tail'
return tensor.unsqueeze(0).unsqueeze((- 1))
|
class _SynchronizedBatchNorm(_BatchNorm):
    # Base class implementing cross-device synchronized batch normalization.
    # The replica on device 0 acts as the "master": during a parallel forward
    # pass every replica sends its per-channel sum / squared-sum to the
    # master, which computes global statistics and broadcasts them back.

    def __init__(self, num_features, eps=1e-05, momentum=0.001, affine=True):
        """Create a synchronized batch-norm layer.

        Args:
            num_features: number of channels C of the expected input.
            eps: value added to the variance for numerical stability.
            momentum: update rate of the running statistics.
                NOTE: the default here is 0.001, much smaller than the 0.1
                used by ``torch.nn.BatchNorm*``.
            affine: if True, the layer has learnable weight and bias.
        """
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master/slave communication machinery; it is wired up for real
        # parallel execution in __data_parallel_replicate__.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None
        # Decay applied to the weighted accumulators below on every update.
        self._moving_average_fraction = (1.0 - momentum)
        # Weighted accumulators for the moving averages; the actual
        # running_mean / running_var are recovered by dividing by
        # _running_iter (see _compute_mean_std).
        self.register_buffer('_tmp_running_mean', torch.zeros(self.num_features))
        self.register_buffer('_tmp_running_var', torch.ones(self.num_features))
        self.register_buffer('_running_iter', torch.ones(1))
        self._tmp_running_mean = (self.running_mean.clone() * self._running_iter)
        self._tmp_running_var = (self.running_var.clone() * self._running_iter)
    def forward(self, input):
        # Fall back to the standard (per-device) batch_norm when not running
        # under a parallel replication or when in eval mode.
        if (not (self._is_parallel and self.training)):
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps)
        # Flatten to (N, C, -1) so statistics can be reduced over dims 0 and 2.
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, (- 1))
        # Number of elements contributing to each channel's statistics
        # on this device.
        sum_size = (input.size(0) * input.size(2))
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft((input ** 2))
        # The master (copy 0) reduces the partial sums of all replicas and
        # broadcasts the global mean / inverse std back; slaves block until
        # the master replies.
        if (self._parallel_id == 0):
            (mean, inv_std) = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Normalize, optionally applying the learned affine transform.
        if self.affine:
            output = (((input - _unsqueeze_ft(mean)) * _unsqueeze_ft((inv_std * self.weight))) + _unsqueeze_ft(self.bias))
        else:
            output = ((input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std))
        return output.view(input_shape)
    def __data_parallel_replicate__(self, ctx, copy_id):
        # Called once per replica by DataParallelWithCallback. Copy 0 becomes
        # the master and publishes its SyncMaster through the shared ctx;
        # every other copy registers with it and obtains a SlavePipe.
        self._is_parallel = True
        self._parallel_id = copy_id
        if (self._parallel_id == 0):
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)
    def _data_parallel_master(self, intermediates):
        'Reduce the sum and square-sum, compute the statistics, and broadcast it.'
        # Sort messages by device id so the collective ops below receive
        # tensors in a consistent device order.
        intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device()))
        # Flatten [(sum, ssum), ...] into [sum0, ssum0, sum1, ssum1, ...].
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        # Total number of contributing elements across all devices.
        sum_size = sum([i[1].sum_size for i in intermediates])
        (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size)
        # broadcasted holds [mean0, inv_std0, mean1, inv_std1, ...],
        # one (mean, inv_std) pair per target device.
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for (i, rec) in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)])))
        return outputs
    def _add_weighted(self, dest, delta, alpha=1, beta=1, bias=0):
        'return *dest* by `dest := dest*alpha + delta*beta + bias`'
        return (((dest * alpha) + (delta * beta)) + bias)
    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and inverse standard-deviation from the reduced
        sum and square-sum. Also maintains the moving averages on the
        master device."""
        assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = (sum_ / size)
        # Sum of squared deviations: sum(x^2) - n * mean^2.
        sumvar = (ssum - (sum_ * mean))
        unbias_var = (sumvar / (size - 1))
        bias_var = (sumvar / size)
        # Decayed accumulators; the true running statistics are recovered by
        # dividing by the accumulated weight _running_iter just below.
        self._tmp_running_mean = self._add_weighted(self._tmp_running_mean, mean.data, alpha=self._moving_average_fraction)
        self._tmp_running_var = self._add_weighted(self._tmp_running_var, unbias_var.data, alpha=self._moving_average_fraction)
        self._running_iter = self._add_weighted(self._running_iter, 1, alpha=self._moving_average_fraction)
        self.running_mean = (self._tmp_running_mean / self._running_iter)
        self.running_var = (self._tmp_running_var / self._running_iter)
        # Normalization itself uses the biased variance, clamped by eps.
        return (mean, (bias_var.clamp(self.eps) ** (- 0.5)))
|
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 2d or 3d input (a mini-batch
    of 1d inputs, optionally with a length dimension).

    .. math::

        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta

    Unlike the built-in ``nn.BatchNorm1d``, the mean and standard-deviation
    are reduced across all devices of a ``DataParallel`` run during training,
    so the whole distributed mini-batch contributes to the statistics. For
    one-GPU or CPU-only execution it behaves exactly like the built-in layer.

    Args:
        num_features: number of channels C of the expected
            `batch_size x num_features [x length]` input.
        eps: value added to the denominator for numerical stability.
            Default: 1e-5.
        momentum: value used for the running_mean / running_var computation.
            Default in this implementation: 0.001.
        affine: if ``True``, the layer has learnable affine parameters.
            Default: ``True``.

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm1d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Only (N, C) and (N, C, L) inputs are meaningful for 1d batch-norm.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 4d input (a mini-batch of
    2d inputs with additional channel dimension).

    .. math::

        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta

    Unlike the built-in ``nn.BatchNorm2d``, the mean and standard-deviation
    are reduced across all devices of a ``DataParallel`` run during training,
    so the whole distributed mini-batch contributes to the statistics. For
    one-GPU or CPU-only execution it behaves exactly like the built-in layer.

    Args:
        num_features: number of channels C of the expected
            `batch_size x num_features x height x width` input.
        eps: value added to the denominator for numerical stability.
            Default: 1e-5.
        momentum: value used for the running_mean / running_var computation.
            Default in this implementation: 0.001.
        affine: if ``True``, the layer has learnable affine parameters.
            Default: ``True``.

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm2d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Spatial batch-norm is defined only for (N, C, H, W) inputs.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    """Synchronized Batch Normalization over a 5d input (a mini-batch of
    3d inputs with additional channel dimension).

    .. math::

        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta

    Unlike the built-in ``nn.BatchNorm3d``, the mean and standard-deviation
    are reduced across all devices of a ``DataParallel`` run during training,
    so the whole distributed mini-batch contributes to the statistics. For
    one-GPU or CPU-only execution it behaves exactly like the built-in layer.

    Args:
        num_features: number of channels C of the expected
            `batch_size x num_features x depth x height x width` input.
        eps: value added to the denominator for numerical stability.
            Default: 1e-5.
        momentum: value used for the running_mean / running_var computation.
            Default in this implementation: 0.001.
        affine: if ``True``, the layer has learnable affine parameters.
            Default: ``True``.

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Volumetric batch-norm is defined only for (N, C, D, H, W) inputs.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(input.dim()))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
|
class FutureResult(object):
    """A thread-safe future implementation. Used only as a one-to-one pipe:
    one producer thread calls :meth:`put`, one consumer calls :meth:`get`.
    """

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        """Publish *result*; the previous result must already be consumed."""
        with self._lock:
            assert self._result is None, "Previous result hasn't been fetched."
            self._result = result
            self._cond.notify()

    def get(self):
        """Block until a result is available, then return it and clear the slot."""
        with self._lock:
            # BUG FIX: Condition.wait() may wake spuriously, so the predicate
            # must be re-checked in a loop rather than a single `if`.
            while self._result is None:
                self._cond.wait()
            res = self._result
            self._result = None
            return res
|
class SlavePipe(_SlavePipeBase):
    """Pipe for master-slave communication (slave side)."""

    def run_slave(self, msg):
        # Send this replica's message to the master ...
        self.queue.put((self.identifier, msg))
        # ... block until the master publishes the reply ...
        ret = self.result.get()
        # ... and acknowledge so the master knows this slave is done.
        self.queue.put(True)
        return ret
|
class SyncMaster(object):
    """An abstract `SyncMaster` object.

    - During replication, data parallel triggers a callback on each module;
      every slave device calls `register_slave(id)` and obtains a `SlavePipe`
      to communicate with the master.
    - During the forward pass, the master device invokes `run_master`; the
      messages from all slave devices are collected and passed to the
      registered callback.
    - After the callback returns, the master sends the corresponding result
      back to each slave device.
    """

    def __init__(self, master_callback):
        """
        Args:
            master_callback: a callback invoked after messages have been
                collected from all slave devices.
        """
        self._master_callback = master_callback
        self._queue = queue.Queue()
        # identifier -> _MasterRegistry(FutureResult), one entry per slave.
        self._registry = collections.OrderedDict()
        # True once run_master has executed with the current registry;
        # the next register_slave call then starts a fresh round.
        self._activated = False

    def register_slave(self, identifier):
        """Register a slave device.

        Args:
            identifier: an identifier, usually the device id.

        Returns:
            A `SlavePipe` object for communicating with the master device.
        """
        if self._activated:
            # A new replication round begins: reset the registry.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """Main entry for the master device in each forward pass.

        Messages are first collected from every device (including the
        master), then the registered callback computes the message to be
        sent back to each device.

        Args:
            master_msg: the message the master sends to itself; it is placed
                first in the list passed to the master callback.

        Returns:
            The message to be used by the master device itself.
        """
        self._activated = True
        # The master's own message always carries identifier 0.
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert (results[0][0] == 0), 'The first result should belongs to the master.'
        # Deliver every slave's result through its FutureResult pipe.
        for (i, res) in results:
            if (i == 0):
                continue
            self._registry[i].result.put(res)
        # Wait for each slave's acknowledgement (SlavePipe puts True back).
        for i in range(self.nr_slaves):
            assert (self._queue.get() is True)
        return results[0][1]

    @property
    def nr_slaves(self):
        # Number of currently registered slave devices.
        return len(self._registry)
|
class CallbackContext(object):
    """An empty attribute bag shared between the corresponding submodules of
    all replicas, used to exchange state during replication callbacks."""
|
def execute_replication_callbacks(modules):
    """Invoke `__data_parallel_replicate__(ctx, copy_id)` on every submodule
    of every replica produced by the original replication.

    All replicas are isomorphic, so each submodule position is assigned one
    `CallbackContext` shared by the corresponding submodules of all copies;
    the master copy (index 0) is processed first, so its callback is
    guaranteed to run before any slave copy's.
    """
    num_submodules = len(list(modules[0].modules()))
    contexts = [CallbackContext() for _ in range(num_submodules)]
    for copy_id, replica in enumerate(modules):
        for position, submodule in enumerate(replica.modules()):
            if hasattr(submodule, '__data_parallel_replicate__'):
                submodule.__data_parallel_replicate__(contexts[position], copy_id)
|
class DataParallelWithCallback(DataParallel):
    """Data Parallel with a replication callback.

    A replication callback `__data_parallel_replicate__` of each module is
    invoked after the module is created by the original `replicate` function,
    with arguments `__data_parallel_replicate__(ctx, copy_id)`.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
        # Run the per-replica callbacks (master copy first).
        execute_replication_callbacks(modules)
        return modules
|
def patch_replication_callback(data_parallel):
    """Monkey-patch an existing `DataParallel` object to add the replication
    callback. Useful when you have a customized `DataParallel` implementation.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
        > patch_replication_callback(sync_bn)
        # this is equivalent to
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
    """
    assert isinstance(data_parallel, DataParallel)
    old_replicate = data_parallel.replicate
    @functools.wraps(old_replicate)
    def new_replicate(module, device_ids):
        # Replicate as usual, then run every replica's replication callbacks.
        modules = old_replicate(module, device_ids)
        execute_replication_callbacks(modules)
        return modules
    data_parallel.replicate = new_replicate
|
def handy_var(a, unbias=True):
    """Compute the per-feature variance of *a* along dim 0.

    Uses the sum / square-sum formulation so it matches the arithmetic of
    the synchronized batch-norm implementation.

    Args:
        a: 2D tensor of shape (N, C).
        unbias: if True, divide by N - 1 (unbiased); otherwise by N.
    """
    n = a.size(0)
    total = a.sum(dim=0)
    total_sq = (a ** 2).sum(dim=0)
    sumvar = total_sq - ((total * total) / n)
    denominator = (n - 1) if unbias else n
    return sumvar / denominator
|
class NumericTestCase(TorchTestCase):
    def testNumericBatchNorm(self):
        """Compare nn.BatchNorm against a manually computed normalization:
        outputs, gradients and running statistics must all agree."""
        a = torch.rand(16, 10)
        # BUG FIX: the input is 2D (N, C); nn.BatchNorm2d expects a 4D input
        # and raises ValueError, so nn.BatchNorm1d is the correct layer here.
        bn = nn.BatchNorm1d(10, momentum=1, eps=1e-05, affine=False)
        bn.train()
        a_var1 = Variable(a, requires_grad=True)
        b_var1 = bn(a_var1)
        loss1 = b_var1.sum()
        loss1.backward()
        # Reference path: normalize by the batch mean and (biased) std,
        # computed by hand with handy_var.
        a_var2 = Variable(a, requires_grad=True)
        a_mean2 = a_var2.mean(dim=0, keepdim=True)
        a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-05))
        b_var2 = ((a_var2 - a_mean2) / a_std2)
        loss2 = b_var2.sum()
        loss2.backward()
        # With momentum=1 the running stats equal the last batch statistics.
        self.assertTensorClose(bn.running_mean, a.mean(dim=0))
        self.assertTensorClose(bn.running_var, handy_var(a))
        self.assertTensorClose(a_var1.data, a_var2.data)
        self.assertTensorClose(b_var1.data, b_var2.data)
        self.assertTensorClose(a_var1.grad, a_var2.grad)
|
def handy_var(a, unbias=True):
    """Compute the per-feature variance of *a* along dim 0.

    NOTE(review): exact duplicate of ``handy_var`` defined earlier in this
    file; consider removing one of the two copies.
    """
    n = a.size(0)
    asum = a.sum(dim=0)
    as_sum = (a ** 2).sum(dim=0)
    # Sum of squared deviations: sum(x^2) - (sum(x))^2 / n.
    sumvar = (as_sum - ((asum * asum) / n))
    if unbias:
        return (sumvar / (n - 1))
    else:
        return (sumvar / n)
|
def _find_bn(module):
    """Return the first batch-norm layer (vanilla or synchronized) found
    among *module*'s submodules, or None when there is none."""
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)
    return next((m for m in module.modules() if isinstance(m, bn_types)), None)
|
class SyncTestCase(TorchTestCase):
    # Tests comparing SynchronizedBatchNorm* against torch.nn.BatchNorm*,
    # both on a single device and across two GPUs via DataParallel.

    def _syncParameters(self, bn1, bn2):
        # Reinitialize both layers, then copy bn1's affine parameters into
        # bn2 so the two layers start from identical weights.
        bn1.reset_parameters()
        bn2.reset_parameters()
        if (bn1.affine and bn2.affine):
            bn2.weight.data.copy_(bn1.weight.data)
            bn2.bias.data.copy_(bn1.bias.data)

    def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False):
        """Check the forward and backward passes of the customized batch
        normalization against the reference implementation."""
        bn1.train(mode=is_train)
        bn2.train(mode=is_train)
        if cuda:
            input = input.cuda()
        self._syncParameters(_find_bn(bn1), _find_bn(bn2))
        # Run the same input through both networks, forward and backward.
        input1 = Variable(input, requires_grad=True)
        output1 = bn1(input1)
        output1.sum().backward()
        input2 = Variable(input, requires_grad=True)
        output2 = bn2(input2)
        output2.sum().backward()
        # Outputs, input gradients and running statistics must all match.
        self.assertTensorClose(input1.data, input2.data)
        self.assertTensorClose(output1.data, output2.data)
        self.assertTensorClose(input1.grad, input2.grad)
        self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean)
        self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var)

    def testSyncBatchNormNormalTrain(self):
        # Single device, train mode: must match nn.BatchNorm1d exactly.
        bn = nn.BatchNorm1d(10)
        sync_bn = SynchronizedBatchNorm1d(10)
        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True)

    def testSyncBatchNormNormalEval(self):
        # Single device, eval mode: running statistics are used directly.
        bn = nn.BatchNorm1d(10)
        sync_bn = SynchronizedBatchNorm1d(10)
        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False)

    def testSyncBatchNormSyncTrain(self):
        # Two GPUs, train mode: statistics must be synchronized across devices.
        bn = nn.BatchNorm1d(10, eps=1e-05, affine=False)
        sync_bn = SynchronizedBatchNorm1d(10, eps=1e-05, affine=False)
        sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        bn.cuda()
        sync_bn.cuda()
        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True)

    def testSyncBatchNormSyncEval(self):
        # Two GPUs, eval mode.
        bn = nn.BatchNorm1d(10, eps=1e-05, affine=False)
        sync_bn = SynchronizedBatchNorm1d(10, eps=1e-05, affine=False)
        sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        bn.cuda()
        sync_bn.cuda()
        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True)

    def testSyncBatchNorm2DSyncTrain(self):
        # Two GPUs, 2d batch-norm over a (N, C, H, W) input.
        bn = nn.BatchNorm2d(10)
        sync_bn = SynchronizedBatchNorm2d(10)
        sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        bn.cuda()
        sync_bn.cuda()
        self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True)
|
def as_numpy(v):
    """Copy a tensor (or the data of a Variable) to the CPU and return it
    as a numpy array."""
    tensor = v.data if isinstance(v, Variable) else v
    return tensor.cpu().numpy()
|
class TorchTestCase(unittest.TestCase):
    """Test-case base class providing approximate tensor comparisons."""

    def assertTensorClose(self, a, b, atol=0.001, rtol=0.001):
        """Assert that tensors/arrays *a* and *b* are element-wise close."""
        npa = as_numpy(a)
        npb = as_numpy(b)
        # rtol is accepted for API compatibility but not used by allclose here.
        abs_diff = np.abs(npa - npb)
        rel_diff = np.abs((npa - npb) / np.fmax(npa, 1e-05))
        message = 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, abs_diff.max(), rel_diff.max())
        self.assertTrue(np.allclose(npa, npb, atol=atol), message)
|
def async_copy_to(obj, dev, main_stream=None):
    """Recursively copy (nested) tensors in *obj* to CUDA device *dev* using
    non-blocking transfers; mappings and sequences are rebuilt with their
    elements copied, other leaves are returned unchanged.

    Args:
        obj: a tensor, dict, list/tuple (possibly nested), or any object.
        dev: target CUDA device index.
        main_stream: if given, record each copied tensor on this stream so
            its memory is not reused before the stream has consumed it.
    """
    # BUG FIX: Mapping/Sequence were removed from the top-level `collections`
    # namespace in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping, Sequence
    if torch.is_tensor(obj):
        v = obj.cuda(dev, non_blocking=True)
        if main_stream is not None:
            v.data.record_stream(main_stream)
        return v
    elif isinstance(obj, Mapping):
        return {k: async_copy_to(o, dev, main_stream) for (k, o) in obj.items()}
    elif isinstance(obj, Sequence):
        # NOTE(review): a str is also a Sequence and would recurse forever;
        # this is preserved from the original — do not pass bare strings.
        return [async_copy_to(o, dev, main_stream) for o in obj]
    else:
        return obj
|
def dict_gather(outputs, target_device, dim=0):
    """Gather variables from different GPUs on a specified device
    (-1 means the CPU), with dictionary support.

    Args:
        outputs: one output per device; each may be a tensor, None, a
            mapping, or a sequence (nested arbitrarily, but with the same
            structure on every device).
        target_device: device index to gather onto (-1 for CPU).
        dim: dimension along which tensors are concatenated.
    """
    # BUG FIX: Mapping/Sequence were removed from the top-level `collections`
    # namespace in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping, Sequence

    def gather_map(outputs):
        out = outputs[0]
        if torch.is_tensor(out):
            if out.dim() == 0:
                # Gather cannot handle 0-dim tensors; add a batch dim first.
                outputs = [o.unsqueeze(0) for o in outputs]
            return Gather.apply(target_device, dim, *outputs)
        elif out is None:
            return None
        elif isinstance(out, Mapping):
            return {k: gather_map([o[k] for o in outputs]) for k in out}
        elif isinstance(out, Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
    return gather_map(outputs)
|
class DictGatherDataParallel(nn.DataParallel):
    """DataParallel variant whose gather step supports dictionary outputs."""

    def gather(self, outputs, output_device):
        # Delegate to dict_gather so per-device dict outputs are merged
        # key-by-key instead of failing in the default gather.
        return dict_gather(outputs, output_device, dim=self.dim)
|
class UserScatteredDataParallel(DictGatherDataParallel):
    """DataParallel variant where the user supplies an already-scattered
    batch: the single positional input must be a list with one chunk per
    device; keyword arguments are not supported."""

    def scatter(self, inputs, kwargs, device_ids):
        # Exactly one positional argument: the list of per-device chunks.
        assert (len(inputs) == 1)
        inputs = inputs[0]
        # Copy each chunk to its device asynchronously on background streams.
        inputs = _async_copy_stream(inputs, device_ids)
        # Re-wrap so each replica receives its chunk as a 1-tuple of args.
        inputs = [[i] for i in inputs]
        assert (len(kwargs) == 0)
        kwargs = [{} for _ in range(len(inputs))]
        return (inputs, kwargs)
|
def user_scattered_collate(batch):
    """Identity collate function: the caller has already arranged the batch
    (e.g. for UserScatteredDataParallel), so return it untouched."""
    return batch
|
def _async_copy(inputs, device_ids):
    # Copy the i-th input chunk to the i-th device (one chunk per device),
    # running async_copy_to under each target device's context.
    nr_devs = len(device_ids)
    assert (type(inputs) in (tuple, list))
    assert (len(inputs) == nr_devs)
    outputs = []
    for (i, dev) in zip(inputs, device_ids):
        with cuda.device(dev):
            outputs.append(async_copy_to(i, dev))
    return tuple(outputs)
|
def _async_copy_stream(inputs, device_ids):
    # Like _async_copy, but performs each host-to-device copy on a dedicated
    # background stream and makes the device's current stream wait for it,
    # so compute on the main stream only starts after the copy completes.
    nr_devs = len(device_ids)
    assert (type(inputs) in (tuple, list))
    assert (len(inputs) == nr_devs)
    outputs = []
    # One persistent copy stream per device (lazily created by _get_stream).
    streams = [_get_stream(d) for d in device_ids]
    for (i, dev, stream) in zip(inputs, device_ids, streams):
        with cuda.device(dev):
            main_stream = cuda.current_stream()
            with cuda.stream(stream):
                # record_stream(main_stream) inside async_copy_to keeps the
                # copied memory alive until main_stream has used it.
                outputs.append(async_copy_to(i, dev, main_stream=main_stream))
            main_stream.wait_stream(stream)
    return outputs
|
def _get_stream(device):
    """Gets a background stream for copying between CPU and GPU."""
    # Streams are cached in the module-level `_streams` list, one per device.
    global _streams
    if (device == (- 1)):
        # CPU target: no CUDA stream needed.
        return None
    if (_streams is None):
        # Lazily allocate one slot per visible CUDA device.
        _streams = ([None] * cuda.device_count())
    if (_streams[device] is None):
        _streams[device] = cuda.Stream(device)
    return _streams[device]
|
class ExceptionWrapper(object):
    """Capture an exception's type plus its formatted traceback so it can be
    shipped across threads/processes and re-raised on the other side."""

    def __init__(self, exc_info):
        exc_type, exc_value, tb = exc_info
        self.exc_type = exc_type
        self.exc_msg = ''.join(traceback.format_exception(exc_type, exc_value, tb))
|
def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
    # Entry point of each worker subprocess: repeatedly take a batch of
    # sample indices from index_queue, collate the corresponding samples,
    # and push the result to data_queue. A ``None`` message means shutdown.
    global _use_shared_memory
    _use_shared_memory = True
    # torch C-extension helper that sets up the worker's signal handling.
    _set_worker_signal_handlers()
    # One thread per worker; seeds made distinct by the caller (base + id).
    torch.set_num_threads(1)
    torch.manual_seed(seed)
    np.random.seed(seed)
    if (init_fn is not None):
        init_fn(worker_id)
    while True:
        r = index_queue.get()
        if (r is None):
            break
        (idx, batch_indices) = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            # Ship the exception (with its traceback) back to the consumer
            # instead of crashing the worker silently.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
|
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
    # Runs in a background thread of the main process: moves results from
    # the workers' queue to the consumer queue, pinning tensor memory on the
    # way when requested. A ``None`` message means shutdown.
    if pin_memory:
        torch.cuda.set_device(device_id)
    while True:
        try:
            r = in_queue.get()
        except Exception:
            if done_event.is_set():
                # Shutting down: queue errors are expected, exit quietly.
                return
            raise
        if (r is None):
            break
        if isinstance(r[1], ExceptionWrapper):
            # Forward worker exceptions untouched.
            out_queue.put(r)
            continue
        (idx, batch) = r
        try:
            if pin_memory:
                batch = pin_memory_batch(batch)
        except Exception:
            # Pinning failed: convert into an ExceptionWrapper for the consumer.
            out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            out_queue.put((idx, batch))
|
def default_collate(batch):
    """Puts each data field into a tensor with outer dimension batch size.

    Supports (possibly nested) batches of tensors, numpy arrays and scalars,
    ints, floats, strings, mappings and sequences.

    Raises:
        TypeError: if the element type is not supported.
    """
    # BUG FIX: Mapping/Sequence were removed from the top-level `collections`
    # namespace in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping, Sequence
    error_msg = 'batch must contain tensors, numbers, dicts or lists; found {}'
    elem_type = type(batch[0])
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # In a worker process: stack directly into a shared-memory tensor
            # to avoid an extra copy when sending it to the main process.
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        elem = batch[0]
        if (elem_type.__name__ == 'ndarray'):
            # Reject arrays of strings or objects (dtype kinds S, a, U, O).
            if (re.search('[SaUO]', elem.dtype.str) is not None):
                raise TypeError(error_msg.format(elem.dtype))
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if (elem.shape == ()):
            # numpy scalars: convert via the matching python type first.
            py_type = (float if elem.dtype.name.startswith('float') else int)
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], Sequence):
        # Collate each position across the batch, e.g. a batch of
        # (image, label) pairs becomes [collated_images, collated_labels].
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError(error_msg.format(type(batch[0])))
|
def pin_memory_batch(batch):
    """Recursively pin the memory of every tensor contained in *batch*.

    Strings and unrecognized leaf objects are returned unchanged; mappings
    and sequences are rebuilt with their elements pinned.
    """
    # BUG FIX: Mapping/Sequence were removed from the top-level `collections`
    # namespace in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping, Sequence
    if torch.is_tensor(batch):
        return batch.pin_memory()
    elif isinstance(batch, string_classes):
        # Checked before Sequence: strings must not be iterated element-wise.
        return batch
    elif isinstance(batch, Mapping):
        return {k: pin_memory_batch(sample) for (k, sample) in batch.items()}
    elif isinstance(batch, Sequence):
        return [pin_memory_batch(sample) for sample in batch]
    else:
        return batch
|
def _set_SIGCHLD_handler():
    # Install a SIGCHLD handler so that a crashed worker subprocess is
    # reported immediately instead of the main process hanging forever.
    if (sys.platform == 'win32'):
        # Windows has no SIGCHLD.
        return
    # Signal handlers can only be installed from the main thread.
    if (not isinstance(threading.current_thread(), threading._MainThread)):
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        # Install at most once per process.
        return
    previous_handler = signal.getsignal(signal.SIGCHLD)
    if (not callable(previous_handler)):
        # SIG_DFL / SIG_IGN / None — nothing to chain to.
        previous_handler = None
    def handler(signum, frame):
        # Raises in the main process if any registered worker died abnormally.
        _error_if_any_worker_fails()
        if (previous_handler is not None):
            previous_handler(signum, frame)
    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True
|
class DataLoaderIter(object):
    """Iterates once over the DataLoader's dataset, as specified by the sampler."""

    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        # Pinning is only meaningful when CUDA is available.
        self.pin_memory = (loader.pin_memory and torch.cuda.is_available())
        self.timeout = loader.timeout
        self.done_event = threading.Event()
        self.sample_iter = iter(self.batch_sampler)
        if (self.num_workers > 0):
            # Multi-process path: spawn workers and prime the index pipeline.
            self.worker_init_fn = loader.worker_init_fn
            self.index_queue = multiprocessing.SimpleQueue()
            self.worker_result_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            # Buffers out-of-order batches until their index (rcvd_idx) is due.
            self.reorder_dict = {}
            base_seed = torch.LongTensor(1).random_(0, ((2 ** 31) - 1))[0]
            # Each worker gets a distinct deterministic seed: base_seed + i.
            self.workers = [multiprocessing.Process(target=_worker_loop, args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn, (base_seed + i), self.worker_init_fn, i)) for i in range(self.num_workers)]
            if (self.pin_memory or (self.timeout > 0)):
                # A manager thread moves results from the worker queue into
                # data_queue, pinning memory on the way if requested.
                self.data_queue = queue.Queue()
                if self.pin_memory:
                    maybe_device_id = torch.cuda.current_device()
                else:
                    maybe_device_id = None
                self.worker_manager_thread = threading.Thread(target=_worker_manager_loop, args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, maybe_device_id))
                self.worker_manager_thread.daemon = True
                self.worker_manager_thread.start()
            else:
                self.data_queue = self.worker_result_queue
            for w in self.workers:
                w.daemon = True
                w.start()
            # Register worker pids so crashes can be detected via SIGCHLD.
            _set_worker_pids(id(self), tuple((w.pid for w in self.workers)))
            _set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prime the pipeline with up to two batches per worker.
            for _ in range((2 * self.num_workers)):
                self._put_indices()

    def __len__(self):
        return len(self.batch_sampler)

    def _get_batch(self):
        # Fetch one (idx, batch) record, honouring the configured timeout.
        if (self.timeout > 0):
            try:
                return self.data_queue.get(timeout=self.timeout)
            except queue.Empty:
                raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
        else:
            return self.data_queue.get()

    def __next__(self):
        if (self.num_workers == 0):
            # Single-process path: fetch and collate synchronously.
            indices = next(self.sample_iter)
            batch = self.collate_fn([self.dataset[i] for i in indices])
            if self.pin_memory:
                batch = pin_memory_batch(batch)
            return batch
        if (self.rcvd_idx in self.reorder_dict):
            # The next in-order batch was already received earlier.
            batch = self.reorder_dict.pop(self.rcvd_idx)
            return self._process_next_batch(batch)
        if (self.batches_outstanding == 0):
            # Epoch exhausted: shut down the workers and stop.
            self._shutdown_workers()
            raise StopIteration
        while True:
            assert ((not self.shutdown) and (self.batches_outstanding > 0))
            (idx, batch) = self._get_batch()
            self.batches_outstanding -= 1
            if (idx != self.rcvd_idx):
                # Out-of-order batch: stash it until its turn comes.
                self.reorder_dict[idx] = batch
                continue
            return self._process_next_batch(batch)
    next = __next__  # Python 2 compatibility alias

    def __iter__(self):
        return self

    def _put_indices(self):
        # Send the next batch of sample indices to the workers, keeping at
        # most 2 * num_workers batches in flight.
        assert (self.batches_outstanding < (2 * self.num_workers))
        indices = next(self.sample_iter, None)
        if (indices is None):
            return
        self.index_queue.put((self.send_idx, indices))
        self.batches_outstanding += 1
        self.send_idx += 1

    def _process_next_batch(self, batch):
        self.rcvd_idx += 1
        # Refill the pipeline before handing the batch to the caller.
        self._put_indices()
        if isinstance(batch, ExceptionWrapper):
            # Re-raise an exception that occurred in a worker process.
            raise batch.exc_type(batch.exc_msg)
        return batch

    def __getstate__(self):
        # Pickling would require sharing queues and worker processes across
        # processes; explicitly unsupported.
        raise NotImplementedError('DataLoaderIterator cannot be pickled')

    def _shutdown_workers(self):
        # Signal workers and the manager thread to exit, then unregister the
        # worker pids. Safe to call more than once.
        try:
            if (not self.shutdown):
                self.shutdown = True
                self.done_event.set()
                # Drain the data queue so workers blocked on put() can exit.
                while (not self.data_queue.empty()):
                    self.data_queue.get()
                # One None per worker, plus one for the manager thread.
                for _ in self.workers:
                    self.index_queue.put(None)
                self.worker_result_queue.put(None)
        finally:
            if self.worker_pids_set:
                _remove_worker_pids(id(self))
                self.worker_pids_set = False

    def __del__(self):
        if (self.num_workers > 0):
            self._shutdown_workers()
|
class DataLoader(object):
"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. You may use ``torch.initial_seed()`` to access\n this value in :attr:`worker_init_fn`, which can be used to set other seeds\n (e.g. 
NumPy) before data loading.\n\n .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n "
    def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None):
        # Store user-supplied configuration first; validation follows so error
        # messages can reference the original arguments.
        self.dataset = dataset
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.collate_fn = collate_fn
        self.pin_memory = pin_memory
        self.drop_last = drop_last
        self.timeout = timeout
        self.worker_init_fn = worker_init_fn
        # A negative timeout for collecting worker results is meaningless.
        if (timeout < 0):
            raise ValueError('timeout option should be non-negative')
        # batch_sampler already yields whole batches, so it cannot be combined
        # with any option that controls batch construction.
        if (batch_sampler is not None):
            if ((batch_size > 1) or shuffle or (sampler is not None) or drop_last):
                raise ValueError('batch_sampler is mutually exclusive with batch_size, shuffle, sampler, and drop_last')
        # An explicit sampler fixes the iteration order, so shuffle must be off.
        if ((sampler is not None) and shuffle):
            raise ValueError('sampler is mutually exclusive with shuffle')
        if (self.num_workers < 0):
            raise ValueError('num_workers cannot be negative; use num_workers=0 to disable multiprocessing.')
        # Build the default sampler stack: an element sampler (random or
        # sequential) wrapped in a BatchSampler that groups indices.
        if (batch_sampler is None):
            if (sampler is None):
                if shuffle:
                    sampler = RandomSampler(dataset)
                else:
                    sampler = SequentialSampler(dataset)
            batch_sampler = BatchSampler(sampler, batch_size, drop_last)
        self.sampler = sampler
        self.batch_sampler = batch_sampler
    def __iter__(self):
        # Each call creates a fresh iterator (and worker processes, if any).
        return DataLoaderIter(self)
    def __len__(self):
        # Number of batches per epoch, as determined by the batch sampler.
        return len(self.batch_sampler)
|
class Dataset(object):
    """Abstract base class for all datasets.

    Subclasses must override ``__len__`` (dataset size) and ``__getitem__``
    (integer indexing in ``range(0, len(self))``).
    """

    def __getitem__(self, index):
        """Return the sample at position ``index``."""
        raise NotImplementedError

    def __len__(self):
        """Return the number of samples in the dataset."""
        raise NotImplementedError

    def __add__(self, other):
        """Concatenate two datasets with the ``+`` operator."""
        return ConcatDataset([self, other])
|
class TensorDataset(Dataset):
    """Dataset wrapping a data tensor and a target tensor.

    Sample ``i`` is the pair ``(data_tensor[i], target_tensor[i])``; both
    tensors are indexed along their first dimension.

    Arguments:
        data_tensor (Tensor): contains sample data.
        target_tensor (Tensor): contains sample targets (labels).
    """

    def __init__(self, data_tensor, target_tensor):
        # Both tensors must describe the same number of samples.
        assert (data_tensor.size(0) == target_tensor.size(0))
        self.data_tensor = data_tensor
        self.target_tensor = target_tensor

    def __getitem__(self, index):
        return (self.data_tensor[index], self.target_tensor[index])

    def __len__(self):
        return self.data_tensor.size(0)
|
class ConcatDataset(Dataset):
    """Dataset that is the concatenation of multiple datasets.

    Useful to assemble different existing datasets, possibly large-scale,
    as the concatenation is performed on-the-fly (no data is copied).

    Arguments:
        datasets (iterable): list of datasets to be concatenated.
    """
    @staticmethod
    def cumsum(sequence):
        # Running total of dataset lengths; r[i] is one past the last index
        # of datasets[i] in the concatenated view.
        (r, s) = ([], 0)
        for e in sequence:
            l = len(e)
            r.append((l + s))
            s += l
        return r
    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        assert (len(datasets) > 0), 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        return self.cumulative_sizes[(- 1)]
    def __getitem__(self, idx):
        # Fix: support negative indices like lists do (the original silently
        # returned wrong samples for idx < 0 via bisect on a negative value).
        if (idx < 0):
            if ((- idx) > len(self)):
                raise ValueError('absolute value of index should not exceed dataset length')
            idx = (len(self) + idx)
        # Locate the dataset containing idx, then the offset inside it.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if (dataset_idx == 0):
            sample_idx = idx
        else:
            sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
        return self.datasets[dataset_idx][sample_idx]
    @property
    def cummulative_sizes(self):
        # Deprecated misspelled alias kept for backward compatibility.
        warnings.warn('cummulative_sizes attribute is renamed to cumulative_sizes', DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
|
class Subset(Dataset):
    """View over ``dataset`` restricted to the given ``indices``.

    Arguments:
        dataset (Dataset): the underlying dataset.
        indices (sequence): indices into ``dataset`` selecting the subset.
    """

    def __init__(self, dataset, indices):
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        # Translate the subset-local index into an index of the full dataset.
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)
|
def random_split(dataset, lengths):
    """Randomly split ``dataset`` into non-overlapping new datasets of the
    given lengths.

    Arguments:
        dataset (Dataset): dataset to be split.
        lengths (iterable): lengths of the splits to be produced.
    """
    if (sum(lengths) != len(dataset)):
        raise ValueError('Sum of input lengths does not equal the length of the input dataset!')
    # One global permutation; consecutive slices of it become the splits.
    indices = randperm(sum(lengths))
    splits = []
    for (offset, length) in zip(_accumulate(lengths), lengths):
        splits.append(Subset(dataset, indices[(offset - length):offset]))
    return splits
|
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    Especially useful with :class:`torch.nn.parallel.DistributedDataParallel`:
    each process passes a DistributedSampler instance as a DataLoader sampler
    and loads a subset of the original dataset exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): number of processes participating in
            distributed training (defaults to the world size).
        rank (optional): rank of the current process within num_replicas.
    """
    def __init__(self, dataset, num_replicas=None, rank=None):
        if (num_replicas is None):
            num_replicas = get_world_size()
        if (rank is None):
            rank = get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Every replica draws the same number of samples so all processes make
        # the same number of steps per epoch.
        self.num_samples = int(math.ceil(((len(self.dataset) * 1.0) / self.num_replicas)))
        self.total_size = (self.num_samples * self.num_replicas)
    def __iter__(self):
        # Deterministic shuffle seeded only by the epoch, so every replica
        # generates the identical permutation.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        # Fix: .tolist() yields plain Python ints; list(tensor) produced 0-dim
        # tensors, which are slower and surprising for downstream indexing
        # (this matches the upstream PyTorch implementation).
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
        # Pad with leading indices so the total is evenly divisible.
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        # Each rank takes its own contiguous slice of the permutation.
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)
    def __len__(self):
        return self.num_samples
    def set_epoch(self, epoch):
        # Call before each epoch so the shuffle differs between epochs.
        self.epoch = epoch
|
class Sampler(object):
    """Base class for all samplers.

    Every subclass provides ``__iter__`` (iterating over indices of dataset
    elements) and ``__len__`` (length of the returned iterator).
    """

    def __init__(self, data_source):
        pass

    def __iter__(self):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError
|
class SequentialSampler(Sampler):
    """Samples elements sequentially, always in the same order.

    Arguments:
        data_source (Dataset): dataset to sample from.
    """

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        # Indices 0 .. len-1, in order.
        return iter(range(len(self.data_source)))

    def __len__(self):
        return len(self.data_source)
|
class RandomSampler(Sampler):
    """Samples elements randomly, without replacement.

    Arguments:
        data_source (Dataset): dataset to sample from.
    """

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        # A fresh random permutation of all indices on every epoch.
        return iter(torch.randperm(len(self.data_source)).long())

    def __len__(self):
        return len(self.data_source)
|
class SubsetRandomSampler(Sampler):
    """Samples elements randomly from a given list of indices, without
    replacement.

    Arguments:
        indices (list): a list of indices.
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Yield the stored indices in a freshly permuted order.
        perm = torch.randperm(len(self.indices))
        return (self.indices[j] for j in perm)

    def __len__(self):
        return len(self.indices)
|
class WeightedRandomSampler(Sampler):
    """Samples elements from ``[0, .., len(weights) - 1]`` with the given
    probabilities (weights).

    Arguments:
        weights (list): a list of weights, not necessarily summing to one.
        num_samples (int): number of samples to draw.
        replacement (bool): if ``True``, samples are drawn with replacement;
            otherwise each index can be drawn at most once.
    """

    def __init__(self, weights, num_samples, replacement=True):
        self.weights = torch.DoubleTensor(weights)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        # torch.multinomial handles both with- and without-replacement draws.
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))

    def __len__(self):
        return self.num_samples
|
class BatchSampler(object):
    """Wraps another sampler to yield mini-batches of indices.

    Args:
        sampler (Sampler): base sampler.
        batch_size (int): size of each mini-batch.
        drop_last (bool): if ``True``, drop the final batch when it would
            contain fewer than ``batch_size`` indices.

    Example:
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        current = []
        for index in self.sampler:
            current.append(index)
            if (len(current) == self.batch_size):
                (yield current)
                current = []
        # Emit the trailing partial batch unless the caller opted out.
        if (current and (not self.drop_last)):
            (yield current)

    def __len__(self):
        (full_batches, remainder) = divmod(len(self.sampler), self.batch_size)
        if (self.drop_last or (remainder == 0)):
            return full_batches
        return (full_batches + 1)
|
def as_variable(obj):
    """Recursively wrap tensors in ``Variable``.

    Tensors become Variables; sequences and mappings are rebuilt with their
    values converted; existing Variables pass through unchanged.
    Note: strings are also Sequences and would recurse per character —
    callers are expected to pass tensors/containers only.
    """
    import collections.abc  # local import keeps this fix self-contained
    if isinstance(obj, Variable):
        return obj
    # Fix: collections.Sequence/Mapping were removed in Python 3.10;
    # collections.abc is the supported home for these ABCs.
    if isinstance(obj, collections.abc.Sequence):
        return [as_variable(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
        return {k: as_variable(v) for (k, v) in obj.items()}
    else:
        return Variable(obj)
|
def as_numpy(obj):
    """Recursively convert tensors/Variables (and containers of them) to
    numpy arrays; any other leaf is passed to ``np.array``."""
    import collections.abc  # local import keeps this fix self-contained
    # Fix: collections.Sequence/Mapping were removed in Python 3.10;
    # collections.abc is the supported home for these ABCs.
    if isinstance(obj, collections.abc.Sequence):
        return [as_numpy(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
        return {k: as_numpy(v) for (k, v) in obj.items()}
    elif isinstance(obj, Variable):
        return obj.data.cpu().numpy()
    elif torch.is_tensor(obj):
        return obj.cpu().numpy()
    else:
        return np.array(obj)
|
def mark_volatile(obj):
    """Recursively wrap tensors in Variables and tag them with
    ``no_grad = True``; containers are rebuilt with converted values and
    other objects pass through unchanged.
    """
    import collections.abc  # local import keeps this fix self-contained
    if torch.is_tensor(obj):
        obj = Variable(obj)
    if isinstance(obj, Variable):
        # NOTE(review): this sets a plain Python attribute, not torch.no_grad()
        # — consumers must check .no_grad themselves; confirm intended contract.
        obj.no_grad = True
        return obj
    # Fix: collections.Mapping/Sequence were removed in Python 3.10;
    # collections.abc is the supported home for these ABCs.
    elif isinstance(obj, collections.abc.Mapping):
        return {k: mark_volatile(o) for (k, o) in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
        return [mark_volatile(o) for o in obj]
    else:
        return obj
|
def make_evaluator(kind='default', ssim=True, lpips=True, fid=True, integral_kind=None, **kwargs):
    """Build an inpainting evaluator with the requested metric set.

    :param kind: evaluator flavour; only 'default' is currently supported.
    :param ssim: include the SSIM metric.
    :param lpips: include the LPIPS metric.
    :param fid: include the FID metric.
    :param integral_kind: optional name of an aggregate score combining metrics.
    :param kwargs: forwarded to the evaluator constructor.
    :raises ValueError: on unknown ``integral_kind`` or ``kind``.
    """
    logging.info(f'Make evaluator {kind}')
    metrics = {}
    if ssim:
        metrics['ssim'] = SSIMScore()
    if lpips:
        metrics['lpips'] = LPIPSScore()
    if fid:
        metrics['fid'] = FIDScore()
    if (integral_kind is None):
        integral_func = None
    elif (integral_kind == 'ssim_fid100_f1'):
        integral_func = ssim_fid100_f1
    elif (integral_kind == 'lpips_fid100_f1'):
        integral_func = lpips_fid100_f1
    else:
        raise ValueError(f'Unexpected integral_kind={integral_kind}')
    if (kind == 'default'):
        return InpaintingEvaluatorOnline(scores=metrics, integral_func=integral_func, integral_title=integral_kind, **kwargs)
    # Fix: the original fell through and implicitly returned None for an
    # unknown kind; fail loudly instead.
    raise ValueError(f'Unexpected evaluator kind={kind}')
|
def load_image(fname, mode='RGB', return_orig=False):
    """Load an image file as a float32 CHW array scaled to [0, 1].

    :param fname: path to the image file.
    :param mode: PIL conversion mode ('RGB', 'L', ...).
    :param return_orig: when True, also return the raw uint8 array.
    """
    raw = np.array(Image.open(fname).convert(mode))
    if (raw.ndim == 3):
        # HWC -> CHW for downstream torch-style code.
        raw = np.transpose(raw, (2, 0, 1))
    scaled = (raw.astype('float32') / 255)
    if return_orig:
        return (scaled, raw)
    return scaled
|
def ceil_modulo(x, mod):
    """Round ``x`` up to the nearest multiple of ``mod``."""
    remainder = (x % mod)
    if (remainder == 0):
        return x
    return (x + (mod - remainder))
|
def pad_img_to_modulo(img, mod):
    """Symmetrically pad a CHW image on the bottom/right so its height and
    width become multiples of ``mod``."""
    (channels, height, width) = img.shape
    pad_bottom = (ceil_modulo(height, mod) - height)
    pad_right = (ceil_modulo(width, mod) - width)
    return np.pad(img, ((0, 0), (0, pad_bottom), (0, pad_right)), mode='symmetric')
|
def pad_tensor_to_modulo(img, mod):
    """Reflect-pad an NCHW tensor on the bottom/right so height and width
    become multiples of ``mod``."""
    (batch_size, channels, height, width) = img.shape
    pad_bottom = (ceil_modulo(height, mod) - height)
    pad_right = (ceil_modulo(width, mod) - width)
    # F.pad takes (left, right, top, bottom) for the last two dimensions.
    return F.pad(img, pad=(0, pad_right, 0, pad_bottom), mode='reflect')
|
def scale_image(img, factor, interpolation=cv2.INTER_AREA):
    """Rescale a CHW (or 1xHxW) image by ``factor`` using OpenCV.

    Single-channel inputs are squeezed to HxW for cv2 and restored to
    1xHxW afterwards; multi-channel images are converted CHW <-> HWC.
    """
    if (img.shape[0] == 1):
        work = img[0]
    else:
        work = np.transpose(img, (1, 2, 0))
    work = cv2.resize(work, dsize=None, fx=factor, fy=factor, interpolation=interpolation)
    if (work.ndim == 2):
        return work[(None, ...)]
    return np.transpose(work, (2, 0, 1))
|
class InpaintingDataset(Dataset):
    # Dataset of (image, mask) pairs discovered on disk: masks match
    # '**/*mask*.png' under datadir, and each image path is derived from its
    # mask path by stripping the '_mask...' suffix and appending img_suffix.
    def __init__(self, datadir, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None):
        self.datadir = datadir
        self.mask_filenames = sorted(list(glob.glob(os.path.join(self.datadir, '**', '*mask*.png'), recursive=True)))
        self.img_filenames = [(fname.rsplit('_mask', 1)[0] + img_suffix) for fname in self.mask_filenames]
        self.pad_out_to_modulo = pad_out_to_modulo
        self.scale_factor = scale_factor
    def __len__(self):
        return len(self.mask_filenames)
    def __getitem__(self, i):
        image = load_image(self.img_filenames[i], mode='RGB')
        mask = load_image(self.mask_filenames[i], mode='L')
        # The mask gets an explicit channel dim to become 1xHxW.
        result = dict(image=image, mask=mask[(None, ...)])
        if (self.scale_factor is not None):
            result['image'] = scale_image(result['image'], self.scale_factor)
            # Nearest-neighbour keeps mask values crisp after rescaling.
            result['mask'] = scale_image(result['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST)
        if ((self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1)):
            result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo)
            result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo)
        return result
|
class OurInpaintingDataset(Dataset):
    # Dataset variant with layout datadir/mask/**/*mask*.png for masks and
    # datadir/img/<stem>.png for images; the image stem is recovered from the
    # mask filename by stripping trailing '-...' and '_...' parts.
    # NOTE(review): img_suffix is accepted but unused — image paths are
    # hard-coded to '.png'; confirm whether that is intended.
    def __init__(self, datadir, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None):
        self.datadir = datadir
        self.mask_filenames = sorted(list(glob.glob(os.path.join(self.datadir, 'mask', '**', '*mask*.png'), recursive=True)))
        self.img_filenames = [os.path.join(self.datadir, 'img', (os.path.basename(fname.rsplit('-', 1)[0].rsplit('_', 1)[0]) + '.png')) for fname in self.mask_filenames]
        self.pad_out_to_modulo = pad_out_to_modulo
        self.scale_factor = scale_factor
    def __len__(self):
        return len(self.mask_filenames)
    def __getitem__(self, i):
        # Image as 3xHxW float32, mask as 1xHxW float32 in [0, 1].
        result = dict(image=load_image(self.img_filenames[i], mode='RGB'), mask=load_image(self.mask_filenames[i], mode='L')[(None, ...)])
        if (self.scale_factor is not None):
            result['image'] = scale_image(result['image'], self.scale_factor)
            # NOTE(review): unlike InpaintingDataset, the mask is rescaled with
            # the default interpolation here, not nearest — confirm intended.
            result['mask'] = scale_image(result['mask'], self.scale_factor)
        if ((self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1)):
            result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo)
            result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo)
        return result
|
class PrecomputedInpaintingResultsDataset(InpaintingDataset):
    # Extends InpaintingDataset with precomputed inpainting results stored in
    # predictdir, mirroring the relative layout of datadir.
    def __init__(self, datadir, predictdir, inpainted_suffix='_inpainted.jpg', **kwargs):
        super().__init__(datadir, **kwargs)
        if (not datadir.endswith('/')):
            # Normalize so the prefix strip below also removes the separator.
            datadir += '/'
        self.predictdir = predictdir
        # Prediction paths are derived from mask paths relative to datadir.
        self.pred_filenames = [os.path.join(predictdir, (os.path.splitext(fname[len(datadir):])[0] + inpainted_suffix)) for fname in self.mask_filenames]
    def __getitem__(self, i):
        result = super().__getitem__(i)
        result['inpainted'] = load_image(self.pred_filenames[i])
        if ((self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1)):
            result['inpainted'] = pad_img_to_modulo(result['inpainted'], self.pad_out_to_modulo)
        return result
|
class OurPrecomputedInpaintingResultsDataset(OurInpaintingDataset):
    """OurInpaintingDataset variant that also loads precomputed inpainting
    results from ``predictdir`` (one '<mask-stem>_inpainted.<suffix>' file
    per mask)."""
    def __init__(self, datadir, predictdir, inpainted_suffix='png', **kwargs):
        super().__init__(datadir, **kwargs)
        if (not datadir.endswith('/')):
            datadir += '/'
        self.predictdir = predictdir
        self.pred_filenames = [os.path.join(predictdir, (os.path.basename(os.path.splitext(fname)[0]) + f'_inpainted.{inpainted_suffix}')) for fname in self.mask_filenames]
    def __getitem__(self, i):
        result = super().__getitem__(i)
        # Fix: the original called self.file_loader, which is defined nowhere
        # in this hierarchy (AttributeError at runtime); the sibling class
        # PrecomputedInpaintingResultsDataset uses load_image for this.
        result['inpainted'] = load_image(self.pred_filenames[i])
        if ((self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1)):
            result['inpainted'] = pad_img_to_modulo(result['inpainted'], self.pad_out_to_modulo)
        return result
|
class InpaintingEvalOnlineDataset(Dataset):
    # Dataset that generates masks on the fly via mask_generator instead of
    # reading them from disk.
    def __init__(self, indir, mask_generator, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None, **kwargs):
        self.indir = indir
        self.mask_generator = mask_generator
        self.img_filenames = sorted(list(glob.glob(os.path.join(self.indir, '**', f'*{img_suffix}'), recursive=True)))
        self.pad_out_to_modulo = pad_out_to_modulo
        self.scale_factor = scale_factor
    def __len__(self):
        return len(self.img_filenames)
    def __getitem__(self, i):
        # raw_image is the original uint8 array; img is float32 CHW in [0, 1].
        (img, raw_image) = load_image(self.img_filenames[i], mode='RGB', return_orig=True)
        mask = self.mask_generator(img, raw_image=raw_image)
        result = dict(image=img, mask=mask)
        if (self.scale_factor is not None):
            result['image'] = scale_image(result['image'], self.scale_factor)
            # Nearest-neighbour keeps mask values crisp after rescaling.
            result['mask'] = scale_image(result['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST)
        if ((self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1)):
            result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo)
            result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo)
        return result
|
class InpaintingEvaluator():
    # Offline evaluator: iterates the whole dataset once per score, optionally
    # grouping results by the share of image area occluded by the mask.
    def __init__(self, dataset, scores, area_grouping=True, bins=10, batch_size=32, device='cuda', integral_func=None, integral_title=None, clamp_image_range=None):
        """
        :param dataset: torch.utils.data.Dataset which contains images and masks
        :param scores: dict {score_name: EvaluatorScore object}
        :param area_grouping: in addition to the overall scores, allows to compute score for the groups of samples
            which are defined by share of area occluded by mask
        :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
        :param batch_size: batch_size for the dataloader
        :param device: device to use
        """
        self.scores = scores
        self.dataset = dataset
        self.area_grouping = area_grouping
        self.bins = bins
        self.device = torch.device(device)
        self.dataloader = DataLoader(self.dataset, shuffle=False, batch_size=batch_size)
        self.integral_func = integral_func
        self.integral_title = integral_title
        self.clamp_image_range = clamp_image_range
    def _get_bin_edges(self):
        # Bin boundaries over mask-area share in [0, 1].
        bin_edges = np.linspace(0, 1, (self.bins + 1))
        # Enough decimals to keep neighbouring percent labels distinct.
        num_digits = max(0, (math.ceil(math.log10(self.bins)) - 1))
        interval_names = []
        for idx_bin in range(self.bins):
            (start_percent, end_percent) = (round((100 * bin_edges[idx_bin]), num_digits), round((100 * bin_edges[(idx_bin + 1)]), num_digits))
            start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
            end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
            interval_names.append('{0}-{1}%'.format(start_percent, end_percent))
        groups = []
        # One pass over the data: assign every sample to its area bin.
        for batch in self.dataloader:
            mask = batch['mask']
            batch_size = mask.shape[0]
            # Mean over mask pixels = fraction of the image that is occluded.
            area = mask.to(self.device).reshape(batch_size, (- 1)).mean(dim=(- 1))
            bin_indices = (np.searchsorted(bin_edges, area.detach().cpu().numpy(), side='right') - 1)
            # Area exactly 1.0 falls past the last edge; clamp into last bin.
            bin_indices[(bin_indices == self.bins)] = (self.bins - 1)
            groups.append(bin_indices)
        groups = np.hstack(groups)
        return (groups, interval_names)
    def evaluate(self, model=None):
        """
        :param model: callable with signature (image_batch, mask_batch); should return inpainted_batch
        :return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
            name of the particular group arranged by area of mask (e.g. '10-20%')
            and score statistics for the group as values.
        """
        results = dict()
        if self.area_grouping:
            (groups, interval_names) = self._get_bin_edges()
        else:
            groups = None
        for (score_name, score) in tqdm.auto.tqdm(self.scores.items(), desc='scores'):
            score.to(self.device)
            with torch.no_grad():
                score.reset()
                # Full pass over the dataset, accumulating state in the score.
                for batch in tqdm.auto.tqdm(self.dataloader, desc=score_name, leave=False):
                    batch = move_to_device(batch, self.device)
                    (image_batch, mask_batch) = (batch['image'], batch['mask'])
                    if (self.clamp_image_range is not None):
                        image_batch = torch.clamp(image_batch, min=self.clamp_image_range[0], max=self.clamp_image_range[1])
                    if (model is None):
                        assert ('inpainted' in batch), 'Model is None, so we expected precomputed inpainting results at key "inpainted"'
                        inpainted_batch = batch['inpainted']
                    else:
                        inpainted_batch = model(image_batch, mask_batch)
                    score(inpainted_batch, image_batch, mask_batch)
                (total_results, group_results) = score.get_value(groups=groups)
            results[(score_name, 'total')] = total_results
            if (groups is not None):
                for (group_index, group_values) in group_results.items():
                    group_name = interval_names[group_index]
                    results[(score_name, group_name)] = group_values
        if (self.integral_func is not None):
            # Aggregate score computed from the already-collected totals.
            results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
        return results
|
def ssim_fid100_f1(metrics, fid_scale=100):
    """F1-style combination of SSIM and a rescaled FID.

    FID is mapped to [0, 1] via ``max(0, fid_scale - fid) / fid_scale`` (lower
    FID -> closer to 1); the small epsilon avoids a zero denominator.
    """
    ssim_mean = metrics[('ssim', 'total')]['mean']
    fid_mean = metrics[('fid', 'total')]['mean']
    fid_rel = (max(0, (fid_scale - fid_mean)) / fid_scale)
    return (((2 * ssim_mean) * fid_rel) / ((ssim_mean + fid_rel) + 0.001))
|
def lpips_fid100_f1(metrics, fid_scale=100):
    """F1-style combination of inverted LPIPS (1 - lpips) and a rescaled FID.

    FID is mapped to [0, 1] via ``max(0, fid_scale - fid) / fid_scale``; the
    small epsilon avoids a zero denominator.
    """
    neg_lpips = (1 - metrics[('lpips', 'total')]['mean'])
    fid_mean = metrics[('fid', 'total')]['mean']
    fid_rel = (max(0, (fid_scale - fid_mean)) / fid_scale)
    return (((2 * neg_lpips) * fid_rel) / ((neg_lpips + fid_rel) + 0.001))
|
class InpaintingEvaluatorOnline(nn.Module):
    # Streaming evaluator: forward() is called once per batch during
    # prediction; evaluation_end() aggregates overall and per-mask-area-bin
    # statistics and resets the accumulated state.
    def __init__(self, scores, bins=10, image_key='image', inpainted_key='inpainted', integral_func=None, integral_title=None, clamp_image_range=None):
        """
        :param scores: dict {score_name: EvaluatorScore object}
        :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
        :param image_key: batch dict key holding the ground-truth image
        :param inpainted_key: batch dict key holding the prediction
        :param clamp_image_range: optional (min, max) to clamp images before scoring
        """
        super().__init__()
        LOGGER.info(f'{type(self)} init called')
        self.scores = nn.ModuleDict(scores)
        self.image_key = image_key
        self.inpainted_key = inpainted_key
        self.bins_num = bins
        self.bin_edges = np.linspace(0, 1, (self.bins_num + 1))
        # Percent labels such as '0-10%'; digits keep neighbours distinct.
        num_digits = max(0, (math.ceil(math.log10(self.bins_num)) - 1))
        self.interval_names = []
        for idx_bin in range(self.bins_num):
            (start_percent, end_percent) = (round((100 * self.bin_edges[idx_bin]), num_digits), round((100 * self.bin_edges[(idx_bin + 1)]), num_digits))
            start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
            end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
            self.interval_names.append('{0}-{1}%'.format(start_percent, end_percent))
        # Per-sample bin assignment, accumulated across forward() calls.
        self.groups = []
        self.integral_func = integral_func
        self.integral_title = integral_title
        self.clamp_image_range = clamp_image_range
        LOGGER.info(f'{type(self)} init done')
    def _get_bins(self, mask_batch):
        # Fraction of occluded pixels per sample -> bin index in [0, bins_num).
        batch_size = mask_batch.shape[0]
        area = mask_batch.view(batch_size, (- 1)).mean(dim=(- 1)).detach().cpu().numpy()
        bin_indices = np.clip((np.searchsorted(self.bin_edges, area) - 1), 0, (self.bins_num - 1))
        return bin_indices
    def forward(self, batch: Dict[(str, torch.Tensor)]):
        """
        Calculate and accumulate metrics for batch. To finalize evaluation and obtain final metrics, call evaluation_end
        :param batch: batch dict with mandatory fields mask, image, inpainted (can be overriden by self.inpainted_key)
        """
        result = {}
        with torch.no_grad():
            (image_batch, mask_batch, inpainted_batch) = (batch[self.image_key], batch['mask'], batch[self.inpainted_key])
            if (self.clamp_image_range is not None):
                image_batch = torch.clamp(image_batch, min=self.clamp_image_range[0], max=self.clamp_image_range[1])
            self.groups.extend(self._get_bins(mask_batch))
            for (score_name, score) in self.scores.items():
                result[score_name] = score(inpainted_batch, image_batch, mask_batch)
        return result
    def process_batch(self, batch: Dict[(str, torch.Tensor)]):
        # Convenience alias for forward().
        return self(batch)
    def evaluation_end(self, states=None):
        """
        :return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
            name of the particular group arranged by area of mask (e.g. '10-20%')
            and score statistics for the group as values.
        """
        LOGGER.info(f'{type(self)}: evaluation_end called')
        self.groups = np.array(self.groups)
        results = {}
        for (score_name, score) in self.scores.items():
            LOGGER.info(f'Getting value of {score_name}')
            # When states is given, pick this score's entry from each element.
            cur_states = ([s[score_name] for s in states] if (states is not None) else None)
            (total_results, group_results) = score.get_value(groups=self.groups, states=cur_states)
            LOGGER.info(f'Getting value of {score_name} done')
            results[(score_name, 'total')] = total_results
            for (group_index, group_values) in group_results.items():
                group_name = self.interval_names[group_index]
                results[(score_name, group_name)] = group_values
        if (self.integral_func is not None):
            # Aggregate score computed from the already-collected totals.
            results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
        LOGGER.info(f'{type(self)}: reset scores')
        self.groups = []
        for sc in self.scores.values():
            sc.reset()
        LOGGER.info(f'{type(self)}: reset scores done')
        LOGGER.info(f'{type(self)}: evaluation_end done')
        return results
|
def get_groupings(groups):
    """Map each distinct group label to the indices of its elements.

    :param groups: array of group labels, one per element.
    :return: dict {group label: np.array of element indices in that group}.
    """
    (labels, counts) = np.unique(groups, return_counts=True)
    order = np.argsort(groups)
    grouping = dict()
    start = 0
    # Sorted order places each group's elements in one contiguous run.
    for (label, count) in zip(labels, counts):
        grouping[label] = order[start:(start + count)]
        start += count
    return grouping
|
class EvaluatorScore(nn.Module):
    # Interface for metric accumulators: forward() consumes one batch,
    # get_value() aggregates everything seen so far, reset() clears state.
    @abstractmethod
    def forward(self, pred_batch, target_batch, mask):
        pass
    @abstractmethod
    def get_value(self, groups=None, states=None):
        pass
    @abstractmethod
    def reset(self):
        pass
|
class PairwiseScore(EvaluatorScore, ABC):
    # Base for metrics producing one scalar per sample; values accumulate in
    # self.individual_values and are aggregated overall and per group.
    def __init__(self):
        super().__init__()
        self.individual_values = None
    def get_value(self, groups=None, states=None):
        """
        :param groups:
        :return:
            total_results: dict of kind {'mean': score mean, 'std': score std}
            group_results: None, if groups is None;
                else dict {group_idx: {'mean': score mean among group, 'std': score std among group}}
        """
        # When states is given, concatenate those instead of local values.
        individual_values = (torch.cat(states, dim=0).reshape((- 1)).cpu().numpy() if (states is not None) else self.individual_values)
        total_results = {'mean': individual_values.mean(), 'std': individual_values.std()}
        if (groups is None):
            return (total_results, None)
        group_results = dict()
        grouping = get_groupings(groups)
        for (label, index) in grouping.items():
            group_scores = individual_values[index]
            group_results[label] = {'mean': group_scores.mean(), 'std': group_scores.std()}
        return (total_results, group_results)
    def reset(self):
        self.individual_values = []
|
class SSIMScore(PairwiseScore):
    # Per-sample SSIM between prediction and target; size_average=False keeps
    # one value per image instead of a batch mean.
    def __init__(self, window_size=11):
        super().__init__()
        self.score = SSIM(window_size=window_size, size_average=False).eval()
        self.reset()
    def forward(self, pred_batch, target_batch, mask=None):
        batch_values = self.score(pred_batch, target_batch)
        # Accumulate on CPU as a flat numpy array.
        self.individual_values = np.hstack([self.individual_values, batch_values.detach().cpu().numpy()])
        return batch_values
|
class LPIPSScore(PairwiseScore):
    # Per-sample LPIPS perceptual distance between prediction and target.
    def __init__(self, model='net-lin', net='vgg', model_path=None, use_gpu=True):
        super().__init__()
        self.score = PerceptualLoss(model=model, net=net, model_path=model_path, use_gpu=use_gpu, spatial=False).eval()
        self.reset()
    def forward(self, pred_batch, target_batch, mask=None):
        batch_values = self.score(pred_batch, target_batch).flatten()
        # Accumulate on CPU as a flat numpy array.
        self.individual_values = np.hstack([self.individual_values, batch_values.detach().cpu().numpy()])
        return batch_values
|
def fid_calculate_activation_statistics(act):
    """Return the (mean vector, covariance matrix) of ``act``, where rows are
    samples and columns are activation features."""
    return (np.mean(act, axis=0), np.cov(act, rowvar=False))
|
def calculate_frechet_distance(activations_pred, activations_target, eps=1e-06):
    # Frechet distance between Gaussians fitted to the two activation sets:
    # ||mu1 - mu2||^2 + Tr(s1 + s2 - 2*sqrt(s1 @ s2)).
    (mu1, sigma1) = fid_calculate_activation_statistics(activations_pred)
    (mu2, sigma2) = fid_calculate_activation_statistics(activations_target)
    diff = (mu1 - mu2)
    (covmean, _) = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if (not np.isfinite(covmean).all()):
        # Near-singular product: regularize both covariances and retry.
        msg = ('fid calculation produces singular product; adding %s to diagonal of cov estimates' % eps)
        LOGGER.warning(msg)
        offset = (np.eye(sigma1.shape[0]) * eps)
        covmean = linalg.sqrtm((sigma1 + offset).dot((sigma2 + offset)))
    if np.iscomplexobj(covmean):
        # Tiny imaginary parts are numerical noise; large ones are an error.
        if (not np.allclose(np.diagonal(covmean).imag, 0, atol=0.01)):
            m = np.max(np.abs(covmean.imag))
            raise ValueError('Imaginary component {}'.format(m))
        covmean = covmean.real
    tr_covmean = np.trace(covmean)
    return (((diff.dot(diff) + np.trace(sigma1)) + np.trace(sigma2)) - (2 * tr_covmean))
|
class FIDScore(EvaluatorScore):
    # Frechet Inception Distance over InceptionV3 activations. The Inception
    # model is cached on the class (FIDScore._MODEL) so repeated instantiation
    # does not rebuild it.
    def __init__(self, dims=2048, eps=1e-06):
        LOGGER.info('FIDscore init called')
        super().__init__()
        if (getattr(FIDScore, '_MODEL', None) is None):
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
            FIDScore._MODEL = InceptionV3([block_idx]).eval()
        self.model = FIDScore._MODEL
        self.eps = eps
        self.reset()
        LOGGER.info('FIDscore init done')
    def forward(self, pred_batch, target_batch, mask=None):
        # Only collect activations here; FID itself is computed in get_value.
        activations_pred = self._get_activations(pred_batch)
        activations_target = self._get_activations(target_batch)
        self.activations_pred.append(activations_pred.detach().cpu())
        self.activations_target.append(activations_target.detach().cpu())
        return (activations_pred, activations_target)
    def get_value(self, groups=None, states=None):
        LOGGER.info('FIDscore get_value called')
        # When states is given, use those (pred, target) pairs instead of the
        # locally accumulated lists.
        (activations_pred, activations_target) = (zip(*states) if (states is not None) else (self.activations_pred, self.activations_target))
        activations_pred = torch.cat(activations_pred).cpu().numpy()
        activations_target = torch.cat(activations_target).cpu().numpy()
        total_distance = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps)
        total_results = dict(mean=total_distance)
        if (groups is None):
            group_results = None
        else:
            group_results = dict()
            grouping = get_groupings(groups)
            for (label, index) in grouping.items():
                if (len(index) > 1):
                    group_distance = calculate_frechet_distance(activations_pred[index], activations_target[index], eps=self.eps)
                    group_results[label] = dict(mean=group_distance)
                else:
                    # FID needs a covariance estimate, i.e. at least 2 samples.
                    group_results[label] = dict(mean=float('nan'))
        # NOTE: get_value clears accumulated activations as a side effect.
        self.reset()
        LOGGER.info('FIDscore get_value done')
        return (total_results, group_results)
    def reset(self):
        self.activations_pred = []
        self.activations_target = []
    def _get_activations(self, batch):
        activations = self.model(batch)[0]
        if ((activations.shape[2] != 1) or (activations.shape[3] != 1)):
            assert False, 'We should not have got here, because Inception always scales inputs to 299x299'
        # Drop the trailing 1x1 spatial dims -> (batch, features).
        activations = activations.squeeze((- 1)).squeeze((- 1))
        return activations
|
class SegmentationAwareScore(EvaluatorScore):
    # Base for metrics that also track semantic-segmentation class
    # frequencies: for every image it records class histograms over the whole
    # target image, over the masked region of the target, and over the masked
    # region of the prediction.
    def __init__(self, weights_path):
        super().__init__()
        self.segm_network = SegmentationModule(weights_path=weights_path, use_default_normalization=True).eval()
        self.target_class_freq_by_image_total = []
        self.target_class_freq_by_image_mask = []
        self.pred_class_freq_by_image_mask = []
    def forward(self, pred_batch, target_batch, mask):
        # Flatten per-pixel class predictions to (batch, n_pixels).
        pred_segm_flat = self.segm_network.predict(pred_batch)[0].view(pred_batch.shape[0], (- 1)).long().detach().cpu().numpy()
        target_segm_flat = self.segm_network.predict(target_batch)[0].view(pred_batch.shape[0], (- 1)).long().detach().cpu().numpy()
        # Boolean mask per pixel: True where the region was occluded.
        mask_flat = (mask.view(mask.shape[0], (- 1)) > 0.5).detach().cpu().numpy()
        batch_target_class_freq_total = []
        batch_target_class_freq_mask = []
        batch_pred_class_freq_mask = []
        for (cur_pred_segm, cur_target_segm, cur_mask) in zip(pred_segm_flat, target_segm_flat, mask_flat):
            # Per-image class histograms (1, NUM_CLASS): whole target image,
            # masked target region, masked prediction region.
            cur_target_class_freq_total = np.bincount(cur_target_segm, minlength=NUM_CLASS)[(None, ...)]
            cur_target_class_freq_mask = np.bincount(cur_target_segm[cur_mask], minlength=NUM_CLASS)[(None, ...)]
            cur_pred_class_freq_mask = np.bincount(cur_pred_segm[cur_mask], minlength=NUM_CLASS)[(None, ...)]
            self.target_class_freq_by_image_total.append(cur_target_class_freq_total)
            self.target_class_freq_by_image_mask.append(cur_target_class_freq_mask)
            self.pred_class_freq_by_image_mask.append(cur_pred_class_freq_mask)
            batch_target_class_freq_total.append(cur_target_class_freq_total)
            batch_target_class_freq_mask.append(cur_target_class_freq_mask)
            batch_pred_class_freq_mask.append(cur_pred_class_freq_mask)
        batch_target_class_freq_total = np.concatenate(batch_target_class_freq_total, axis=0)
        batch_target_class_freq_mask = np.concatenate(batch_target_class_freq_mask, axis=0)
        batch_pred_class_freq_mask = np.concatenate(batch_pred_class_freq_mask, axis=0)
        return (batch_target_class_freq_total, batch_target_class_freq_mask, batch_pred_class_freq_mask)
    def reset(self):
        super().reset()
        self.target_class_freq_by_image_total = []
        self.target_class_freq_by_image_mask = []
        self.pred_class_freq_by_image_mask = []
|
def distribute_values_to_classes(target_class_freq_by_image_mask, values, idx2name):
    """Compute per-class frequency-weighted averages of per-image scores.

    :param target_class_freq_by_image_mask: (n_images, n_classes) pixel counts.
    :param values: (n_images,) score per image.
    :param idx2name: class index -> readable class name.
    :return: {class_name: weighted mean score} for classes that actually occur.
    """
    assert ((target_class_freq_by_image_mask.ndim == 2) and (target_class_freq_by_image_mask.shape[0] == values.shape[0]))
    total_class_freq = target_class_freq_by_image_mask.sum(0)
    weighted_sums = (target_class_freq_by_image_mask * values[(..., None)]).sum(0)
    # Small epsilon keeps the division safe for zero-frequency classes.
    averages = (weighted_sums / (total_class_freq + 0.001))
    return {idx2name[idx]: avg for (idx, avg) in enumerate(averages) if (total_class_freq[idx] > 0)}
|
def get_segmentation_idx2name():
    """Build a mapping from 0-based segmentation class index to class name.

    The 'Idx' column in segm_options['classes'] is 1-based, hence the -1 shift.
    """
    idx_to_name = segm_options['classes'].set_index('Idx', drop=True)['Name'].to_dict()
    return {idx - 1: name for idx, name in idx_to_name.items()}
|
class SegmentationAwarePairwiseScore(SegmentationAwareScore):
    """Base class for pairwise (pred vs. target) metrics that are additionally
    broken down by semantic segmentation class.

    Subclasses implement `calc_score`, producing one score per image in the
    batch; `get_value` aggregates those scores globally, per class (via
    `distribute_values_to_classes`), and optionally per group.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One entry per processed batch; each entry is the array returned by calc_score.
        self.individual_values = []
        # Maps 0-based class index -> human-readable class name.
        self.segm_idx2name = get_segmentation_idx2name()

    def forward(self, pred_batch, target_batch, mask):
        # The parent forward accumulates class-frequency statistics and returns
        # them as a tuple; extend that tuple with this metric's per-image scores.
        cur_class_stats = super().forward(pred_batch, target_batch, mask)
        score_values = self.calc_score(pred_batch, target_batch, mask)
        self.individual_values.append(score_values)
        return cur_class_stats + (score_values,)

    @abstractmethod
    def calc_score(self, pred_batch, target_batch, mask):
        """Return per-image score values for the given batch."""
        raise NotImplementedError()

    def get_value(self, groups=None, states=None):
        """Aggregate accumulated scores.

        :param groups: optional per-image group labels; when given, per-group
            aggregates are computed as well.
        :param states: optional externally gathered state tuple
            (target_class_freq_by_image_total, target_class_freq_by_image_mask,
             pred_class_freq_by_image_mask, individual_values); when None, this
            metric's own accumulated buffers are used.
        :return: (total_results, group_results) where total_results is a dict
            {'mean': ..., 'std': ..., <per-class means>...} and group_results is
            None if groups is None, else {group_label: same-shaped dict}.
        """
        if states is not None:
            (target_class_freq_by_image_total, target_class_freq_by_image_mask, pred_class_freq_by_image_mask, individual_values) = states
        else:
            target_class_freq_by_image_total = self.target_class_freq_by_image_total
            target_class_freq_by_image_mask = self.target_class_freq_by_image_mask
            pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask
            individual_values = self.individual_values
        # Flatten the per-batch lists into single (n_images, ...) arrays.
        target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0)
        target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0)
        pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0)
        individual_values = np.concatenate(individual_values, axis=0)
        total_results = {'mean': individual_values.mean(),
                         'std': individual_values.std(),
                         **distribute_values_to_classes(target_class_freq_by_image_mask, individual_values, self.segm_idx2name)}
        if groups is None:
            return (total_results, None)
        group_results = dict()
        grouping = get_groupings(groups)
        for (label, index) in grouping.items():
            group_class_freq = target_class_freq_by_image_mask[index]
            group_scores = individual_values[index]
            group_results[label] = {'mean': group_scores.mean(),
                                    'std': group_scores.std(),
                                    **distribute_values_to_classes(group_class_freq, group_scores, self.segm_idx2name)}
        return (total_results, group_results)

    def reset(self):
        """Clear accumulated scores in addition to parent-class statistics."""
        super().reset()
        self.individual_values = []
|
class SegmentationClassStats(SegmentationAwarePairwiseScore):
    """Pseudo-metric that reports segmentation-class frequency statistics.

    calc_score returns a constant, so only the class-frequency buffers
    maintained by the parent class carry information. get_value reports, per
    class: marginal frequency over whole images ('total_freq/...'), marginal
    frequency inside the mask ('mask_freq/...'), and the relative difference
    between predicted and target in-mask frequencies ('mask_freq_diff/...').
    """

    def calc_score(self, pred_batch, target_batch, mask):
        # No per-image score; only class-frequency statistics are collected.
        return 0

    def _compute_freq_stats(self, class_freq_total, class_freq_mask, pred_freq_mask):
        """Build the {stat_name/class_name: value} dict from class-frequency arrays.

        Extracted helper: the original get_value duplicated this computation
        verbatim for the total and per-group paths.

        :param class_freq_total: (n_images, n_classes) target counts over full images
        :param class_freq_mask: (n_images, n_classes) target counts inside the mask
        :param pred_freq_mask: (n_images, n_classes) predicted counts inside the mask
        """
        total_marginal = class_freq_total.sum(0).astype('float32')
        total_marginal /= total_marginal.sum()
        mask_marginal = class_freq_mask.sum(0).astype('float32')
        mask_marginal /= mask_marginal.sum()
        # Relative difference of predicted vs target in-mask class frequencies;
        # 0.001 guards against division by zero for absent classes.
        freq_diff = (pred_freq_mask - class_freq_mask).sum(0) / (class_freq_mask.sum(0) + 0.001)
        result = dict()
        result.update({f'total_freq/{self.segm_idx2name[i]}': v
                       for i, v in enumerate(total_marginal) if v > 0})
        result.update({f'mask_freq/{self.segm_idx2name[i]}': v
                       for i, v in enumerate(mask_marginal) if v > 0})
        result.update({f'mask_freq_diff/{self.segm_idx2name[i]}': v
                       for i, v in enumerate(freq_diff) if total_marginal[i] > 0})
        return result

    def get_value(self, groups=None, states=None):
        """Aggregate accumulated class-frequency statistics.

        :param groups: optional per-image group labels; when given, per-group
            statistics are computed as well.
        :param states: optional externally gathered state tuple (the fourth
            element — individual values — is ignored); when None, this metric's
            own accumulated buffers are used.
        :return: (total_results, group_results); group_results is None if
            groups is None, else {group_label: stats dict}.
        """
        if states is not None:
            (target_class_freq_by_image_total, target_class_freq_by_image_mask, pred_class_freq_by_image_mask, _) = states
        else:
            target_class_freq_by_image_total = self.target_class_freq_by_image_total
            target_class_freq_by_image_mask = self.target_class_freq_by_image_mask
            pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask
        target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0)
        target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0)
        pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0)
        total_results = self._compute_freq_stats(target_class_freq_by_image_total,
                                                 target_class_freq_by_image_mask,
                                                 pred_class_freq_by_image_mask)
        if groups is None:
            return (total_results, None)
        group_results = dict()
        grouping = get_groupings(groups)
        for (label, index) in grouping.items():
            group_results[label] = self._compute_freq_stats(
                target_class_freq_by_image_total[index],
                target_class_freq_by_image_mask[index],
                pred_class_freq_by_image_mask[index])
        return (total_results, group_results)
|
class SegmentationAwareSSIM(SegmentationAwarePairwiseScore):
    """Per-image SSIM with per-segmentation-class breakdown."""

    def __init__(self, *args, window_size=11, **kwargs):
        super().__init__(*args, **kwargs)
        # size_average=False keeps one SSIM value per image (needed downstream).
        self.score_impl = SSIM(window_size=window_size, size_average=False).eval()

    def calc_score(self, pred_batch, target_batch, mask):
        # SSIM is computed over whole images; the mask is unused here.
        scores = self.score_impl(pred_batch, target_batch)
        return scores.detach().cpu().numpy()
|
class SegmentationAwareLPIPS(SegmentationAwarePairwiseScore):
    """Per-image LPIPS perceptual distance with per-segmentation-class breakdown."""

    def __init__(self, *args, model='net-lin', net='vgg', model_path=None, use_gpu=True, **kwargs):
        super().__init__(*args, **kwargs)
        # spatial=False yields one scalar distance per image.
        self.score_impl = PerceptualLoss(model=model, net=net, model_path=model_path,
                                         use_gpu=use_gpu, spatial=False).eval()

    def calc_score(self, pred_batch, target_batch, mask):
        # LPIPS is computed over whole images; the mask is unused here.
        distances = self.score_impl(pred_batch, target_batch)
        return distances.flatten().detach().cpu().numpy()
|
def calculade_fid_no_img(img_i, activations_pred, activations_target, eps=1e-06):
    """Leave-one-out FID: FID with image img_i's prediction replaced by its target.

    Used to estimate how much a single image contributes to the overall FID.
    NOTE(review): the misspelled name ("calculade") is kept intentionally —
    it is called by name elsewhere in this module.
    """
    patched_pred = activations_pred.copy()
    patched_pred[img_i] = activations_target[img_i]
    return calculate_frechet_distance(patched_pred, activations_target, eps=eps)
|
class SegmentationAwareFID(SegmentationAwarePairwiseScore):
    """FID with an approximate per-segmentation-class decomposition.

    Instead of per-image scalars, calc_score stores (pred, target) Inception
    activation pairs; get_value computes FID over all accumulated activations
    and attributes it to classes via leave-one-image-out FID deltas.
    """

    def __init__(self, *args, dims=2048, eps=1e-06, n_jobs=-1, **kwargs):
        super().__init__(*args, **kwargs)
        # Share a single InceptionV3 instance across all FID-based metrics
        # (cached on the FIDScore class to avoid reloading weights).
        if getattr(FIDScore, '_MODEL', None) is None:
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
            FIDScore._MODEL = InceptionV3([block_idx]).eval()
        self.model = FIDScore._MODEL
        self.eps = eps
        # Parallelism for the leave-one-out FID computation (-1 = all cores).
        self.n_jobs = n_jobs

    def calc_score(self, pred_batch, target_batch, mask):
        # Returns activations rather than scores; aggregation happens in get_value.
        activations_pred = self._get_activations(pred_batch)
        activations_target = self._get_activations(target_batch)
        return (activations_pred, activations_target)

    def get_value(self, groups=None, states=None):
        """Aggregate accumulated activations into FID values.

        :param groups: optional per-image group labels; when given, per-group
            FID values are computed as well (NaN for single-image groups).
        :param states: optional externally gathered state tuple; when None,
            this metric's own accumulated buffers are used.
        :return: (total_results, group_results) with 'mean' holding the FID
            ('std' is always 0 since FID is a set-level statistic).
        """
        if states is not None:
            (target_class_freq_by_image_total, target_class_freq_by_image_mask, pred_class_freq_by_image_mask, activation_pairs) = states
        else:
            target_class_freq_by_image_total = self.target_class_freq_by_image_total
            target_class_freq_by_image_mask = self.target_class_freq_by_image_mask
            pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask
            activation_pairs = self.individual_values
        target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0)
        target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0)
        pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0)
        # Unzip the per-batch (pred, target) pairs into two activation arrays.
        (activations_pred, activations_target) = zip(*activation_pairs)
        activations_pred = np.concatenate(activations_pred, axis=0)
        activations_target = np.concatenate(activations_target, axis=0)
        total_results = {'mean': calculate_frechet_distance(activations_pred, activations_target, eps=self.eps),
                         'std': 0,
                         **self.distribute_fid_to_classes(target_class_freq_by_image_mask, activations_pred, activations_target)}
        if groups is None:
            return (total_results, None)
        group_results = dict()
        grouping = get_groupings(groups)
        for (label, index) in grouping.items():
            if len(index) > 1:
                group_activations_pred = activations_pred[index]
                group_activations_target = activations_target[index]
                group_class_freq = target_class_freq_by_image_mask[index]
                group_results[label] = {'mean': calculate_frechet_distance(group_activations_pred, group_activations_target, eps=self.eps),
                                        'std': 0,
                                        **self.distribute_fid_to_classes(group_class_freq, group_activations_pred, group_activations_target)}
            else:
                # FID over a single image is meaningless (degenerate covariance).
                group_results[label] = dict(mean=float('nan'), std=0)
        return (total_results, group_results)

    def distribute_fid_to_classes(self, class_freq, activations_pred, activations_target):
        """Estimate each class's FID contribution via leave-one-image-out deltas."""
        real_fid = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps)
        # For every image, recompute FID with that image's prediction replaced
        # by its target; the drop measures the image's contribution.
        fid_no_images = Parallel(n_jobs=self.n_jobs)(
            delayed(calculade_fid_no_img)(img_i, activations_pred, activations_target, eps=self.eps)
            for img_i in range(activations_pred.shape[0]))
        errors = (real_fid - fid_no_images)
        return distribute_values_to_classes(class_freq, errors, self.segm_idx2name)

    def _get_activations(self, batch):
        """Run the Inception model and return (batch, dims) pooled activations."""
        activations = self.model(batch)[0]
        if (activations.shape[2] != 1) or (activations.shape[3] != 1):
            activations = F.adaptive_avg_pool2d(activations, output_size=(1, 1))
        activations = activations.squeeze(-1).squeeze(-1).detach().cpu().numpy()
        return activations
|
def get_activations(files, model, batch_size=50, dims=2048, cuda=False, verbose=False, keep_size=False):
    """Calculate Inception pool_3 activations for all images.

    :param files: list of image paths (pathlib.Path) or PIL images
    :param model: InceptionV3 instance
    :param batch_size: batch size; the number of samples should be a multiple
        of it, otherwise the remainder is ignored (behavior retained to match
        the original FID implementation)
    :param dims: dimensionality of the returned Inception features
    :param cuda: if True, run the model on GPU
    :param verbose: if True, report per-batch progress
    :param keep_size: if True, skip the resizing transform and use plain ToTensor
    :return: np.ndarray of shape (num_used_images, dims) with the activations
    :raises ValueError: if the first entry of files is neither a Path nor a PIL image
    """
    model.eval()
    if len(files) % batch_size != 0:
        print('Warning: number of images is not a multiple of the batch size. Some samples are going to be ignored.')
    if batch_size > len(files):
        print('Warning: batch size is bigger than the data size. Setting batch size to data size')
        batch_size = len(files)
    n_batches = len(files) // batch_size
    n_used_imgs = n_batches * batch_size
    pred_arr = np.empty((n_used_imgs, dims))
    for i in tqdm(range(n_batches)):
        if verbose:
            print('\rPropagating batch %d/%d' % (i + 1, n_batches), end='', flush=True)
        start = i * batch_size
        end = start + batch_size
        t = transform if not keep_size else ToTensor()
        # Bug fix: the original tested pathlib.PosixPath, which never matches
        # on Windows; pathlib.Path matches both PosixPath and WindowsPath.
        if isinstance(files[0], pathlib.Path):
            images = [t(Image.open(str(f))) for f in files[start:end]]
        elif isinstance(files[0], Image.Image):
            images = [t(f) for f in files[start:end]]
        else:
            raise ValueError(f'Unknown data type for image: {type(files[0])}')
        batch = torch.stack(images)
        if cuda:
            batch = batch.cuda()
        pred = model(batch)[0]
        # If the model output is not already spatially pooled, pool it to 1x1.
        if pred.shape[2] != 1 or pred.shape[3] != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
        pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
    if verbose:
        print(' done')
    return pred_arr
|
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-06):
    """Numpy implementation of the Frechet distance between two Gaussians.

    The Frechet distance between X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    :param mu1: mean of activations for the generated samples
    :param sigma1: covariance of activations for the generated samples
    :param mu2: mean of activations for the reference samples
    :param sigma2: covariance of activations for the reference samples
    :param eps: diagonal offset added when the covariance product is singular
    :return: the Frechet distance (float)
    :raises ValueError: if the matrix square root has a large imaginary part
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'
    diff = mu1 - mu2
    cov_sqrt, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(cov_sqrt).all():
        # Singular product: nudge both covariances by eps on the diagonal and retry.
        msg = ('fid calculation produces singular product; adding %s to diagonal of cov estimates' % eps)
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        cov_sqrt = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(cov_sqrt):
        # Numerical error can leave tiny imaginary parts; large ones are a real failure.
        if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=0.01):
            m = np.max(np.abs(cov_sqrt.imag))
            raise ValueError('Imaginary component {}'.format(m))
        cov_sqrt = cov_sqrt.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)
|
def calculate_activation_statistics(files, model, batch_size=50, dims=2048, cuda=False, verbose=False, keep_size=False):
    """Compute the FID statistics (mean, covariance) of Inception activations.

    :param files: list of image paths or PIL images
    :param model: InceptionV3 instance
    :param batch_size: batch size for the forward passes
    :param dims: dimensionality of the Inception features
    :param cuda: if True, run the model on GPU
    :param verbose: if True, report per-batch progress
    :param keep_size: if True, do not resize images
    :return: (mu, sigma) — sample mean and covariance over the pool_3 activations
    """
    act = get_activations(files, model, batch_size, dims, cuda, verbose, keep_size=keep_size)
    return np.mean(act, axis=0), np.cov(act, rowvar=False)
|
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    """Load FID statistics from an .npz file, or compute them from the
    .jpg/.png images found at the given directory path.

    :return: (mu, sigma) FID statistics
    """
    if path.endswith('.npz'):
        archive = np.load(path)
        m, s = archive['mu'][:], archive['sigma'][:]
        archive.close()
        return m, s
    image_dir = pathlib.Path(path)
    files = list(image_dir.glob('*.jpg')) + list(image_dir.glob('*.png'))
    return calculate_activation_statistics(files, model, batch_size, dims, cuda)
|
def _compute_statistics_of_images(images, model, batch_size, dims, cuda, keep_size=False):
    """Compute FID statistics (mean, covariance) for an in-memory image list.

    :param images: list of PIL images
    :return: (mu, sigma) FID statistics
    :raises ValueError: if images is not a list
    """
    if isinstance(images, list):
        return calculate_activation_statistics(images, model, batch_size, dims, cuda, keep_size=keep_size)
    # Improvement: the original raised a bare ValueError with no message.
    raise ValueError(f'Expected a list of images, got {type(images)}')
|
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculate the FID between the image sets at paths[0] and paths[1].

    Each path is either a directory of images or a precomputed .npz stats file.

    :raises RuntimeError: if any of the paths does not exist
    """
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx])
    if cuda:
        model.cuda()
    stats = [_compute_statistics_of_path(p, model, batch_size, dims, cuda) for p in paths[:2]]
    (m1, s1), (m2, s2) = stats
    return calculate_frechet_distance(m1, s1, m2, s2)
|
def calculate_fid_given_images(images, batch_size, cuda, dims, use_globals=False, keep_size=False):
    """Calculate the FID between two in-memory lists of PIL images.

    :param images: pair (list_of_pred_images, list_of_target_images)
    :param batch_size: batch size for the Inception forward passes
    :param cuda: if True, run the model on GPU
    :param dims: dimensionality of the Inception features
    :param use_globals: if True, cache the Inception model in a module-level
        global (FID_MODEL) and reuse it across calls
    :param keep_size: if True, do not resize images before feature extraction
    :raises RuntimeError: if either element of images is not a list of PIL images
    """
    if use_globals:
        global FID_MODEL
    for imgs in images:
        if isinstance(imgs, list) and isinstance(imgs[0], (Image.Image, JpegImagePlugin.JpegImageFile)):
            pass
        else:
            raise RuntimeError('Invalid images')
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    if ('FID_MODEL' not in globals()) or (not use_globals):
        model = InceptionV3([block_idx])
        if cuda:
            model.cuda()
        if use_globals:
            FID_MODEL = model
    else:
        model = FID_MODEL
    # Bug fix: keep_size was accepted but hard-coded to False in both calls
    # below, so the parameter silently had no effect.
    m1, s1 = _compute_statistics_of_images(images[0], model, batch_size, dims, cuda, keep_size=keep_size)
    m2, s2 = _compute_statistics_of_images(images[1], model, batch_size, dims, cuda, keep_size=keep_size)
    return calculate_frechet_distance(m1, s1, m2, s2)
|
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning intermediate feature maps."""

    # Index of the default output block (final average pooling).
    DEFAULT_BLOCK_INDEX = 3
    # Maps feature dimensionality -> index of the block that produces it.
    BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}

    # Improvement: default changed from a mutable list to a tuple (same value);
    # mutable default arguments are shared between calls and are a known hazard.
    def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True,
                 normalize_input=True, requires_grad=False, use_fid_inception=True):
        """Build pretrained InceptionV3.

        :param output_blocks: indices of blocks to return features of:
            0 — first max pooling, 1 — second max pooling,
            2 — input of the aux classifier, 3 — final average pooling
        :param resize_input: if True, bilinearly resize input to 299x299 first
        :param normalize_input: if True, rescale input from (0, 1) to (-1, 1)
        :param requires_grad: if True, keep gradients for the model parameters
        :param use_fid_inception: if True, use the Tensorflow-FID variant of
            Inception (different weights and slightly different blocks);
            strongly advised for comparable FID scores
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert self.last_needed_block <= 3, 'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)
        # Block 0: input to first max pooling.
        block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3,
                  inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
        self.blocks.append(nn.Sequential(*block0))
        # Block 1: first max pooling to second max pooling.
        if self.last_needed_block >= 1:
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3,
                      nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))
        # Block 2: second max pooling to the aux-classifier input.
        if self.last_needed_block >= 2:
            block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d,
                      inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c,
                      inception.Mixed_6d, inception.Mixed_6e]
            self.blocks.append(nn.Sequential(*block2))
        # Block 3: aux-classifier input to final average pooling.
        if self.last_needed_block >= 3:
            block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
                      nn.AdaptiveAvgPool2d(output_size=(1, 1))]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps.

        :param inp: tensor of shape Bx3xHxW with values in range (0, 1)
        :return: list of tensors, one per selected output block, sorted
            ascending by block index
        """
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        if self.normalize_input:
            x = 2 * x - 1  # (0, 1) -> (-1, 1)
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            if idx == self.last_needed_block:
                break
        return outp
|
def fid_inception_v3():
    """Build the pretrained Inception model used for FID computation.

    The FID Inception network uses a different set of weights and slightly
    different block implementations than torchvision's stock InceptionV3, so
    this first constructs torchvision's model, then swaps in the patched
    blocks, and finally loads the FID-specific weights.
    """
    LOGGER.info('fid_inception_v3 called')
    inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    LOGGER.info('models.inception_v3 done')
    # Replace the blocks that differ from torchvision's implementation.
    patched_blocks = {
        'Mixed_5b': FIDInceptionA(192, pool_features=32),
        'Mixed_5c': FIDInceptionA(256, pool_features=64),
        'Mixed_5d': FIDInceptionA(288, pool_features=64),
        'Mixed_6b': FIDInceptionC(768, channels_7x7=128),
        'Mixed_6c': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6d': FIDInceptionC(768, channels_7x7=160),
        'Mixed_6e': FIDInceptionC(768, channels_7x7=192),
        'Mixed_7b': FIDInceptionE_1(1280),
        'Mixed_7c': FIDInceptionE_2(2048),
    }
    for attr_name, block in patched_blocks.items():
        setattr(inception, attr_name, block)
    LOGGER.info('fid_inception_v3 patching done')
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    LOGGER.info('fid_inception_v3 weights downloaded')
    inception.load_state_dict(state_dict)
    LOGGER.info('fid_inception_v3 weights loaded into model')
    return inception
|
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation.

    Differs from torchvision's InceptionA only in the pooling branch, which
    uses average pooling with count_include_pad=False to match the TF network.
    """

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Patch: exclude the zero padding from the averaging count.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        pooled = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, pooled], 1)
|
class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation.

    Differs from torchvision's InceptionC only in the pooling branch, which
    uses average pooling with count_include_pad=False to match the TF network.
    """

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        out_7x7dbl = self.branch7x7dbl_1(x)
        for layer in (self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            out_7x7dbl = layer(out_7x7dbl)
        # Patch: exclude the zero padding from the averaging count.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        pooled = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_7x7, out_7x7dbl, pooled], 1)
|
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation.

    Differs from torchvision's InceptionE only in the pooling branch, which
    uses average pooling with count_include_pad=False to match the TF network.
    """

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat([self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)
        stem_3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat([self.branch3x3dbl_3a(stem_3x3dbl), self.branch3x3dbl_3b(stem_3x3dbl)], 1)
        # Patch: exclude the zero padding from the averaging count.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        pooled = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, pooled], 1)
|
class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation.

    Differs from torchvision's InceptionE in the pooling branch, which uses
    max pooling instead of average pooling, matching the TF FID network.
    """

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = torch.cat([self.branch3x3_2a(stem_3x3), self.branch3x3_2b(stem_3x3)], 1)
        stem_3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat([self.branch3x3dbl_3a(stem_3x3dbl), self.branch3x3dbl_3b(stem_3x3dbl)], 1)
        # Patch: the FID Inception uses max pooling here, not average pooling.
        pooled = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        pooled = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, pooled], 1)
|
class SSIM(torch.nn.Module):
    """Structural similarity (SSIM) metric.

    Modified from:
    https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
    """

    def __init__(self, window_size=11, size_average=True):
        super().__init__()
        self.window_size = window_size
        # If True, return a scalar mean; otherwise one value per image.
        self.size_average = size_average
        self.channel = 1
        self.register_buffer('window', self._create_window(window_size, self.channel))

    def forward(self, img1, img2):
        assert len(img1.shape) == 4
        channel = img1.size()[1]
        # Rebuild the Gaussian window when the channel count or dtype/device changes.
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            window = self._create_window(self.window_size, channel).type_as(img1)
            self.window = window
            self.channel = channel
        return self._ssim(img1, img2, window, self.window_size, channel, self.size_average)

    def _gaussian(self, window_size, sigma):
        """Return a normalized 1d Gaussian kernel of length window_size."""
        half = window_size // 2
        weights = torch.Tensor([np.exp(-((x - half) ** 2) / float(2 * sigma ** 2))
                                for x in range(window_size)])
        return weights / weights.sum()

    def _create_window(self, window_size, channel):
        """Build a (channel, 1, k, k) separable 2d Gaussian window."""
        kernel_1d = self._gaussian(window_size, 1.5).unsqueeze(1)
        kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
        return kernel_2d.expand(channel, 1, window_size, window_size).contiguous()

    def _ssim(self, img1, img2, window, window_size, channel, size_average=True):
        pad = window_size // 2
        mu1 = F.conv2d(img1, window, padding=pad, groups=channel)
        mu2 = F.conv2d(img2, window, padding=pad, groups=channel)
        mu1_sq, mu2_sq, mu1_mu2 = mu1.pow(2), mu2.pow(2), mu1 * mu2
        sigma1_sq = F.conv2d(img1 * img1, window, padding=pad, groups=channel) - mu1_sq
        sigma2_sq = F.conv2d(img2 * img2, window, padding=pad, groups=channel) - mu2_sq
        sigma12 = F.conv2d(img1 * img2, window, padding=pad, groups=channel) - mu1_mu2
        # Stabilization constants from the original SSIM paper (L = 1).
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2
        ssim_map = (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) /
                    ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)))
        if size_average:
            return ssim_map.mean()
        return ssim_map.mean(1).mean(1).mean(1)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Intentionally a no-op: the window buffer is derived data and should
        # not be loaded from (or required in) checkpoints.
        return
|
def countless5(a, b, c, d, e):
    """First stage of generalizing from countless2d: pick a mode-like winner
    among five slots A, B, C, D, E.

    A winner is chosen by first checking for matches of three, then matches of
    two, then falling back to E. Pair checks skip any pair involving E: if
    none of the other pairs match, E is returned anyway, so those checks are
    redundant. Zero is used as the "no match" sentinel, so inputs are assumed
    to be nonzero.
    """
    slots = [a, b, c, d, e]

    def match2(q, r):
        # q where q == r, else 0.
        return q * (q == r)

    def match3(q, r, s):
        # q where q == r == s, else 0.
        return q * ((q == r) & (r == s))

    def first_nonzero(x, y):
        # x where x != 0, else y.
        return x + (x == 0) * y

    triple_winner = reduce(first_nonzero,
                           (match3(x, y, z) for x, y, z in combinations(slots, 3)))
    pair_winner = reduce(first_nonzero,
                         (match2(x, y) for x, y in combinations(slots[:-1], 2)))
    return reduce(first_nonzero, (triple_winner, pair_winner, e))
|
def countless8(a, b, c, d, e, f, g, h):
    """Extend countless5 to eight slots: also check for matches of length 4.

    Matches of four are preferred over matches of three, which are preferred
    over matches of two; if nothing matches, the last slot H is returned.
    Pair checks skip pairs involving H (it is the fallback anyway). Zero is
    used as the "no match" sentinel, so inputs are assumed to be nonzero.
    """
    slots = [a, b, c, d, e, f, g, h]

    def match2(q, r):
        return q * (q == r)

    def match3(q, r, s):
        return q * ((q == r) & (r == s))

    def match4(p, q, r, s):
        return p * ((p == q) & (q == r) & (r == s))

    def first_nonzero(x, y):
        return x + (x == 0) * y

    quad_winner = reduce(first_nonzero,
                         (match4(*grp) for grp in combinations(slots, 4)))
    triple_winner = reduce(first_nonzero,
                           (match3(*grp) for grp in combinations(slots, 3)))
    pair_winner = reduce(first_nonzero,
                         (match2(*grp) for grp in combinations(slots[:-1], 2)))
    return reduce(first_nonzero, [quad_winner, triple_winner, pair_winner, h])
|
def dynamic_countless3d(data):
    """countless8 + dynamic programming. ~2x faster.

    2x downsampling of a 3d array via the COUNTLESS (mode-seeking) scheme:
    for every 2x2x2 cell, pick a value that occurs at least twice, preferring
    longer matches, falling back to the last corner. Pair results are cached
    and reused to build triple and quadruple matches instead of recomputing
    them as countless3d does.

    Note: `data` is shifted by +1 IN PLACE (0 is the "no match" sentinel) and
    shifted back before returning; dims are assumed even — TODO confirm.
    """
    sections = []
    data += 1  # reserve 0 as the "no match" sentinel
    factor = (2, 2, 2)
    for offset in np.ndindex(factor):
        # One of the 8 corner sub-grids of every 2x2x2 cell (a strided view).
        part = data[tuple(np.s_[o::f] for (o, f) in zip(offset, factor))]
        sections.append(part)
    # pick(a, b): a where a == b, else 0. lor(x, y): x where nonzero, else y.
    pick = lambda a, b: a * (a == b)
    lor = lambda x, y: x + (x == 0) * y
    # Matches of two. Pairs deliberately use range(7): a pair involving the
    # last section (index 7) is redundant since section 7 is the fallback.
    subproblems2 = {}
    results2 = None
    for (x, y) in combinations(range(7), 2):
        res = pick(sections[x], sections[y])
        subproblems2[(x, y)] = res
        if results2 is not None:
            results2 += (results2 == 0) * res  # in-place lor accumulation
        else:
            results2 = res
    # Matches of three, built from the cached pair results.
    subproblems3 = {}
    results3 = None
    for (x, y, z) in combinations(range(8), 3):
        res = pick(subproblems2[(x, y)], sections[z])
        if z != 7:
            # Only triples not ending at the last section are needed as
            # prefixes by the quadruple stage below.
            subproblems3[(x, y, z)] = res
        if results3 is not None:
            results3 += (results3 == 0) * res  # in-place lor accumulation
        else:
            results3 = res
    results3 = reduce(lor, (results3, results2, sections[-1]))
    # Release intermediates before the memory-heavy quadruple stage.
    results2 = None
    subproblems2 = None
    res = None
    # Matches of four, built from the cached triple results.
    results4 = (pick(subproblems3[(x, y, z)], sections[w]) for (x, y, z, w) in combinations(range(8), 4))
    results4 = reduce(lor, results4)
    subproblems3 = None
    final_result = lor(results4, results3) - 1  # undo the +1 shift in the output
    data -= 1  # restore the caller's array
    return final_result
|
def countless3d(data):
    """2x downsample a 3d array using the COUNTLESS (mode-seeking) scheme,
    written like countless8 applied image-wide.

    For every 2x2x2 cell, pick a value occurring at least twice, preferring
    quadruple matches over triples over pairs, falling back to the last
    corner. Zero is the internal "no match" sentinel, so the data is shifted
    by +1 in place before processing and shifted back before returning.
    """
    data += 1  # reserve 0 as the "no match" sentinel
    stride = (2, 2, 2)
    # The 8 corner sub-grids of every 2x2x2 cell (strided views, no copies).
    corners = [data[tuple(np.s_[o::s] for o, s in zip(offset, stride))]
               for offset in np.ndindex(stride)]

    def match2(q, r):
        return q * (q == r)

    def match3(q, r, s):
        return q * ((q == r) & (r == s))

    def match4(p, q, r, s):
        return p * ((p == q) & (q == r) & (r == s))

    def first_nonzero(x, y):
        return x + (x == 0) * y

    quad_winner = reduce(first_nonzero,
                         (match4(*grp) for grp in combinations(corners, 4)))
    triple_winner = reduce(first_nonzero,
                           (match3(*grp) for grp in combinations(corners, 3)))
    pair_winner = reduce(first_nonzero,
                         (match2(*grp) for grp in combinations(corners[:-1], 2)))
    final_result = reduce(first_nonzero,
                          (quad_winner, triple_winner, pair_winner, corners[-1])) - 1
    data -= 1  # restore the caller's array
    return final_result
|
# NOTE(review): the lines below are non-Python web-page boilerplate that was
# accidentally appended to this file; commented out so the module still parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.