id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,678 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def piecewise_exp_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the piecewise-exponential surrogate spike function.

    grad_x = grad_output * (alpha / 2) * exp(-alpha * |x|)

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    # .exp_() is applied to a fresh temporary, so the in-place op is safe.
    return alpha / 2 * (- alpha * x.abs()).exp_() * grad_output, None
7,679 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def sigmoid_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the sigmoid surrogate spike function.

    grad_x = grad_output * alpha * s * (1 - s), where s = sigmoid(alpha * x).

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    sgax = (x * alpha).sigmoid_()
    return grad_output * (1. - sgax) * sgax * alpha, None
7,680 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def soft_sign_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the soft-sign surrogate spike function.

    grad_x = grad_output / (2 * alpha * (1/alpha + |x|)^2)

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return grad_output / (2 * alpha * (1 / alpha + x.abs()).pow_(2)), None
7,681 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def atan_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the arctangent surrogate spike function.

    grad_x = grad_output * (alpha / 2) / (1 + (pi/2 * alpha * x)^2)

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return alpha / 2 / (1 + (math.pi / 2 * alpha * x).pow_(2)) * grad_output, None
7,682 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def nonzero_sign_log_abs_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the nonzero-sign-log-abs surrogate spike function.

    grad_x = grad_output / (1/alpha + |x|)

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return grad_output / (1 / alpha + x.abs()), None
7,683 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def erf_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the erf (Gaussian-CDF) surrogate spike function.

    grad_x = grad_output * (alpha / sqrt(pi)) * exp(-(alpha * x)^2)

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return grad_output * (- (x * alpha).pow_(2)).exp_() * (alpha / math.sqrt(math.pi)), None
7,684 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def piecewise_leaky_relu_backward(grad_output: torch.Tensor, x: torch.Tensor, w: float, c: float):
    """Backward of the piecewise leaky-ReLU surrogate spike function.

    grad_x = grad_output * 1/(2w) where |x| < w, and grad_output * c elsewhere.

    :return: ``(grad_x, None, None)`` — ``None`` for grads w.r.t. ``w`` and ``c``.
    """
    mask_width = (x.abs() < w)
    mask_c = mask_width.logical_not()
    # The two masks are complements, so every element of x is overwritten.
    return grad_output * x.masked_fill(mask_width, 1 / (2*w)).masked_fill(mask_c, c), None, None
7,685 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def leaky_k_relu_backward(grad_output: torch.Tensor, x: torch.Tensor, leak: float, k: float):
    """Backward of the leaky-k-ReLU surrogate spike function.

    grad_x = grad_output * k for x >= 0, grad_output * leak for x < 0.

    :return: ``(grad_x, None, None)`` — ``None`` for grads w.r.t. ``leak`` and ``k``.
    """
    mask1 = (x >= 0.).to(x)
    grad_x = mask1 * k + (1. - mask1) * leak
    return grad_output * grad_x, None, None
7,686 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def fake_numerical_gradient_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the fake numerical-gradient surrogate: sign(x)/x clamped to alpha.

    At x == 0 the division yields +inf, which the clamp maps to ``alpha``.

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    grad_x = torch.clamp_max(((x >= 0.) * 2. - 1.) / x, alpha)
    return grad_output * grad_x, None
7,687 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def log_tailed_relu_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the log-tailed ReLU surrogate spike function.

    grad_x = alpha for x <= 0, 1 for 0 < x <= 1, and 1/x for x > 1.

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    mask_gt1 = x > 1.
    mask_le0 = x <= 0.
    grad_x = torch.ones_like(grad_output)
    grad_x[mask_gt1] = 1. / x[mask_gt1]
    grad_x[mask_le0] = alpha
    return grad_output * grad_x, None
7,688 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def deterministic_pass_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Straight-through backward: pass the upstream gradient unchanged.

    :return: ``(grad_output, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return grad_output, None
7,689 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def rect_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the rectangular-window surrogate spike function.

    grad_x = alpha * grad_output inside the window |x| < 0.5/alpha, 0 outside.

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    return alpha * (x.abs() < 0.5 / alpha).to(x) * grad_output, None
7,690 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.fusion import *
from torch.autograd import Function
from torch import Tensor
from collections import namedtuple
from ...activation_based import layer
from ..neuron import LIFNode
from torch.nn.functional import interpolate
from ..surrogate import SurrogateFunctionBase, heaviside
from math import tanh
from torch.jit import script
import numpy as np
import argparse
The provided code snippet includes necessary dependencies for implementing the `network_layer_to_space` function. Write a Python function `def network_layer_to_space(net_arch)` to solve the following problem:
:param net_arch: network level sample rate 0: down 1: None 2: Up :type net_arch: numpy.ndarray :return: network level architecture network_space[layer][level][sample]: layer: 0 - 8 level: sample_level {0: 1, 1: 2, 2: 4, 3: 8} sample: 0: down 1: None 2: Up :rtype: numpy.ndarray Convert network level sample rate like [0,0,1,1,1,2,2,2] to network architecture.
Here is the function:
def network_layer_to_space(net_arch):
    """
    Convert a network-level sample-rate sequence like ``[0,0,1,1,1,2,2,2]``
    into a one-hot network architecture tensor.

    :param net_arch: per-layer level indices (0..3)
    :type net_arch: numpy.ndarray
    :return: one-hot array ``space[layer][level][sample]`` where
        ``sample`` encodes the transition from the previous layer's level:
        0: down, 1: same, 2: up. The first layer always uses sample 0.
    :rtype: numpy.ndarray
    :raises ValueError: if consecutive levels differ by more than 1 (the
        original code silently reused a stale ``sample`` in that case).
    """
    n = len(net_arch)
    if n == 0:
        # Original raised UnboundLocalError here; return an empty space instead.
        return np.zeros((0, 4, 3))
    space = np.zeros((n, 4, 3))
    prev = net_arch[0]
    space[0][prev][0] = 1
    for i in range(1, n):
        level = net_arch[i]
        if level == prev + 1:
            sample = 0
        elif level == prev:
            sample = 1
        elif level == prev - 1:
            sample = 2
        else:
            raise ValueError(f'invalid level transition {prev} -> {level} at layer {i}')
        space[i][level][sample] = 1
        prev = level
    return space
7,691 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.fusion import *
from torch.autograd import Function
from torch import Tensor
from collections import namedtuple
from ...activation_based import layer
from ..neuron import LIFNode
from torch.nn.functional import interpolate
from ..surrogate import SurrogateFunctionBase, heaviside
from math import tanh
from torch.jit import script
import numpy as np
import argparse
def dSpike_backward(grad_output: Tensor, x: Tensor, alpha: float):
    """Backward of the Dspike surrogate spike function.

    grad_x = grad_output * (alpha / (2*tanh(alpha/2))) / cosh(alpha*x)^2,
    zeroed outside the window |x| <= 0.5.

    :return: ``(grad_x, None)`` — ``None`` is the gradient w.r.t. ``alpha``.
    """
    mask = x.abs() > 0.5
    # Normalizing constant so the surrogate integrates to 1 over the window.
    const = alpha / (2. * tanh(alpha / 2.))
    grad_x = (grad_output * const / (alpha * x).cosh_().square_()
              ).masked_fill_(mask, 0)
    return grad_x, None
7,692 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.fusion import *
from torch.autograd import Function
from torch import Tensor
from collections import namedtuple
from ...activation_based import layer
from ..neuron import LIFNode
from torch.nn.functional import interpolate
from ..surrogate import SurrogateFunctionBase, heaviside
from math import tanh
from torch.jit import script
import numpy as np
import argparse
class DSpike(SurrogateFunctionBase):
    """Dspike surrogate gradient function (a tanh-shaped window surrogate)."""

    def __init__(self, alpha: float = 3, spiking=True):
        # Validate before handing off to the base class (fixes 'lager' typo).
        assert alpha > 0, 'alpha must be larger than 0.'
        super().__init__(alpha, spiking)

    @staticmethod
    def spiking_function(x: Tensor, alpha: float):
        # NOTE(review): @staticmethod is required — the base class invokes
        # self.spiking_function(x, self.alpha); a bound method would receive
        # self as x. Relies on a `dSpike` autograd Function defined elsewhere
        # in this module.
        return dSpike.apply(x, alpha)
class LIFNode(BaseNode):
    def __init__(self, tau: float = 2., decay_input: bool = True, v_threshold: float = 1.,
                 v_reset: float = 0., surrogate_function: Callable = surrogate.Sigmoid(),
                 detach_reset: bool = False, step_mode='s', backend='torch', store_v_seq: bool = False):
        """
        The Leaky Integrate-and-Fire neuron, which can be seen as a leaky
        integrator.

        :param tau: membrane time constant; must be a float strictly > 1
        :param decay_input: whether the input also decays
        :param v_threshold: firing threshold of this neuron layer
        :param v_reset: reset voltage. If not ``None``, the voltage is set to
            ``v_reset`` after a spike (hard reset); if ``None``,
            ``v_threshold`` is subtracted instead (soft reset)
        :param surrogate_function: surrogate used for the gradient of the
            heaviside step function in backward
        :param detach_reset: whether to detach the reset from the computation graph
        :param step_mode: ``'s'`` (single-step) or ``'m'`` (multi-step)
        :param backend: backend for this layer; check ``self.supported_backends``.
            ``'cupy'`` is the fastest when supported
        :param store_v_seq: in multi-step mode with input ``shape = [T, N, *]``,
            whether to keep the per-step voltages in ``self.v_seq``; if
            ``False`` only the last-step voltage is kept in ``self.v``,
            which saves memory

        Sub-threshold dynamics:

        If ``decay_input == True``:

        .. math::
            H[t] = V[t-1] + \\frac{1}{\\tau}(X[t] - (V[t-1] - V_{reset}))

        If ``decay_input == False``:

        .. math::
            H[t] = V[t-1] - \\frac{1}{\\tau}(V[t-1] - V_{reset}) + X[t]
        """
        assert isinstance(tau, float) and tau > 1.
        super().__init__(v_threshold, v_reset, surrogate_function, detach_reset, step_mode, backend, store_v_seq)
        self.tau = tau
        self.decay_input = decay_input
def supported_backends(self):
if self.step_mode == 's':
return ('torch', 'cupy')
elif self.step_mode == 'm':
return ('torch', 'cupy')
else:
raise ValueError(self.step_mode)
def extra_repr(self):
return super().extra_repr() + f', tau={self.tau}'
def neuronal_charge(self, x: torch.Tensor):
if self.decay_input:
if self.v_reset is None or self.v_reset == 0.:
self.v = self.neuronal_charge_decay_input_reset0(x, self.v, self.tau)
else:
self.v = self.neuronal_charge_decay_input(x, self.v, self.v_reset, self.tau)
else:
if self.v_reset is None or self.v_reset == 0.:
self.v = self.neuronal_charge_no_decay_input_reset0(x, self.v, self.tau)
else:
self.v = self.neuronal_charge_no_decay_input(x, self.v, self.v_reset, self.tau)
def neuronal_charge_decay_input_reset0(x: torch.Tensor, v: torch.Tensor, tau: float):
v = v + (x - v) / tau
return v
def neuronal_charge_decay_input(x: torch.Tensor, v: torch.Tensor, v_reset: float, tau: float):
v = v + (x - (v - v_reset)) / tau
return v
def neuronal_charge_no_decay_input_reset0(x: torch.Tensor, v: torch.Tensor, tau: float):
v = v * (1. - 1. / tau) + x
return v
def neuronal_charge_no_decay_input(x: torch.Tensor, v: torch.Tensor, v_reset: float, tau: float):
v = v - (v - v_reset) / tau + x
return v
def jit_eval_single_step_forward_hard_reset_decay_input(x: torch.Tensor, v: torch.Tensor, v_threshold: float,
v_reset: float, tau: float):
v = v + (x - (v - v_reset)) / tau
spike = (v >= v_threshold).to(x)
v = v_reset * spike + (1. - spike) * v
return spike, v
def jit_eval_single_step_forward_hard_reset_no_decay_input(x: torch.Tensor, v: torch.Tensor, v_threshold: float,
v_reset: float, tau: float):
v = v - (v - v_reset) / tau + x
spike = (v >= v_threshold).to(x)
v = v_reset * spike + (1. - spike) * v
return spike, v
def jit_eval_single_step_forward_soft_reset_decay_input(x: torch.Tensor, v: torch.Tensor, v_threshold: float,
tau: float):
v = v + (x - v) / tau
spike = (v >= v_threshold).to(x)
v = v - spike * v_threshold
return spike, v
def jit_eval_single_step_forward_soft_reset_no_decay_input(x: torch.Tensor, v: torch.Tensor, v_threshold: float,
tau: float):
v = v * (1. - 1. / tau) + x
spike = (v >= v_threshold).to(x)
v = v - spike * v_threshold
return spike, v
def jit_eval_multi_step_forward_hard_reset_decay_input(x_seq: torch.Tensor, v: torch.Tensor, v_threshold: float,
v_reset: float, tau: float):
spike_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v + (x_seq[t] - (v - v_reset)) / tau
spike = (v >= v_threshold).to(x_seq)
v = v_reset * spike + (1. - spike) * v
spike_seq[t] = spike
return spike_seq, v
def jit_eval_multi_step_forward_hard_reset_decay_input_with_v_seq(x_seq: torch.Tensor, v: torch.Tensor,
v_threshold: float, v_reset: float, tau: float):
spike_seq = torch.zeros_like(x_seq)
v_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v + (x_seq[t] - (v - v_reset)) / tau
spike = (v >= v_threshold).to(x_seq)
v = v_reset * spike + (1. - spike) * v
spike_seq[t] = spike
v_seq[t] = v
return spike_seq, v, v_seq
def jit_eval_multi_step_forward_hard_reset_no_decay_input(x_seq: torch.Tensor, v: torch.Tensor, v_threshold: float,
v_reset: float, tau: float):
spike_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v - (v - v_reset) / tau + x_seq[t]
spike = (v >= v_threshold).to(x_seq)
v = v_reset * spike + (1. - spike) * v
spike_seq[t] = spike
return spike_seq, v
def jit_eval_multi_step_forward_hard_reset_no_decay_input_with_v_seq(x_seq: torch.Tensor, v: torch.Tensor,
v_threshold: float, v_reset: float,
tau: float):
spike_seq = torch.zeros_like(x_seq)
v_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v - (v - v_reset) / tau + x_seq[t]
spike = (v >= v_threshold).to(x_seq)
v = v_reset * spike + (1. - spike) * v
spike_seq[t] = spike
v_seq[t] = v
return spike_seq, v, v_seq
def jit_eval_multi_step_forward_soft_reset_decay_input(x_seq: torch.Tensor, v: torch.Tensor, v_threshold: float,
tau: float):
spike_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v + (x_seq[t] - v) / tau
spike = (v >= v_threshold).to(x_seq)
v = v - spike * v_threshold
spike_seq[t] = spike
return spike_seq, v
def jit_eval_multi_step_forward_soft_reset_decay_input_with_v_seq(x_seq: torch.Tensor, v: torch.Tensor,
v_threshold: float, tau: float):
spike_seq = torch.zeros_like(x_seq)
v_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v + (x_seq[t] - v) / tau
spike = (v >= v_threshold).to(x_seq)
v = v - spike * v_threshold
spike_seq[t] = spike
v_seq[t] = v
return spike_seq, v, v_seq
def jit_eval_multi_step_forward_soft_reset_no_decay_input(x_seq: torch.Tensor, v: torch.Tensor, v_threshold: float,
tau: float):
spike_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v * (1. - 1. / tau) + x_seq[t]
spike = (v >= v_threshold).to(x_seq)
v = v - spike * v_threshold
spike_seq[t] = spike
return spike_seq, v
def jit_eval_multi_step_forward_soft_reset_no_decay_input_with_v_seq(x_seq: torch.Tensor, v: torch.Tensor,
v_threshold: float,
tau: float):
spike_seq = torch.zeros_like(x_seq)
v_seq = torch.zeros_like(x_seq)
for t in range(x_seq.shape[0]):
v = v * (1. - 1. / tau) + x_seq[t]
spike = (v >= v_threshold).to(x_seq)
v = v - spike * v_threshold
spike_seq[t] = spike
v_seq[t] = v
return spike_seq, v, v_seq
    def single_step_forward(self, x: torch.Tensor):
        # Training: dispatch to the configured backend ('torch' falls back to
        # the base class; 'cupy' uses fused CUDA kernels). Eval: always use
        # the jitted helpers.
        if self.training:
            if self.backend == 'torch':
                return super().single_step_forward(x)
            elif self.backend == 'cupy':
                hard_reset = self.v_reset is not None
                if x.dtype == torch.float:
                    dtype = 'float'
                elif x.dtype == torch.half:
                    dtype = 'half2'
                else:
                    raise NotImplementedError(x.dtype)
                # Lazily (re)build the CUDA kernels whenever the neuron's
                # attributes no longer match the cached kernels.
                if self.forward_kernel is None or not self.forward_kernel.check_attributes(hard_reset=hard_reset,
                                                                                          dtype=dtype,
                                                                                          decay_input=self.decay_input):
                    self.forward_kernel = ss_ac_neuron_kernel.LIFNodeFPKernel(decay_input=self.decay_input,
                                                                              hard_reset=hard_reset, dtype=dtype)
                if self.backward_kernel is None or not self.backward_kernel.check_attributes(
                        surrogate_function=self.surrogate_function.cuda_codes, hard_reset=hard_reset,
                        detach_reset=self.detach_reset, dtype=dtype, decay_input=self.decay_input):
                    self.backward_kernel = ss_ac_neuron_kernel.LIFNodeBPKernel(
                        decay_input=self.decay_input,
                        surrogate_function=self.surrogate_function.cuda_codes, hard_reset=hard_reset,
                        detach_reset=self.detach_reset, dtype=dtype)
                self.v_float_to_tensor(x)
                # The kernel operates on flattened tensors; reshape afterwards.
                spike, v = ss_ac_neuron_kernel.LIFNodeATGF.apply(x.flatten(0), self.v.flatten(0),
                                                                 self.v_threshold, self.v_reset, 1. / self.tau,
                                                                 self.forward_kernel,
                                                                 self.backward_kernel)
                spike = spike.reshape(x.shape)
                v = v.reshape(x.shape)
                self.v = v
                return spike
            else:
                raise ValueError(self.backend)
        else:
            # Inference: pick the jitted kernel matching the reset/decay config.
            self.v_float_to_tensor(x)
            if self.v_reset is None:
                if self.decay_input:
                    spike, self.v = self.jit_eval_single_step_forward_soft_reset_decay_input(x, self.v,
                                                                                             self.v_threshold, self.tau)
                else:
                    spike, self.v = self.jit_eval_single_step_forward_soft_reset_no_decay_input(x, self.v,
                                                                                                self.v_threshold,
                                                                                                self.tau)
            else:
                if self.decay_input:
                    spike, self.v = self.jit_eval_single_step_forward_hard_reset_decay_input(x, self.v,
                                                                                             self.v_threshold,
                                                                                             self.v_reset, self.tau)
                else:
                    spike, self.v = self.jit_eval_single_step_forward_hard_reset_no_decay_input(x, self.v,
                                                                                                self.v_threshold,
                                                                                                self.v_reset,
                                                                                                self.tau)
            return spike
    def multi_step_forward(self, x_seq: torch.Tensor):
        # Training: dispatch to the configured backend; eval: jitted multi-step
        # helpers chosen by (v_reset, decay_input, store_v_seq).
        if self.training:
            if self.backend == 'torch':
                return super().multi_step_forward(x_seq)
            elif self.backend == 'cupy':
                hard_reset = self.v_reset is not None
                if x_seq.dtype == torch.float:
                    dtype = 'float'
                elif x_seq.dtype == torch.half:
                    dtype = 'half2'
                else:
                    raise NotImplementedError(x_seq.dtype)
                # Lazily (re)build the fused BPTT CUDA kernels on attribute change.
                if self.forward_kernel is None or not self.forward_kernel.check_attributes(hard_reset=hard_reset,
                                                                                          dtype=dtype,
                                                                                          decay_input=self.decay_input):
                    self.forward_kernel = ac_neuron_kernel.LIFNodeFPTTKernel(decay_input=self.decay_input,
                                                                             hard_reset=hard_reset, dtype=dtype)
                if self.backward_kernel is None or not self.backward_kernel.check_attributes(
                        surrogate_function=self.surrogate_function.cuda_codes, hard_reset=hard_reset,
                        detach_reset=self.detach_reset, dtype=dtype, decay_input=self.decay_input):
                    self.backward_kernel = ac_neuron_kernel.LIFNodeBPTTKernel(decay_input=self.decay_input,
                                                                              surrogate_function=self.surrogate_function.cuda_codes,
                                                                              hard_reset=hard_reset,
                                                                              detach_reset=self.detach_reset,
                                                                              dtype=dtype)
                # Initialize self.v from the first time-step's shape.
                self.v_float_to_tensor(x_seq[0])
                spike_seq, v_seq = ac_neuron_kernel.LIFNodeATGF.apply(x_seq.flatten(1), self.v.flatten(0),
                                                                      self.v_threshold, self.v_reset, 1. / self.tau,
                                                                      self.forward_kernel,
                                                                      self.backward_kernel)
                spike_seq = spike_seq.reshape(x_seq.shape)
                v_seq = v_seq.reshape(x_seq.shape)
                if self.store_v_seq:
                    self.v_seq = v_seq
                # Keep only the final voltage; clone to detach from v_seq storage.
                self.v = v_seq[-1].clone()
                return spike_seq
            else:
                raise ValueError(self.backend)
        else:
            self.v_float_to_tensor(x_seq[0])
            if self.v_reset is None:
                if self.decay_input:
                    if self.store_v_seq:
                        spike_seq, self.v, self.v_seq = self.jit_eval_multi_step_forward_soft_reset_decay_input_with_v_seq(
                            x_seq, self.v, self.v_threshold, self.tau)
                    else:
                        spike_seq, self.v = self.jit_eval_multi_step_forward_soft_reset_decay_input(x_seq, self.v,
                                                                                                    self.v_threshold,
                                                                                                    self.tau)
                else:
                    if self.store_v_seq:
                        spike_seq, self.v, self.v_seq = self.jit_eval_multi_step_forward_soft_reset_no_decay_input_with_v_seq(
                            x_seq, self.v, self.v_threshold, self.tau)
                    else:
                        spike_seq, self.v = self.jit_eval_multi_step_forward_soft_reset_no_decay_input(x_seq, self.v,
                                                                                                       self.v_threshold,
                                                                                                       self.tau)
            else:
                if self.decay_input:
                    if self.store_v_seq:
                        spike_seq, self.v, self.v_seq = self.jit_eval_multi_step_forward_hard_reset_decay_input_with_v_seq(
                            x_seq, self.v, self.v_threshold, self.v_reset, self.tau)
                    else:
                        spike_seq, self.v = self.jit_eval_multi_step_forward_hard_reset_decay_input(x_seq, self.v,
                                                                                                    self.v_threshold,
                                                                                                    self.v_reset,
                                                                                                    self.tau)
                else:
                    if self.store_v_seq:
                        spike_seq, self.v, self.v_seq = self.jit_eval_multi_step_forward_hard_reset_no_decay_input_with_v_seq(
                            x_seq, self.v, self.v_threshold, self.v_reset, self.tau)
                    else:
                        spike_seq, self.v = self.jit_eval_multi_step_forward_hard_reset_no_decay_input(x_seq, self.v,
                                                                                                       self.v_threshold,
                                                                                                       self.v_reset,
                                                                                                       self.tau)
            return spike_seq
def getSpikingNode(v_threshold=0.5):
    """Build the standard LIF spiking node used in this network:
    tau=1.25, no input decay, detached hard reset, Dspike surrogate."""
    return LIFNode(tau=1.25, decay_input=False, v_threshold=v_threshold, detach_reset=True, surrogate_function=DSpike())
7,693 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.fusion import *
from torch.autograd import Function
from torch import Tensor
from collections import namedtuple
from ...activation_based import layer
from ..neuron import LIFNode
from torch.nn.functional import interpolate
from ..surrogate import SurrogateFunctionBase, heaviside
from math import tanh
from torch.jit import script
import numpy as np
import argparse
class DSpike(SurrogateFunctionBase):
    """Dspike surrogate gradient function (a tanh-shaped window surrogate)."""

    def __init__(self, alpha: float = 3, spiking=True):
        # Validate before handing off to the base class (fixes 'lager' typo).
        assert alpha > 0, 'alpha must be larger than 0.'
        super().__init__(alpha, spiking)

    @staticmethod
    def spiking_function(x: Tensor, alpha: float):
        # NOTE(review): @staticmethod is required — the base class invokes
        # self.spiking_function(x, self.alpha); a bound method would receive
        # self as x. Relies on a `dSpike` autograd Function defined elsewhere
        # in this module.
        return dSpike.apply(x, alpha)
class save_v_LIFNode(LIFNode):
    """LIFNode that additionally records the mean pre-spike membrane potential
    (relative to threshold) in ``self.v_before_spike`` at every step."""
    def single_step_forward(self, x: torch.Tensor):
        self.v_float_to_tensor(x)
        self.neuronal_charge(x)
        # Mean signed distance of the charged potential from threshold,
        # captured before fire/reset overwrite self.v.
        self.v_before_spike = (self.v - self.v_threshold).mean()
        spike = self.neuronal_fire()
        self.neuronal_reset(spike)
        return spike
    def multi_step_forward(self, x_seq: torch.Tensor):
        # Unroll over time, optionally stacking the recorded pre-spike means.
        T = x_seq.shape[0]
        y_seq = []
        if self.store_v_seq:
            v_seq = []
        for t in range(T):
            y = self.single_step_forward(x_seq[t])
            y_seq.append(y)
            if self.store_v_seq:
                v_seq.append(self.v_before_spike)
        if self.store_v_seq:
            # NOTE(review): unlike the base class, v_seq here holds scalar
            # per-step means, not full voltage tensors.
            self.v_seq = torch.stack(v_seq)
        return torch.stack(y_seq)
def get_save_v_SpikingNode(v_threshold=0.5):
    """Build a save_v_LIFNode (LIF with pre-spike voltage logging) with the
    standard settings: tau=1.25, no input decay, detached hard reset, Dspike
    surrogate."""
    return save_v_LIFNode(tau=1.25, decay_input=False, v_threshold=v_threshold, detach_reset=True,
                          surrogate_function=DSpike())
7,694 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def sequential_forward(sequential, x_seq):
    """Forward a ``[T, N, *]`` sequence through an ``nn.Sequential``.

    Spiking neurons (``neuron.BaseNode``) consume the whole sequence natively;
    every other (stateless) module is applied per-step via
    ``functional.seq_to_ann_forward``.
    """
    assert isinstance(sequential, nn.Sequential)
    out = x_seq
    # Iterate the modules directly instead of indexing by range(len(...)).
    for m in sequential:
        if isinstance(m, neuron.BaseNode):
            out = m(out)
        else:
            out = functional.seq_to_ann_forward(out, m)
    return out
7,695 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a SpikingVGG model and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` for the pretrained checkpoint
    :param cfg: key into ``cfgs`` selecting the layer configuration
    :param batch_norm: whether the variant uses normalization layers
    """
    if pretrained:
        # Pretrained weights already provide the initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # norm_layer only applies to the _bn variants (replaces the original
        # no-op `norm_layer = norm_layer` branch).
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg11` function. Write a Python function `def spiking_vgg11(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 :rtype: torch.nn.Module A spiking version of VGG-11 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg11(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-11
    :rtype: torch.nn.Module

    A spiking version of the VGG-11 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
    """
    return _spiking_vgg('vgg11', 'A', False, pretrained, progress, None, spiking_neuron, **kwargs)
7,696 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a SpikingVGG model and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` for the pretrained checkpoint
    :param cfg: key into ``cfgs`` selecting the layer configuration
    :param batch_norm: whether the variant uses normalization layers
    """
    if pretrained:
        # Pretrained weights already provide the initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # norm_layer only applies to the _bn variants (replaces the original
        # no-op `norm_layer = norm_layer` branch).
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg11_bn` function. Write a Python function `def spiking_vgg11_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with norm layer :rtype: torch.nn.Module A spiking version of VGG-11-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg11_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param norm_layer: a batch norm layer
    :type norm_layer: callable
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-11 with norm layer
    :rtype: torch.nn.Module

    A spiking version of the VGG-11-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
    """
    return _spiking_vgg('vgg11_bn', 'A', True, pretrained, progress, norm_layer, spiking_neuron, **kwargs)
7,697 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a SpikingVGG model and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` for the pretrained checkpoint
    :param cfg: key into ``cfgs`` selecting the layer configuration
    :param batch_norm: whether the variant uses normalization layers
    """
    if pretrained:
        # Pretrained weights already provide the initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # norm_layer only applies to the _bn variants (replaces the original
        # no-op `norm_layer = norm_layer` branch).
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg13` function. Write a Python function `def spiking_vgg13(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-13 :rtype: torch.nn.Module A spiking version of VGG-13 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg13(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-13
    :rtype: torch.nn.Module

    A spiking version of the VGG-13 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
    """
    return _spiking_vgg('vgg13', 'B', False, pretrained, progress, None, spiking_neuron, **kwargs)
7,698 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a SpikingVGG model and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` for the pretrained checkpoint
    :param cfg: key into ``cfgs`` selecting the layer configuration
    :param batch_norm: whether the variant uses normalization layers
    """
    if pretrained:
        # Pretrained weights already provide the initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # norm_layer only applies to the _bn variants (replaces the original
        # no-op `norm_layer = norm_layer` branch).
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg13_bn` function. Write a Python function `def spiking_vgg13_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with norm layer :rtype: torch.nn.Module A spiking version of VGG-13-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg13_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param norm_layer: a batch norm layer
:type norm_layer: callable
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-11 with norm layer
:rtype: torch.nn.Module
A spiking version of VGG-13-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
"""
return _spiking_vgg('vgg13_bn', 'B', True, pretrained, progress, norm_layer, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with norm layer :rtype: torch.nn.Module A spiking version of VGG-13-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ |
7,699 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a ``SpikingVGG`` and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` used when ``pretrained`` is True
    :param cfg: key into ``cfgs`` selecting the VGG layer configuration
    :param batch_norm: whether the model uses a norm layer after each conv
    :param pretrained: if True, load parameters pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param norm_layer: norm layer constructor; ignored unless ``batch_norm``
    :param spiking_neuron: a spiking neuron layer constructor
    :param kwargs: forwarded to ``SpikingVGG``
    :return: the constructed ``SpikingVGG``
    """
    if pretrained:
        # Weights are loaded below, so skip random initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # A norm layer only applies to the *_bn architectures; drop it otherwise.
        # (The original code had a no-op `norm_layer = norm_layer` branch here.)
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer,
                       spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg16` function. Write a Python function `def spiking_vgg16(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 :rtype: torch.nn.Module A spiking version of VGG-16 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg16(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-16
:rtype: torch.nn.Module
A spiking version of VGG-16 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
"""
return _spiking_vgg('vgg16', 'D', False, pretrained, progress, None, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 :rtype: torch.nn.Module A spiking version of VGG-16 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ |
7,700 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a ``SpikingVGG`` and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` used when ``pretrained`` is True
    :param cfg: key into ``cfgs`` selecting the VGG layer configuration
    :param batch_norm: whether the model uses a norm layer after each conv
    :param pretrained: if True, load parameters pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param norm_layer: norm layer constructor; ignored unless ``batch_norm``
    :param spiking_neuron: a spiking neuron layer constructor
    :param kwargs: forwarded to ``SpikingVGG``
    :return: the constructed ``SpikingVGG``
    """
    if pretrained:
        # Weights are loaded below, so skip random initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # A norm layer only applies to the *_bn architectures; drop it otherwise.
        # (The original code had a no-op `norm_layer = norm_layer` branch here.)
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer,
                       spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg16_bn` function. Write a Python function `def spiking_vgg16_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 with norm layer :rtype: torch.nn.Module A spiking version of VGG-16-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg16_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param norm_layer: a batch norm layer
:type norm_layer: callable
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-16 with norm layer
:rtype: torch.nn.Module
A spiking version of VGG-16-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
"""
return _spiking_vgg('vgg16_bn', 'D', True, pretrained, progress, norm_layer, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 with norm layer :rtype: torch.nn.Module A spiking version of VGG-16-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ |
7,701 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a ``SpikingVGG`` and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` used when ``pretrained`` is True
    :param cfg: key into ``cfgs`` selecting the VGG layer configuration
    :param batch_norm: whether the model uses a norm layer after each conv
    :param pretrained: if True, load parameters pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param norm_layer: norm layer constructor; ignored unless ``batch_norm``
    :param spiking_neuron: a spiking neuron layer constructor
    :param kwargs: forwarded to ``SpikingVGG``
    :return: the constructed ``SpikingVGG``
    """
    if pretrained:
        # Weights are loaded below, so skip random initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # A norm layer only applies to the *_bn architectures; drop it otherwise.
        # (The original code had a no-op `norm_layer = norm_layer` branch here.)
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer,
                       spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg19` function. Write a Python function `def spiking_vgg19(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 :rtype: torch.nn.Module A spiking version of VGG-19 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg19(pretrained=False, progress=True, spiking_neuron: callable = None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-19
:rtype: torch.nn.Module
A spiking version of VGG-19 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
"""
return _spiking_vgg('vgg19', 'E', False, pretrained, progress, None, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 :rtype: torch.nn.Module A spiking version of VGG-19 model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ |
7,702 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, batch_norm, pretrained, progress, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
    """Build a ``SpikingVGG`` and optionally load ANN-pretrained weights.

    :param arch: key into ``model_urls`` used when ``pretrained`` is True
    :param cfg: key into ``cfgs`` selecting the VGG layer configuration
    :param batch_norm: whether the model uses a norm layer after each conv
    :param pretrained: if True, load parameters pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param norm_layer: norm layer constructor; ignored unless ``batch_norm``
    :param spiking_neuron: a spiking neuron layer constructor
    :param kwargs: forwarded to ``SpikingVGG``
    :return: the constructed ``SpikingVGG``
    """
    if pretrained:
        # Weights are loaded below, so skip random initialization.
        kwargs['init_weights'] = False
    if not batch_norm:
        # A norm layer only applies to the *_bn architectures; drop it otherwise.
        # (The original code had a no-op `norm_layer = norm_layer` branch here.)
        norm_layer = None
    model = SpikingVGG(cfg=cfgs[cfg], batch_norm=batch_norm, norm_layer=norm_layer,
                       spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_vgg19_bn` function. Write a Python function `def spiking_vgg19_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 with norm layer :rtype: torch.nn.Module A spiking version of VGG-19-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Here is the function:
def spiking_vgg19_bn(pretrained=False, progress=True, norm_layer: callable = None, spiking_neuron: callable = None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param norm_layer: a batch norm layer
:type norm_layer: callable
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-19 with norm layer
:rtype: torch.nn.Module
A spiking version of VGG-19-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
"""
return _spiking_vgg('vgg19_bn', 'E', True, pretrained, progress, norm_layer, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param norm_layer: a batch norm layer :type norm_layer: callable :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 with norm layer :rtype: torch.nn.Module A spiking version of VGG-19-BN model from `"Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ |
7,703 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
def sew_function(x: torch.Tensor, y: torch.Tensor, cnf:str):
if cnf == 'ADD':
return x + y
elif cnf == 'AND':
return x * y
elif cnf == 'IAND':
return x * (1. - y)
else:
raise NotImplementedError | null |
7,704 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    # padding == dilation keeps the spatial size unchanged when stride == 1.
    return layer.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                        padding=dilation, groups=groups, bias=False, dilation=dilation) | 3x3 convolution with padding
7,705 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    # Pointwise convolution; no bias, as in the reference ResNet implementation.
    return layer.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 1x1 convolution
7,706 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class BasicBlock(nn.Module):
    """Spike-element-wise (SEW) residual basic block.

    Two conv3x3 -> norm -> spiking-neuron stages; the spiking output is
    combined with the (optionally downsampled and spiked) identity via
    ``sew_function`` selected by ``cnf``.
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: channel count of both convs in this block
        :param stride: stride of the first 3x3 conv (downsamples when != 1)
        :param downsample: optional module applied to the identity path
        :param norm_layer: norm constructor; defaults to ``layer.BatchNorm2d``
        :param cnf: name of the spike-element-wise connect function
        :param spiking_neuron: spiking neuron constructor; ``kwargs`` are
            deep-copied per instance so neurons do not share state
        """
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Extra spiking neuron on the identity path so both operands of
            # sew_function are spike tensors.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # NOTE(review): operands are (identity, out) here, while Bottleneck in
        # this file uses (out, identity); the order matters for 'IAND' — confirm intended.
        out = sew_function(identity, out, self.cnf)
        return out
    def extra_repr(self) -> str:
        # Include the connect-function name in the module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Construct a SEWResNet and, if requested, load ImageNet-pretrained weights."""
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnet18` function. Write a Python function `def sew_resnet18(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-18 :rtype: torch.nn.Module The spike-element-wise ResNet-18 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def sew_resnet18(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param cnf: the name of spike-element-wise function
:type cnf: str
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking ResNet-18
:rtype: torch.nn.Module
The spike-element-wise ResNet-18 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
"""
return _sew_resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, cnf, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-18 :rtype: torch.nn.Module The spike-element-wise ResNet-18 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ |
7,707 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class BasicBlock(nn.Module):
    """Spike-element-wise (SEW) residual basic block.

    Two conv3x3 -> norm -> spiking-neuron stages; the spiking output is
    combined with the (optionally downsampled and spiked) identity via
    ``sew_function`` selected by ``cnf``.
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: channel count of both convs in this block
        :param stride: stride of the first 3x3 conv (downsamples when != 1)
        :param downsample: optional module applied to the identity path
        :param norm_layer: norm constructor; defaults to ``layer.BatchNorm2d``
        :param cnf: name of the spike-element-wise connect function
        :param spiking_neuron: spiking neuron constructor; ``kwargs`` are
            deep-copied per instance so neurons do not share state
        """
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Extra spiking neuron on the identity path so both operands of
            # sew_function are spike tensors.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # NOTE(review): operands are (identity, out) here, while Bottleneck in
        # this file uses (out, identity); the order matters for 'IAND' — confirm intended.
        out = sew_function(identity, out, self.cnf)
        return out
    def extra_repr(self) -> str:
        # Include the connect-function name in the module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Construct a SEWResNet and, if requested, load ImageNet-pretrained weights."""
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnet34` function. Write a Python function `def sew_resnet34(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-34 :rtype: torch.nn.Module The spike-element-wise ResNet-34 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def sew_resnet34(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param cnf: the name of spike-element-wise function
:type cnf: str
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking ResNet-34
:rtype: torch.nn.Module
The spike-element-wise ResNet-34 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_
modified by the ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
"""
return _sew_resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, cnf, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-34 :rtype: torch.nn.Module The spike-element-wise ResNet-34 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ |
7,708 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) residual bottleneck block.

    1x1 -> 3x3 -> 1x1 conv stages (each conv -> norm -> spiking neuron); the
    spiking output is combined with the (optionally downsampled and spiked)
    identity via ``sew_function`` selected by ``cnf``.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; the block outputs ``planes * 4`` channels
        :param stride: stride of the 3x3 conv (ResNet V1.5 placement, see above)
        :param downsample: optional module applied to the identity path
        :param norm_layer: norm constructor; defaults to ``layer.BatchNorm2d``
        :param cnf: name of the spike-element-wise connect function
        :param spiking_neuron: spiking neuron constructor; ``kwargs`` are
            deep-copied per instance so neurons do not share state
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Extra spiking neuron on the identity path so both operands of
            # sew_function are spike tensors.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # NOTE(review): operands are (out, identity) here, while BasicBlock in
        # this file uses (identity, out); the order matters for 'IAND' — confirm intended.
        out = sew_function(out, identity, self.cnf)
        return out
    def extra_repr(self) -> str:
        # Include the connect-function name in the module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Construct a SEWResNet and, if requested, load ImageNet-pretrained weights."""
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnet50` function. Write a Python function `def sew_resnet50(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-50 :rtype: torch.nn.Module The spike-element-wise ResNet-50 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def sew_resnet50(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
"""
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
:type pretrained: bool
:param progress: If True, displays a progress bar of the download to stderr
:type progress: bool
:param cnf: the name of spike-element-wise function
:type cnf: str
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking ResNet-50
:rtype: torch.nn.Module
The spike-element-wise ResNet-50 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_
modified by the ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
"""
return _sew_resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, cnf, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-50 :rtype: torch.nn.Module The spike-element-wise ResNet-50 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ |
7,709 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) residual bottleneck block.

    1x1 -> 3x3 -> 1x1 conv stages (each conv -> norm -> spiking neuron); the
    spiking output is combined with the (optionally downsampled and spiked)
    identity via ``sew_function`` selected by ``cnf``.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; the block outputs ``planes * 4`` channels
        :param stride: stride of the 3x3 conv (ResNet V1.5 placement, see above)
        :param downsample: optional module applied to the identity path
        :param norm_layer: norm constructor; defaults to ``layer.BatchNorm2d``
        :param cnf: name of the spike-element-wise connect function
        :param spiking_neuron: spiking neuron constructor; ``kwargs`` are
            deep-copied per instance so neurons do not share state
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Extra spiking neuron on the identity path so both operands of
            # sew_function are spike tensors.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # NOTE(review): operands are (out, identity) here, while BasicBlock in
        # this file uses (identity, out); the order matters for 'IAND' — confirm intended.
        out = sew_function(out, identity, self.cnf)
        return out
    def extra_repr(self) -> str:
        # Include the connect-function name in the module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Construct a SEWResNet and, if requested, load ImageNet-pretrained weights."""
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnet101` function. Write a Python function `def sew_resnet101(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-101 :rtype: torch.nn.Module The spike-element-wise ResNet-101 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def sew_resnet101(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) ResNet-101.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the ResNet-101 model of
    "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a spiking neuron layer
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking ResNet-101 (:class:`torch.nn.Module`)
    """
    stage_blocks = [3, 4, 23, 3]  # bottleneck counts for conv2_x..conv5_x
    return _sew_resnet('resnet101', Bottleneck, stage_blocks, pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) bottleneck residual block.

    A ResNet bottleneck (1x1 -> 3x3 -> 1x1 conv, each followed by a norm
    layer and a spiking neuron) whose output is fused with the shortcut via
    ``sew_function`` instead of a plain addition.  When a ``downsample``
    module is present, the shortcut also passes through its own spiking
    neuron so both operands of ``sew_function`` are spike tensors.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; output is ``planes * expansion``
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the shortcut branch
        :param norm_layer: normalization layer factory (defaults to ``layer.BatchNorm2d``)
        :param cnf: name of the spike-element-wise connecting function
        :param spiking_neuron: factory producing a spiking neuron layer
        :param kwargs: forwarded (deep-copied) to every ``spiking_neuron`` call
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # Middle-conv width, scaled for wide/ResNeXt variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # kwargs are deep-copied so each neuron receives its own copy of the
        # constructor arguments rather than shared objects.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Spiking neuron for the shortcut branch.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # Fuse residual and shortcut spikes with the element-wise function named by cnf.
        out = sew_function(out, identity, self.cnf)
        return out

    def extra_repr(self) -> str:
        # Append the connecting-function name to the default module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Build a SEW ResNet and optionally load ImageNet-pretrained ANN weights.

    :param arch: key into ``model_urls`` identifying the torchvision architecture
    :param block: residual block class (e.g. ``Bottleneck``)
    :param layers: number of blocks in each of the four stages
    :param pretrained: if True, download and load the pretrained state dict
    :param progress: if True, show a download progress bar
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: callable producing a spiking neuron layer
    :param kwargs: forwarded to ``SEWResNet``
    """
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnet152` function. Write a Python function `def sew_resnet152(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-152 :rtype: torch.nn.Module The spike-element-wise ResNet-152 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def sew_resnet152(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) ResNet-152.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the ResNet-152 model of
    "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a single step neuron
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking ResNet-152 (:class:`torch.nn.Module`)
    """
    stage_blocks = [3, 8, 36, 3]  # bottleneck counts for conv2_x..conv5_x
    return _sew_resnet('resnet152', Bottleneck, stage_blocks, pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) bottleneck residual block.

    A ResNet bottleneck (1x1 -> 3x3 -> 1x1 conv, each followed by a norm
    layer and a spiking neuron) whose output is fused with the shortcut via
    ``sew_function`` instead of a plain addition.  When a ``downsample``
    module is present, the shortcut also passes through its own spiking
    neuron so both operands of ``sew_function`` are spike tensors.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; output is ``planes * expansion``
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the shortcut branch
        :param norm_layer: normalization layer factory (defaults to ``layer.BatchNorm2d``)
        :param cnf: name of the spike-element-wise connecting function
        :param spiking_neuron: factory producing a spiking neuron layer
        :param kwargs: forwarded (deep-copied) to every ``spiking_neuron`` call
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # Middle-conv width, scaled for wide/ResNeXt variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # kwargs are deep-copied so each neuron receives its own copy of the
        # constructor arguments rather than shared objects.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Spiking neuron for the shortcut branch.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # Fuse residual and shortcut spikes with the element-wise function named by cnf.
        out = sew_function(out, identity, self.cnf)
        return out

    def extra_repr(self) -> str:
        # Append the connecting-function name to the default module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Build a SEW ResNet and optionally load ImageNet-pretrained ANN weights.

    :param arch: key into ``model_urls`` identifying the torchvision architecture
    :param block: residual block class (e.g. ``Bottleneck``)
    :param layers: number of blocks in each of the four stages
    :param pretrained: if True, download and load the pretrained state dict
    :param progress: if True, show a download progress bar
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: callable producing a spiking neuron layer
    :param kwargs: forwarded to ``SEWResNet``
    """
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnext50_32x4d` function. Write a Python function `def sew_resnext50_32x4d(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNeXt-50 32x4d :rtype: torch.nn.Module The spike-element-wise ResNeXt-50 32x4d `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Here is the function:
def sew_resnext50_32x4d(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) ResNeXt-50 32x4d.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the ResNeXt-50 32x4d model of
    "Aggregated Residual Transformation for Deep Neural Networks"
    (https://arxiv.org/pdf/1611.05431.pdf).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a single step neuron
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking ResNeXt-50 32x4d (:class:`torch.nn.Module`)
    """
    # Cardinality / per-group width defining the 32x4d variant.
    kwargs.update(groups=32, width_per_group=4)
    return _sew_resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) bottleneck residual block.

    A ResNet bottleneck (1x1 -> 3x3 -> 1x1 conv, each followed by a norm
    layer and a spiking neuron) whose output is fused with the shortcut via
    ``sew_function`` instead of a plain addition.  When a ``downsample``
    module is present, the shortcut also passes through its own spiking
    neuron so both operands of ``sew_function`` are spike tensors.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; output is ``planes * expansion``
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the shortcut branch
        :param norm_layer: normalization layer factory (defaults to ``layer.BatchNorm2d``)
        :param cnf: name of the spike-element-wise connecting function
        :param spiking_neuron: factory producing a spiking neuron layer
        :param kwargs: forwarded (deep-copied) to every ``spiking_neuron`` call
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # Middle-conv width, scaled for wide/ResNeXt variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # kwargs are deep-copied so each neuron receives its own copy of the
        # constructor arguments rather than shared objects.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Spiking neuron for the shortcut branch.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # Fuse residual and shortcut spikes with the element-wise function named by cnf.
        out = sew_function(out, identity, self.cnf)
        return out

    def extra_repr(self) -> str:
        # Append the connecting-function name to the default module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Build a SEW ResNet and optionally load ImageNet-pretrained ANN weights.

    :param arch: key into ``model_urls`` identifying the torchvision architecture
    :param block: residual block class (e.g. ``Bottleneck``)
    :param layers: number of blocks in each of the four stages
    :param pretrained: if True, download and load the pretrained state dict
    :param progress: if True, show a download progress bar
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: callable producing a spiking neuron layer
    :param kwargs: forwarded to ``SEWResNet``
    """
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_resnext101_32x8d` function. Write a Python function `def sew_resnext101_32x8d(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNeXt-101 32x8d :rtype: torch.nn.Module The spike-element-wise ResNeXt-101 32x8d `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Here is the function:
def sew_resnext101_32x8d(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) ResNeXt-101 32x8d.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the ResNeXt-101 32x8d model of
    "Aggregated Residual Transformation for Deep Neural Networks"
    (https://arxiv.org/pdf/1611.05431.pdf).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a single step neuron
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking ResNeXt-101 32x8d (:class:`torch.nn.Module`)
    """
    # Cardinality / per-group width defining the 32x8d variant.
    kwargs.update(groups=32, width_per_group=8)
    return _sew_resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) bottleneck residual block.

    A ResNet bottleneck (1x1 -> 3x3 -> 1x1 conv, each followed by a norm
    layer and a spiking neuron) whose output is fused with the shortcut via
    ``sew_function`` instead of a plain addition.  When a ``downsample``
    module is present, the shortcut also passes through its own spiking
    neuron so both operands of ``sew_function`` are spike tensors.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; output is ``planes * expansion``
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the shortcut branch
        :param norm_layer: normalization layer factory (defaults to ``layer.BatchNorm2d``)
        :param cnf: name of the spike-element-wise connecting function
        :param spiking_neuron: factory producing a spiking neuron layer
        :param kwargs: forwarded (deep-copied) to every ``spiking_neuron`` call
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # Middle-conv width, scaled for wide/ResNeXt variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # kwargs are deep-copied so each neuron receives its own copy of the
        # constructor arguments rather than shared objects.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Spiking neuron for the shortcut branch.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # Fuse residual and shortcut spikes with the element-wise function named by cnf.
        out = sew_function(out, identity, self.cnf)
        return out

    def extra_repr(self) -> str:
        # Append the connecting-function name to the default module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Build a SEW ResNet and optionally load ImageNet-pretrained ANN weights.

    :param arch: key into ``model_urls`` identifying the torchvision architecture
    :param block: residual block class (e.g. ``Bottleneck``)
    :param layers: number of blocks in each of the four stages
    :param pretrained: if True, download and load the pretrained state dict
    :param progress: if True, show a download progress bar
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: callable producing a spiking neuron layer
    :param kwargs: forwarded to ``SEWResNet``
    """
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_wide_resnet50_2` function. Write a Python function `def sew_wide_resnet50_2(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking Wide ResNet-50-2 :rtype: torch.nn.Module The spike-element-wise Wide ResNet-50-2 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Here is the function:
def sew_wide_resnet50_2(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) Wide ResNet-50-2.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the Wide ResNet-50-2 model of
    "Wide Residual Networks" (https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-50 except that the bottleneck's middle (3x3) channel
    count is doubled; the outer 1x1 channel counts are unchanged, e.g. the last
    block has 2048-1024-2048 channels instead of 2048-512-2048.

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a single step neuron
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking Wide ResNet-50-2 (:class:`torch.nn.Module`)
    """
    kwargs['width_per_group'] = 64 * 2  # double the bottleneck width
    return _sew_resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spike-element-wise (SEW) bottleneck residual block.

    A ResNet bottleneck (1x1 -> 3x3 -> 1x1 conv, each followed by a norm
    layer and a spiking neuron) whose output is fused with the shortcut via
    ``sew_function`` instead of a plain addition.  When a ``downsample``
    module is present, the shortcut also passes through its own spiking
    neuron so both operands of ``sew_function`` are spike tensors.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cnf: str = None, spiking_neuron: callable = None, **kwargs):
        """
        :param inplanes: input channel count
        :param planes: base channel count; output is ``planes * expansion``
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the shortcut branch
        :param norm_layer: normalization layer factory (defaults to ``layer.BatchNorm2d``)
        :param cnf: name of the spike-element-wise connecting function
        :param spiking_neuron: factory producing a spiking neuron layer
        :param kwargs: forwarded (deep-copied) to every ``spiking_neuron`` call
        """
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # Middle-conv width, scaled for wide/ResNeXt variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # kwargs are deep-copied so each neuron receives its own copy of the
        # constructor arguments rather than shared objects.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        if downsample is not None:
            # Spiking neuron for the shortcut branch.
            self.downsample_sn = spiking_neuron(**deepcopy(kwargs))
        self.stride = stride
        self.cnf = cnf

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.sn3(out)
        if self.downsample is not None:
            identity = self.downsample_sn(self.downsample(x))
        # Fuse residual and shortcut spikes with the element-wise function named by cnf.
        out = sew_function(out, identity, self.cnf)
        return out

    def extra_repr(self) -> str:
        # Append the connecting-function name to the default module repr.
        return super().extra_repr() + f'cnf={self.cnf}'
def _sew_resnet(arch, block, layers, pretrained, progress, cnf, spiking_neuron, **kwargs):
    """Build a SEW ResNet and optionally load ImageNet-pretrained ANN weights.

    :param arch: key into ``model_urls`` identifying the torchvision architecture
    :param block: residual block class (e.g. ``Bottleneck``)
    :param layers: number of blocks in each of the four stages
    :param pretrained: if True, download and load the pretrained state dict
    :param progress: if True, show a download progress bar
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: callable producing a spiking neuron layer
    :param kwargs: forwarded to ``SEWResNet``
    """
    net = SEWResNet(block, layers, cnf=cnf, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        net.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return net
The provided code snippet includes necessary dependencies for implementing the `sew_wide_resnet101_2` function. Write a Python function `def sew_wide_resnet101_2(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param cnf: the name of spike-element-wise function :type cnf: str :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking Wide ResNet-101-2 :rtype: torch.nn.Module The spike-element-wise Wide ResNet-101-2 `"Deep Residual Learning in Spiking Neural Networks" <https://arxiv.org/abs/2102.04159>`_ modified by the Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Here is the function:
def sew_wide_resnet101_2(pretrained=False, progress=True, cnf: str = None, spiking_neuron: callable=None, **kwargs):
    """Spike-element-wise (SEW) Wide ResNet-101-2.

    Follows "Deep Residual Learning in Spiking Neural Networks"
    (https://arxiv.org/abs/2102.04159), built on the Wide ResNet-101-2 model of
    "Wide Residual Networks" (https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-101 except that the bottleneck's middle (3x3) channel
    count is doubled; the outer 1x1 channel counts are unchanged.

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :param progress: if True, display a download progress bar on stderr
    :param cnf: name of the spike-element-wise connecting function
    :param spiking_neuron: a single step neuron
    :param kwargs: kwargs for ``spiking_neuron``
    :return: Spiking Wide ResNet-101-2 (:class:`torch.nn.Module`)
    """
    kwargs['width_per_group'] = 64 * 2  # double the bottleneck width
    return _sew_resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                       cnf, spiking_neuron, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    """Instantiate an ``OTTTSpikingVGG`` for the configuration key ``cfg``.

    ``arch`` is accepted for interface symmetry with the other factories but
    is not used in this builder.
    """
    layer_cfg = cfgs[cfg]
    return OTTTSpikingVGG(cfg=layer_cfg, weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vggws` function. Write a Python function `def ottt_spiking_vggws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG (sWS), model used in 'Online Training Through Time for Spiking Neural Networks <https://openreview.net/forum?id=Siv3nHYHheI>' :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vggws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """Spiking VGG with scaled weight standardization (sWS).

    Model used in "Online Training Through Time for Spiking Neural Networks"
    (https://openreview.net/forum?id=Siv3nHYHheI).

    :param spiking_neuron: a spiking neuron layer
    :param kwargs: kwargs for ``spiking_neuron``
    :return: the spiking VGG-sWS network (:class:`torch.nn.Module`)
    """
    arch, cfg = 'vggws', 'S'
    return _spiking_vgg(arch, cfg, True, spiking_neuron, light_classifier=True, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    """Instantiate an ``OTTTSpikingVGG`` for the configuration key ``cfg``.

    ``arch`` is accepted for interface symmetry with the other factories but
    is not used in this builder.
    """
    layer_cfg = cfgs[cfg]
    return OTTTSpikingVGG(cfg=layer_cfg, weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg11` function. Write a Python function `def ottt_spiking_vgg11(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg11(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """Spiking VGG-11 (without weight standardization).

    :param spiking_neuron: a spiking neuron layer
    :param kwargs: kwargs for ``spiking_neuron``
    :return: the spiking VGG-11 network (:class:`torch.nn.Module`)
    """
    arch, cfg = 'vgg11', 'A'
    return _spiking_vgg(arch, cfg, False, spiking_neuron, light_classifier=False, **kwargs)
import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg11_ws` function. Write a Python function `def ottt_spiking_vgg11_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with weight standardization :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg11_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-11 with weight standardization
    :rtype: torch.nn.Module
    """
    # cfg 'A' selects the VGG-11 layer layout; weight standardization on, full classifier head.
    return _spiking_vgg('vgg11_ws', 'A', True, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with weight standardization :rtype: torch.nn.Module
7,718 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg13` function. Write a Python function `def ottt_spiking_vgg13(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-13 :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg13(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-13
    :rtype: torch.nn.Module
    """
    # cfg 'B' selects the VGG-13 layer layout; weight standardization off, full classifier head.
    return _spiking_vgg('vgg13', 'B', False, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-13 :rtype: torch.nn.Module
7,719 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg13_ws` function. Write a Python function `def ottt_spiking_vgg13_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-13 with weight standardization :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg13_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
"""
:param spiking_neuron: a spiking neuron layer
:type spiking_neuron: callable
:param kwargs: kwargs for `spiking_neuron`
:type kwargs: dict
:return: Spiking VGG-11 with weight standardization
:rtype: torch.nn.Module
"""
return _spiking_vgg('vgg13_ws', 'B', True, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-11 with weight standardization :rtype: torch.nn.Module |
7,720 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg16` function. Write a Python function `def ottt_spiking_vgg16(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg16(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-16
    :rtype: torch.nn.Module
    """
    # cfg 'D' selects the VGG-16 layer layout; weight standardization off, full classifier head.
    return _spiking_vgg('vgg16', 'D', False, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 :rtype: torch.nn.Module
7,721 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg16_ws` function. Write a Python function `def ottt_spiking_vgg16_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 with weight standardization :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg16_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-16 with weight standardization
    :rtype: torch.nn.Module
    """
    # cfg 'D' selects the VGG-16 layer layout; weight standardization on, full classifier head.
    return _spiking_vgg('vgg16_ws', 'D', True, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-16 with weight standardization :rtype: torch.nn.Module
7,722 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg19` function. Write a Python function `def ottt_spiking_vgg19(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg19(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-19
    :rtype: torch.nn.Module
    """
    # cfg 'E' selects the VGG-19 layer layout; weight standardization off, full classifier head.
    return _spiking_vgg('vgg19', 'E', False, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 :rtype: torch.nn.Module
7,723 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import functional, neuron, layer
def _spiking_vgg(arch, cfg, weight_standardization, spiking_neuron: callable = None, **kwargs):
    # Look up the layer layout registered under `cfg` and build the network directly.
    return OTTTSpikingVGG(cfg=cfgs[cfg], weight_standardization=weight_standardization,
                          spiking_neuron=spiking_neuron, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `ottt_spiking_vgg19_ws` function. Write a Python function `def ottt_spiking_vgg19_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs)` to solve the following problem:
:param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 with weight standardization :rtype: torch.nn.Module
Here is the function:
def ottt_spiking_vgg19_ws(spiking_neuron: callable = neuron.OTTTLIFNode, **kwargs):
    """
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking VGG-19 with weight standardization
    :rtype: torch.nn.Module
    """
    # cfg 'E' selects the VGG-19 layer layout; weight standardization on, full classifier head.
    return _spiking_vgg('vgg19_ws', 'E', True, spiking_neuron, light_classifier=False, **kwargs) | :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking VGG-19 with weight standardization :rtype: torch.nn.Module
7,726 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class BasicBlock(nn.Module):
    """Spiking variant of the torchvision ResNet basic block: two 3x3 convs,
    each followed by normalization and a stateful spiking neuron layer."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(BasicBlock, self).__init__()
        norm_layer = layer.BatchNorm2d if norm_layer is None else norm_layer
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when provided) performs the spatial downsampling
        # whenever stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        # Each neuron layer holds its own state, so it gets its own copy of kwargs.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.sn2(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    # Assemble the spiking network; optionally initialize it from the ANN
    # checkpoint registered under `arch`.
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnet18` function. Write a Python function `def spiking_resnet18(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-18 :rtype: torch.nn.Module A spiking version of ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def spiking_resnet18(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking ResNet-18
    :rtype: torch.nn.Module
    A spiking version of ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    # [2, 2, 2, 2]: number of BasicBlocks per ResNet stage (the ResNet-18 layout).
    return _spiking_resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-18 :rtype: torch.nn.Module A spiking version of ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
7,727 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class BasicBlock(nn.Module):
    """Spiking variant of the torchvision ResNet basic block: two 3x3 convs,
    each followed by normalization and a stateful spiking neuron layer."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(BasicBlock, self).__init__()
        norm_layer = layer.BatchNorm2d if norm_layer is None else norm_layer
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and downsample, when provided) performs the spatial downsampling
        # whenever stride != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        # Each neuron layer holds its own state, so it gets its own copy of kwargs.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.sn2(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    # Assemble the spiking network; optionally initialize it from the ANN
    # checkpoint registered under `arch`.
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnet34` function. Write a Python function `def spiking_resnet34(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-34 :rtype: torch.nn.Module A spiking version of ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def spiking_resnet34(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking ResNet-34
    :rtype: torch.nn.Module
    A spiking version of ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    # [3, 4, 6, 3]: number of BasicBlocks per ResNet stage (the ResNet-34 layout).
    return _spiking_resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-34 :rtype: torch.nn.Module A spiking version of ResNet-34 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
7,728 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking variant of the torchvision ResNet bottleneck block (1x1 -> 3x3 -> 1x1).

    Torchvision places the downsampling stride on the 3x3 convolution (self.conv2)
    instead of the first 1x1 convolution as in the original paper
    (https://arxiv.org/abs/1512.03385); this variant is known as ResNet V1.5
    (https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        norm_layer = layer.BatchNorm2d if norm_layer is None else norm_layer
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when provided) performs the spatial downsampling
        # whenever stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # Each neuron layer holds its own state, so it gets its own copy of kwargs.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.sn2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.sn3(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    # Assemble the spiking network; optionally initialize it from the ANN
    # checkpoint registered under `arch`.
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnet50` function. Write a Python function `def spiking_resnet50(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-50 :rtype: torch.nn.Module A spiking version of ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def spiking_resnet50(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking ResNet-50
    :rtype: torch.nn.Module
    A spiking version of ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    # [3, 4, 6, 3]: number of Bottleneck blocks per ResNet stage (the ResNet-50 layout).
    return _spiking_resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-50 :rtype: torch.nn.Module A spiking version of ResNet-50 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
7,729 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking variant of the torchvision ResNet bottleneck block (1x1 -> 3x3 -> 1x1).

    Torchvision places the downsampling stride on the 3x3 convolution (self.conv2)
    instead of the first 1x1 convolution as in the original paper
    (https://arxiv.org/abs/1512.03385); this variant is known as ResNet V1.5
    (https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        norm_layer = layer.BatchNorm2d if norm_layer is None else norm_layer
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when provided) performs the spatial downsampling
        # whenever stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # Each neuron layer holds its own state, so it gets its own copy of kwargs.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.sn2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.sn3(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    # Assemble the spiking network; optionally initialize it from the ANN
    # checkpoint registered under `arch`.
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnet101` function. Write a Python function `def spiking_resnet101(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-101 :rtype: torch.nn.Module A spiking version of ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def spiking_resnet101(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a spiking neuron layer
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking ResNet-101
    :rtype: torch.nn.Module
    A spiking version of ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    # [3, 4, 23, 3]: number of Bottleneck blocks per ResNet stage (the ResNet-101 layout).
    return _spiking_resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a spiking neuron layer :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-101 :rtype: torch.nn.Module A spiking version of ResNet-101 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
7,730 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking variant of the torchvision ResNet bottleneck block (1x1 -> 3x3 -> 1x1).

    Torchvision places the downsampling stride on the 3x3 convolution (self.conv2)
    instead of the first 1x1 convolution as in the original paper
    (https://arxiv.org/abs/1512.03385); this variant is known as ResNet V1.5
    (https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        norm_layer = layer.BatchNorm2d if norm_layer is None else norm_layer
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when provided) performs the spatial downsampling
        # whenever stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        # Each neuron layer holds its own state, so it gets its own copy of kwargs.
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.sn2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.sn3(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    # Assemble the spiking network; optionally initialize it from the ANN
    # checkpoint registered under `arch`.
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        model.load_state_dict(load_state_dict_from_url(model_urls[arch], progress=progress))
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnet152` function. Write a Python function `def spiking_resnet152(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-152 :rtype: torch.nn.Module A spiking version of ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Here is the function:
def spiking_resnet152(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """
    :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: If True, displays a progress bar of the download to stderr
    :type progress: bool
    :param spiking_neuron: a single step neuron
    :type spiking_neuron: callable
    :param kwargs: kwargs for `spiking_neuron`
    :type kwargs: dict
    :return: Spiking ResNet-152
    :rtype: torch.nn.Module
    A spiking version of ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    # [3, 8, 36, 3]: number of Bottleneck blocks per ResNet stage (the ResNet-152 layout).
    return _spiking_resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, spiking_neuron, **kwargs) | :param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNet-152 :rtype: torch.nn.Module A spiking version of ResNet-152 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
7,731 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = layer.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.sn1 = spiking_neuron(**deepcopy(kwargs))
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.sn2 = spiking_neuron(**deepcopy(kwargs))
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.sn3 = spiking_neuron(**deepcopy(kwargs))
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.sn1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.sn2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.sn3(out)
return out
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnext50_32x4d` function. Write a Python function `def spiking_resnext50_32x4d(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNeXt-50 32x4d :rtype: torch.nn.Module A spiking version of ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Here is the function:
def spiking_resnext50_32x4d(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """Spiking ResNeXt-50 32x4d, from `"Aggregated Residual Transformation for
    Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: if True, display a download progress bar on stderr
    :type progress: bool
    :param spiking_neuron: a single-step spiking neuron constructor
    :type spiking_neuron: callable
    :param kwargs: forwarded to ``spiking_neuron``
    :type kwargs: dict
    :return: the spiking network
    :rtype: torch.nn.Module
    """
    # 32 groups x 4 channels per group defines the ResNeXt-50 32x4d cardinality.
    kwargs.update(groups=32, width_per_group=4)
    return _spiking_resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, spiking_neuron, **kwargs)
7,732 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions),
    with a spiking neuron after each of the first two BatchNorms and one more
    after the residual addition."""
    # torchvision-style (ResNet V1.5) bottleneck: the stride lives on the 3x3
    # conv (self.conv2) rather than on the first 1x1 conv, per
    # "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385
    # and
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d

        def make_neuron():
            # Fresh deep-copied kwargs per neuron: no shared mutable state.
            return spiking_neuron(**deepcopy(kwargs))

        # ResNeXt / wide-ResNet width scaling via base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # self.conv2 (and self.downsample, when given) downsample when stride != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.sn1 = make_neuron()
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = make_neuron()
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = make_neuron()
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Apply the three conv stages, add the shortcut, fire the last neuron."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.sn1(self.bn1(self.conv1(x)))
        out = self.sn2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.sn3(out)
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    """Build a SpikingResNet from ``block``/``layers``; when ``pretrained``,
    load the ImageNet ANN weights registered for ``arch`` in ``model_urls``."""
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        # ANN checkpoint: parameter names must line up with the spiking model.
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_resnext101_32x8d` function. Write a Python function `def spiking_resnext101_32x8d(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking ResNeXt-101 32x8d :rtype: torch.nn.Module A spiking version of ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Here is the function:
def spiking_resnext101_32x8d(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """Spiking ResNeXt-101 32x8d, from `"Aggregated Residual Transformation for
    Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: if True, display a download progress bar on stderr
    :type progress: bool
    :param spiking_neuron: a single-step spiking neuron constructor
    :type spiking_neuron: callable
    :param kwargs: forwarded to ``spiking_neuron``
    :type kwargs: dict
    :return: the spiking network
    :rtype: torch.nn.Module
    """
    # 32 groups x 8 channels per group defines the ResNeXt-101 32x8d cardinality.
    kwargs.update(groups=32, width_per_group=8)
    return _spiking_resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, spiking_neuron, **kwargs)
7,733 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions, each
    followed by BatchNorm; a spiking neuron fires after the first two BNs and
    once more after the residual addition.

    ``spiking_neuron`` is a callable constructing a single-step neuron;
    ``kwargs`` is deep-copied per neuron so neurons share no mutable state.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # ResNeXt / wide-ResNet width scaling via base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run the block; the shortcut is downsampled when needed so shapes match."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the shortcut's spatial size / channel count to ``out``.
            identity = self.downsample(x)
        out += identity
        # Spiking activation after the residual sum, mirroring ReLU placement in ResNet.
        out = self.sn3(out)
        return out
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    """Build a SpikingResNet from ``block``/``layers``; when ``pretrained``,
    load the ImageNet ANN weights registered for ``arch`` in ``model_urls``."""
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        # ANN checkpoint: parameter names must line up with the spiking model.
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_wide_resnet50_2` function. Write a Python function `def spiking_wide_resnet50_2(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking Wide ResNet-50-2 :rtype: torch.nn.Module A spiking version of Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Here is the function:
def spiking_wide_resnet50_2(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """Spiking Wide ResNet-50-2, from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-50 except every bottleneck's inner (3x3) channel count is
    doubled; the outer 1x1 channel counts are unchanged (e.g. the last block
    is 2048-1024-2048 instead of 2048-512-2048).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: if True, display a download progress bar on stderr
    :type progress: bool
    :param spiking_neuron: a single-step spiking neuron constructor
    :type spiking_neuron: callable
    :param kwargs: forwarded to ``spiking_neuron``
    :type kwargs: dict
    :return: the spiking network
    :rtype: torch.nn.Module
    """
    # Doubling the per-group base width is what makes the network "wide".
    kwargs.update(width_per_group=64 * 2)
    return _spiking_resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, spiking_neuron, **kwargs)
7,734 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class Bottleneck(nn.Module):
    """Spiking bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions, each
    followed by BatchNorm; a spiking neuron fires after the first two BNs and
    once more after the residual addition.

    ``spiking_neuron`` is a callable constructing a single-step neuron;
    ``kwargs`` is deep-copied per neuron so neurons share no mutable state.
    """
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, spiking_neuron: callable = None, **kwargs):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = layer.BatchNorm2d
        # ResNeXt / wide-ResNet width scaling via base_width and groups.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.sn1 = spiking_neuron(**deepcopy(kwargs))
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.sn2 = spiking_neuron(**deepcopy(kwargs))
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.sn3 = spiking_neuron(**deepcopy(kwargs))
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run the block; the shortcut is downsampled when needed so shapes match."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.sn1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.sn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the shortcut's spatial size / channel count to ``out``.
            identity = self.downsample(x)
        out += identity
        # Spiking activation after the residual sum, mirroring ReLU placement in ResNet.
        out = self.sn3(out)
        return out
def _spiking_resnet(arch, block, layers, pretrained, progress, spiking_neuron, **kwargs):
    """Build a SpikingResNet from ``block``/``layers``; when ``pretrained``,
    load the ImageNet ANN weights registered for ``arch`` in ``model_urls``."""
    model = SpikingResNet(block, layers, spiking_neuron=spiking_neuron, **kwargs)
    if pretrained:
        # ANN checkpoint: parameter names must line up with the spiking model.
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
The provided code snippet includes necessary dependencies for implementing the `spiking_wide_resnet101_2` function. Write a Python function `def spiking_wide_resnet101_2(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs)` to solve the following problem:
:param pretrained: If True, the SNN will load parameters from the ANN pre-trained on ImageNet :type pretrained: bool :param progress: If True, displays a progress bar of the download to stderr :type progress: bool :param spiking_neuron: a single step neuron :type spiking_neuron: callable :param kwargs: kwargs for `spiking_neuron` :type kwargs: dict :return: Spiking Wide ResNet-101-2 :rtype: torch.nn.Module A spiking version of Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_ The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Here is the function:
def spiking_wide_resnet101_2(pretrained=False, progress=True, spiking_neuron: callable=None, **kwargs):
    """Spiking Wide ResNet-101-2, from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Same as ResNet-101 except every bottleneck's inner (3x3) channel count is
    doubled; the outer 1x1 channel counts are unchanged (e.g. the last block
    is 2048-1024-2048 instead of 2048-512-2048).

    :param pretrained: if True, load parameters from the ANN pre-trained on ImageNet
    :type pretrained: bool
    :param progress: if True, display a download progress bar on stderr
    :type progress: bool
    :param spiking_neuron: a single-step spiking neuron constructor
    :type spiking_neuron: callable
    :param kwargs: forwarded to ``spiking_neuron``
    :type kwargs: dict
    :return: the spiking network
    :rtype: torch.nn.Module
    """
    # Doubling the per-group base width is what makes the network "wide".
    kwargs.update(width_per_group=64 * 2)
    return _spiking_resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, spiking_neuron, **kwargs)
7,735 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(output, target, topk=(1,))` to solve the following problem:
Computes the accuracy over the k top predictions for the specified values of k
Here is the function:
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (percent) of ``output`` logits vs ``target``.

    Returns one float32 tensor per entry of ``topk``. 2-D targets (one-hot or
    soft labels) are reduced to class indices via argmax first.
    """
    with torch.inference_mode():
        k_max = max(topk)
        n = target.size(0)
        if target.ndim == 2:
            # Dense (one-hot / soft) targets -> hard class indices.
            target = target.max(dim=1)[1]
        # (batch, k_max) indices -> (k_max, batch) so row i is everyone's i-th guess.
        top_pred = output.topk(k_max, 1, True, True)[1].t()
        hits = top_pred.eq(target[None])
        return [hits[:k].flatten().sum(dtype=torch.float32) * (100.0 / n) for k in topk]
7,736 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
def mkdir(path):
    """Create directory ``path`` (with parents) if it does not already exist.

    Replaces the hand-rolled try/except-EEXIST idiom with
    ``os.makedirs(..., exist_ok=True)``. Unlike the old version, a ``path``
    that exists as a *non-directory* now raises ``FileExistsError`` instead of
    being silently accepted; any other ``OSError`` (permissions, bad path)
    still propagates.
    """
    os.makedirs(path, exist_ok=True)
7,737 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
def is_dist_avail_and_initialized():
    """True only when torch.distributed is compiled in AND a process group is up."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """World size of the current process group, or 1 outside distributed mode."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
7,738 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
def is_main_process():
    # Rank 0 is the coordinating ("master") process in distributed runs.
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """``torch.save(*args, **kwargs)``, executed only on the rank-0 process."""
    if is_main_process():
        torch.save(*args, **kwargs)
7,739 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
    """Replace ``builtins.print`` so that only the master process actually
    prints; any rank can still force output with ``print(..., force=True)``."""
    import builtins as __builtin__

    original_print = __builtin__.print

    def print(*args, **kwargs):
        # Always pop ``force`` so it never reaches the real print.
        if kwargs.pop("force", False) or is_master:
            original_print(*args, **kwargs)

    __builtin__.print = print
def init_distributed_mode(args):
    """Initialise torch.distributed from the environment, mutating ``args``.

    Fills in ``args.rank``, ``args.world_size``, ``args.gpu`` and
    ``args.distributed``, joins the NCCL process group at ``args.dist_url``,
    and silences ``print`` on every rank except rank 0. Falls back to
    non-distributed mode when no launcher environment is detected.
    """
    # torchrun / torch.distributed.launch export RANK, WORLD_SIZE, LOCAL_RANK.
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    # SLURM exports only a global proc id; derive the local GPU index from it.
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    elif hasattr(args, "rank"):
        # Caller pre-populated the rank (custom launcher): keep it as-is.
        pass
    else:
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    # NOTE(review): backend is hard-coded to NCCL, so CUDA devices are assumed.
    args.dist_backend = "nccl"
    print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
    torch.distributed.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
    )
    # Only rank 0 keeps a working print() from here on.
    setup_for_distributed(args.rank == 0)
7,740 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `average_checkpoints` function. Write a Python function `def average_checkpoints(inputs)` to solve the following problem:
Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from: https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16 Args: inputs (List[str]): An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors.
Here is the function:
def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from:
    https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16
    Args:
        inputs (List[str]): An iterable of string paths of checkpoints to load from.
    Returns:
        A dict of string keys mapping to various values. The 'model' key
        from the returned dict should correspond to an OrderedDict mapping
        string parameter names to torch Tensors.
    Raises:
        KeyError: if a checkpoint's parameter names differ from the first one's.
    """
    params_dict = OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for fpath in inputs:
        with open(fpath, "rb") as f:
            state = torch.load(
                f,
                map_location=(lambda s, _: torch.serialization.default_restore_location(s, "cpu")),
            )
            # Copies over the settings from the first checkpoint
            if new_state is None:
                new_state = state
            model_params = state["model"]
            model_params_keys = list(model_params.keys())
            if params_keys is None:
                params_keys = model_params_keys
            elif params_keys != model_params_keys:
                # BUG FIX: report the checkpoint *path*; the original
                # interpolated the file object ``f`` into the message.
                raise KeyError(
                    f"For checkpoint {fpath}, expected list of params: {params_keys}, but found: {model_params_keys}"
                )
            for k in params_keys:
                p = model_params[k]
                if isinstance(p, torch.HalfTensor):
                    # Accumulate fp16 weights in fp32 to limit precision loss.
                    p = p.float()
                if k not in params_dict:
                    params_dict[k] = p.clone()
                    # NOTE: clone() is needed in case of p is a shared parameter
                else:
                    params_dict[k] += p
    averaged_params = OrderedDict()
    for k, v in params_dict.items():
        averaged_params[k] = v
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            # Integer tensors (e.g. step counters) are floor-divided.
            averaged_params[k] //= num_models
    new_state["model"] = averaged_params
    return new_state
7,741 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `store_model_weights` function. Write a Python function `def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True)` to solve the following problem:
This method can be used to prepare weights files for new models. It receives as input a model architecture and a checkpoint from the training script and produces a file with the weights ready for release. Examples: from torchvision import models as M # Classification model = M.mobilenet_v3_large(pretrained=False) print(store_model_weights(model, './class.pth')) # Quantized Classification model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False) model.fuse_model(is_qat=True) model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack') _ = torch.ao.quantization.prepare_qat(model, inplace=True) print(store_model_weights(model, './qat.pth')) # Object Detection model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False) print(store_model_weights(model, './obj.pth')) # Segmentation model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True) print(store_model_weights(model, './segm.pth', strict=False)) Args: model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes. checkpoint_path (str): The path of the checkpoint we will load. checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored. Default: "model". strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` Returns: output_path (str): The location where the weights are saved.
Here is the function:
def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True):
    """
    This method can be used to prepare weights files for new models. It receives as
    input a model architecture and a checkpoint from the training script and produces
    a file with the weights ready for release.
    Examples:
        from torchvision import models as M
        # Classification
        model = M.mobilenet_v3_large(pretrained=False)
        print(store_model_weights(model, './class.pth'))
        # Quantized Classification
        model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False)
        model.fuse_model(is_qat=True)
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig('qnnpack')
        _ = torch.ao.quantization.prepare_qat(model, inplace=True)
        print(store_model_weights(model, './qat.pth'))
        # Object Detection
        model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False)
        print(store_model_weights(model, './obj.pth'))
        # Segmentation
        model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True)
        print(store_model_weights(model, './segm.pth', strict=False))
    Args:
        model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes.
        checkpoint_path (str): The path of the checkpoint we will load.
        checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
            Default: "model".
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
    Returns:
        output_path (str): The location where the weights are saved.
    """
    # Store the new model next to the checkpoint_path
    checkpoint_path = os.path.abspath(checkpoint_path)
    output_dir = os.path.dirname(checkpoint_path)
    # Deep copy to avoid side-effects on the model object.
    model = copy.deepcopy(model)
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Load the weights to the model to validate that everything works
    # and remove unnecessary weights (such as auxiliaries, etc)
    if checkpoint_key == "model_ema":
        # EMA checkpoints carry an extra averaging counter that no model expects.
        del checkpoint[checkpoint_key]["n_averaged"]
        # Strip the DistributedDataParallel "module." prefix from parameter names.
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(checkpoint[checkpoint_key], "module.")
    model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
    # Temp name from the object's hash: unique enough within this process.
    tmp_path = os.path.join(output_dir, str(model.__hash__()))
    torch.save(model.state_dict(), tmp_path)
    sha256_hash = hashlib.sha256()
    with open(tmp_path, "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_hash.update(byte_block)
    hh = sha256_hash.hexdigest()
    # Final name embeds the first 8 hex digits of the content hash (torchvision convention).
    output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
    os.replace(tmp_path, output_path)
    return output_path
7,742 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
def is_dist_avail_and_initialized():
    """True only when torch.distributed is compiled in AND a process group is up."""
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True
def reduce_across_processes(val):
    """Sum ``val`` over all workers; always returns a tensor."""
    if not is_dist_avail_and_initialized():
        # nothing to sync, but we still convert to tensor for consistency with the distributed case.
        return torch.tensor(val)
    total = torch.tensor(val, device="cuda")
    dist.barrier()
    dist.all_reduce(total)
    return total
7,743 | import datetime
import os
import time
import warnings
from .tv_ref_classify import presets, transforms, utils
import torch
import torch.utils.data
import torchvision
from .tv_ref_classify.sampler import RASampler
from torch import nn
from torch.utils.data.dataloader import default_collate
from torchvision.transforms.functional import InterpolationMode
import random
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import sys
import argparse
from .. import functional
def set_deterministic(_seed_: int = 2020, disable_uda=False):
    """Seed every RNG (python / numpy / torch CPU+CUDA) and force deterministic
    cuDNN kernels; unless ``disable_uda``, also enable
    ``torch.use_deterministic_algorithms`` with the cuBLAS workspace config."""
    random.seed(_seed_)
    np.random.seed(_seed_)
    # torch.manual_seed seeds the RNG for all devices (both CPU and CUDA).
    torch.manual_seed(_seed_)
    torch.cuda.manual_seed_all(_seed_)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if not disable_uda:
        # Required by CUDA >= 10.2 for deterministic cuBLAS: ":16:8" may limit
        # overall performance, ":4096:8" costs ~24 MiB of extra GPU memory.
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        torch.use_deterministic_algorithms(True)
7,744 | import datetime
import os
import time
import warnings
from .tv_ref_classify import presets, transforms, utils
import torch
import torch.utils.data
import torchvision
from .tv_ref_classify.sampler import RASampler
from torch import nn
from torch.utils.data.dataloader import default_collate
from torchvision.transforms.functional import InterpolationMode
import random
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import sys
import argparse
from .. import functional
def seed_worker(worker_id):
    """DataLoader worker_init_fn: derive the python/numpy seeds from torch's
    per-worker initial seed so all RNGs agree inside each worker."""
    derived = torch.initial_seed() % 2 ** 32
    np.random.seed(derived)
    random.seed(derived)
7,745 | import torch
import torch.nn as nn
from copy import deepcopy
from .. import layer
class MNISTNet(nn.Module):
    """Spiking CNN for MNIST: two {Conv-BN-neuron-MaxPool} stages, then a
    two-layer spiking classifier whose 100 outputs are vote-pooled by
    ``layer.VotingLayer()``.

    NOTE: ``NMNISTNet`` patches ``self.conv_fc[0]`` and ``self.conv_fc[-6]``
    by index, so the layer ordering inside this Sequential must stay fixed.
    """
    def __init__(self, channels=128, spiking_neuron: callable = None, **kwargs):
        super().__init__()
        # kwargs are deep-copied per neuron so neurons share no mutable state.
        self.conv_fc = nn.Sequential(
            layer.Conv2d(1, channels, kernel_size=3, padding=1, bias=False),
            layer.BatchNorm2d(channels),
            spiking_neuron(**deepcopy(kwargs)),
            layer.MaxPool2d(2, 2),
            layer.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            layer.BatchNorm2d(channels),
            spiking_neuron(**deepcopy(kwargs)),
            layer.MaxPool2d(2, 2),
            layer.Flatten(),
            layer.Dropout(0.5),
            # 7*7 spatial size assumes 28x28 input pooled twice (28->14->7).
            layer.Linear(channels * 7 * 7, 2048),
            spiking_neuron(**deepcopy(kwargs)),
            layer.Dropout(0.5),
            layer.Linear(2048, 100),
            spiking_neuron(**deepcopy(kwargs)),
            layer.VotingLayer()
        )
    def forward(self, x: torch.Tensor):
        """Forward through the conv + classifier stack; returns class scores."""
        return self.conv_fc(x)
class FashionMNISTNet(MNISTNet):
    """Same architecture as MNISTNet; Fashion-MNIST shares MNIST's 1x28x28 input format."""
    pass
class NMNISTNet(MNISTNet):
    """MNISTNet adapted to N-MNIST event data: the first conv takes 2 polarity
    channels, and the first Linear expects an 8x8 (not 7x7) feature map."""
    def __init__(self, channels=128, spiking_neuron: callable = None, **kwargs):
        super().__init__(channels, spiking_neuron, **kwargs)
        # Replace the input conv: event frames have 2 channels instead of 1.
        self.conv_fc[0] = layer.Conv2d(2, channels, kernel_size=3, padding=1, bias=False)
        # Replace the first Linear: 8x8 spatial size after the two pools
        # (input presumably 34x34 N-MNIST frames) -- TODO confirm.
        self.conv_fc[-6] = layer.Linear(channels * 8 * 8, 2048)
class CIFAR10Net(nn.Module):
    """Spiking VGG-style CNN for CIFAR-10 (3x32x32 input): two stages of
    three conv-BN-neuron triples, each stage followed by 2x2 max-pooling,
    then a dropout/linear head whose 100 outputs are voted down to 10."""

    def __init__(self, channels=256, spiking_neuron: callable = None, **kwargs):
        super().__init__()
        features = []
        for _stage in range(2):
            for _block in range(3):
                # First conv takes RGB; every later conv keeps `channels`.
                in_channels = 3 if not features else channels
                features.append(layer.Conv2d(in_channels, channels, kernel_size=3, padding=1, bias=False))
                features.append(layer.BatchNorm2d(channels))
                features.append(spiking_neuron(**deepcopy(kwargs)))
            features.append(layer.MaxPool2d(2, 2))
        self.conv_fc = nn.Sequential(
            *features,
            layer.Flatten(),
            layer.Dropout(0.5),
            # 32 -> 16 -> 8 spatial size after the two stage-level max-pools.
            layer.Linear(channels * 8 * 8, 2048),
            spiking_neuron(**deepcopy(kwargs)),
            layer.Dropout(0.5),
            layer.Linear(2048, 100),
            spiking_neuron(**deepcopy(kwargs)),
            layer.VotingLayer(10)
        )

    def forward(self, x):
        return self.conv_fc(x)
class CIFAR10DVSNet(nn.Module):
    """Spiking CNN for the CIFAR10-DVS event dataset (2x128x128 input):
    four conv-BN-neuron-pool stages, then a dropout/linear head whose 100
    outputs are voted down to 10 classes."""
    def __init__(self, channels=128, spiking_neuron: callable = None, **kwargs):
        super().__init__()
        conv = []
        for i in range(4):
            # First conv takes the 2 event-polarity channels.
            if conv.__len__() == 0:
                in_channels = 2
            else:
                in_channels = channels
            conv.append(layer.Conv2d(in_channels, channels, kernel_size=3, padding=1, bias=False))
            conv.append(layer.BatchNorm2d(channels))
            # A fresh spiking-neuron instance per site via deepcopy of kwargs.
            conv.append(spiking_neuron(**deepcopy(kwargs)))
            conv.append(layer.MaxPool2d(2, 2))
        self.conv_fc = nn.Sequential(
            *conv,
            layer.Flatten(),
            layer.Dropout(0.5),
            # 128 / 2**4 = 8 spatial size after the four 2x2 max-pools.
            layer.Linear(channels * 8 * 8, 512),
            spiking_neuron(**deepcopy(kwargs)),
            layer.Dropout(0.5),
            layer.Linear(512, 100),
            spiking_neuron(**deepcopy(kwargs)),
            layer.VotingLayer(10)
        )
    def forward(self, x: torch.Tensor):
        return self.conv_fc(x)
class DVSGestureNet(nn.Module):
    """Spiking CNN for DVS128 Gesture (2x128x128 input): five
    conv-BN-neuron-pool stages, then a dropout/linear head with 110 outputs
    reduced by VotingLayer(10) — presumably 10-way voting giving 11 gesture
    classes; confirm against VotingLayer's semantics."""
    def __init__(self, channels=128, spiking_neuron: callable = None, **kwargs):
        super().__init__()
        conv = []
        for i in range(5):
            # First conv takes the 2 event-polarity channels.
            if conv.__len__() == 0:
                in_channels = 2
            else:
                in_channels = channels
            conv.append(layer.Conv2d(in_channels, channels, kernel_size=3, padding=1, bias=False))
            conv.append(layer.BatchNorm2d(channels))
            # A fresh spiking-neuron instance per site via deepcopy of kwargs.
            conv.append(spiking_neuron(**deepcopy(kwargs)))
            conv.append(layer.MaxPool2d(2, 2))
        self.conv_fc = nn.Sequential(
            *conv,
            layer.Flatten(),
            layer.Dropout(0.5),
            # 128 / 2**5 = 4 spatial size after the five 2x2 max-pools.
            layer.Linear(channels * 4 * 4, 512),
            spiking_neuron(**deepcopy(kwargs)),
            layer.Dropout(0.5),
            layer.Linear(512, 110),
            spiking_neuron(**deepcopy(kwargs)),
            layer.VotingLayer(10)
        )
    def forward(self, x: torch.Tensor):
        return self.conv_fc(x)
def test_models():
    """Smoke-test every model in this module: build it, run a single-step
    batch, then switch to multi-step mode and run a time-major batch,
    resetting the network state after each pass.

    The original repeated the same eight statements six times; the shared
    logic is factored into one helper, with per-model input shapes passed in.
    """
    import torch
    from .. import neuron, surrogate, functional

    def _smoke_test(net_cls, channels, single_step_shape, multi_step_shape):
        # Build net_cls and verify both step modes run without error.
        net = net_cls(channels, neuron.IFNode, surrogate_function=surrogate.ATan())
        x = torch.rand(single_step_shape)
        print(net(x).shape)
        functional.reset_net(net)
        functional.set_step_mode(net, 'm')
        x = torch.rand(multi_step_shape)
        print(net(x).shape)
        functional.reset_net(net)

    _smoke_test(MNISTNet, 16, [2, 1, 28, 28], [4, 2, 1, 28, 28])
    _smoke_test(FashionMNISTNet, 16, [2, 1, 28, 28], [4, 2, 1, 28, 28])
    _smoke_test(NMNISTNet, 16, [2, 2, 32, 32], [4, 2, 2, 32, 32])
    _smoke_test(CIFAR10Net, 16, [2, 3, 32, 32], [4, 2, 3, 32, 32])
    _smoke_test(CIFAR10DVSNet, 16, [2, 2, 128, 128], [4, 2, 2, 128, 128])
    _smoke_test(DVSGestureNet, 16, [2, 2, 128, 128], [4, 2, 2, 128, 128])
7,746 | from typing import Callable, Dict, Optional, Tuple
import numpy as np
from .. import datasets as sjds
import os
import rarfile
import time
def load_events(fname: str):
    """Load ON ('pos') and OFF ('neg') events from an .npz file, tag each
    with a polarity column (1/0), merge them, and return them sorted by
    timestamp as a dict of per-field arrays."""
    archive = np.load(fname)
    on_events = archive['pos']
    off_events = archive['neg']
    # Append the polarity column: 1 for ON events, 0 for OFF events.
    on_events = np.hstack((on_events, np.ones((on_events.shape[0], 1))))
    off_events = np.hstack((off_events, np.zeros((off_events.shape[0], 1))))
    merged = np.vstack((on_events, off_events))
    # Sort all events chronologically by the timestamp column (index 2).
    merged = merged[np.argsort(merged[:, 2])]
    # NOTE(review): columns 0/1 are returned as y/x respectively — the raw
    # files presumably store (row, col); confirm against the recorder.
    return {
        'x': merged[:, 1],
        'y': merged[:, 0],
        't': merged[:, 2],
        'p': merged[:, 3]
    }
7,747 | from dataclasses import dataclass
import numpy as np
import math
from typing import Callable, Optional, Tuple, Union,Any, List
def slice_events_by_time_bins(events: np.ndarray, bin_count: int, overlap: float = 0.0):
    # Convenience wrapper: delegate to SliceByTimeBins (no targets passed) and
    # return only the list of event slices (element [0] of the result).
    return SliceByTimeBins(bin_count=bin_count, overlap=overlap).slice(events, None)[0]
def slice_events_by_count(
    events: np.ndarray,
    event_count: int,
    overlap: int = 0,
    include_incomplete: bool = False,
):
    # Convenience wrapper: delegate to SliceByEventCount (no targets passed)
    # and return only the list of event slices (element [0] of the result).
    return SliceByEventCount(
        event_count=event_count, overlap=overlap, include_incomplete=include_incomplete
    ).slice(events, None)[0]
The provided code snippet includes the necessary dependencies for implementing the `to_frame_numpy` function. Write a Python function `def to_frame_numpy( events, sensor_size, time_window=None, event_count=None, n_time_bins=None, n_event_bins=None, overlap=0.0, include_incomplete=False, )` that solves the following problem:
Accumulate events to frames by slicing along constant time (time_window), constant number of events (event_count) or constant number of frames (n_time_bins / n_event_bins). Parameters: events: ndarray of shape [num_events, num_event_channels] sensor_size: size of the sensor that was used [W,H,P] time_window (None): window length in us. event_count (None): number of events per frame. n_time_bins (None): fixed number of frames, sliced along time axis. n_event_bins (None): fixed number of frames, sliced along number of events in the recording. overlap (0.): overlap between frames defined either in time in us, number of events or number of bins. include_incomplete (False): if True, includes overhang slice when time_window or event_count is specified. Not valid for bin_count methods. Returns: numpy array with dimensions (TxPxHxW)
Here is the function:
def to_frame_numpy(
    events,
    sensor_size,
    time_window=None,
    event_count=None,
    n_time_bins=None,
    n_event_bins=None,
    overlap=0.0,
    include_incomplete=False,
):
    """Accumulate events to frames by slicing along constant time (time_window),
    constant number of events (event_count) or constant number of frames (n_time_bins / n_event_bins).
    Parameters:
        events: ndarray of shape [num_events, num_event_channels]
        sensor_size: size of the sensor that was used [W,H,P]
        time_window (None): window length in us.
        event_count (None): number of events per frame.
        n_time_bins (None): fixed number of frames, sliced along time axis.
        n_event_bins (None): fixed number of frames, sliced along number of events in the recording.
        overlap (0.): overlap between frames defined either in time in us, number of events or number of bins.
        include_incomplete (False): if True, includes overhang slice when time_window or event_count is specified. Not valid for bin_count methods.
    Returns:
        numpy array with dimensions (TxPxHxW)
    """
    # Fix: the original `assert "x" and "t" and "p" in events.dtype.names`
    # only tested "p" — non-empty string literals are truthy, so "x" and "t"
    # were never actually checked.
    assert all(name in events.dtype.names for name in ("x", "t", "p"))

    # Exactly one slicing strategy must be selected.
    if (
        not sum(
            param is not None
            for param in [time_window, event_count, n_time_bins, n_event_bins]
        )
        == 1
    ):
        raise ValueError(
            "Please assign a value to exactly one of the parameters time_window,"
            " event_count, n_time_bins or n_event_bins."
        )

    # Infer the sensor geometry from the data when not given explicitly.
    if not sensor_size:
        sensor_size_x = int(events["x"].max() + 1)
        sensor_size_p = len(np.unique(events["p"]))
        if "y" in events.dtype.names:
            sensor_size_y = int(events["y"].max() + 1)
            sensor_size = (sensor_size_x, sensor_size_y, sensor_size_p)
        else:
            sensor_size = (sensor_size_x, 1, sensor_size_p)

    # test for single polarity
    if sensor_size[2] == 1:
        events["p"] = 0

    # NOTE: truthiness means time_window=0 / event_count=0 fall through to the
    # bin-based branches; callers are expected to pass None or positive values.
    if time_window:
        event_slices = slice_events_by_time(
            events, time_window, overlap=overlap, include_incomplete=include_incomplete
        )
    elif event_count:
        event_slices = slice_events_by_count(
            events, event_count, overlap=overlap, include_incomplete=include_incomplete
        )
    elif n_time_bins:
        event_slices = slice_events_by_time_bins(events, n_time_bins, overlap=overlap)
    elif n_event_bins:
        event_slices = slice_events_by_event_bins(events, n_event_bins, overlap=overlap)

    # Histogram each slice into a (P, H, W) frame; 1-D sensors (no "y") get a
    # (P, W) frame instead.
    if "y" in events.dtype.names:
        frames = np.zeros((len(event_slices), *sensor_size[::-1]), dtype=np.int16)
        for i, event_slice in enumerate(event_slices):
            np.add.at(
                frames,
                (i, event_slice["p"].astype(int), event_slice["y"], event_slice["x"]),
                1,
            )
    else:
        frames = np.zeros(
            (len(event_slices), sensor_size[2], sensor_size[0]), dtype=np.int16
        )
        for i, event_slice in enumerate(event_slices):
            np.add.at(frames, (i, event_slice["p"].astype(int), event_slice["x"]), 1)
    return frames
7,748 | from dataclasses import dataclass
import numpy as np
import math
from typing import Callable, Optional, Tuple, Union,Any, List
def bina_rep(frames: np.ndarray) -> np.ndarray:
    """Computes one Bina-Rep frame from the sequence of N binary event-frames in parameter.

    Each of the N frames is treated as one bit of an N-bit number (the first
    frame is the most significant bit); the weighted sum is normalized to [0, 1].

    Args:
        frames (numpy.ndarray): the sequence of N binary event frames used to compute the bina-rep frame. Shape=(NxPxHxW)
    Returns:
        numpy.ndarray: the resulting bina-rep event frame. Shape=(PxHxW)
    """
    n_bits = frames.shape[0]
    # Bit weights 2^(N-1) ... 2^0, broadcast over the (P, H, W) axes.
    # The original built a Python list of P*H*W copies of the weight vector
    # and stacked it — same result, but O(N*P*H*W) temporary memory and a
    # Python-level loop; broadcasting does it in one C-level pass.
    weights = 2 ** np.arange(n_bits - 1, -1, -1, dtype=np.float32)
    weights = weights.reshape(-1, 1, 1, 1)
    return np.sum(weights * frames, 0) / (2 ** n_bits - 1)
The provided code snippet includes the necessary dependencies for implementing the `to_bina_rep_numpy` function. Write a Python function `def to_bina_rep_numpy( event_frames: np.ndarray, n_frames: int = 1, n_bits: int = 8, )` that solves the following problem:
Representation that takes T*B binary event frames to produce a sequence of T frames of N-bit numbers. To do so, N binary frames are interpreted as a single frame of N-bit representation. Taken from the paper Barchid et al. 2022, Bina-Rep Event Frames: a Simple and Effective Representation for Event-based cameras https://arxiv.org/pdf/2202.13662.pdf Parameters: event_frames: numpy.ndarray of shape (T*BxPxHxW). The sequence of event frames. n_frames (int): the number T of bina-rep frames. n_bits (int): the number N of bits used in the N-bit representation. Returns: (numpy.ndarray) the sequence of bina-rep event frames with dimensions (TxPxHxW).
Here is the function:
def to_bina_rep_numpy(
    event_frames: np.ndarray,
    n_frames: int = 1,
    n_bits: int = 8,
):
    """Representation that takes T*B binary event frames to produce a sequence of T frames of N-bit numbers.
    To do so, N binary frames are interpreted as a single frame of N-bit representation. Taken from the paper
    Barchid et al. 2022, Bina-Rep Event Frames: a Simple and Effective Representation for Event-based cameras
    https://arxiv.org/pdf/2202.13662.pdf
    Parameters:
        event_frames: numpy.ndarray of shape (T*BxPxHxW). The sequence of event frames.
        n_frames (int): the number T of bina-rep frames.
        n_bits (int): the number N of bits used in the N-bit representation.
    Returns:
        (numpy.ndarray) the sequence of bina-rep event frames with dimensions (TxPxHxW).
    """
    # isinstance instead of `type(...) ==` (accepts ndarray subclasses too).
    assert isinstance(event_frames, np.ndarray) and len(event_frames.shape) == 4
    assert n_frames >= 1
    # Fix: the original had `assert n_` and `bits >= 2` split across two
    # lines — a syntax error; this is the intended single assertion.
    assert n_bits >= 2
    if event_frames.shape[0] != n_bits * n_frames:
        raise ValueError(
            "the input event_frames must have the right number of frames to the targeted"
            f"sequence of {n_frames} bina-rep event frames of {n_bits}-bit representation."
            f"Got: {event_frames.shape[0]} frames. Expected: {n_frames}x{n_bits}={n_bits * n_frames} frames."
        )
    event_frames = (event_frames > 0).astype(np.float32)  # get binary event_frames
    bina_rep_seq = np.zeros((n_frames, *event_frames.shape[1:]), dtype=np.float32)
    # Each group of n_bits consecutive binary frames forms one N-bit frame.
    for i in range(n_frames):
        frames = event_frames[i * n_bits : (i + 1) * n_bits]
        bina_rep_frame = bina_rep(frames)
        bina_rep_seq[i] = bina_rep_frame
    return bina_rep_seq
7,749 | from dataclasses import dataclass
import numpy as np
import math
from typing import Callable, Optional, Tuple, Union,Any, List
The provided code snippet includes necessary dependencies for implementing the `to_voxel_grid_numpy` function. Write a Python function `def to_voxel_grid_numpy(events, sensor_size, n_time_bins=10)` to solve the following problem:
Build a voxel grid with bilinear interpolation in the time domain from a set of events. Implements the event volume from Zhu et al. 2019, Unsupervised event-based learning of optical flow, depth, and egomotion Parameters: events: ndarray of shape [num_events, num_event_channels] sensor_size: size of the sensor that was used [W,H]. n_time_bins: number of bins in the temporal axis of the voxel grid. Returns: numpy array of n event volumes (n,w,h,t)
Here is the function:
def to_voxel_grid_numpy(events, sensor_size, n_time_bins=10):
    """Build a voxel grid with bilinear interpolation in the time domain from a set of events.
    Implements the event volume from Zhu et al. 2019, Unsupervised event-based learning of optical flow, depth, and egomotion
    Parameters:
        events: ndarray of shape [num_events, num_event_channels]
        sensor_size: size of the sensor that was used [W,H].
        n_time_bins: number of bins in the temporal axis of the voxel grid.
    Returns:
        numpy array of n event volumes (n,w,h,t)
    """
    # Fix: the original `assert "x" and "y" and "t" and "p" in ...` only
    # tested "p" — non-empty string literals are truthy.
    assert all(name in events.dtype.names for name in ("x", "y", "t", "p"))
    assert sensor_size[2] == 2
    voxel_grid = np.zeros((n_time_bins, sensor_size[1], sensor_size[0]), float).ravel()

    # normalize the event timestamps so that they lie between 0 and n_time_bins
    ts = (
        n_time_bins
        * (events["t"].astype(float) - events["t"][0])
        / (events["t"][-1] - events["t"][0])
    )
    xs = events["x"].astype(int)
    ys = events["y"].astype(int)
    pols = events["p"]
    # NOTE: this mutates the caller's "p" field in place (0 -> -1).
    pols[pols == 0] = -1  # polarity should be +1 / -1

    # Bilinear split: each event contributes (1-dt) to its floor time bin and
    # dt to the next one.
    tis = ts.astype(int)
    dts = ts - tis
    vals_left = pols * (1.0 - dts)
    vals_right = pols * dts

    valid_indices = tis < n_time_bins
    np.add.at(
        voxel_grid,
        xs[valid_indices]
        + ys[valid_indices] * sensor_size[0]
        + tis[valid_indices] * sensor_size[0] * sensor_size[1],
        vals_left[valid_indices],
    )

    valid_indices = (tis + 1) < n_time_bins
    np.add.at(
        voxel_grid,
        xs[valid_indices]
        + ys[valid_indices] * sensor_size[0]
        + (tis[valid_indices] + 1) * sensor_size[0] * sensor_size[1],
        vals_right[valid_indices],
    )

    voxel_grid = np.reshape(
        voxel_grid, (n_time_bins, 1, sensor_size[1], sensor_size[0])
    )
    return voxel_grid
7,750 | from typing import Callable, Dict, Optional, Tuple
import h5py
import numpy as np
from torch.utils.data import Dataset
from torchvision.datasets import utils
from torchvision.datasets.utils import extract_archive
import os
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import time
from .. import configure
from ..datasets import np_savez
def integrate_events_by_fixed_frames_number_shd(events: Dict, split_by: str, frames_num: int, W: int) -> np.ndarray:
    """Integrate an SHD (t, x) spike stream into `frames_num` frames of
    length-W spike-count histograms, split according to `split_by`."""
    t = events['t']
    x = events['x']
    # Per-frame segment boundaries: frame i covers indices [j_l[i], j_r[i]).
    j_l, j_r = cal_fixed_frames_number_segment_index_shd(t, split_by, frames_num)
    frames = np.zeros([frames_num, W])
    for frame_index in range(frames_num):
        frames[frame_index] = integrate_events_segment_to_frame_shd(x, W, j_l[frame_index], j_r[frame_index])
    return frames
# Honor the global "compress saved datasets" switch when writing .npz files.
np_savez = np.savez_compressed if configure.save_datasets_compressed else np.savez
def integrate_events_file_to_frames_file_by_fixed_frames_number_shd(h5_file: h5py.File, i: int, output_dir: str, split_by: str, frames_num: int, W: int, print_save: bool = False) -> None:
    """Integrate sample i of an SHD hdf5 file into `frames_num` frames and
    save them as output_dir/<label>/<i>.npz."""
    events = {'t': h5_file['spikes']['times'][i], 'x': h5_file['spikes']['units'][i]}
    label = h5_file['labels'][i]
    frames = integrate_events_by_fixed_frames_number_shd(events, split_by, frames_num, W)
    out_fname = os.path.join(output_dir, str(label), str(i))
    np_savez(out_fname, frames=frames)
    if print_save:
        print(f'Frames [{out_fname}] saved.')
7,751 | from typing import Callable, Dict, Optional, Tuple
import h5py
import numpy as np
from torch.utils.data import Dataset
from torchvision.datasets import utils
from torchvision.datasets.utils import extract_archive
import os
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import time
from .. import configure
from ..datasets import np_savez
def integrate_events_by_fixed_duration_shd(events: Dict, duration: int, W: int) -> np.ndarray:
    # Slice an SHD (t, x) spike stream into windows spanning at most
    # `duration` each, and integrate every window into a length-W
    # spike-count histogram. Returns an array of shape [num_windows, W].
    x = events['x']
    # Timestamps are scaled by 1000 before comparison with `duration` —
    # presumably seconds -> milliseconds; TODO confirm the units.
    t = 1000*events['t']
    N = t.size
    frames = []
    left = 0
    right = 0
    while True:
        t_l = t[left]
        while True:
            # Grow the window [left, right) until it spans more than
            # `duration` or the stream is exhausted.
            if right == N or t[right] - t_l > duration:
                break
            else:
                right += 1
        # integrate from index [left, right)
        frames.append(np.expand_dims(integrate_events_segment_to_frame_shd(x, W, left, right), 0))
        left = right
        if right == N:
            return np.concatenate(frames)
# Honor the global "compress saved datasets" switch when writing .npz files.
np_savez = np.savez_compressed if configure.save_datasets_compressed else np.savez
def integrate_events_file_to_frames_file_by_fixed_duration_shd(h5_file: h5py.File, i: int, output_dir: str, duration: int, W: int, print_save: bool = False) -> None:
    """Integrate sample i of an SHD hdf5 file into fixed-duration frames,
    save them as output_dir/<label>/<i>.npz, and return the frame count."""
    events = {'t': h5_file['spikes']['times'][i], 'x': h5_file['spikes']['units'][i]}
    label = h5_file['labels'][i]
    frames = integrate_events_by_fixed_duration_shd(events, duration, W)
    out_fname = os.path.join(output_dir, str(label), str(i))
    np_savez(out_fname, frames=frames)
    if print_save:
        print(f'Frames [{out_fname}] saved.')
    return frames.shape[0]
7,752 | from typing import Callable, Dict, Optional, Tuple
import h5py
import numpy as np
from torch.utils.data import Dataset
from torchvision.datasets import utils
from torchvision.datasets.utils import extract_archive
import os
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import time
from .. import configure
from ..datasets import np_savez
def integrate_events_segment_to_frame_shd(x: np.ndarray, W: int, j_l: int = 0, j_r: int = -1) -> np.ndarray:
    """Accumulate the spike-unit indices x[j_l:j_r] into a length-W
    spike-count histogram (note: the default j_r=-1 excludes the last event)."""
    frame = np.zeros(shape=[W])
    # Cast to int so small unsigned dtypes cannot overflow during counting.
    segment = x[j_l: j_r].astype(int)
    counts = np.bincount(segment)
    # bincount only covers 0..max(segment); scatter-add into the fixed frame.
    frame[np.arange(counts.size)] += counts
    return frame
# Honor the global "compress saved datasets" switch when writing .npz files.
np_savez = np.savez_compressed if configure.save_datasets_compressed else np.savez
def custom_integrate_function_example(h5_file: h5py.File, i: int, output_dir: str, W: int):
    """Example custom integrator: split sample i's spike train at a random
    index and integrate the two halves into a [2, W] frame pair, saved as
    output_dir/<label>/<i>.npz."""
    times = h5_file['spikes']['times'][i]
    units = h5_file['spikes']['units'][i]
    label = h5_file['labels'][i]
    n_events = len(times)
    split_at = np.random.randint(low=0, high=n_events)
    frames = np.zeros([2, W])
    frames[0] = integrate_events_segment_to_frame_shd(units, W, 0, split_at)
    frames[1] = integrate_events_segment_to_frame_shd(units, W, split_at, n_events)
    np_savez(os.path.join(output_dir, str(label), str(i)), frames=frames)
7,753 | from struct import unpack, pack
import numpy as np
import sys
def peek(f, length=1):
    """Return the next `length` bytes of the stream without advancing the
    read position (read, then seek back to where we started)."""
    saved_pos = f.tell()
    chunk = f.read(length)
    f.seek(saved_pos)
    return chunk
from typing import Callable, Dict, Optional, Tuple
from .. import datasets as sjds
from torchvision.datasets.utils import extract_archive
import os
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import shutil
import time
from .. import configure
from ..datasets import np_savez
The provided code snippet includes necessary dependencies for implementing the `readATIS_tddat` function. Write a Python function `def readATIS_tddat(file_name, orig_at_zero = True, drop_negative_dt = True, verbose = True, events_restriction = [0, np.inf])` to solve the following problem:
reads ATIS td events in .dat format input: filename: string, path to the .dat file orig_at_zero: bool, if True, timestamps will start at 0 drop_negative_dt: bool, if True, events with a timestamp greater than the previous event are dismissed verbose: bool, if True, verbose mode. events_restriction: list [min ts, max ts], will return only events with ts in the defined boundaries output: timestamps: numpy array of length (number of events), timestamps coords: numpy array of size (number of events, 2), spatial coordinates: col 0 is x, col 1 is y. polarities: numpy array of length (number of events), polarities removed_events: integer, number of removed events (negative delta-ts)
Here is the function:
def readATIS_tddat(file_name, orig_at_zero = True, drop_negative_dt = True, verbose = True, events_restriction = [0, np.inf]):
    """
    reads ATIS td events in .dat format
    input:
    filename: string, path to the .dat file
    orig_at_zero: bool, if True, timestamps will start at 0
    drop_negative_dt: bool, if True, events with a timestamp greater than the previous event are dismissed
    verbose: bool, if True, verbose mode.
    events_restriction: list [min ts, max ts], will return only events with ts in the defined boundaries
    output:
    timestamps: numpy array of length (number of events), timestamps
    coords: numpy array of size (number of events, 2), spatial coordinates: col 0 is x, col 1 is y.
    polarities: numpy array of length (number of events), polarities
    removed_events: integer, number of removed events (negative delta-ts)
    """
    # Bit layout of one 8-byte event word: timestamp in the low 32 bits,
    # x / y / polarity packed in the high bits at the offsets below.
    polmask = 0x0002000000000000
    xmask = 0x000001FF00000000
    ymask = 0x0001FE0000000000
    polpadding = 49
    ypadding = 41
    xpadding = 32
    # This one read _td.dat files generated by kAER
    if verbose:
        print('Reading _td dat file... (' + file_name + ')')
    file = open(file_name, 'rb')
    # Optional header: comment lines starting with '%', then a 2-byte
    # (event type, event size) pair.
    header = False
    while peek(file) == b'%':
        file.readline()
        header = True
    if header:
        ev_type = unpack('B', file.read(1))[0]
        ev_size = unpack('B', file.read(1))[0]
        if verbose:
            print('> Header exists. Event type is ' + str(ev_type) + ', event size is ' + str(ev_size))
        if ev_size != 8:
            print('Wrong event size. Aborting.')
            # Fix: the original returned here without closing the file,
            # leaking the handle on the error path.
            file.close()
            return -1, -1, -1, -1
    else:  # set default ev type and size
        if verbose:
            print('> No header. Setting default event type and size.')
        ev_size = 8
        ev_type = 0
    # Compute number of events in the file from its remaining byte length.
    start = file.tell()
    file.seek(0, 2)
    stop = file.tell()
    file.seek(start)
    Nevents = int((stop - start) / ev_size)
    dNEvents = Nevents / 100
    if verbose:
        print("> The file contains %d events." % Nevents)
    # store read data
    timestamps = np.zeros(Nevents, dtype=int)
    polarities = np.zeros(Nevents, dtype=int)
    coords = np.zeros((Nevents, 2), dtype=int)
    ActualEvents = 0
    for i in np.arange(0, int(Nevents)):
        event = unpack('Q', file.read(8))
        ts = event[0] & 0x00000000FFFFFFFF
        # padding = event[0] & 0xFFFC000000000000
        pol = (event[0] & polmask) >> polpadding
        y = (event[0] & ymask) >> ypadding
        x = (event[0] & xmask) >> xpadding
        # Keep the event only if it is past the lower restriction bound and
        # its timestamp is monotonically non-decreasing w.r.t. the previous
        # stored slot. NOTE(review): rejected events leave zero-filled rows in
        # the middle of the arrays, yet the arrays are truncated from the END
        # below — this looks questionable; confirm the intended behaviour.
        if i >= events_restriction[0] and ts >= timestamps[max(0, i - 1)]:
            ActualEvents += 1
            timestamps[i] = ts
            polarities[i] = pol
            coords[i, 0] = x
            coords[i, 1] = y
        if verbose and i % dNEvents == 0:
            sys.stdout.write("> " + str(i / dNEvents) + "% \r")
            sys.stdout.flush()
        if i > events_restriction[1]:
            break
    file.close()
    if verbose:
        print("> After loading events, actually found {0} events.".format(ActualEvents))
    timestamps = timestamps[:ActualEvents]
    coords = coords[:ActualEvents, :]
    polarities = polarities[:ActualEvents]
    # check for negative timestamps
    for ts in timestamps:
        if ts < 0:
            print('Found a negative timestamp.')
    if orig_at_zero:
        timestamps = timestamps - timestamps[0]
    drop_sum = 0
    if drop_negative_dt:
        if verbose:
            print('> Looking for negative dts...')
        # Repeatedly delete events whose timestamp decreases relative to the
        # previous one, until a full pass removes nothing.
        just_dropped = True
        nPasses = 0
        while just_dropped:
            nPasses += 1
            index_neg = []
            just_dropped = False
            ii = 0
            while ii < (timestamps.size - 1):
                dt = timestamps[ii + 1] - timestamps[ii]
                if dt < 0:  # ts at ii+1 is smaller than at ii
                    index_neg += [ii + 1]
                    ii += 1
                    just_dropped = True
                if verbose and ii % dNEvents == 0:
                    sys.stdout.write("> " + str(ii / dNEvents) + "% (pass " + str(nPasses) + ") \r")
                    sys.stdout.flush()
                ii += 1
            if len(index_neg) > 0:
                drop_sum += len(index_neg)
                index_neg = np.array(index_neg)
                timestamps = np.delete(timestamps, index_neg)
                polarities = np.delete(polarities, index_neg)
                coords = np.delete(coords, index_neg, axis=0)
        if verbose:
            print('> Removed {0} events in {1} passes.'.format(drop_sum, nPasses))
        removed_events = drop_sum
    else:
        removed_events = -1
    if verbose:
        print("> Sequence duration: {0:.2f}s, ts[0] = {1}, ts[{2}] = {3}.".format(float(timestamps[-1] - timestamps[0]) / 1e6, timestamps[0], len(timestamps) - 1, timestamps[-1]))
    return timestamps, coords, polarities, removed_events
7,754 | import os
from typing import Callable, Tuple, Dict, Optional
from pathlib import Path
import torch
import torchaudio
from torch.utils.data import Dataset
from torch import Tensor
from torchvision.datasets.utils import (
download_url,
extract_archive
)
from torchvision.datasets.utils import verify_str_arg
import numpy as np
from random import choice
HASH_DIVIDER = "_nohash_"
def load_speechcommands_item(relpath: str, path: str) -> Tuple[Tensor, int, str, str, int]:
    """Load one SpeechCommands sample: decode the audio at path/relpath and
    parse the label, speaker id and utterance number from the relative path."""
    filepath = os.path.join(path, relpath)
    label, filename = os.path.split(relpath)
    # File stems look like "<speaker_id>_nohash_<utterance_number>".
    stem, _ = os.path.splitext(filename)
    speaker_id, utterance_str = stem.split(HASH_DIVIDER)
    # Load audio
    waveform, sample_rate = torchaudio.load(filepath)
    return waveform, sample_rate, label, speaker_id, int(utterance_str)
7,755 | from typing import Callable, Dict, Optional, Tuple
import numpy as np
from .. import datasets as sjds
from torchvision.datasets.utils import extract_archive
import os
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import time
from .. import configure
from ..datasets import np_savez
def load_raw_events(fp,
                    bytes_skip=0,
                    bytes_trim=0,
                    filter_dvs=False,
                    times_first=False):
    """Read raw (timestamp, packed address) event pairs from an AER stream.

    Args:
        fp: binary file object positioned at the start of the stream.
        bytes_skip: extra bytes to skip after the header.
        bytes_trim: bytes to drop from the end of the payload.
        filter_dvs: if True, keep only events whose type bits equal EVT_DVS.
        times_first: if True, the stream stores (timestamp, address) instead
            of (address, timestamp) pairs.

    Returns:
        (timestamp, raw_addr) arrays of big-endian uint32 values.
    """
    p = skip_header(fp)
    fp.seek(p + bytes_skip)
    data = fp.read()
    if bytes_trim > 0:
        data = data[:-bytes_trim]
    # Fix: np.fromstring on binary data has been deprecated since NumPy 1.14
    # and removed in NumPy 2.x; frombuffer is the supported (zero-copy)
    # equivalent. Downstream code only reads/slices, so read-only is fine.
    data = np.frombuffer(data, dtype='>u4')
    if len(data) % 2 != 0:
        print(data[:20:2])
        print('---')
        print(data[1:21:2])
        raise ValueError('odd number of data elements')
    raw_addr = data[::2]
    timestamp = data[1::2]
    if times_first:
        timestamp, raw_addr = raw_addr, timestamp
    if filter_dvs:
        valid = read_bits(raw_addr, valid_mask, valid_shift) == EVT_DVS
        timestamp = timestamp[valid]
        raw_addr = raw_addr[valid]
    return timestamp, raw_addr
def parse_raw_address(addr,
                      x_mask=x_mask,
                      x_shift=x_shift,
                      y_mask=y_mask,
                      y_shift=y_shift,
                      polarity_mask=polarity_mask,
                      polarity_shift=polarity_shift):
    """Unpack a raw DVS address word into its x, y and boolean polarity
    fields using the module-level mask/shift defaults (overridable per call)."""
    x = read_bits(addr, x_mask, x_shift)
    y = read_bits(addr, y_mask, y_shift)
    polarity = read_bits(addr, polarity_mask, polarity_shift).astype(np.bool_)
    return x, y, polarity
def load_events(
        fp,
        filter_dvs=False,
        # bytes_skip=0,
        # bytes_trim=0,
        # times_first=False,
        **kwargs):
    # Read (timestamp, packed address) pairs from an AER stream, then decode
    # the addresses into x, y and polarity arrays. Extra kwargs (custom
    # masks/shifts) are forwarded to parse_raw_address.
    timestamp, addr = load_raw_events(
        fp,
        filter_dvs=filter_dvs,
        # bytes_skip=bytes_skip,
        # bytes_trim=bytes_trim,
        # times_first=times_first
    )
    x, y, polarity = parse_raw_address(addr, **kwargs)
    return timestamp, x, y, polarity
7,756 | import os
import io
import json
import numpy as np
import cv2
import gradio as gr
import modules.scripts as scripts
from modules import script_callbacks
from modules.shared import opts
from modules.paths import models_path
from basicsr.utils.download_util import load_file_from_url
from scripts.openpose.body import Body
from PIL import Image
# Pose model handle; starts as None — presumably initialized elsewhere on
# first use to avoid paying the model-load cost at import time (confirm).
body_estimation = None
# Saved pose presets live next to the extension; a missing file simply means
# no presets have been saved yet, so it is silently ignored.
presets_file = os.path.join(scripts.basedir(), "presets.json")
presets = {}
try:
    with open(presets_file) as file:
        presets = json.load(file)
except FileNotFoundError:
    pass
def pil2cv(in_image):
    """Convert a PIL image (grayscale, RGB or RGBA) to an OpenCV-style
    ndarray with BGR channel ordering.

    Fixes two defects in the original: a 2-D grayscale image raised
    IndexError on `shape[2]`, and RGBA images were returned with the wrong
    (unconverted) channel order.
    """
    out_image = np.array(in_image, dtype=np.uint8)
    # Grayscale: no channel axis, nothing to reorder.
    if out_image.ndim == 2:
        return out_image
    if out_image.shape[2] == 3:
        out_image = cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR)
    elif out_image.shape[2] == 4:
        out_image = cv2.cvtColor(out_image, cv2.COLOR_RGBA2BGRA)
    return out_image
def candidate2li(li):
    """Keep only the leading (x, y) coordinates of each candidate entry,
    discarding any trailing fields (e.g. confidence scores)."""
    return [[x, y] for x, y, *_ in li]
def subset2li(li):
    """Flatten a list of rows into a single flat list, row by row."""
    return [item for row in li for item in row]
class Body(object):
    """CMU 18-keypoint OpenPose body estimator (PyTorch port).

    NOTE(review): this definition shadows the ``Body`` imported from
    ``scripts.openpose.body`` earlier in the file -- presumably the same
    class inlined; confirm which one callers actually use.
    """

    def __init__(self, model_path):
        # Build the network, move to GPU when available, load the checkpoint
        # (re-keyed by util.transfer to this module's layer names), freeze.
        self.model = bodypose_model()
        if torch.cuda.is_available():
            self.model = self.model.cuda()
        model_dict = util.transfer(self.model, torch.load(model_path))
        self.model.load_state_dict(model_dict)
        self.model.eval()

    def __call__(self, oriImg):
        """Estimate body poses in ``oriImg`` (BGR, HWC -- per the cv2 usage).

        Returns ``(candidate, subset)``:
          candidate -- (num_peaks, 4) rows of [x, y, score, peak_id]
          subset    -- (num_people, 20); cols 0..17 index into candidate
                       (-1 = missing), col 18 person score, col 19 part count.
        """
        # scale_search = [0.5, 1.0, 1.5, 2.0]
        scale_search = [0.5]
        boxsize = 368
        stride = 8
        padValue = 128
        threshold1 = 0.1   # peak threshold on the smoothed heatmaps
        threshold2 = 0.05  # per-sample PAF threshold along a limb
        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

        # ---- stage 1: network forward pass per scale, maps accumulated ----
        for m in range(len(multiplier)):
            scale = multiplier[m]
            imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
            # HWC uint8 -> NCHW float, scaled roughly into [-0.5, 0.5)
            im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
            im = np.ascontiguousarray(im)
            data = torch.from_numpy(im).float()
            if torch.cuda.is_available():
                data = data.cuda()
            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
            with torch.no_grad():
                Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
            Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
            Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
            # extract outputs, resize, and remove padding
            # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
            heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))  # output 1 is heatmaps
            heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
            # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
            paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))  # output 0 is PAFs
            paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
            # NOTE(review): this doubles the running average before adding.
            # Harmless while scale_search has one entry (heatmap_avg starts at
            # zero), but wrong for true multi-scale runs -- confirm before
            # re-enabling the longer scale_search above.
            heatmap_avg += heatmap_avg + heatmap / len(multiplier)
            paf_avg += + paf / len(multiplier)

        # ---- stage 2: non-maximum suppression on each joint heatmap ----
        all_peaks = []
        peak_counter = 0
        for part in range(18):
            map_ori = heatmap_avg[:, :, part]
            # gaussian_filter is presumably scipy.ndimage's, imported
            # elsewhere in this module -- not visible here.
            one_heatmap = gaussian_filter(map_ori, sigma=3)
            # A peak must beat all four neighbours and the score threshold.
            map_left = np.zeros(one_heatmap.shape)
            map_left[1:, :] = one_heatmap[:-1, :]
            map_right = np.zeros(one_heatmap.shape)
            map_right[:-1, :] = one_heatmap[1:, :]
            map_up = np.zeros(one_heatmap.shape)
            map_up[:, 1:] = one_heatmap[:, :-1]
            map_down = np.zeros(one_heatmap.shape)
            map_down[:, :-1] = one_heatmap[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > threshold1))
            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            peak_id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        # find connection in the specified sequence, center 29 is in the position 15
        limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
                   [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
                   [1, 16], [16, 18], [3, 17], [6, 18]]
        # the middle joints heatmap correpondence
        mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
                  [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
                  [55, 56], [37, 38], [45, 46]]

        # ---- stage 3: score candidate limb connections using the PAFs ----
        connection_all = []
        special_k = []
        mid_num = 10  # number of points sampled along each candidate limb
        for k in range(len(mapIdx)):
            score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
            candA = all_peaks[limbSeq[k][0] - 1]
            candB = all_peaks[limbSeq[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limbSeq[k]
            if (nA != 0 and nB != 0):
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        # Unit vector from candidate A towards candidate B.
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                        norm = max(0.001, norm)
                        vec = np.divide(vec, norm)
                        # Sample the PAF along the segment and project onto
                        # the limb direction.
                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                          for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                          for I in range(len(startend))])
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        # Distance prior penalises limbs longer than half the
                        # image height.
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
                            0.5 * oriImg.shape[0] / norm - 1, 0)
                        criterion1 = len(np.nonzero(score_midpts > threshold2)[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
                # Greedy matching: best-scoring pairs first, each peak used
                # at most once per limb type.
                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if (i not in connection[:, 3] and j not in connection[:, 4]):
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if (len(connection) >= min(nA, nB)):
                            break
                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        # ---- stage 4: assemble limbs into per-person subsets ----
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, 20))
        candidate = np.array([item for sublist in all_peaks for item in sublist])
        for k in range(len(mapIdx)):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(limbSeq[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1
                    if found == 1:
                        # Extend an existing person with the new B part.
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])
        # delete some rows of subset which has few parts occur
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)
        # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
        # candidate: x, y, score, id
        return candidate, subset
def on_ui_tabs():
    """Build the OpenPose Editor tab.

    Returns the ``[(block, title, elem_id)]`` triple expected by the
    WebUI's ``on_ui_tabs`` callback.  Most interactivity lives in
    JavaScript handlers (``_js``); Python only supplies pose detection
    (``estimate``) and preset persistence (``savePreset``).
    """
    with gr.Blocks(analytics_enabled=False) as openpose_editor:
        with gr.Row():
            with gr.Column():
                width = gr.Slider(label="width", minimum=64, maximum=2048, value=512, step=64, interactive=True)
                height = gr.Slider(label="height", minimum=64, maximum=2048, value=512, step=64, interactive=True)
                with gr.Row():
                    add = gr.Button(value="Add", variant="primary")
                    # delete = gr.Button(value="Delete")
                with gr.Row():
                    reset_btn = gr.Button(value="Reset")
                    json_input = gr.UploadButton(label="Load from JSON", file_types=[".json"], elem_id="openpose_json_button")
                    png_input = gr.UploadButton(label="Detect from Image", file_types=["image"], type="bytes", elem_id="openpose_detect_button")
                    bg_input = gr.UploadButton(label="Add Background Image", file_types=["image"], elem_id="openpose_bg_button")
                with gr.Row():
                    preset_list = gr.Dropdown(label="Presets", choices=sorted(presets.keys()), interactive=True)
                    preset_load = gr.Button(value="Load Preset")
                    preset_save = gr.Button(value="Save Preset")
            with gr.Column():
                # gradioooooo...
                canvas = gr.HTML('<canvas id="openpose_editor_canvas" width="512" height="512" style="margin: 0.25rem; border-radius: 0.25rem; border: 0.5px solid"></canvas>')
                jsonbox = gr.Text(label="json", elem_id="jsonbox", visible=False)
                with gr.Row():
                    json_output = gr.Button(value="Save JSON")
                    png_output = gr.Button(value="Save PNG")
                    send_t2t = gr.Button(value="Send to txt2img")
                    send_i2i = gr.Button(value="Send to img2img")

                # Target selector only shown when ControlNet exposes more
                # than one model slot.
                control_net_max_models_num = getattr(opts, 'control_net_max_models_num', 0)
                select_target_index = gr.Dropdown([str(i) for i in range(control_net_max_models_num)], label="Send to", value="0", interactive=True, visible=(control_net_max_models_num > 1))

        def estimate(file):
            # Lazy-load the body model (downloading the checkpoint on first
            # use), run detection and hand the result back as a JSON string.
            global body_estimation
            if body_estimation is None:
                model_path = os.path.join(models_path, "openpose", "body_pose_model.pth")
                if not os.path.isfile(model_path):
                    body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
                    load_file_from_url(body_model_path, model_dir=os.path.join(models_path, "openpose"))
                body_estimation = Body(model_path)
            stream = io.BytesIO(file)
            img = Image.open(stream)
            candidate, subset = body_estimation(pil2cv(img))
            result = {
                "candidate": candidate2li(candidate),
                "subset": subset2li(subset),
            }
            # The JS side expects double-quoted JSON.
            return str(result).replace("'", '"')

        def savePreset(name, data):
            # Persist a named preset to presets.json and refresh the dropdown.
            if name:
                presets[name] = json.loads(data)
                with open(presets_file, "w") as file:
                    json.dump(presets, file)
                return gr.update(choices=sorted(presets.keys()), value=name), json.dumps(data)
            return gr.update(), gr.update()

        dummy_component = gr.Label(visible=False)
        preset = gr.Text(visible=False)

        # Event wiring: handlers with fn=None run purely in the browser.
        width.change(None, [width, height], None, _js="(w, h) => {resizeCanvas(w, h)}")
        height.change(None, [width, height], None, _js="(w, h) => {resizeCanvas(w, h)}")
        png_output.click(None, [], None, _js="savePNG")
        bg_input.upload(None, [], [width, height], _js="() => {addBackground('openpose_bg_button')}")
        png_input.upload(estimate, png_input, jsonbox)
        png_input.upload(None, [], [width, height], _js="() => {addBackground('openpose_detect_button')}")
        add.click(None, [], None, _js="addPose")
        send_t2t.click(None, select_target_index, None, _js="(i) => {sendImage('txt2img', i)}")
        send_i2i.click(None, select_target_index, None, _js="(i) => {sendImage('img2img', i)}")
        reset_btn.click(None, [], None, _js="resetCanvas")
        json_input.upload(None, json_input, [width, height], _js="() => {loadJSON('openpose_json_button')}")
        json_output.click(None, None, None, _js="saveJSON")
        preset_save.click(savePreset, [dummy_component, dummy_component], [preset_list, preset], _js="savePreset")
        preset_load.click(None, preset, [width, height], _js="loadPreset")
        preset_list.change(lambda selected: json.dumps(presets[selected]), preset_list, preset)

    return [(openpose_editor, "OpenPose Editor", "openpose_editor")]
7,757 | import math
import numpy as np
import matplotlib
import cv2
def padRightDownCorner(img, stride, padValue):
    """Pad ``img`` on the bottom/right so both dims are multiples of ``stride``.

    Returns ``(img_padded, pad)`` where ``pad`` is ``[up, left, down, right]``;
    up/left are always 0 and every new pixel takes the constant ``padValue``.
    """
    h, w = img.shape[0], img.shape[1]
    # (-n) % stride is 0 when n is already a multiple, else the shortfall.
    pad = [0, 0, (-h) % stride, (-w) % stride]

    padded = img
    # The up/left concatenations are structural no-ops (zero rows/cols) but
    # keep the four-sided layout explicit.
    top = np.tile(padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    padded = np.concatenate((top, padded), axis=0)
    left = np.tile(padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    padded = np.concatenate((left, padded), axis=1)
    bottom = np.tile(padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    padded = np.concatenate((padded, bottom), axis=0)
    right = np.tile(padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    padded = np.concatenate((padded, right), axis=1)
    return padded, pad
7,758 | import math
import numpy as np
import matplotlib
import cv2
def transfer(model, model_weights):
    """Re-key checkpoint ``model_weights`` to match ``model``'s state dict.

    Checkpoint keys are the model's keys minus their first dotted component
    (e.g. ``model.conv1.weight`` is filled from ``conv1.weight``).
    """
    return {
        name: model_weights['.'.join(name.split('.')[1:])]
        for name in model.state_dict().keys()
    }
7,759 | import math
import numpy as np
import matplotlib
import cv2
def draw_bodypose(canvas, candidate, subset):
    """Render 18-keypoint skeletons for every person in ``subset``.

    ``canvas`` is drawn on in place for the joint dots, but limb drawing
    alpha-blends a copy back into a new array, so callers must use the
    RETURNED canvas.  ``candidate`` rows start with [x, y, ...]; ``subset``
    rows hold candidate indices with -1 meaning the joint is missing.
    """
    stickwidth = 4
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
               [1, 16], [16, 18], [3, 17], [6, 18]]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    # Joints: one filled circle per detected keypoint, per person.
    for i in range(18):
        for n in range(len(subset)):
            index = int(subset[n][i])
            if index == -1:
                continue
            x, y = candidate[index][0:2]
            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
    # Limbs: rotated ellipses blended at 60% opacity.  Only the first 17
    # limb pairs are drawn (the two ear-shoulder entries are skipped).
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            # CAUTION: names are swapped on purpose -- Y holds x-coordinates
            # (candidate column 0) and X holds y-coordinates, so the ellipse
            # centre (mY, mX) below is in cv2's (x, y) order.
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
    # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
    # plt.imshow(canvas[:, :, [2, 1, 0]])
    return canvas
7,760 | import math
import numpy as np
import matplotlib
import cv2
def draw_handpose(canvas, all_hand_peaks, show_number=False):
    """Draw 21-keypoint hand skeletons onto ``canvas`` (modified in place)."""
    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    n_edges = len(edges)
    for peaks in all_hand_peaks:
        # Bones: skip any edge whose endpoint is the (0, 0) "missing" marker.
        for edge_index, edge in enumerate(edges):
            if np.sum(np.all(peaks[edge], axis=1) == 0) != 0:
                continue
            x1, y1 = peaks[edge[0]]
            x2, y2 = peaks[edge[1]]
            hue = edge_index / float(n_edges)
            color = matplotlib.colors.hsv_to_rgb([hue, 1.0, 1.0]) * 255
            cv2.line(canvas, (x1, y1), (x2, y2), color, thickness=2)
        # Joints: one red dot per keypoint, optionally numbered.
        for point_index, point in enumerate(peaks):
            x, y = point
            cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
            if show_number:
                cv2.putText(canvas, str(point_index), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
    return canvas
7,761 | import math
import numpy as np
import matplotlib
import cv2
The provided code snippet includes necessary dependencies for implementing the `handDetect` function. Write a Python function `def handDetect(candidate, subset, oriImg)` to solve the following problem:
return value: [[x, y, w, True if left hand else False]]. width = height because the network requires square input; (x, y) is the coordinate of the top-left corner.
Here is the function:
def handDetect(candidate, subset, oriImg):
    """Locate square hand crops from detected body keypoints.

    Returns ``[[x, y, w, is_left], ...]`` where ``(x, y)`` is the top-left
    corner and ``w`` is both width and height (the downstream hand network
    expects square input).
    """
    # Keypoint indices: right wrist/elbow/shoulder = 4/3/2,
    #                   left  wrist/elbow/shoulder = 7/6/5.
    ratioWristElbow = 0.33
    detect_result = []
    image_height, image_width = oriImg.shape[0:2]
    for person in subset.astype(int):
        hands = []
        # A side is usable only when shoulder, elbow and wrist were all found.
        if np.sum(person[[5, 6, 7]] == -1) == 0:
            shoulder, elbow, wrist = person[[5, 6, 7]]
            hands.append((*candidate[shoulder][:2],
                          *candidate[elbow][:2],
                          *candidate[wrist][:2], True))
        if np.sum(person[[2, 3, 4]] == -1) == 0:
            shoulder, elbow, wrist = person[[2, 3, 4]]
            hands.append((*candidate[shoulder][:2],
                          *candidate[elbow][:2],
                          *candidate[wrist][:2], False))
        for x1, y1, x2, y2, x3, y3, is_left in hands:
            # Extrapolate past the wrist along the elbow->wrist direction:
            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow)
            x = x3 + ratioWristElbow * (x3 - x2)
            y = y3 + ratioWristElbow * (y3 - y2)
            wristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
            elbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
            width = 1.5 * max(wristElbow, 0.9 * elbowShoulder)
            # (x, y) was the box centre; shift it to the top-left corner.
            x -= width / 2
            y -= width / 2
            # Clamp the square against the image borders.
            x = max(x, 0)
            y = max(y, 0)
            width = min(width, image_width - x, image_height - y)
            # Discard boxes that became too small to be useful (< 20 px).
            if width >= 20:
                detect_result.append([int(x), int(y), int(width), is_left])
    return detect_result
7,762 | import math
import numpy as np
import matplotlib
import cv2
def npmax(array):
    """Return the (row, col) position of the largest entry in a 2-D array.

    Ties resolve to the first occurrence in row-major order, matching the
    original per-row argmax formulation.
    """
    i, j = np.unravel_index(array.argmax(), array.shape)
    return i, j
7,763 | import torch
from collections import OrderedDict
import torch
import torch.nn as nn
def make_layers(block, no_relu_layers):
    """Build an ``nn.Sequential`` from an ordered ``{name: spec}`` mapping.

    Names containing ``'pool'`` map to ``MaxPool2d(kernel, stride, pad)``.
    Every other name maps to ``Conv2d(in, out, kernel, stride, pad)``
    followed by an in-place ReLU named ``relu_<name>`` unless the layer
    name appears in ``no_relu_layers``.
    """
    layers = []
    for layer_name, v in block.items():
        if 'pool' in layer_name:
            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
                                 padding=v[2])
            layers.append((layer_name, layer))
        else:
            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
                               kernel_size=v[2], stride=v[3],
                               padding=v[4])
            layers.append((layer_name, conv2d))
            if layer_name not in no_relu_layers:
                layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
    return nn.Sequential(OrderedDict(layers))
7,764 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
def renew_doc(data_file, table):
    """Regenerate the markdown table in ``data_file`` from DB ``table``.

    The existing file header (everything up to and including the
    ``|---|---|`` separator row) is kept verbatim; all data rows are then
    rebuilt from the database, ordered by app name.
    """
    # Keep the header lines as-is.
    markdown = []
    with open(data_file, 'r') as f:
        for line in f.readlines():
            columns = [column.strip() for column in line.split("|")]
            markdown.append(line)
            if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                break

    # `table` is one of our own internal table names, not user input, so
    # formatting it into the SQL text is acceptable; no row values ever
    # enter the statement.
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    res = cur.execute(f"SELECT app_name, testflight_link, status, last_modify FROM {table} ORDER BY app_name;")
    for app_name, testflight_link, status, last_modify in res:
        link = f"[https://testflight.apple.com/join/{testflight_link}](https://testflight.apple.com/join/{testflight_link})"
        markdown.append(f"| {app_name} | {link} | {status} | {last_modify} |\n")
    conn.close()

    with open(data_file, 'w') as f:
        f.writelines(markdown)  # writelines returns None; don't assign it
7,765 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
# Section name -> markdown table file for each README section.
TABLE_MAP = {
    "macos": "./data/macos.md",
    "ios": "./data/ios.md",
    "ios_game": "./data/ios_game.md",
    "chinese": "./data/chinese.md",
    "signup": "./data/signup.md"
}
README_TEMPLATE_FILE = "./data/README.template"


def _read_text(path):
    """Return the full contents of ``path``."""
    with open(path, 'r') as f:
        return f.read()


def renew_readme():
    """Rebuild ../README.md by filling the template with each section table.

    The template's ``{macos}``/``{ios}``/... placeholders match the keys of
    ``TABLE_MAP`` exactly, so the five copy-pasted read blocks collapse to
    a single dict comprehension.
    """
    template = _read_text(README_TEMPLATE_FILE)
    sections = {name: _read_text(path) for name, path in TABLE_MAP.items()}
    readme = template.format(**sections)
    with open("../README.md", 'w') as f:
        f.write(readme)
7,766 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
FULL_PATTERN = re.compile(r"版本的测试员已满|This beta is full")
NO_PATTERN = re.compile(r"版本目前不接受任何新测试员|This beta isn't accepting any new testers right now")
# NOTE(review): the original source line here was corrupted -- two regex
# literals were fused and APP_NAME_PATTERN (used below) was never defined.
# Reconstructed from the English TestFlight invite page title; confirm
# against a live page.
APP_NAME_PATTERN = re.compile(r"Join the (.+) beta - TestFlight - Apple")
APP_NAME_CH_PATTERN = re.compile(r"加入 Beta 版“(.+)” - TestFlight - Apple")


async def check_status(session, key, retry=10):
    """Fetch a TestFlight invite page and classify its status.

    Returns ``(key, status, app_name)`` where status is one of:
    'Y' open, 'F' full, 'N' not accepting, 'D' dead (404).
    Retries with linear backoff up to ``retry`` times; when every attempt
    fails the last defaults are returned.
    """
    status = 'N'
    app_name = "None"
    for i in range(retry):
        try:
            async with session.get(f'/join/{key}') as resp:
                resp.raise_for_status()
                resp_html = await resp.text()
                if NO_PATTERN.search(resp_html) is not None:
                    status = 'N'
                elif FULL_PATTERN.search(resp_html) is not None:
                    status = 'F'
                else:
                    status = 'Y'
                app_name_search = APP_NAME_PATTERN.search(resp_html)
                app_name_ch_search = APP_NAME_CH_PATTERN.search(resp_html)
                if app_name_search is not None:
                    app_name = app_name_search.group(1)
                elif app_name_ch_search is not None:
                    app_name = app_name_ch_search.group(1)
                return (key, status, app_name)
        except aiohttp.ClientResponseError as e:
            # Use e.status rather than resp.status: `resp` may be unbound if
            # the request itself failed.  A 404 is permanent -- stop retrying.
            if e.status == 404:
                return (key, 'D', app_name)
            rand = round(random.random(), 3)
            print(f"[warn] {e}, wait {i*(rand+1)+1} s. Retry({i}/{retry})")
            await asyncio.sleep(i*(rand+1)+1)
    print(f"[warn] Key ({key}) have max retries, return default value!")
    return (key, status, app_name)
7,767 | import sqlite3
import datetime, re, os, sys
# Raw markdown lines whose link failed to parse; flushed to ./data/signup.md
# later by other_links().
INVALID_DATA = []
# UTC date stamp used when a row carries no last_modify value.
TODAY = datetime.datetime.utcnow().date().strftime("%Y-%m-%d")


def process(data_file, table):
    """Import the markdown table in ``data_file`` into sqlite table ``table``.

    Header lines (up to the ``|---|`` separator row) are skipped.  Each data
    row is parsed into (app_name, link_key, status, last_modify) and inserted;
    rows without a recognisable TestFlight link are collected in the module
    global ``INVALID_DATA`` instead.
    """
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    with open(data_file, 'r') as f:
        lines = f.readlines()
        data_flag = False  # True once the |---| separator row has been seen
        for line in lines:
            columns = [ column.strip() for column in line.split("|") ]
            if not data_flag:
                if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                    data_flag = True
                continue
            # -- data rows from here on --
            #
            _, app_name, testflight_link = columns[:3]
            # Status / last_modify columns are optional in the source table.
            status, last_modify = columns[3:5] if len(columns)>4 else [""] * 2
            link_id_match = re.search(r"\]\(https://testflight.apple.com/join/(.*)\)", testflight_link, re.I)
            if link_id_match is not None:
                testflight_link = link_id_match.group(1)
            else:
                print(f"[Warn] Invalid testflight_link, record(will be save into ./data/sign_up.md): \n\t\"{columns}\"")
                INVALID_DATA.append(line)
                continue
            if status is None or status == "":
                status = "N"
            if last_modify is None or last_modify == "":
                last_modify = TODAY
            # Insert; values are bound as parameters, only the (internal)
            # table name is formatted into the SQL text.
            sql = f"INSERT INTO {table} (app_name, testflight_link, status, last_modify) VALUES(?, ?, ?, ?);"
            data = (app_name, testflight_link, status, last_modify)
            try:
                cur.execute(sql, data)
            except sqlite3.IntegrityError as e:
                # Duplicate key: report and keep importing the rest.
                print(f"[sqlite3.IntegrityError - 1] {e}")
                print(f"[sqlite3.IntegrityError - 2] Table: {table}; Data: {data}")
            except Exception as e:
                raise e
    conn.commit()
    print(f"[info] Writed {conn.total_changes} rows into table: {table}")
    conn.close()
7,768 | import sqlite3
import datetime, re, os, sys
# Raw lines that failed link parsing elsewhere; appended to signup.md here.
INVALID_DATA = []


def other_links():
    """Append not-yet-recorded invalid rows to ./data/signup.md."""
    with open('./data/signup.md', 'r+') as f:
        existing = f.readlines()
        # readlines() leaves the cursor at EOF, so writelines() appends.
        fresh = [line for line in INVALID_DATA if line not in existing]
        f.writelines(fresh)
        if len(fresh):
            print(f"[info] Write {len(fresh)} raws to ./data/signup.md")
7,769 | import sqlite3
import re, os, sys
def renew_doc(data_file, table):
    """Regenerate the markdown table in ``data_file`` from DB ``table``.

    The existing file header (everything up to and including the
    ``|---|---|`` separator row) is kept verbatim; all data rows are then
    rebuilt from the database, ordered by app name.
    """
    # Keep the header lines as-is.
    markdown = []
    with open(data_file, 'r') as f:
        for line in f.readlines():
            columns = [column.strip() for column in line.split("|")]
            markdown.append(line)
            if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                break

    # `table` is one of our own internal table names, not user input, so
    # formatting it into the SQL text is acceptable.
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    res = cur.execute(f"SELECT app_name, testflight_link, status, last_modify FROM {table} ORDER BY app_name;")
    for app_name, testflight_link, status, last_modify in res:
        link = f"[https://testflight.apple.com/join/{testflight_link}](https://testflight.apple.com/join/{testflight_link})"
        markdown.append(f"| {app_name} | {link} | {status} | {last_modify} |\n")
    conn.close()

    with open(data_file, 'w') as f:
        f.writelines(markdown)  # writelines returns None; don't assign it
7,770 | import sqlite3
import re, os, sys
# Section name -> markdown table file for each README section.
TABLE_MAP = {
    "macos": "./data/macos.md",
    "ios": "./data/ios.md",
    "ios_game": "./data/ios_game.md",
    "chinese": "./data/chinese.md",
    "signup": "./data/signup.md"
}
README_TEMPLATE_FILE = "./data/README.template"


def _read_text(path):
    """Return the full contents of ``path``."""
    with open(path, 'r') as f:
        return f.read()


def renew_readme():
    """Rebuild ../README.md by filling the template with each section table.

    The template's ``{macos}``/``{ios}``/... placeholders match the keys of
    ``TABLE_MAP`` exactly, so the five copy-pasted read blocks collapse to
    a single dict comprehension.
    """
    template = _read_text(README_TEMPLATE_FILE)
    sections = {name: _read_text(path) for name, path in TABLE_MAP.items()}
    readme = template.format(**sections)
    with open("../README.md", 'w') as f:
        f.write(readme)
7,771 | import sqlite3
import re, os, sys
def renew_doc(data_file, table):
    """Regenerate the markdown table in ``data_file`` from DB ``table``.

    The existing file header (up to and including the ``|---|---|``
    separator row) is kept verbatim; data rows are rebuilt from the
    database ordered by status: open first, then full, closed, dead.
    """
    # Keep the header lines as-is.
    markdown = []
    with open(data_file, 'r') as f:
        for line in f.readlines():
            columns = [column.strip() for column in line.split("|")]
            markdown.append(line)
            if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                break

    # `table` is one of our own internal table names, not user input, so
    # formatting it into the SQL text is acceptable.
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    res = cur.execute(f"""SELECT app_name, testflight_link, status, last_modify FROM {table} ORDER BY
    CASE status
        WHEN 'Y' THEN 0
        WHEN 'F' THEN 1
        WHEN 'N' THEN 2
        WHEN 'D' THEN 3
    END;""")
    for app_name, testflight_link, status, last_modify in res:
        link = f"[https://testflight.apple.com/join/{testflight_link}](https://testflight.apple.com/join/{testflight_link})"
        markdown.append(f"| {app_name} | {link} | {status} | {last_modify} |\n")
    conn.close()

    with open(data_file, 'w') as f:
        f.writelines(markdown)  # writelines returns None; don't assign it
7,773 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
from fake_user_agent import user_agent
import math
def get_old_status(table):
    """Return ``{testflight_link: status}`` for every row in ``table``."""
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    rows = cur.execute(f"SELECT testflight_link, status FROM {table};").fetchall()
    conn.close()
    # Rows are (link, status) pairs, exactly what dict() wants.
    return dict(rows)
7,774 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
from fake_user_agent import user_agent
import math
# UTC date stamp recorded alongside every status change.
TODAY = datetime.datetime.utcnow().date().strftime("%Y-%m-%d")


def update_status(table, change_list):
    """Apply status changes (stamped with today's date) and return the count.

    ``change_list`` holds ``(testflight_link, new_status)`` pairs.  Values
    are bound as SQL parameters instead of being interpolated into the
    statement text, so link keys containing quotes can no longer break
    (or inject into) the query.  Only the table name -- which comes from
    our internal table map -- is formatted in.
    """
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    sql = f"UPDATE {table} SET status = ?, last_modify = ? WHERE testflight_link = ?;"
    for update in change_list:
        cur.execute(sql, (update[1], TODAY, update[0]))
    conn.commit()
    total = conn.total_changes
    conn.close()
    return total
7,775 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
from fake_user_agent import user_agent
import math
def renew_doc(data_file, table):
    """Regenerate the markdown table in ``data_file`` from DB ``table``.

    The existing file header (everything up to and including the
    ``|---|---|`` separator row) is kept verbatim; all data rows are then
    rebuilt from the database, ordered by app name.
    """
    # Keep the header lines as-is.
    markdown = []
    with open(data_file, 'r') as f:
        for line in f.readlines():
            columns = [column.strip() for column in line.split("|")]
            markdown.append(line)
            if len(columns) > 2 and re.match(r"^:?-+:?$", columns[1]):
                break

    # `table` is one of our own internal table names, not user input, so
    # formatting it into the SQL text is acceptable.
    conn = sqlite3.connect('../db/sqlite3.db')
    cur = conn.cursor()
    res = cur.execute(f"SELECT app_name, testflight_link, status, last_modify FROM {table} ORDER BY app_name;")
    for app_name, testflight_link, status, last_modify in res:
        link = f"[https://testflight.apple.com/join/{testflight_link}](https://testflight.apple.com/join/{testflight_link})"
        markdown.append(f"| {app_name} | {link} | {status} | {last_modify} |\n")
    conn.close()

    with open(data_file, 'w') as f:
        f.writelines(markdown)  # writelines returns None; don't assign it
7,776 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
from fake_user_agent import user_agent
import math
# Section name -> markdown table file for each README section.
TABLE_MAP = {
    "macos": "./data/macos.md",
    "ios": "./data/ios.md",
    "ios_game": "./data/ios_game.md",
    "chinese": "./data/chinese.md",
    "signup": "./data/signup.md"
}
README_TEMPLATE_FILE = "./data/README.template"


def _read_text(path):
    """Return the full contents of ``path``."""
    with open(path, 'r') as f:
        return f.read()


def renew_readme():
    """Rebuild ../README.md by filling the template with each section table.

    The template's ``{macos}``/``{ios}``/... placeholders match the keys of
    ``TABLE_MAP`` exactly, so the five copy-pasted read blocks collapse to
    a single dict comprehension.
    """
    template = _read_text(README_TEMPLATE_FILE)
    sections = {name: _read_text(path) for name, path in TABLE_MAP.items()}
    readme = template.format(**sections)
    with open("../README.md", 'w') as f:
        f.write(readme)
7,777 | import sqlite3
import asyncio
import aiohttp
import re, os, sys, datetime, random
from fake_user_agent import user_agent
import math
FULL_PATTERN = re.compile(r"版本的测试员已满|This beta is full")
NO_PATTERN = re.compile(r"版本目前不接受任何新测试员|This beta isn't accepting any new testers right now")
# Index into the module-level `uas` user-agent pool (defined elsewhere).
UA_NUM = 0


async def check_status(session, key, retry=10):
    """Fetch a TestFlight invite page and classify it.

    Returns ``(key, status)`` with status 'Y' open, 'F' full, 'N' closed,
    'D' dead (404), or 'E' when every attempt errored out.  The User-Agent
    is rotated through the ``uas`` pool on repeated failures.
    """
    global UA_NUM
    status = 'E'  # means error
    # Small random delay so concurrent checks don't hammer the host.
    rand = round(random.random(), 3)
    print(f"[info] {key}, wait {(rand+1)} s.")
    await asyncio.sleep(rand + 1)
    for i in range(retry):
        try:
            headers = {
                "User-Agent": uas[UA_NUM]
            }
            # BUG FIX: the headers dict was built but never sent; pass it to
            # session.get so the rotated User-Agent actually takes effect.
            async with session.get(f'/join/{key}', headers=headers) as resp:
                resp.raise_for_status()
                resp_html = await resp.text()
                if NO_PATTERN.search(resp_html) is not None:
                    status = 'N'
                elif FULL_PATTERN.search(resp_html) is not None:
                    status = 'F'
                else:
                    status = 'Y'
                return (key, status)
        except aiohttp.ClientResponseError as e:
            # Use e.status rather than resp.status: `resp` may be unbound if
            # the request itself failed.  A 404 is permanent -- stop retrying.
            if e.status == 404:
                return (key, 'D')
            rand = round(random.random(), 3) * 100
            print(f"[warn] {e} UA:{uas[UA_NUM]}, wait {i*(rand+1)+1} s.")
            await asyncio.sleep(i*(rand+1)+1)
            # Too many requests: rotate to the next User-Agent, wrapping at
            # the end of the pool instead of a hard-coded 100.
            UA_NUM = (UA_NUM + 1) % len(uas)
    return (key, status)
7,778 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def scaled_multihead_dot_product_attention(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Torch-native scaled dot-product attention (multi-head / multi-query).

    query/key/value are (batch, seq, heads*head_dim); with ``multiquery``
    the key/value carry a single shared head.  Returns
    ``(out, attn_weight or None, past_key_value)`` where the updated cache
    holds k as (b, h, d, s) and v as (b, h, s, d).
    """
    # Split heads; k is laid out (b, h, d, s) so q @ k yields scores directly.
    q = rearrange(query, "b s (h d) -> b h s d", h=n_heads)
    kv_n_heads = 1 if multiquery else n_heads
    k = rearrange(key, "b s (h d) -> b h d s", h=kv_n_heads)
    v = rearrange(value, "b s (h d) -> b h s d", h=kv_n_heads)
    if past_key_value is not None:
        # Append cached keys/values along their sequence axes: dim 3 for k's
        # (b, h, d, s) layout, dim 2 for v's (b, h, s, d).
        if len(past_key_value) != 0:
            k = torch.cat([past_key_value[0], k], dim=3)
            v = torch.cat([past_key_value[1], v], dim=2)
        past_key_value = (k, v)
    (b, _, s_q, d) = q.shape
    s_k = k.size(-1)
    if softmax_scale is None:
        softmax_scale = 1 / math.sqrt(d)
    attn_weight = q.matmul(k) * softmax_scale
    if attn_bias is not None:
        # Trim the bias to the trailing (s_q, s_k) window, then validate
        # that it can broadcast over the score matrix.
        _s_q = max(0, attn_bias.size(2) - s_q)
        _s_k = max(0, attn_bias.size(3) - s_k)
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
        if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
            raise RuntimeError(f"attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.")
        attn_weight = attn_weight + attn_bias
    min_val = torch.finfo(q.dtype).min
    if key_padding_mask is not None:
        if attn_bias is not None:
            warnings.warn(
                "Propogating key_padding_mask to the attention module "
                + "and applying it within the attention module can cause "
                + "unneccessary computation/memory usage. Consider integrating "
                + "into attn_bias once and passing that to each attention "
                + "module instead."
            )
        # Mask out padded key positions with the dtype's most negative value.
        attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
    if is_causal and (not q.size(2) == 1):
        # Build a lower-triangular mask at the larger of the two lengths,
        # then keep the trailing window so cached keys remain visible.
        s = max(s_q, s_k)
        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
        causal_mask = causal_mask.tril()
        causal_mask = causal_mask.to(torch.bool)
        causal_mask = ~causal_mask
        causal_mask = causal_mask[-s_q:, -s_k:]
        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if dropout_p:
        attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
    out = attn_weight.to(v.dtype).matmul(v)
    out = rearrange(out, "b h s d -> b s (h d)")
    if needs_weights:
        return (out, attn_weight, past_key_value)
    return (out, None, past_key_value)
7,779 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError("MPT does not support query and key with different number of tokens, unless number of query tokens is 1.")
else:
return False
return original_is_causal
def check_valid_inputs(*tensors, valid_dtypes=(torch.float16, torch.bfloat16)):
    """Validate that every tensor is a half-precision CUDA tensor.

    The flash/triton attention kernels only accept fp16/bf16 tensors that
    live on the GPU; this raises early with a clear message otherwise.

    Args:
        *tensors: tensors to validate.
        valid_dtypes: dtypes the kernels accept. Changed from a mutable list
            default to a tuple — mutable default arguments are shared across
            calls and a classic Python pitfall.

    Raises:
        TypeError: if any tensor has a dtype outside ``valid_dtypes`` or is
            not on a CUDA device.
    """
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            raise TypeError(f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.")
        if not tensor.is_cuda:
            raise TypeError(f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).")
def flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Multihead attention using the flash-attn CUDA kernels.

    Inputs are ``(batch, seq, n_heads * head_dim)`` tensors (fp16/bf16 on
    CUDA). Padding tokens are stripped with ``bert_padding.unpad_input``
    before the kernel call and re-inserted afterwards.

    Args:
        query, key, value: projected activations, ``(b, s, h*d)``.
        n_heads: number of attention heads.
        past_key_value: optional ``(key, value)`` cache, concatenated along
            the sequence dimension.
        softmax_scale: optional logit scale; kernel defaults to 1/sqrt(d).
        attn_bias: unsupported here — raises NotImplementedError if given.
        key_padding_mask: bool mask, True for real tokens.
        is_causal: request causal masking (see ``_reset_is_causal``).
        dropout_p: attention dropout, applied only when ``training``.
        training: whether dropout is active.
        needs_weights: forwarded as ``return_attn_probs`` to the kernel.
        multiquery: if True, key/value carry a single shared head.

    Returns:
        ``(output, None, past_key_value)`` — attention weights are never
        materialized by flash-attn, so the middle element is always None.

    Raises:
        RuntimeError: if flash-attn is not installed.
        NotImplementedError: if ``attn_bias`` is provided.
    """
    try:
        from flash_attn import bert_padding, flash_attn_interface
    except ImportError:  # was a bare except; only a missing package should map to this install hint
        raise RuntimeError("Please install flash-attn==1.0.3.post0")
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            # Prepend cached keys/values along the sequence dimension.
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        # Trim the bias to the live tail of the (possibly cache-extended) sequences.
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if attn_bias is not None:
        raise NotImplementedError("attn_bias not implemented for flash attn.")
    (batch_size, seqlen) = query.shape[:2]
    if key_padding_mask is None:
        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
    # Queries align with the newest keys, so their mask is the tail of the key mask.
    query_padding_mask = key_padding_mask[:, -query.size(1) :]
    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
    query_unpad = rearrange(query_unpad, "nnz (h d) -> nnz h d", h=n_heads)
    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
    key_unpad = rearrange(key_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads)
    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
    value_unpad = rearrange(value_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads)
    if multiquery:
        # expand() broadcasts the single shared head to n_heads without copying.
        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
        value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
    dropout_p = dropout_p if training else 0.0
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    output_unpad = flash_attn_interface.flash_attn_unpadded_func(
        query_unpad,
        key_unpad,
        value_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale=softmax_scale,
        causal=reset_is_causal,
        return_attn_probs=needs_weights,
    )
    output = bert_padding.pad_input(rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices_q, batch_size, seqlen)
    return (output, None, past_key_value)
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError("MPT does not support query and key with different number of tokens, unless number of query tokens is 1.")
else:
return False
return original_is_causal
def check_valid_inputs(*tensors, valid_dtypes=(torch.float16, torch.bfloat16)):
    """Raise unless every tensor is a half-precision tensor on a CUDA device.

    Guard used by the flash/triton attention paths, which require fp16 or
    bf16 GPU tensors.

    Args:
        *tensors: tensors to check.
        valid_dtypes: accepted dtypes. A tuple replaces the original mutable
            list default (mutable defaults are shared across calls).

    Raises:
        TypeError: on an unsupported dtype or a non-CUDA tensor.
    """
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            raise TypeError(f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.")
        if not tensor.is_cuda:
            raise TypeError(f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).")
)
)
)
flash_attn_func = FlashAttnFunc.apply
def triton_flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    past_key_value=None,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
    multiquery=False,
):
    """Multihead attention via the Triton flash-attention kernel.

    Inputs are ``(batch, seq, n_heads * head_dim)`` fp16/bf16 CUDA tensors.
    Unlike the CUDA flash-attn path, this kernel supports an additive
    ``attn_bias``, so ``key_padding_mask`` is folded into the bias here.

    Args:
        query, key, value: projected activations, ``(b, s, h*d)``.
        n_heads: number of attention heads.
        past_key_value: optional ``(key, value)`` cache, concatenated along
            the sequence dimension.
        softmax_scale: optional logit scale passed to the kernel.
        attn_bias: optional additive bias broadcastable to the logits.
        key_padding_mask: bool mask, True for real tokens; merged into
            ``attn_bias`` as ``-inf``-like entries.
        is_causal: request causal masking (see ``_reset_is_causal``).
        dropout_p / needs_weights: unsupported — raise if set.
        multiquery: if True, key/value carry a single shared head.

    Returns:
        ``(output, None, past_key_value)`` — the kernel never materializes
        attention weights, so the middle element is always None.

    Raises:
        RuntimeError: if no Triton flash-attention implementation is available.
        NotImplementedError: if dropout or attention weights are requested.
    """
    try:
        # Prefer the kernel vendored next to this module.
        from .flash_attn_triton import flash_attn_func
    except ImportError:  # was a bare except; narrowed so real bugs inside the module surface
        _installed = False
        if version.parse(torch.__version__) < version.parse("2.0.0"):
            _installed = True
            try:
                # Fall back to the kernel bundled with flash-attn (torch < 2.0 only).
                from flash_attn.flash_attn_triton import flash_attn_func
            except ImportError:
                _installed = False
        if not _installed:
            raise RuntimeError(
                "Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed."
            )
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            # Prepend cached keys/values along the sequence dimension.
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        # Trim the bias to the live tail of the (possibly cache-extended) sequences.
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if dropout_p:
        raise NotImplementedError("Dropout not implemented for attn_impl: triton.")
    if needs_weights:
        raise NotImplementedError("attn_impl: triton cannot return attn weights.")
    if key_padding_mask is not None:
        warnings.warn(
            "Propagating key_padding_mask to the attention module "
            + "and applying it within the attention module can cause "
            + "unnecessary computation/memory usage. Consider integrating "
            + "into attn_bias once and passing that to each attention "
            + "module instead."
        )
        (b_size, s_k) = key_padding_mask.shape[:2]
        if attn_bias is None:
            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
        # Fold the padding mask into the additive bias: padded keys get dtype-min logits.
        attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
    query = rearrange(query, "b s (h d) -> b s h d", h=n_heads)
    key = rearrange(key, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
    value = rearrange(value, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
    if multiquery:
        # expand() broadcasts the single shared head to n_heads without copying.
        key = key.expand(*key.shape[:2], n_heads, key.size(-1))
        value = value.expand(*value.shape[:2], n_heads, value.size(-1))
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
    # Merge the head and head-dim axes back into (b, s, h*d).
    output = attn_output.view(*attn_output.shape[:2], -1)
    return (output, None, past_key_value)
# (non-code extraction residue, preserved as comments)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.