id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,570 | import numpy as np
import torch
import torch.nn as nn
def combined_shape(length, shape=None):
    """Return a buffer shape of `length` rows with per-row shape `shape`.

    `shape` may be None (1-D buffer), a scalar (one extra dim), or an
    iterable of dims (appended after `length`).
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
7,571 | import numpy as np
import torch
import torch.nn as nn
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a fully-connected network from a list of layer widths.

    Each Linear layer is followed by `activation`, except the last one,
    which is followed by `output_activation`.
    """
    layers = []
    n_affine = len(sizes) - 1
    for idx in range(n_affine):
        is_last = (idx == n_affine - 1)
        act_cls = output_activation if is_last else activation
        layers.append(nn.Linear(sizes[idx], sizes[idx + 1]))
        layers.append(act_cls())
    return nn.Sequential(*layers)
7,572 | import numpy as np
import torch
import torch.nn as nn
def count_vars(module):
    """Return the total number of scalar parameters in `module` as an int.

    Improvements over the original: a generator expression avoids building
    an intermediate list, and the explicit int() cast guarantees a plain
    Python int (np.prod(()) is the float 1.0 for 0-d parameters, which made
    the original return a float in that case).
    """
    return int(sum(np.prod(p.shape) for p in module.parameters()))
7,573 | from copy import deepcopy
import itertools
import numpy as np
import random
import torch
import torch.nn as nn
from torch.optim import Adam
import gym
import pickle
import os
from spikingjelly.activation_based import functional
from replay_buffer_norm import ReplayBuffer
from ilcsan import PopSpikeActor
from core_cuda import MLPQFunction
def setup_seed(seed):
    """Seed every RNG used here (python, numpy, torch CPU+CUDA) and force
    deterministic cuDNN kernel selection for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class SpikeActorDeepCritic(nn.Module):
    """TD3-style actor-critic pairing a population-coded spiking actor
    (`san`) with twin MLP Q-functions (`q1`, `q2`)."""

    def __init__(self, observation_space, action_space, encoder_pop_dim, decoder_pop_dim,
                 mean_range, std, spike_ts, encode, decode, hidden_sizes=(256, 256), activation=nn.ReLU):
        """
        :param observation_space: gym Box space; shape[0] is used as obs dim
        :param action_space: gym Box space; shape[0] is act dim, high[0] the bound
        :param encoder_pop_dim: population size of the spike encoder (passed to PopSpikeActor)
        :param decoder_pop_dim: population size of the spike decoder (passed to PopSpikeActor)
        :param mean_range, std, spike_ts, encode, decode: population-coding /
            simulation settings forwarded verbatim to PopSpikeActor
        :param hidden_sizes: hidden layer widths for actor and critics
        :param activation: activation class for the MLP critics
        """
        super().__init__()
        obs_dim = observation_space.shape[0]
        act_dim = action_space.shape[0]
        # NOTE(review): assumes all action dimensions share the same bound (high[0]).
        act_limit = action_space.high[0]
        # build policy and value functions
        self.san = PopSpikeActor(obs_dim, act_dim, encoder_pop_dim, decoder_pop_dim, hidden_sizes, mean_range, std,
                                 spike_ts, encode, decode, act_limit)
        self.q1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)
        self.q2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)

    def act(self, obs):
        """Run the spiking actor without gradients and return a numpy action.

        The spiking net carries membrane state across a forward pass, so it
        is reset (functional.reset_net) after every call.
        """
        with torch.no_grad():
            action = self.san(obs).cpu().numpy()
            functional.reset_net(self.san)
            return action
class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for DDPG agents.
    with Running Mean and Var from hill-a/stable-baselines
    """

    @staticmethod
    def _combined_shape(length, shape=None):
        """Return (length,) when shape is None, else (length, *shape).

        Fix: the original called ``core.combined_shape`` but ``core`` was
        never imported, so constructing the buffer raised NameError. The
        helper (spinup's core.combined_shape) is inlined here instead.
        """
        if shape is None:
            return (length,)
        return (length, shape) if np.isscalar(shape) else (length, *shape)

    def __init__(self, obs_dim, act_dim, size, clip_limit, norm_update_every=1000):
        """
        :param obs_dim: observation dimension (int or shape tuple)
        :param act_dim: action dimension (int or shape tuple)
        :param size: buffer sizes
        :param clip_limit: limit for clip value
        :param norm_update_every: update freq
        """
        self.obs_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
        self.obs2_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size
        # Running z-score normalization parameters
        self.clip_limit = clip_limit
        self.norm_update_every = norm_update_every
        # Observations are staged here and folded into mean/var in batches.
        self.norm_update_batch = np.zeros(self._combined_shape(norm_update_every, obs_dim), dtype=np.float32)
        self.norm_update_count = 0
        # Start from eps (not 0) so the first merge never divides by zero.
        self.norm_total_count = np.finfo(np.float32).eps.item()
        self.mean, self.var = np.zeros(obs_dim, dtype=np.float32), np.ones(obs_dim, dtype=np.float32)

    def store(self, obs, act, rew, next_obs, done):
        """
        Insert entry into memory
        :param obs: observation
        :param act: action
        :param rew: reward
        :param next_obs: observation after action
        :param done: if true then episode done
        """
        self.obs_buf[self.ptr] = obs
        self.obs2_buf[self.ptr] = next_obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.done_buf[self.ptr] = done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
        # Update Mean and Variance once per `norm_update_every` stores by
        # merging the staged batch statistics into the running totals.
        # Have to at least update mean and variance once before training starts
        self.norm_update_batch[self.norm_update_count] = obs
        self.norm_update_count += 1
        if self.norm_update_count == self.norm_update_every:
            self.norm_update_count = 0
            batch_mean, batch_var = self.norm_update_batch.mean(axis=0), self.norm_update_batch.var(axis=0)
            tmp_total_count = self.norm_total_count + self.norm_update_every
            delta_mean = batch_mean - self.mean
            self.mean += delta_mean * (self.norm_update_every / tmp_total_count)
            # Merge second moments of the two samples (parallel variance update).
            m_a = self.var * self.norm_total_count
            m_b = batch_var * self.norm_update_every
            m_2 = m_a + m_b + np.square(delta_mean) * self.norm_total_count * self.norm_update_every / tmp_total_count
            self.var = m_2 / tmp_total_count
            self.norm_total_count = tmp_total_count

    def sample_batch(self, device, batch_size=32):
        """
        Sample batch from memory; observations are z-score normalized.
        :param device: pytorch device
        :param batch_size: batch size
        :return: dict of float32 tensors on `device`
        """
        idxs = np.random.randint(0, self.size, size=batch_size)
        batch = dict(obs=self.normalize_obs(self.obs_buf[idxs]),
                     obs2=self.normalize_obs(self.obs2_buf[idxs]),
                     act=self.act_buf[idxs],
                     rew=self.rew_buf[idxs],
                     done=self.done_buf[idxs])
        return {k: torch.as_tensor(v, dtype=torch.float32, device=device) for k, v in batch.items()}

    def normalize_obs(self, obs):
        """
        Do z-score normalization on observation
        :param obs: observation
        :return: norm_obs, clipped to [-clip_limit, clip_limit]
        """
        eps = np.finfo(np.float32).eps.item()
        norm_obs = np.clip((obs - self.mean) / np.sqrt(self.var + eps),
                           -self.clip_limit, self.clip_limit)
        return norm_obs
def spike_td3(env_fn, actor_critic=SpikeActorDeepCritic, ac_kwargs=dict(), seed=0,
              steps_per_epoch=10000, epochs=100, replay_size=int(1e6), gamma=0.99,
              polyak=0.995, san_lr=1e-4, q_lr=1e-3, batch_size=100, start_steps=10000,
              update_after=1000, update_every=50, act_noise=0.1, target_noise=0.2,
              noise_clip=0.5, policy_delay=2, num_test_episodes=10, max_ep_len=1000,
              save_freq=5, norm_clip_limit=3, norm_update=50, tb_comment='', model_idx=0,
              root_dir='.'):
    """Train a spiking-actor TD3 agent (device is hard-coded to CUDA).

    :param env_fn: zero-arg callable that returns a fresh gym environment
    :param actor_critic: actor-critic class (spiking actor + twin MLP critics)
    :param ac_kwargs: extra kwargs forwarded to the actor_critic constructor
    :param polyak: target-network interpolation factor for the soft update
    :param policy_delay: number of critic updates per delayed actor/target update
    :param norm_clip_limit, norm_update: observation-normalization settings
        forwarded to ReplayBuffer
    Saves best/last actor weights plus observation mean/var and the test
    reward history under ``root_dir/params/hybrid-td3_<tb_comment>``.
    """
    # Set device
    device = torch.device("cuda")
    # Set random seed
    setup_seed(seed)
    env, test_env = env_fn(), env_fn()
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape[0]
    # Action limit for clamping: critically, assumes all dimensions share the same bound!
    act_limit = env.action_space.high[0]
    # Create actor-critic module and target networks
    ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
    ac_targ = deepcopy(ac)
    ac.to(device)
    ac_targ.to(device)
    # Freeze target networks with respect to optimizers (only update via polyak averaging)
    for p in ac_targ.parameters():
        p.requires_grad = False
    # List of parameters for both Q-networks (save this for convenience)
    q_params = itertools.chain(ac.q1.parameters(), ac.q2.parameters())
    # Experience buffer
    replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size,
                                 clip_limit=norm_clip_limit, norm_update_every=norm_update)

    # Set up function for computing TD3 Q-losses
    def compute_loss_q(data):
        o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
        q1 = ac.q1(o, a)
        q2 = ac.q2(o, a)
        # Bellman backup for Q functions
        with torch.no_grad():
            san_targ = ac_targ.san(o2)
            # Target policy smoothing
            epsilon = torch.randn_like(san_targ) * target_noise
            epsilon = torch.clamp(epsilon, -noise_clip, noise_clip)
            a2 = san_targ + epsilon
            a2 = torch.clamp(a2, -act_limit, act_limit)
            # Target Q-values
            q1_san_targ = ac_targ.q1(o2, a2)
            q2_san_targ = ac_targ.q2(o2, a2)
            q_san_targ = torch.min(q1_san_targ, q2_san_targ)
            backup = r + gamma * (1 - d) * q_san_targ
            # Clear the target spiking actor's membrane state between batches.
            functional.reset_net(ac_targ.san)
        # MSE loss against Bellman backup
        loss_q1 = ((q1 - backup)**2).mean()
        loss_q2 = ((q2 - backup)**2).mean()
        loss_q = loss_q1 + loss_q2
        # Useful info for logging
        loss_info = dict(Q1Vals=q1.cpu().detach().numpy(),
                         Q2Vals=q2.cpu().detach().numpy())
        return loss_q, loss_info

    # Set up function for computing TD3 san loss
    def compute_loss_san(data):
        o = data['obs']
        q1_san = ac.q1(o, ac.san(o))
        # Reset spiking state after the actor forward pass.
        functional.reset_net(ac.san)
        return -q1_san.mean()

    # Set up optimizers for policy and q-function
    san_optimizer = Adam(ac.san.parameters(), lr=san_lr)
    q_optimizer = Adam(q_params, lr=q_lr)

    def update(data, timer):
        # First run one gradient descent step for Q1 and Q2
        q_optimizer.zero_grad()
        loss_q, loss_info = compute_loss_q(data)
        loss_q.backward()
        q_optimizer.step()
        # Possibly update pi and target networks
        if timer % policy_delay == 0:
            # Freeze Q-networks so you don't waste computational effort
            # computing gradients for them during the policy learning step.
            for p in q_params:
                p.requires_grad = False
            # Next run one gradient descent step for san.
            san_optimizer.zero_grad()
            loss_san = compute_loss_san(data)
            loss_san.backward()
            san_optimizer.step()
            # Unfreeze Q-networks so you can optimize it at next DDPG step.
            for p in q_params:
                p.requires_grad = True
            # Finally, update target networks by polyak averaging.
            with torch.no_grad():
                for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
                    # NB: We use an in-place operations "mul_", "add_" to update target
                    # params, as opposed to "mul" and "add", which would make new tensors.
                    p_targ.data.mul_(polyak)
                    p_targ.data.add_((1 - polyak) * p.data)

    def get_action(o, noise_scale):
        # Actor action plus Gaussian exploration noise, clipped to the bound.
        a = ac.act(torch.as_tensor(o, dtype=torch.float32, device=device))
        a += noise_scale * np.random.randn(act_dim)
        return np.clip(a, -act_limit, act_limit)

    def test_agent():
        # Average un-discounted return over num_test_episodes greedy episodes.
        test_reward_sum = 0
        for j in range(num_test_episodes):
            o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
            while not(d or (ep_len == max_ep_len)):
                # Take deterministic actions at test time (noise_scale=0)
                o, r, d, _ = test_env.step(get_action(replay_buffer.normalize_obs(o), 0))
                ep_ret += r
                ep_len += 1
            test_reward_sum += ep_ret
        return test_reward_sum / num_test_episodes

    save_test_reward = []
    save_test_reward_steps = []
    # Create the output directories (idempotent).
    try:
        os.mkdir(root_dir + "/params")
        print("Directory params Created")
    except FileExistsError:
        print("Directory params already exists")
    model_dir = root_dir + "/params/hybrid-td3_" + tb_comment
    try:
        os.mkdir(model_dir)
        print("Directory ", model_dir, " Created")
    except FileExistsError:
        print("Directory ", model_dir, " already exists")
    total_steps = steps_per_epoch * epochs
    o, ep_ret, ep_len = env.reset(), 0, 0
    max_test_reward = 0
    # Main interaction loop.
    for t in range(total_steps):
        # Random exploration for the first start_steps steps, policy afterwards.
        if t > start_steps:
            a = get_action(replay_buffer.normalize_obs(o), act_noise)
        else:
            a = env.action_space.sample()
        o2, r, d, _ = env.step(a)
        ep_ret += r
        ep_len += 1
        # Ignore the 'done' signal when it only comes from hitting the time horizon.
        d = False if ep_len == max_ep_len else d
        replay_buffer.store(o, a, r, o2, d)
        o = o2
        if d or (ep_len == max_ep_len):
            o, ep_ret, ep_len = env.reset(), 0, 0
        # Batched updates: every update_every steps, run update_every gradient steps.
        if t >= update_after and t % update_every == 0:
            for j in range(update_every):
                batch = replay_buffer.sample_batch(device, batch_size)
                update(data=batch, timer=j)
        if (t+1) % steps_per_epoch == 0:
            epoch = (t+1) // steps_per_epoch
            # Test the performance of the deterministic version of the agent.
            test_mean_reward = test_agent()
            save_test_reward.append(test_mean_reward)
            save_test_reward_steps.append(t + 1)
            print("Model: ", model_idx, " Steps: ", t + 1, " Mean Reward: ", test_mean_reward)
            # Checkpoint the best actor (weights moved to CPU for portability).
            if epoch % save_freq == 0:
                if test_mean_reward > max_test_reward:
                    ac.san.to('cpu')
                    torch.save(ac.san.state_dict(), model_dir + '/model' + str(model_idx) + '_best.pt')
                    ac.san.to(device)
                    max_test_reward = test_mean_reward
                    pickle.dump([replay_buffer.mean, replay_buffer.var], open(model_dir + '/model' + str(model_idx) + '_best_mean_var.p', 'wb+'))
                    print("Weights saved in ", model_dir + '/model' + str(model_idx) + '_best.pt')
            if epoch == epochs:
                ac.san.to('cpu')
                torch.save(ac.san.state_dict(), model_dir + '/model' + str(model_idx) + '_last.pt')
                ac.san.to(device)
                pickle.dump([replay_buffer.mean, replay_buffer.var], open(model_dir + '/model' + str(model_idx) + '_last_mean_var.p', 'wb+'))
                print('Weights saved in ', model_dir + '/model' + str(model_idx) + '_last.pt')
            # Save Test Reward List
            pickle.dump([save_test_reward, save_test_reward_steps], open(model_dir + '/' + "model" + str(model_idx) + "_test_rewards.p", 'wb+'))
7,574 | import gym
import math
import random
import numpy as np
from collections import namedtuple
from itertools import count
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from torch.utils.tensorboard import SummaryWriter
import argparse
def select_action(state):
    """Epsilon-greedy action selection with exponentially decaying epsilon.

    Reads/updates the module-level `steps_done` counter and the EPS_* /
    policy_net / n_actions / device globals.
    """
    global steps_done
    # Epsilon decays from EPS_START toward EPS_END with time constant EPS_DECAY.
    threshold = EPS_END + (EPS_START - EPS_END) * \
        math.exp(-1. * steps_done / EPS_DECAY)
    draw = random.random()
    steps_done += 1
    if draw <= threshold:
        # Explore: uniform random action.
        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
    # Exploit: greedy action from the policy network.
    with torch.no_grad():
        return policy_net(state).max(1)[1].view(1, 1)
7,575 | import gym
import math
import random
import numpy as np
from collections import namedtuple
from itertools import count
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
from torch.utils.tensorboard import SummaryWriter
import argparse
def optimize_model():
    """One DQN optimization step on a minibatch from the replay memory.

    Uses module-level memory / Transition / policy_net / target_net /
    optimizer / BATCH_SIZE / GAMMA / device. No-op until the memory holds
    at least BATCH_SIZE transitions.
    """
    if len(memory) < BATCH_SIZE:
        return
    # Transpose a list of Transitions into a Transition of lists.
    batch = Transition(*zip(*memory.sample(BATCH_SIZE)))
    # Terminal states are stored as None; mask them out of the bootstrap.
    non_final_mask = torch.tensor(tuple(s is not None for s in batch.next_state),
                                  device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
    states = torch.cat(batch.state)
    actions = torch.cat(batch.action)
    rewards = torch.cat(batch.reward)
    # Q(s, a) for the actions actually taken.
    q_values = policy_net(states).gather(1, actions)
    # max_a' Q_target(s', a'), zero for terminal states.
    next_values = torch.zeros(BATCH_SIZE, device=device)
    next_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values
    targets = rewards + next_values * GAMMA
    # Compute Huber loss
    loss = F.smooth_l1_loss(q_values, targets.unsqueeze(1))
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    for p in policy_net.parameters():
        p.grad.data.clamp_(-1, 1)
    optimizer.step()
7,576 | import gym
import math
import random
import numpy as np
from collections import namedtuple
from itertools import count
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from spikingjelly.activation_based import monitor, neuron, functional, layer
import os
from tensorboardX import SummaryWriter
# A single replay-memory entry: one environment transition.
Transition = namedtuple('Transition', 'state action next_state reward')
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transitions with uniform sampling."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # grows until full, then entries are overwritten
        self.position = 0         # next write index (wraps around)

    def push(self, *args):
        """Store a Transition, overwriting the oldest entry once full."""
        if len(self.memory) < self.capacity:
            # Grow the backing list until the ring reaches capacity.
            self.memory.append(None)
        slot = self.position
        self.memory[slot] = Transition(*args)
        self.position = (slot + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class DQSN(nn.Module):
    """Deep Q Spiking Network: IF spiking hidden layer, non-spiking readout.

    NOTE(review): `NonSpikingLIFNode` is neither imported nor defined in this
    file — presumably the custom readout neuron from the spikingjelly DQN
    tutorial; confirm it is in scope at runtime.
    """

    def __init__(self, input_size, hidden_size, output_size, T=16):
        super().__init__()
        self.fc = nn.Sequential(
            layer.Linear(input_size, hidden_size),
            neuron.IFNode(),
            layer.Linear(hidden_size, output_size),
            NonSpikingLIFNode(tau=2.0)
        )
        # Number of simulation time steps per forward pass.
        self.T = T

    def forward(self, x):
        # Run the network for T time steps; spiking/membrane state accumulates
        # inside the layers across iterations.
        for t in range(self.T):
            self.fc(x)
        # Read the Q-values out as the membrane voltage of the last layer.
        return self.fc[-1].v
def train(use_cuda, model_dir, log_dir, env_name, hidden_size, num_episodes, seed):
    """Train a spiking DQN (DQSN) on a gym environment.

    :param use_cuda: run on CUDA if True, else CPU
    :param model_dir: directory for saved checkpoints (created if missing)
    :param log_dir: TensorBoard log directory
    :param env_name: gym environment id (observation vector, discrete actions)
    :param hidden_size: width of the spiking hidden layer
    :param num_episodes: number of training episodes
    :param seed: RNG seed for python/numpy/torch and the environment
    Saves the best-reward weights to policy_net_<hidden>_max.pt and the final
    weights to policy_net_<hidden>.pt.
    """
    BATCH_SIZE = 128
    GAMMA = 0.999
    EPS_START = 0.9
    EPS_END = 0.05
    EPS_DECAY = 200
    TARGET_UPDATE = 10   # episodes between target-network syncs
    T = 16               # spiking simulation steps per forward pass
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda" if use_cuda else "cpu")
    steps_done = 0
    writer = SummaryWriter(logdir=log_dir)
    env = gym.make(env_name).unwrapped
    env.seed(seed)
    n_states = env.observation_space.shape[0]
    n_actions = env.action_space.n
    policy_net = DQSN(n_states, hidden_size, n_actions, T).to(device)
    target_net = DQSN(n_states, hidden_size, n_actions, T).to(device)
    target_net.load_state_dict(policy_net.state_dict())
    target_net.eval()
    optimizer = optim.Adam(policy_net.parameters())
    memory = ReplayMemory(10000)

    def select_action(state, steps_done):
        # Epsilon-greedy with exponentially decaying epsilon.
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * \
            math.exp(-1. * steps_done / EPS_DECAY)
        if sample > eps_threshold:
            with torch.no_grad():
                ac = policy_net(state).max(1)[1].view(1, 1)
                # Clear spiking state after the greedy forward pass.
                functional.reset_net(policy_net)
                return ac
        else:
            return torch.tensor([[random.randrange(env.action_space.n)]], device=device, dtype=torch.long)

    def optimize_model():
        # One DQN gradient step on a sampled minibatch; no-op until the
        # memory holds at least BATCH_SIZE transitions.
        if len(memory) < BATCH_SIZE:
            return
        transitions = memory.sample(BATCH_SIZE)
        batch = Transition(*zip(*transitions))
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                                batch.next_state)), device=device, dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                           if s is not None])
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)
        state_action_values = policy_net(state_batch).gather(1, action_batch)
        next_state_values = torch.zeros(BATCH_SIZE, device=device)
        next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
        # Reset spiking state of both nets after their forward passes.
        functional.reset_net(target_net)
        expected_state_action_values = (next_state_values * GAMMA) + reward_batch
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
        optimizer.zero_grad()
        loss.backward()
        for param in policy_net.parameters():
            if param.grad is not None:
                param.grad.data.clamp_(-1, 1)
        optimizer.step()
        functional.reset_net(policy_net)

    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    max_reward = 0
    max_pt_path = os.path.join(model_dir, f'policy_net_{hidden_size}_max.pt')
    pt_path = os.path.join(model_dir, f'policy_net_{hidden_size}.pt')
    for i_episode in range(num_episodes):
        # Initialize the environment and state
        env.reset()
        state = torch.zeros([1, n_states], dtype=torch.float, device=device)
        total_reward = 0
        for t in count():
            action = select_action(state, steps_done)
            steps_done += 1
            next_state, reward, done, _ = env.step(action.item())
            total_reward += reward
            next_state = torch.from_numpy(next_state).float().to(device).unsqueeze(0)
            reward = torch.tensor([reward], device=device)
            if done:
                # Terminal states are stored as None in the replay memory.
                next_state = None
            memory.push(state, action, next_state, reward)
            state = next_state
            # Checkpoint whenever an episode sets a new best total reward.
            if done and total_reward > max_reward:
                max_reward = total_reward
                torch.save(policy_net.state_dict(), max_pt_path)
                print(f'max_reward={max_reward}, save models')
            optimize_model()
            if done:
                print(f'Episode: {i_episode}, Reward: {total_reward}')
                writer.add_scalar('Spiking-DQN-state-' + env_name + '/Reward', total_reward, i_episode)
                break
        # Periodically sync the target network with the policy network.
        if i_episode % TARGET_UPDATE == 0:
            target_net.load_state_dict(policy_net.state_dict())
    print('complete')
    torch.save(policy_net.state_dict(), pt_path)
    print('state_dict path is', pt_path)
    writer.close()
7,577 | import gym
import math
import random
import numpy as np
from collections import namedtuple
from itertools import count
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from spikingjelly.activation_based import monitor, neuron, functional, layer
import os
from tensorboardX import SummaryWriter
class DQSN(nn.Module):
    """Deep Q Spiking Network: IF spiking hidden layer, non-spiking readout.

    NOTE(review): `NonSpikingLIFNode` is neither imported nor defined in this
    file — presumably the custom readout neuron from the spikingjelly DQN
    tutorial; confirm it is in scope at runtime.
    """

    def __init__(self, input_size, hidden_size, output_size, T=16):
        super().__init__()
        self.fc = nn.Sequential(
            layer.Linear(input_size, hidden_size),
            neuron.IFNode(),
            layer.Linear(hidden_size, output_size),
            NonSpikingLIFNode(tau=2.0)
        )
        # Number of simulation time steps per forward pass.
        self.T = T

    def forward(self, x):
        # Run the network for T time steps; spiking/membrane state accumulates
        # inside the layers across iterations.
        for t in range(self.T):
            self.fc(x)
        # Read the Q-values out as the membrane voltage of the last layer.
        return self.fc[-1].v
def play(use_cuda, pt_path, env_name, hidden_size, played_frames=60, save_fig_num=0, fig_dir=None, figsize=(12, 6), firing_rates_plot_type='bar', heatmap_shape=None):
    """Run a trained DQSN policy and visualize voltages / firing rates live.

    :param use_cuda: run on CUDA if True, else CPU
    :param pt_path: path of the saved policy state_dict
    :param env_name: gym environment id (the figure labels assume CartPole)
    :param hidden_size: width of the spiking hidden layer
    :param played_frames: minimum number of frames before stopping on done
    :param save_fig_num: save the first N frames as PNGs into fig_dir
    :param firing_rates_plot_type: 'bar' or 'heatmap' layout for firing rates
    :param heatmap_shape: reshape target for the heatmap variant
    """
    T = 16
    plt.rcParams['figure.figsize'] = figsize
    plt.ion()
    env = gym.make(env_name).unwrapped
    device = torch.device("cuda" if use_cuda else "cpu")
    n_states = env.observation_space.shape[0]
    n_actions = env.action_space.n
    policy_net = DQSN(n_states, hidden_size, n_actions, T).to(device)
    policy_net.load_state_dict(torch.load(pt_path, map_location=device))
    env.reset()
    state = torch.zeros([1, n_states], dtype=torch.float, device=device)
    with torch.no_grad():
        # Record the spike output of every IF neuron during each forward pass.
        spike_seq_monitor = monitor.OutputMonitor(policy_net, neuron.IFNode)
        delta_lim = 0
        over_score = 1e9
        for i in count():
            LIF_v = policy_net(state) # shape=[1, 2]
            action = LIF_v.max(1)[1].view(1, 1).item()
            if firing_rates_plot_type == 'bar':
                plt.subplot2grid((2, 9), (1, 0), colspan=3)
            elif firing_rates_plot_type == 'heatmap':
                plt.subplot2grid((2, 3), (1, 0))
            plt.xticks(np.arange(2), ('Left', 'Right'))
            plt.ylabel('Voltage')
            plt.title('Voltage of LIF neurons at last time step')
            delta_lim = (LIF_v.max() - LIF_v.min()) * 0.5
            plt.ylim(LIF_v.min() - delta_lim, LIF_v.max() + delta_lim)
            plt.yticks([])
            plt.text(0, LIF_v[0][0], str(round(LIF_v[0][0].item(), 2)), ha='center')
            plt.text(1, LIF_v[0][1], str(round(LIF_v[0][1].item(), 2)), ha='center')
            # Highlight the chosen action's bar in red.
            plt.bar(np.arange(2), LIF_v.squeeze(), color=['r', 'gray'] if action == 0 else ['gray', 'r'], width=0.5)
            if LIF_v.min() - delta_lim < 0:
                plt.axhline(0, color='black', linewidth=0.1)
            IF_spikes = torch.cat(spike_seq_monitor.records, 0)
            firing_rates = IF_spikes.mean(axis=0)
            if firing_rates_plot_type == 'bar':
                plt.subplot2grid((2, 9), (0, 4), rowspan=2, colspan=5)
            elif firing_rates_plot_type == 'heatmap':
                plt.subplot2grid((2, 3), (0, 1), rowspan=2, colspan=2)
            plt.title('Firing rates of IF neurons')
            if firing_rates_plot_type == 'bar':
                # Draw the firing rates as a bar chart.
                plt.xlabel('Neuron index')
                plt.ylabel('Firing rate')
                plt.xlim(0, firing_rates.size(0))
                plt.ylim(0, 1.01)
                plt.bar(np.arange(firing_rates.size(0)), firing_rates, width=0.5)
            elif firing_rates_plot_type == 'heatmap':
                # Draw the firing rates as a heatmap.
                heatmap = plt.imshow(firing_rates.reshape(heatmap_shape), vmin=0, vmax=1, cmap='ocean')
                plt.gca().invert_yaxis()
                cbar = heatmap.figure.colorbar(heatmap)
                cbar.ax.set_ylabel('Magnitude', rotation=90, va='top')
            # Clear spiking state before the next forward pass.
            functional.reset_net(policy_net)
            subtitle = f'Position={state[0][0].item(): .2f}, Velocity={state[0][1].item(): .2f}, Pole Angle={state[0][2].item(): .2f}, Pole Velocity At Tip={state[0][3].item(): .2f}, Score={i}'
            state, reward, done, _ = env.step(action)
            if done:
                over_score = min(over_score, i)
                subtitle = f'Game over, Score={over_score}'
            plt.suptitle(subtitle)
            state = torch.from_numpy(state).float().to(device).unsqueeze(0)
            screen = env.render(mode='rgb_array').copy()
            screen[300, :, :] = 0 # draw a black separator line on the frame
            if firing_rates_plot_type == 'bar':
                plt.subplot2grid((2, 9), (0, 0), colspan=3)
            elif firing_rates_plot_type == 'heatmap':
                plt.subplot2grid((2, 3), (0, 0))
            plt.xticks([])
            plt.yticks([])
            plt.title('Game screen')
            plt.imshow(screen, interpolation='bicubic')
            plt.pause(0.001)
            if i < save_fig_num:
                plt.savefig(os.path.join(fig_dir, f'{i}.png'))
            if done and i >= played_frames:
                env.close()
                plt.close()
                break
7,578 | import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron, layer, learning
from matplotlib import pyplot as plt
torch.manual_seed(0)
def f_weight(x):
    """STDP weight-update shaping: clip the trace values to [-1, 1]."""
    return x.clamp(-1.0, 1.0)
7,581 | import torch
from torch import Tensor, nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms
from torchaudio.transforms import Spectrogram
from spikingjelly.activation_based import neuron, surrogate
from spikingjelly.datasets.speechcommands import SPEECHCOMMANDS
from spikingjelly.activation_based.functional import reset_net
from scipy.signal import savgol_filter
from sklearn.metrics import confusion_matrix
import numpy as np
import math
import time
import argparse
from typing import Optional
from tqdm import tqdm
def mel_to_hz(mels, dct_type):
    """Convert mel-scale values to Hz.

    `dct_type == 'htk'` uses the HTK formula; anything else uses the
    Slaney/librosa scale (linear below 1 kHz, logarithmic above).
    Accepts a float or a torch tensor.
    """
    if dct_type == 'htk':
        # HTK: single logarithmic mapping over the whole range.
        return 700.0 * (10 ** (mels / 2595.0) - 1.0)
    # Slaney linear region parameters.
    lin_origin = 0.0
    lin_step = 200.0 / 3
    hz = lin_origin + lin_step * mels
    break_hz = 1000.0                                # start of the log region (Hz)
    break_mel = (break_hz - lin_origin) / lin_step   # same point in mels
    log_step = math.log(6.4) / 27.0                  # step size in the log region
    if torch.is_tensor(mels) and mels.ndim:
        # Vector input: overwrite the entries above the break point.
        above = mels >= break_mel
        hz[above] = break_hz * torch.exp(log_step * (mels[above] - break_mel))
    elif mels >= break_mel:
        # Scalar input: check directly.
        hz = break_hz * math.exp(log_step * (mels - break_mel))
    return hz
def hz_to_mel(frequencies, dct_type):
    """Convert Hz to mel-scale values.

    `dct_type == 'htk'` uses the HTK formula; anything else uses the
    Slaney/librosa scale (linear below 1 kHz, logarithmic above).
    Accepts a float or a torch tensor.
    """
    if dct_type == 'htk':
        if torch.is_tensor(frequencies) and frequencies.ndim:
            return 2595.0 * torch.log10(1.0 + frequencies / 700.0)
        return 2595.0 * math.log10(1.0 + frequencies / 700.0)
    # Slaney linear region parameters.
    lin_origin = 0.0
    lin_step = 200.0 / 3
    mels = (frequencies - lin_origin) / lin_step
    break_hz = 1000.0                                # start of the log region (Hz)
    break_mel = (break_hz - lin_origin) / lin_step   # same point in mels
    log_step = math.log(6.4) / 27.0                  # step size in the log region
    if torch.is_tensor(frequencies) and frequencies.ndim:
        # Vector input: overwrite the entries above the break point.
        above = frequencies >= break_hz
        mels[above] = break_mel + torch.log(frequencies[above] / break_hz) / log_step
    elif frequencies >= break_hz:
        # Scalar input: check directly.
        mels = break_mel + math.log(frequencies / break_hz) / log_step
    return mels
def create_fb_matrix(
        n_freqs: int,
        f_min: float,
        f_max: float,
        n_mels: int,
        sample_rate: int,
        dct_type: Optional[str] = 'slaney') -> Tensor:
    """Create a (n_freqs, n_mels) triangular mel filter bank.

    Mirrors librosa's filterbank construction; `dct_type` selects the HTK
    or Slaney mel scale (Slaney filters are area-normalized).
    """
    if dct_type not in ("htk", "slaney"):
        raise ValueError("DCT type must be either 'htk' or 'slaney'")
    # Center frequencies of the FFT bins (equivalent filterbank
    # construction by librosa).
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
    # Filter edge points, uniformly spaced on the mel scale, mapped back to Hz.
    m_pts = torch.linspace(hz_to_mel(f_min, dct_type),
                           hz_to_mel(f_max, dct_type), n_mels + 2)
    f_pts = mel_to_hz(m_pts, dct_type)
    # Spacing between consecutive edge points: (n_mels + 1,)
    f_diff = f_pts[1:] - f_pts[:-1]
    # Signed distance from each FFT bin to each edge point: (n_freqs, n_mels + 2)
    slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)
    # Rising and falling edges of the overlapping triangles: (n_freqs, n_mels)
    rise = (-1.0 * slopes[:, :-2]) / f_diff[:-1]
    fall = slopes[:, 2:] / f_diff[1:]
    fb = torch.max(torch.zeros(1), torch.min(rise, fall))
    if dct_type == "slaney":
        # Slaney-style mel is scaled to be approx constant energy per channel.
        enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
        fb *= enorm.unsqueeze(0)
    return fb
7,582 | import torch
from torch import Tensor, nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms
from torchaudio.transforms import Spectrogram
from spikingjelly.activation_based import neuron, surrogate
from spikingjelly.datasets.speechcommands import SPEECHCOMMANDS
from spikingjelly.activation_based.functional import reset_net
from scipy.signal import savgol_filter
from sklearn.metrics import confusion_matrix
import numpy as np
import math
import time
import argparse
from typing import Optional
from tqdm import tqdm
def collate_fn(data):
    """Batch (spectrogram, label) pairs for a DataLoader.

    Concatenates the spectrograms along dim 0, divides each channel by its
    batch standard deviation (biased, over batch and time; note this
    mutates the stacked tensor in place via div_), and stacks the labels.
    """
    features = torch.cat([sample[0] for sample in data])
    channel_std = features.std(axis=(0, 2), keepdim=True, unbiased=False)
    features.div_(channel_std)
    labels = torch.tensor([sample[1] for sample in data])
    return features, labels
7,583 | import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for a vectorized-env worker.

    Builds the environment from `env_fn_wrapper.x()` and serves commands
    received over the `remote` pipe until a 'close' command arrives.
    """
    parent_remote.close()  # the child only uses its own end of the pipe
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'close':
            remote.close()
            break
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the caller always receives a valid observation.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
7,584 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from spikingjelly.activation_based import rnn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob
import unicodedata
import string
import random
import time
import math
def findFiles(path):
    """Return every filesystem path matching the glob pattern `path`."""
    return list(glob.iglob(path))
7,585 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from spikingjelly.activation_based import rnn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob
import unicodedata
import string
import random
import time
import math
if __name__ == '__main__':
    # Character-level (spiking) RNN surname classifier: builds the per-language
    # name dataset, then either trains (IF_TRAIN truthy) or evaluates a saved
    # model, plots results, and computes a confusion matrix.
    # NOTE(review): this extract references names defined elsewhere
    # (findFiles, readLines, Net, randomPair, lineToTensor, categoryFromOutput,
    # timeSince) — confirm they are in scope at runtime.
    all_letters = string.ascii_letters + " .,;'-"
    n_letters = len(all_letters)
    # Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
    def unicodeToAscii(s):
        # NOTE(review): the function body appears to have been lost in
        # extraction — syntactically invalid as-is; restore from the source.
        # Read a file and split into lines
        # Build the category_lines dictionary, a list of lines per category
    category_lines = {}
    all_categories = []
    # Ubuntu
    for filename in findFiles('./data/names/*.txt'): # Windows findFiles('.\data\\names\*.txt')
        category = filename.split('/')[-1].split('.')[0] # Windows filename.split('\\')[-1].split('.')[0]
        all_categories.append(category)
        lines = readLines(filename)
        category_lines[category] = lines
    # Windows
    # for filename in findFiles('.\data\\names\*.txt'): # Windows findFiles('.\data\\names\*.txt')
    # category = filename.split('\\')[-1].split('.')[0] # Windows filename.split('\\')[-1].split('.')[0]
    # all_categories.append(category)
    # lines = readLines(filename)
    # category_lines[category] = lines
    n_categories = len(all_categories)
    # split the data into training set and testing set (80/20 per category)
    numExamplesPerCategory = []
    category_lines_train = {}
    category_lines_test = {}
    testNumtot = 0
    for c, names in category_lines.items():
        category_lines_train[c] = names[:int(len(names)*0.8)]
        category_lines_test[c] = names[int(len(names)*0.8):]
        numExamplesPerCategory.append([len(category_lines[c]), len(category_lines_train[c]), len(category_lines_test[c])])
        testNumtot += len(category_lines_test[c])
    # Find letter index from all_letters, e.g. "a" = 0
    # Turn a line into a <line_length x 1 x n_letters>,
    # or an array of one-hot letter vectors
    # Preparing [x, y] pair
    ####################
    # prepare the net
    ####################
    n_hidden = 256 # 256 # 128
    ####################
    # training and testing
    ####################
    IF_TRAIN = 0
    TRAIN_EPISODES = 1000000 # 100000
    # TEST_EPISODES = 50
    # print_every = 5000
    plot_every = 1000
    learning_rate = 1e-4 # 0.001 # 0.005 # If you set this too high, it might explode. If too low, it might not learn
    net = Net(n_letters, n_hidden, n_categories)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    if IF_TRAIN:
        print('Training...')
        current_loss = 0
        correct_num = 0
        avg_losses = []
        accuracy_rec = []
        # all_losses = []
        test_accu_rec = []
        start = time.time()
        for epoch in range(1, TRAIN_EPISODES+1):
            net.train()
            category, line, category_tensor, line_tensor = randomPair('train')
            label_one_hot = F.one_hot(category_tensor.to(int), n_categories).float()
            optimizer.zero_grad()
            out_prob_log = net(line_tensor)
            # loss = nn.NLLLoss(out_prob_log, category_tensor)
            loss = F.mse_loss(out_prob_log, label_one_hot)
            loss.backward()
            optimizer.step()
            current_loss += loss.data.item()
            # all_losses.append(loss.data.item())
            guess, _ = categoryFromOutput(out_prob_log.data)
            if guess == category:
                correct_num += 1
            # Add current loss avg to list of losses
            if epoch % plot_every == 0:
                avg_losses.append(current_loss / plot_every)
                accuracy_rec.append(correct_num / plot_every)
                current_loss = 0
                correct_num = 0
            # Run a full test-set evaluation every plot_every episodes
            if epoch % plot_every == 0: # int(TRAIN_EPISODES/1000)
                net.eval()
                with torch.no_grad():
                    numCorrect = 0
                    for i in range(n_categories):
                        category = all_categories[i]
                        for tname in category_lines_test[category]:
                            output = net(lineToTensor(tname))
                            guess, _ = categoryFromOutput(output.data)
                            if guess == category:
                                numCorrect += 1
                    test_accu = numCorrect / testNumtot
                    test_accu_rec.append(test_accu)
                    print('Epoch %d %d%% (%s); Avg_loss %.4f; Train accuracy %.4f; Test accuracy %.4f' % (
                        epoch, epoch / TRAIN_EPISODES * 100, timeSince(start), avg_losses[-1], accuracy_rec[-1], test_accu))
        torch.save(net, 'char_rnn_classification.pth')
        np.save('avg_losses.npy', np.array(avg_losses))
        np.save('accuracy_rec.npy', np.array(accuracy_rec))
        np.save('test_accu_rec.npy', np.array(test_accu_rec))
        np.save('category_lines_train.npy', category_lines_train, allow_pickle=True)
        np.save('category_lines_test.npy', category_lines_test, allow_pickle=True)
        # x = np.load('category_lines_test.npy', allow_pickle=True)
        # xdict = x.item()
        plt.figure()
        plt.subplot(311)
        plt.plot(avg_losses)
        plt.title('Average loss')
        plt.subplot(312)
        plt.plot(accuracy_rec)
        plt.title('Train accuracy')
        plt.subplot(313)
        plt.plot(test_accu_rec)
        plt.title('Test accuracy')
        plt.xlabel('Epoch (*1000)')
        plt.subplots_adjust(hspace=0.6)
        plt.savefig('TrainingProcess.svg')
        plt.close()
    else:
        print('Testing...')
        net = torch.load('char_rnn_classification.pth')
        # Walk the full test set to compute accuracy
        print('Calculating testing accuracy...')
        numCorrect = 0
        for i in range(n_categories):
            category = all_categories[i]
            for tname in category_lines_test[category]:
                output = net(lineToTensor(tname))
                guess, _ = categoryFromOutput(output.data)
                if guess == category:
                    numCorrect += 1
        test_accu = numCorrect / testNumtot
        print('Test accuracy: {:.3f}, Random guess: {:.3f}'.format(test_accu, 1/n_categories))
        plt.figure()
        plt.bar(1, test_accu)
        plt.xlim(0, 5)
        plt.ylim(0, 1)
        plt.title('Test accuracy: {:.3f}, Random guess: {:.3f}'.format(test_accu, 1/n_categories))
        plt.show()
        plt.savefig('TestAccuracy.png')
        plt.close()
        # Interactive demo: ask the user for surnames and predict the language
        n_predictions = 3
        for j in range(3):
            first_name = input('请输入一个姓氏以判断其属于哪种语系:')
            print('\n> %s' % first_name)
            output = net(lineToTensor(first_name))
            # Get top N categories
            topv, topi = output.topk(n_predictions, 1, True)
            predictions = []
            for i in range(n_predictions):
                value = topv[0][i].item()
                category_index = topi[0][i].item()
                print('(%.2f) %s' % (value, all_categories[category_index]))
                predictions.append([value, all_categories[category_index]])
        # Compute the confusion matrix over random samples
        print('Calculating confusion matrix...')
        confusion = torch.zeros(n_categories, n_categories)
        n_confusion = 10000
        # Keep track of correct guesses in a confusion matrix
        for i in range(n_confusion):
            category, line, category_tensor, line_tensor = randomPair('all')
            output = net(line_tensor)
            guess, guess_i = categoryFromOutput(output.data)
            category_i = all_categories.index(category)
            confusion[category_i][guess_i] += 1
        confusion = confusion / confusion.sum(1)
        np.save('confusion.npy', confusion)
        # Set up plot
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111)
        cax = ax.matshow(confusion.numpy())
        fig.colorbar(cax)
        # Set up axes
        ax.set_xticklabels([''] + all_categories, rotation=90)
        ax.set_yticklabels([''] + all_categories)
        # Force label at every tick
        ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
        # sphinx_gallery_thumbnail_number = 2
        plt.show()
        plt.savefig('ConfusionMatrix.svg')
        plt.close()
def readLines(filename):
    """Read *filename* and return its lines converted to plain ASCII.

    Fixes a resource leak in the original (``open`` without ``close``) by
    using a context manager, and reads the file as UTF-8 explicitly so the
    accented characters in the name lists decode the same on every platform.
    """
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
7,586 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from spikingjelly.activation_based import rnn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob
import unicodedata
import string
import random
import time
import math
def categoryFromOutput(output):
    """Translate a network output vector into (category name, category index).

    The index is returned as produced by ``topk`` (a 0-dim tensor), which is
    what callers use both for list indexing and confusion-matrix indexing.
    """
    # topk(1) yields the single largest score and its position.
    _, top_index = output.topk(1)
    category_i = top_index[0][0]
    return all_categories[category_i], category_i
7,587 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from spikingjelly.activation_based import rnn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob
import unicodedata
import string
import random
import time
import math
if __name__ == '__main__':
    # Alphabet used for one-hot encoding: ASCII letters plus common name punctuation.
    all_letters = string.ascii_letters + " .,;'-"
    n_letters = len(all_letters)
    # Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
    # Read a file and split into lines
    # Build the category_lines dictionary, a list of lines per category
    category_lines = {}
    all_categories = []
    # Ubuntu
    for filename in findFiles('./data/names/*.txt'):  # Windows findFiles('.\data\\names\*.txt')
        # Category name is the file stem, e.g. './data/names/English.txt' -> 'English'.
        category = filename.split('/')[-1].split('.')[0]  # Windows filename.split('\\')[-1].split('.')[0]
        all_categories.append(category)
        lines = readLines(filename)
        category_lines[category] = lines
    # Windows
    # for filename in findFiles('.\data\\names\*.txt'): # Windows findFiles('.\data\\names\*.txt')
    #     category = filename.split('\\')[-1].split('.')[0] # Windows filename.split('\\')[-1].split('.')[0]
    #     all_categories.append(category)
    #     lines = readLines(filename)
    #     category_lines[category] = lines
    n_categories = len(all_categories)
    # Split the data into a training set and a testing set (80/20 per category).
    numExamplesPerCategory = []
    category_lines_train = {}
    category_lines_test = {}
    testNumtot = 0  # total number of test names across all categories
    for c, names in category_lines.items():
        category_lines_train[c] = names[:int(len(names)*0.8)]
        category_lines_test[c] = names[int(len(names)*0.8):]
        numExamplesPerCategory.append([len(category_lines[c]), len(category_lines_train[c]), len(category_lines_test[c])])
        testNumtot += len(category_lines_test[c])
    # Find letter index from all_letters, e.g. "a" = 0
    # Turn a line into a <line_length x 1 x n_letters>,
    # or an array of one-hot letter vectors
def lineToTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
return tensor
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
    # Preparing [x, y] pair
    ####################
    # prepare the net
    ####################
    n_hidden = 256 # 256 # 128
    ####################
    # training and testing
    ####################
    IF_TRAIN = 0  # 1 = train from scratch, 0 = load the saved model and evaluate
    TRAIN_EPISODES = 1000000 # 100000
    # TEST_EPISODES = 50
    # print_every = 5000
    plot_every = 1000  # record/evaluate every this many episodes
    learning_rate = 1e-4 # 0.001 # 0.005 # If you set this too high, it might explode. If too low, it might not learn
    net = Net(n_letters, n_hidden, n_categories)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    if IF_TRAIN:
        print('Training...')
        current_loss = 0
        correct_num = 0
        avg_losses = []
        accuracy_rec = []
        # all_losses = []
        test_accu_rec = []
        start = time.time()
        for epoch in range(1, TRAIN_EPISODES+1):
            net.train()
            # Each "epoch" is a single randomly drawn (category, name) example.
            category, line, category_tensor, line_tensor = randomPair('train')
            label_one_hot = F.one_hot(category_tensor.to(int), n_categories).float()
            optimizer.zero_grad()
            out_prob_log = net(line_tensor)
            # loss = nn.NLLLoss(out_prob_log, category_tensor)
            loss = F.mse_loss(out_prob_log, label_one_hot)
            loss.backward()
            optimizer.step()
            current_loss += loss.data.item()
            # all_losses.append(loss.data.item())
            guess, _ = categoryFromOutput(out_prob_log.data)
            if guess == category:
                correct_num += 1
            # Add current loss avg to list of losses
            if epoch % plot_every == 0:
                avg_losses.append(current_loss / plot_every)
                accuracy_rec.append(correct_num / plot_every)
                current_loss = 0
                correct_num = 0
            # Evaluate on the full test set every plot_every training episodes.
            if epoch % plot_every == 0: # int(TRAIN_EPISODES/1000)
                net.eval()
                with torch.no_grad():
                    numCorrect = 0
                    for i in range(n_categories):
                        category = all_categories[i]
                        for tname in category_lines_test[category]:
                            output = net(lineToTensor(tname))
                            guess, _ = categoryFromOutput(output.data)
                            if guess == category:
                                numCorrect += 1
                    test_accu = numCorrect / testNumtot
                    test_accu_rec.append(test_accu)
                    print('Epoch %d %d%% (%s); Avg_loss %.4f; Train accuracy %.4f; Test accuracy %.4f' % (
                        epoch, epoch / TRAIN_EPISODES * 100, timeSince(start), avg_losses[-1], accuracy_rec[-1], test_accu))
        torch.save(net, 'char_rnn_classification.pth')
        np.save('avg_losses.npy', np.array(avg_losses))
        np.save('accuracy_rec.npy', np.array(accuracy_rec))
        np.save('test_accu_rec.npy', np.array(test_accu_rec))
        np.save('category_lines_train.npy', category_lines_train, allow_pickle=True)
        np.save('category_lines_test.npy', category_lines_test, allow_pickle=True)
        # x = np.load('category_lines_test.npy', allow_pickle=True)
        # xdict = x.item()
        plt.figure()
        plt.subplot(311)
        plt.plot(avg_losses)
        plt.title('Average loss')
        plt.subplot(312)
        plt.plot(accuracy_rec)
        plt.title('Train accuracy')
        plt.subplot(313)
        plt.plot(test_accu_rec)
        plt.title('Test accuracy')
        plt.xlabel('Epoch (*1000)')
        plt.subplots_adjust(hspace=0.6)
        plt.savefig('TrainingProcess.svg')
        plt.close()
    else:
        print('Testing...')
        net = torch.load('char_rnn_classification.pth')
        # Sweep the whole test set to compute accuracy.
        print('Calculating testing accuracy...')
        numCorrect = 0
        for i in range(n_categories):
            category = all_categories[i]
            for tname in category_lines_test[category]:
                output = net(lineToTensor(tname))
                guess, _ = categoryFromOutput(output.data)
                if guess == category:
                    numCorrect += 1
        test_accu = numCorrect / testNumtot
        print('Test accuracy: {:.3f}, Random guess: {:.3f}'.format(test_accu, 1/n_categories))
        plt.figure()
        plt.bar(1, test_accu)
        plt.xlim(0, 5)
        plt.ylim(0, 1)
        plt.title('Test accuracy: {:.3f}, Random guess: {:.3f}'.format(test_accu, 1/n_categories))
        # NOTE(review): savefig after show() saves an empty figure on many
        # backends -- savefig should probably come first. Verify before relying
        # on TestAccuracy.png.
        plt.show()
        plt.savefig('TestAccuracy.png')
        plt.close()
        # Interactively classify user-entered surnames and show the top-3 guesses.
        n_predictions = 3
        for j in range(3):
            first_name = input('请输入一个姓氏以判断其属于哪种语系:')
            print('\n> %s' % first_name)
            output = net(lineToTensor(first_name))
            # Get top N categories
            topv, topi = output.topk(n_predictions, 1, True)
            predictions = []
            for i in range(n_predictions):
                value = topv[0][i].item()
                category_index = topi[0][i].item()
                print('(%.2f) %s' % (value, all_categories[category_index]))
                predictions.append([value, all_categories[category_index]])
        # Compute the confusion matrix from random samples drawn over all splits.
        print('Calculating confusion matrix...')
        confusion = torch.zeros(n_categories, n_categories)
        n_confusion = 10000
        # Keep track of correct guesses in a confusion matrix
        for i in range(n_confusion):
            category, line, category_tensor, line_tensor = randomPair('all')
            output = net(line_tensor)
            guess, guess_i = categoryFromOutput(output.data)
            category_i = all_categories.index(category)
            confusion[category_i][guess_i] += 1
        # NOTE(review): sum(1) lacks keepdim=True, so broadcasting divides each
        # COLUMN j by row-sum j instead of normalising rows; the intended form
        # is likely confusion / confusion.sum(1, keepdim=True) -- confirm.
        confusion = confusion / confusion.sum(1)
        np.save('confusion.npy', confusion)
        # Set up plot
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111)
        cax = ax.matshow(confusion.numpy())
        fig.colorbar(cax)
        # Set up axes
        ax.set_xticklabels([''] + all_categories, rotation=90)
        ax.set_yticklabels([''] + all_categories)
        # Force label at every tick
        ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
        # sphinx_gallery_thumbnail_number = 2
        # NOTE(review): same show()-before-savefig ordering concern as above.
        plt.show()
        plt.savefig('ConfusionMatrix.svg')
        plt.close()
The code above provides the dependencies needed to implement the `randomPair` function. Write a Python function `def randomPair(sampleSource)` that solves the following problem:

    Args:
        sampleSource: 'train', 'test', 'all'
    Returns:
        category, line, category_tensor, line_tensor

Here is the function:
def randomPair(sampleSource):
    """
    Draw a random (category, name) example from the requested split.

    Args:
        sampleSource: 'train', 'test', 'all'
    Returns:
        category, line, category_tensor, line_tensor
    Raises:
        ValueError: if sampleSource is not one of the three accepted values.
        (The original fell through with ``line`` unbound and crashed with an
        unrelated NameError.)
    """
    category = randomChoice(all_categories)
    if sampleSource == 'train':
        line = randomChoice(category_lines_train[category])
    elif sampleSource == 'test':
        line = randomChoice(category_lines_test[category])
    elif sampleSource == 'all':
        line = randomChoice(category_lines[category])
    else:
        raise ValueError("sampleSource must be 'train', 'test' or 'all', got %r" % (sampleSource,))
    # Float dtype kept for backward compatibility: the training loop casts it with .to(int).
    category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.float)
    line_tensor = lineToTensor(line)
    return category, line, category_tensor, line_tensor
7,588 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from spikingjelly.activation_based import rnn
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import glob
import unicodedata
import string
import random
import time
import math
def timeSince(since):
    """Format the wall-clock time elapsed since *since* (a time.time() stamp) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    # %d truncates the fractional seconds, matching the original floor/subtract form.
    return '%dm %ds' % (minutes, seconds)
7,589 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
seed = 1
def make_env():
    """Return a thunk that builds and seeds a fresh environment instance.

    SubprocVecEnv expects a callable per worker; each worker process invokes
    the thunk to construct its own copy of the environment.
    """
    def _build():
        environment = gym.make(env_name)
        environment.seed(seed)
        return environment
    return _build
7,590 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
# Pick the compute device, then seed both torch RNGs and force deterministic
# cuDNN kernels so runs are reproducible.
# NOTE(review): `use_cuda` is not defined in this visible chunk -- confirm it
# is set earlier in the file before this module-level code executes.
device = torch.device('cuda' if use_cuda else 'cpu')
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def test_env(vis=False):
    """Roll out one full episode with the current policy and return its total reward.

    The spiking network is reset after every forward pass so each action
    selection starts from clean neuron state.
    """
    obs = env.reset()
    if vis:
        env.render()
    done = False
    episode_reward = 0
    while not done:
        obs_v = torch.FloatTensor(obs).unsqueeze(0).to(device)
        dist, _ = model(obs_v)
        functional.reset_net(model)
        obs, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
        if vis:
            env.render()
        episode_reward += reward
    return episode_reward
7,591 | import gym
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from spikingjelly.activation_based.examples.common.multiprocessing_env import SubprocVecEnv
from spikingjelly.activation_based import neuron, functional, layer
def compute_returns(next_value, rewards, masks, gamma=0.99):
    """Compute discounted returns, bootstrapping from *next_value*.

    masks[t] is 0 at episode boundaries, which cuts the bootstrap there.
    The returned list is aligned with *rewards* (index 0 = earliest step).
    """
    running = next_value
    discounted = []
    for reward, mask in zip(reversed(rewards), reversed(masks)):
        running = reward + gamma * running * mask
        discounted.append(running)
    discounted.reverse()
    return discounted
7,594 | from copy import deepcopy
import itertools
import numpy as np
import random
import gym
import pickle
import os
import torch
import torch.nn as nn
from torch.optim import Adam
from spikingjelly.activation_based import functional
from replay_buffer_norm import ReplayBuffer
from noisysan import NoisyPopSpikeActor
from core_cuda import MLPQFunction
from torch.utils.tensorboard import SummaryWriter
# Final coefficient of the noise-magnitude penalty added to the actor loss
# (compute_loss_san scales it by normalised training progress).
K_FINAL = 1.0
# Most recent mean test reward; written by the training loop, read when
# computing the actor-loss noise penalty.
CUR_R = 0
# Per-environment reward upper bounds used to normalise progress CUR_R.
SUP_R = {
    'Ant-v3': 6000,
    'HalfCheetah-v3': 12000,
    'Hopper-v3': 4000,
    'Walker2d-v3': 6000,
    'Humanoid-v3': 6000,
    'HumanoidStandup-v2': 180000,
    'InvertedDoublePendulum-v2': 10000,
    'BipedalWalker-v3': 320,
}
# Per-environment reward lower bounds (all zero for these tasks).
INF_R = {
    'Ant-v3': 0,
    'HalfCheetah-v3': 0,
    'Hopper-v3': 0,
    'Walker2d-v3': 0,
    'Humanoid-v3': 0,
    'HumanoidStandup-v2': 0,
    'InvertedDoublePendulum-v2': 0,
    'BipedalWalker-v3': 0,
}
def setup_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs and force deterministic cuDNN behaviour."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Deterministic kernels with the autotuner disabled: trades speed for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class SpikeActorDeepCritic(nn.Module):
    """TD3-style actor-critic: a noisy population-coded spiking actor plus twin MLP Q-critics."""

    def __init__(self, observation_space, action_space, encoder_pop_dim, decoder_pop_dim,
                 mean_range, std, spike_ts, beta, sigma_init, hidden_sizes=(256, 256), activation=nn.ReLU):
        super().__init__()
        n_obs = observation_space.shape[0]
        n_act = action_space.shape[0]
        # Assumes every action dimension shares the same symmetric bound.
        bound = action_space.high[0]
        # Spiking policy network and the two Q-value networks.
        self.san = NoisyPopSpikeActor(n_obs, n_act, encoder_pop_dim, decoder_pop_dim, hidden_sizes,
                                      mean_range, std, spike_ts, bound, beta, sigma_init)
        self.q1 = MLPQFunction(n_obs, n_act, hidden_sizes, activation)
        self.q2 = MLPQFunction(n_obs, n_act, hidden_sizes, activation)

    def act(self, obs, use_noise=True):
        """Run the actor once (noisy or deterministic) and reset its spiking state."""
        with torch.no_grad():
            if use_noise:
                action = self.san(obs).cpu().numpy()
            else:
                action = self.san.act(obs).cpu().numpy()
            functional.reset_net(self.san)
        return action
class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for DDPG agents.
    with Running Mean and Var from hill-a/stable-baselines
    """
    def __init__(self, obs_dim, act_dim, size, clip_limit, norm_update_every=1000):
        """
        :param obs_dim: observation dimension
        :param act_dim: action dimension
        :param size: buffer sizes
        :param clip_limit: limit for clip value
        :param norm_update_every: update freq
        """
        # NOTE(review): relies on `core.combined_shape`, but `core` is not among
        # the imports visible in this chunk -- confirm it is imported in this file.
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        # ptr: next write slot; size: current fill level; max_size: capacity.
        self.ptr, self.size, self.max_size = 0, 0, size
        # Running z-score normalization parameters
        self.clip_limit = clip_limit
        self.norm_update_every = norm_update_every
        # Observations are staged here and folded into mean/var once the stage fills.
        self.norm_update_batch = np.zeros(core.combined_shape(norm_update_every, obs_dim), dtype=np.float32)
        self.norm_update_count = 0
        # Seed the total count with eps (not 0) so the first merge cannot divide by zero.
        self.norm_total_count = np.finfo(np.float32).eps.item()
        self.mean, self.var = np.zeros(obs_dim, dtype=np.float32), np.ones(obs_dim, dtype=np.float32)
    def store(self, obs, act, rew, next_obs, done):
        """
        Insert entry into memory
        :param obs: observation
        :param act: action
        :param rew: reward
        :param next_obs: observation after action
        :param done: if true then episode done
        """
        self.obs_buf[self.ptr] = obs
        self.obs2_buf[self.ptr] = next_obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.done_buf[self.ptr] = done
        # FIFO overwrite once the buffer wraps around.
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
        # Update Mean and Variance
        # Have to at least update mean and variance once before training starts
        self.norm_update_batch[self.norm_update_count] = obs
        self.norm_update_count += 1
        if self.norm_update_count == self.norm_update_every:
            self.norm_update_count = 0
            # Merge the staged batch statistics into the running mean/var
            # (parallel mean-variance combination over the two sample counts).
            batch_mean, batch_var = self.norm_update_batch.mean(axis=0), self.norm_update_batch.var(axis=0)
            tmp_total_count = self.norm_total_count + self.norm_update_every
            delta_mean = batch_mean - self.mean
            self.mean += delta_mean * (self.norm_update_every / tmp_total_count)
            m_a = self.var * self.norm_total_count
            m_b = batch_var * self.norm_update_every
            m_2 = m_a + m_b + np.square(delta_mean) * self.norm_total_count * self.norm_update_every / tmp_total_count
            self.var = m_2 / tmp_total_count
            self.norm_total_count = tmp_total_count
    def sample_batch(self, device, batch_size=32):
        """
        Sample batch from memory
        :param device: pytorch device
        :param batch_size: batch size
        :return: batch
        """
        # Uniform sampling with replacement; observations are z-score normalised.
        idxs = np.random.randint(0, self.size, size=batch_size)
        batch = dict(obs=self.normalize_obs(self.obs_buf[idxs]),
                     obs2=self.normalize_obs(self.obs2_buf[idxs]),
                     act=self.act_buf[idxs],
                     rew=self.rew_buf[idxs],
                     done=self.done_buf[idxs])
        return {k: torch.as_tensor(v, dtype=torch.float32, device=device) for k, v in batch.items()}
    def normalize_obs(self, obs):
        """
        Do z-score normalization on observation
        :param obs: observation
        :return: norm_obs
        """
        eps = np.finfo(np.float32).eps.item()
        norm_obs = np.clip((obs - self.mean) / np.sqrt(self.var + eps),
                           -self.clip_limit, self.clip_limit)
        return norm_obs
def hybrid_td3(env_name, actor_critic=SpikeActorDeepCritic, ac_kwargs=dict(), seed=0,
               steps_per_epoch=10000, epochs=100, replay_size=int(1e6), gamma=0.99,
               polyak=0.995, san_lr=1e-4, q_lr=1e-3, batch_size=100, start_steps=10000,
               update_after=1000, update_every=50, act_noise=0.1, target_noise=0.2,
               noise_clip=0.5, policy_delay=2, num_test_episodes=10, max_ep_len=1000,
               save_freq=5, norm_clip_limit=3, norm_update=50, tb_comment='', model_idx=0,
               root_dir='.'):
    """Train a noisy spiking actor with twin MLP critics using TD3.

    Standard TD3 machinery (twin critics, target-policy smoothing, delayed
    policy updates, polyak-averaged targets) plus: observation z-score
    normalisation from the replay buffer, the actor's learnable noise stored
    as colored noise in the buffer, and a noise-magnitude penalty on the actor
    loss whose weight grows with the current test reward (CUR_R).

    Best/last actor weights, normalisation stats and test-reward history are
    written under ``root_dir/params``; scalar logs go to TensorBoard.
    """
    global CUR_R
    # Set device
    device = torch.device('cuda')
    # Set random seed
    setup_seed(seed)
    env, test_env = gym.make(env_name), gym.make(env_name)
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape[0]
    # Action limit for clamping: critically, assumes all dimensions share the same bound!
    act_limit = env.action_space.high[0]
    # Create actor-critic module and target networks
    ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
    ac_targ = deepcopy(ac)
    ac.to(device)
    ac_targ.to(device)
    # Freeze target networks with respect to optimizers (only update via polyak averaging)
    for p in ac_targ.parameters():
        p.requires_grad = False
    # List of parameters for both Q-networks (save this for convenience)
    q_params = itertools.chain(ac.q1.parameters(), ac.q2.parameters())
    cn_length = ac.san.get_colored_noise_length()
    # Experience buffer
    # NOTE(review): the ReplayBuffer class defined above in this file does not
    # accept spike_ts/cn_length -- this call presumably targets the imported
    # replay_buffer_norm.ReplayBuffer; confirm which one is in scope here.
    replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size,
                                 clip_limit=norm_clip_limit, norm_update_every=norm_update,
                                 spike_ts=ac_kwargs['spike_ts'], cn_length=cn_length)
    # Set up function for computing TD3 Q-losses
    def compute_loss_q(data):
        o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
        q1 = ac.q1(o, a)
        q2 = ac.q2(o, a)
        # Bellman backup for Q functions
        with torch.no_grad():
            san_targ = ac_targ.san.act(o2)
            functional.reset_net(ac_targ.san)
            # Target policy smoothing
            epsilon = torch.randn_like(san_targ) * target_noise
            epsilon = torch.clamp(epsilon, -noise_clip, noise_clip)
            a2 = san_targ + epsilon
            a2 = torch.clamp(a2, -act_limit, act_limit)
            # Target Q-values (min of the twin targets counters overestimation)
            q1_san_targ = ac_targ.q1(o2, a2)
            q2_san_targ = ac_targ.q2(o2, a2)
            q_san_targ = torch.min(q1_san_targ, q2_san_targ)
            backup = r + gamma * (1 - d) * q_san_targ
        # MSE loss against Bellman backup
        loss_q1 = ((q1 - backup)**2).mean()
        loss_q2 = ((q2 - backup)**2).mean()
        loss_q = loss_q1 + loss_q2
        # Useful info for logging
        loss_info = dict(Q1Vals=q1.cpu().detach().numpy(),
                         Q2Vals=q2.cpu().detach().numpy())
        return loss_q, loss_info
    # Set up function for computing TD3 san loss
    def compute_loss_san(data):
        o = data['obs']
        ac.san.load_colored_noise(data['cn'])
        q1_san = ac.q1(o, ac.san(o))
        functional.reset_net(ac.san)
        noise_sigma = ac.san.get_noise_sigma()
        # Penalty weight ramps from 0 to K_FINAL as the test reward CUR_R
        # approaches the task's reward upper bound SUP_R.
        k = K_FINAL * max(0, CUR_R - INF_R[env_name]) / (SUP_R[env_name] - INF_R[env_name])
        return -q1_san.mean() + k * torch.pow(noise_sigma, 2)
    # Set up optimizers for policy and q-function
    san_optimizer = Adam(ac.san.parameters(), lr=san_lr)
    q_optimizer = Adam(q_params, lr=q_lr)
    def update(data, timer):
        # First run one gradient descent step for Q1 and Q2
        q_optimizer.zero_grad()
        loss_q, loss_info = compute_loss_q(data)
        loss_q.backward()
        q_optimizer.step()
        # Possibly update pi and target networks (delayed policy updates)
        if timer % policy_delay == 0:
            # Freeze Q-networks so you don't waste computational effort computing gradients for them during the policy learning step.
            for p in q_params:
                p.requires_grad = False
            # Next run one gradient descent step for san.
            san_optimizer.zero_grad()
            loss_san = compute_loss_san(data)
            loss_san.backward()
            san_optimizer.step()
            # Unfreeze Q-networks so you can optimize it at next DDPG step.
            for p in q_params:
                p.requires_grad = True
            # Finally, update target networks by polyak averaging.
            with torch.no_grad():
                for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
                    # We use an in-place operations "mul_", "add_" to update target params,
                    # as opposed to "mul" and "add", which would make new tensors.
                    p_targ.data.mul_(polyak)
                    p_targ.data.add_((1 - polyak) * p.data)
    def get_action(o, use_noise=True):
        return ac.act(torch.as_tensor(o, dtype=torch.float32, device=device), use_noise=use_noise)
    def test_agent():
        # Compute the mean test reward over num_test_episodes rollouts.
        test_reward_sum = 0
        for j in range(num_test_episodes):
            o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
            while not(d or (ep_len == max_ep_len)):
                # Take deterministic actions at test time (noise_scale=0)
                a = get_action(replay_buffer.normalize_obs(o), use_noise=False)
                o, r, d, _ = test_env.step(a[0]) if len(a.shape) > 1 else test_env.step(a)
                ep_ret += r
                ep_len += 1
            test_reward_sum += ep_ret
        return test_reward_sum / num_test_episodes
    # Save rewards also create dir for saving parameters
    save_test_reward = []
    save_test_reward_steps = []
    try:
        os.mkdir(root_dir + '/params')
        print('Directory params Created')
    except FileExistsError:
        print('Directory params already exists')
    model_dir = root_dir + '/params/hybrid-td3_' + tb_comment
    try:
        os.mkdir(model_dir)
        print('Directory ', model_dir, ' Created')
    except FileExistsError:
        print('Directory ', model_dir, ' already exists')
    tb_dir = model_dir + '/tb/' + str(seed)
    writer = SummaryWriter(log_dir=tb_dir)
    # Prepare for interaction with environment
    total_steps = steps_per_epoch * epochs
    o, ep_ret, ep_len = env.reset(), 0, 0
    max_test_reward = 0
    ac.san.reset_noise(max_ep_len + 1)
    # Main loop: collect experience in env and update/log each epoch
    for t in range(total_steps):
        # Warm-up: act uniformly at random until start_steps is reached.
        if t <= start_steps:
            a = env.action_space.sample()
        else:
            a = get_action(replay_buffer.normalize_obs(o), use_noise=True)
        # Step the env
        o2, r, d, _ = env.step(a[0]) if len(a.shape) > 1 else env.step(a)
        ep_ret += r
        ep_len += 1
        # Ignore the "done" signal if it comes from hitting the time horizon (that is,
        # when it's an artificial terminal signal that isn't based on the agent's state)
        d = False if ep_len == max_ep_len else d
        # Store experience to replay buffer
        replay_buffer.store(o, a, r, o2, d, ac.san.get_colored_noise())
        # Super critical, easy to overlook step: make sure to update most recent observation!
        o = o2
        # End of trajectory handling
        if d or (ep_len == max_ep_len):
            o, ep_ret, ep_len = env.reset(), 0, 0
            ac.san.reset_noise(max_ep_len + 1)
        # Update handling
        if t >= update_after and t % update_every == 0:
            for j in range(update_every):
                batch = replay_buffer.sample_batch(device, batch_size)
                update(data=batch, timer=j)
            ac.san.cancel_load()
        # End of epoch handling
        if (t+1) % steps_per_epoch == 0:
            epoch = (t+1) // steps_per_epoch
            # Test the performance of the deterministic version of the agent
            test_mean_reward = test_agent()
            save_test_reward.append(test_mean_reward)
            save_test_reward_steps.append(t + 1)
            print('Model: ', model_idx, ' Steps: ', t + 1, ' Mean Reward: ', test_mean_reward)
            CUR_R = test_mean_reward
            # Save model (weights move to CPU for saving, then back to device)
            if epoch % save_freq == 0:
                if test_mean_reward > max_test_reward:
                    ac.san.to('cpu')
                    torch.save(ac.san.state_dict(), model_dir + '/model' + str(model_idx) + '_best.pt')
                    ac.san.to(device)
                    max_test_reward = test_mean_reward
                    pickle.dump([replay_buffer.mean, replay_buffer.var], open(model_dir + '/model' + str(model_idx) + '_best_mean_var.p', 'wb+'))
                    print('Weights saved in ', model_dir + '/model' + str(model_idx) + '_best.pt')
            if epoch == epochs:
                ac.san.to('cpu')
                torch.save(ac.san.state_dict(), model_dir + '/model' + str(model_idx) + '_last.pt')
                ac.san.to(device)
                pickle.dump([replay_buffer.mean, replay_buffer.var], open(model_dir + '/model' + str(model_idx) + '_last_mean_var.p', 'wb+'))
                print('Weights saved in ', model_dir + '/model' + str(model_idx) + '_last.pt')
            noise_sigma = ac.san.get_noise_sigma()
            writer.add_scalar(tag='nsn', scalar_value=noise_sigma.item(), global_step=epoch)
    # Save Test Reward List
    pickle.dump([save_test_reward, save_test_reward_steps], open(model_dir + '/model' + str(model_idx) + '_test_rewards.p', 'wb+'))
    writer.close()
7,595 | import numpy as np
import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron, layer, learning
from matplotlib import pyplot as plt
def f_pre(x, w_min, alpha=0.):
    """Pre-synaptic weight-dependence factor: (x - w_min) ** alpha (constant 1.0 when alpha is 0)."""
    headroom = x - w_min
    return headroom ** alpha
7,596 | import numpy as np
import torch
import torch.nn as nn
from spikingjelly.activation_based import neuron, layer, learning
from matplotlib import pyplot as plt
def f_post(x, w_max, alpha=0.):
    """Post-synaptic weight-dependence factor: (w_max - x) ** alpha (constant 1.0 when alpha is 0)."""
    headroom = w_max - x
    return headroom ** alpha
7,597 | import ptan
import argparse, os
from itertools import count
import numpy as np
import torch
import torch.optim as optim
from spikingjelly.activation_based import functional
from tensorboardX import SummaryWriter
from utils import model, common, atari_wrappers
def make_env(params):
    """Build the training Atari env named by params['env_name'], with the
    standard DeepMind preprocessing (frame stacking, channel-first images)."""
    base = atari_wrappers.make_atari(params['env_name'])
    return atari_wrappers.wrap_deepmind(base, frame_stack=True, pytorch_img=True)
7,598 | import ptan
import argparse, os
from itertools import count
import numpy as np
import torch
import torch.optim as optim
from spikingjelly.activation_based import functional
from tensorboardX import SummaryWriter
from utils import model, common, atari_wrappers
def make_test_env(params):
    """Build an evaluation Atari env: same preprocessing as training, but with
    episode-life termination and reward clipping disabled so reported scores
    reflect real game rewards."""
    base = atari_wrappers.make_atari(params['env_name'])
    return atari_wrappers.wrap_deepmind(base, episode_life=False, clip_rewards=False,
                                        frame_stack=True, pytorch_img=True)
7,599 | import ptan
import argparse, os
from itertools import count
import numpy as np
import torch
import torch.optim as optim
from spikingjelly.activation_based import functional
from tensorboardX import SummaryWriter
from utils import model, common, atari_wrappers
# Number of environment steps over which the mean max-Q value is estimated.
EVAL_LEN = 125000
def eval_Q(env, net, cuda):
    """Estimate the average per-step maximum Q-value under an epsilon-greedy
    (eps=0.05) policy over roughly EVAL_LEN environment steps.

    Episodes always run to completion, so the actual step count can exceed
    EVAL_LEN; the sum is nevertheless divided by EVAL_LEN exactly.
    """
    device = torch.device("cuda" if cuda else "cpu")
    epsilon = 0.05
    value_estimate = 0.0
    step_cnt = 0
    with torch.no_grad():
        while step_cnt < EVAL_LEN:
            obs = env.reset()
            for _ in count():
                # Byte observations are rescaled to [0, 1) before the forward pass.
                state = torch.tensor(np.expand_dims(obs, 0)).to(device).float() / 256
                q_v = net(state)
                q = q_v.data.cpu().numpy()
                n_actions = q.shape[1]
                actions = np.argmax(q, axis=1)
                # Epsilon-greedy: with prob. epsilon, replace the greedy action
                # with a uniformly random one (batch of size 1 here).
                mask = np.random.random(size=1) < epsilon
                rand_actions = np.random.choice(n_actions, sum(mask))
                actions[mask] = rand_actions
                obs, reward, done, _ = env.step(actions)
                value_estimate += q.max()
                step_cnt += 1
                # Clear the spiking network state before the next forward pass.
                functional.reset_net(net)
                if done:
                    break
    return value_estimate / EVAL_LEN
7,600 | import sys
import time
import numpy as np
import torch
import torch.nn as nn
def unpack_batch(batch):
    """Split a batch of experience transitions into parallel numpy arrays.

    Each element of *batch* must expose ``.state``, ``.action``, ``.reward``
    and ``.last_state`` (None when the episode ended on this transition).

    Returns (states, actions, rewards, dones, last_states). For terminal
    transitions ``last_states`` repeats the current state as a placeholder --
    the caller masks those entries out via *dones*.
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        state = np.asarray(exp.state)
        states.append(state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        dones.append(exp.last_state is None)
        if exp.last_state is None:
            last_states.append(state)  # the result will be masked anyway
        else:
            last_states.append(np.asarray(exp.last_state))
    # np.asarray replaces np.array(..., copy=False): the latter raises a
    # ValueError on NumPy >= 2.0 whenever a copy is unavoidable (which is
    # always the case when converting a Python list).
    return np.asarray(states), np.array(actions), np.array(rewards, dtype=np.float32), \
           np.array(dones, dtype=np.uint8), np.asarray(last_states)
def calc_loss_dqn(batch, net, tgt_net, gamma, cuda=False, cuda_async=False):
    """One-step DQN TD loss: MSE between Q(s, a) and r + gamma * max_a' Q_tgt(s', a').

    Byte-image observations are rescaled to [0, 1) by dividing by 256.
    Terminal next-states contribute zero bootstrap value.
    """
    states, actions, rewards, dones, next_states = unpack_batch(batch)
    obs_v = torch.tensor(states).float() / 256
    next_obs_v = torch.tensor(next_states).float() / 256
    acts_v = torch.tensor(actions)
    rews_v = torch.tensor(rewards)
    terminal_mask = torch.BoolTensor(dones)
    if cuda:
        obs_v = obs_v.cuda(non_blocking=cuda_async)
        next_obs_v = next_obs_v.cuda(non_blocking=cuda_async)
        acts_v = acts_v.cuda(non_blocking=cuda_async)
        rews_v = rews_v.cuda(non_blocking=cuda_async)
        terminal_mask = terminal_mask.cuda(non_blocking=cuda_async)
    # Q-values of the actions actually taken in the batch.
    q_taken = net(obs_v).gather(1, acts_v.unsqueeze(-1)).squeeze(-1)
    # Bootstrap target from the target network; zero out terminal transitions.
    next_q = tgt_net(next_obs_v).max(1)[0]
    next_q[terminal_mask] = 0.0
    target = next_q.detach() * gamma + rews_v
    return nn.MSELoss()(q_taken, target)
7,601 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load_inline
from torch.cuda.amp import custom_fwd, custom_bwd
import logging
from . import tensor_cache
from torch import Tensor
from typing import Optional, Union
from torch.types import _int, _size
from torch.nn.modules.utils import _single, _pair, _triple
class spikeLinear(torch.autograd.Function):
    """Custom autograd linear layer for binary (0/1) spike inputs.

    The forward pass is a plain ``F.linear``; the memory saving comes from
    caching the spike input as packed booleans in ``tensor_cache`` rather than
    keeping the float tensor alive for the backward pass.

    NOTE(review): ``forward``/``backward`` lack the ``@staticmethod`` decorator
    usually required on ``torch.autograd.Function`` subclasses -- confirm this
    works on the targeted PyTorch version.
    """
    def forward(ctx, spike, weight, bias=None):
        # spike.shape = [N, *, in_features]
        # weight.shape = [out_features, in_features]
        # bias.shape = [out_features]
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1] or ctx.needs_input_grad[2]:
            if ctx.needs_input_grad[1]:
                # Weight grad needs the input: store it bool-packed to save memory.
                ctx.s_shape = spike.shape
                ctx.s_tk = tensor_cache.BOOL_TENSOR_CACHE.store_bool(spike)
            if ctx.needs_input_grad[0]:
                # Input grad only needs the weight.
                ctx.save_for_backward(weight)
        return F.linear(spike, weight, bias)
    def backward(ctx, grad_output):
        # grad_output.shape = [N, *, out_features]
        if ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
        if ctx.needs_input_grad[0]:
            # Recover the cached spikes as floats for the weight-grad matmul.
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
        grad_spike = grad_weight = grad_bias = None
        if ctx.needs_input_grad[0]:
            grad_spike = F.linear(grad_output, weight.t(), bias=None)
        if ctx.needs_input_grad[1]:
            in_features = spike.shape[-1]
            out_features = grad_output.shape[-1]
            # grad_output.reshape(-1, out_features).t().shape = [out_features, N*]
            # spike.reshape(-1, in_features).shape = [N*, in_features]
            grad_weight = torch.mm(grad_output.reshape(-1, out_features).t(), spike.reshape(-1, in_features).to(grad_output.dtype))
        if ctx.needs_input_grad[2]:
            out_features = grad_output.shape[-1]
            grad_bias = grad_output.reshape(-1, out_features).sum(0)
        return grad_spike, grad_weight, grad_bias
The provided code snippet includes necessary dependencies for implementing the `spike_linear` function. Write a Python function `def spike_linear(spike: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor` to solve the following problem:
* :ref:`API in English <spike_linear-en>` .. _spike_linear-cn: :class:`torch.nn.functional.linear` 在输入为脉冲时的特例。 .. note:: 在CUDA设备上训练时拥有比 :class:`torch.nn.functional.linear` 更低的显存消耗。 .. warning:: `spike` 中的任何元素都必须为0或1。 * :ref:`中文API <spike_linear-cn>` .. _spike_linear-en: A specific case of :class:`torch.nn.functional.linear` with inputs are spikes. .. admonition:: Note :class: note This function has less memory consumption than :class:`torch.nn.functional.linear` when training on CUDA devices. .. admonition:: Warning :class: warning Any element in `spike` must be 0 or 1.
Here is the function:
def spike_linear(spike: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor:
    """Drop-in replacement for :func:`torch.nn.functional.linear` for spike inputs.

    On CUDA tensors this dispatches to :class:`spikeLinear`, which caches the
    binary input compactly and therefore consumes less memory during training
    than the stock linear; on CPU tensors it simply falls back to ``F.linear``.

    .. admonition:: Warning
        :class: warning

        Every element of *spike* must be 0 or 1.
    """
    # get_device() is >= 0 only for CUDA tensors; CPU tensors return -1.
    if spike.get_device() >= 0:
        return spikeLinear.apply(spike, weight, bias)
    return F.linear(spike, weight, bias)
7,602 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load_inline
from torch.cuda.amp import custom_fwd, custom_bwd
import logging
from . import tensor_cache
from torch import Tensor
from typing import Optional, Union
from torch.types import _int, _size
from torch.nn.modules.utils import _single, _pair, _triple
class spikeConvolution(torch.autograd.Function):
    """Memory-saving N-D convolution (N = 1, 2, 3) whose input is a spike tensor.

    Instead of saving the float input for backward, the 0/1 spikes are cached
    bit-packed through ``tensor_cache.BOOL_TENSOR_CACHE``, reducing memory use
    during training on CUDA devices.

    PyTorch only provides cudnn_convolution without bias, so the bias gradient
    is computed explicitly in :meth:`backward`.
    Refer to https://github.com/pytorch/pytorch/issues/3823 for more details.
    """

    # NOTE: autograd.Function's forward/backward are conventionally static.
    @staticmethod
    def forward(ctx, spike, weight, bias, stride, padding, dilation, groups):
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1] or ctx.needs_input_grad[2]:
            # Always record the input shape: backward() needs it both to
            # restore the cached spikes and -- fix -- to compute grad_spike
            # when only the input requires grad; the original read the
            # never-assigned attribute ``ctx.spike_shape`` in that branch.
            ctx.s_shape = spike.shape
            if ctx.needs_input_grad[1]:
                # Cache the 0/1 input bit-packed; restored as float in backward().
                ctx.s_tk = tensor_cache.BOOL_TENSOR_CACHE.store_bool(spike)
            if ctx.needs_input_grad[0]:
                ctx.save_for_backward(weight)
            ctx.padding = padding
            ctx.stride = stride
            ctx.dilation = dilation
            ctx.groups = groups
            ctx.weight_shape = weight.shape
        # Dispatch on spatial rank: [N, C, L] / [N, C, H, W] / [N, C, D, H, W].
        if spike.dim() == 3:
            return F.conv1d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 4:
            return F.conv2d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 5:
            return F.conv3d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        else:
            # The original fell through and returned None for other ranks.
            raise NotImplementedError(f'spikeConvolution only supports 3/4/5-D inputs, got dim={spike.dim()}')

    @staticmethod
    def backward(ctx, grad_output):
        grad_spike = None
        grad_weight = None
        grad_bias = None
        if ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            weight = weight.to(grad_output.dtype)
            grad_spike, grad_weight = cpp_wrapper.cudnn_convolution_backward(
                spike, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32, (True, True))
        elif not ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            grad_weight = cpp_wrapper.cudnn_convolution_backward_weight(
                ctx.weight_shape, grad_output, spike, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        elif ctx.needs_input_grad[0] and not ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            weight = weight.to(grad_output.dtype)
            # Uses ctx.s_shape recorded in forward() (was the unset ctx.spike_shape).
            grad_spike = cpp_wrapper.cudnn_convolution_backward_input(
                ctx.s_shape, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        if ctx.needs_input_grad[2]:
            # grad_output.shape = [N, C, *spatial]; sum over batch and spatial dims.
            out_channels = grad_output.shape[1]
            grad_bias = grad_output.transpose(0, 1).reshape(out_channels, -1).sum(1)
        # One gradient slot per forward() argument; the four hyper-parameters get None.
        return grad_spike, grad_weight, grad_bias, None, None, None, None
The provided code snippet includes necessary dependencies for implementing the `spike_conv1d` function. Write a Python function `def spike_conv1d(spike: Tensor, weight: Tensor, bias: Tensor=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor` to solve the following problem:
* :ref:`API in English <spike_conv1d-en>` .. _spike_conv1d-cn: :class:`torch.nn.functional.conv1d` 在输入为脉冲时的特例。 .. note:: 在CUDA设备上训练时拥有比 :class:`torch.nn.functional.conv1d` 更低的显存消耗。 .. warning:: `spike` 中的任何元素都必须为0或1。 * :ref:`中文API <spike_conv1d-cn>` .. _spike_conv1d-en: A specific case of :class:`torch.nn.functional.conv1d` with inputs are spikes. .. admonition:: Note :class: note This function has less memory consumption than :class:`torch.nn.functional.conv1d` when training on CUDA devices. .. admonition:: Warning :class: warning Any element in `spike` must be 0 or 1.
Here is the function:
def spike_conv1d(spike: Tensor, weight: Tensor, bias: Tensor=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor:
    """A drop-in replacement for :func:`torch.nn.functional.conv1d` specialized for spike inputs.

    On CPU tensors this delegates to ``F.conv1d``; on CUDA tensors it routes
    through :class:`spikeConvolution`, which caches the binary input in a
    compressed form and therefore uses less memory during training.

    .. warning::
        Every element of ``spike`` must be 0 or 1.
    """
    on_cuda = spike.get_device() >= 0
    if on_cuda:
        # CUDA tensors: use the memory-saving autograd function.
        return spikeConvolution.apply(spike, weight, bias, stride, padding, dilation, groups)
    # CPU tensors: fall back to the standard implementation.
    return F.conv1d(spike, weight, bias, stride, padding, dilation, groups)
7,603 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load_inline
from torch.cuda.amp import custom_fwd, custom_bwd
import logging
from . import tensor_cache
from torch import Tensor
from typing import Optional, Union
from torch.types import _int, _size
from torch.nn.modules.utils import _single, _pair, _triple
class spikeConvolution(torch.autograd.Function):
    """Memory-saving N-D convolution (N = 1, 2, 3) whose input is a spike tensor.

    Instead of saving the float input for backward, the 0/1 spikes are cached
    bit-packed through ``tensor_cache.BOOL_TENSOR_CACHE``, reducing memory use
    during training on CUDA devices.

    PyTorch only provides cudnn_convolution without bias, so the bias gradient
    is computed explicitly in :meth:`backward`.
    Refer to https://github.com/pytorch/pytorch/issues/3823 for more details.
    """

    # NOTE: autograd.Function's forward/backward are conventionally static.
    @staticmethod
    def forward(ctx, spike, weight, bias, stride, padding, dilation, groups):
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1] or ctx.needs_input_grad[2]:
            # Always record the input shape: backward() needs it both to
            # restore the cached spikes and -- fix -- to compute grad_spike
            # when only the input requires grad; the original read the
            # never-assigned attribute ``ctx.spike_shape`` in that branch.
            ctx.s_shape = spike.shape
            if ctx.needs_input_grad[1]:
                # Cache the 0/1 input bit-packed; restored as float in backward().
                ctx.s_tk = tensor_cache.BOOL_TENSOR_CACHE.store_bool(spike)
            if ctx.needs_input_grad[0]:
                ctx.save_for_backward(weight)
            ctx.padding = padding
            ctx.stride = stride
            ctx.dilation = dilation
            ctx.groups = groups
            ctx.weight_shape = weight.shape
        # Dispatch on spatial rank: [N, C, L] / [N, C, H, W] / [N, C, D, H, W].
        if spike.dim() == 3:
            return F.conv1d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 4:
            return F.conv2d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 5:
            return F.conv3d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        else:
            # The original fell through and returned None for other ranks.
            raise NotImplementedError(f'spikeConvolution only supports 3/4/5-D inputs, got dim={spike.dim()}')

    @staticmethod
    def backward(ctx, grad_output):
        grad_spike = None
        grad_weight = None
        grad_bias = None
        if ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            weight = weight.to(grad_output.dtype)
            grad_spike, grad_weight = cpp_wrapper.cudnn_convolution_backward(
                spike, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32, (True, True))
        elif not ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            grad_weight = cpp_wrapper.cudnn_convolution_backward_weight(
                ctx.weight_shape, grad_output, spike, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        elif ctx.needs_input_grad[0] and not ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            weight = weight.to(grad_output.dtype)
            # Uses ctx.s_shape recorded in forward() (was the unset ctx.spike_shape).
            grad_spike = cpp_wrapper.cudnn_convolution_backward_input(
                ctx.s_shape, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        if ctx.needs_input_grad[2]:
            # grad_output.shape = [N, C, *spatial]; sum over batch and spatial dims.
            out_channels = grad_output.shape[1]
            grad_bias = grad_output.transpose(0, 1).reshape(out_channels, -1).sum(1)
        # One gradient slot per forward() argument; the four hyper-parameters get None.
        return grad_spike, grad_weight, grad_bias, None, None, None, None
The provided code snippet includes necessary dependencies for implementing the `spike_conv2d` function. Write a Python function `def spike_conv2d(spike: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor` to solve the following problem:
* :ref:`API in English <spike_conv2d-en>` .. _spike_conv2d-cn: :class:`torch.nn.functional.conv2d` 在输入为脉冲时的特例。 .. note:: 在CUDA设备上训练时拥有比 :class:`torch.nn.functional.conv2d` 更低的显存消耗。 .. warning:: `spike` 中的任何元素都必须为0或1。 * :ref:`中文API <spike_conv2d-cn>` .. _spike_conv2d-en: A specific case of :class:`torch.nn.functional.conv2d` with inputs are spikes. .. admonition:: Note :class: note This function has less memory consumption than :class:`torch.nn.functional.conv2d` when training on CUDA devices. .. admonition:: Warning :class: warning Any element in `spike` must be 0 or 1.
Here is the function:
def spike_conv2d(spike: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor:
    """A drop-in replacement for :func:`torch.nn.functional.conv2d` specialized for spike inputs.

    On CPU tensors this delegates to ``F.conv2d``; on CUDA tensors it routes
    through :class:`spikeConvolution`, which caches the binary input in a
    compressed form and therefore uses less memory during training.

    .. warning::
        Every element of ``spike`` must be 0 or 1.
    """
    on_cuda = spike.get_device() >= 0
    if on_cuda:
        # CUDA tensors: use the memory-saving autograd function.
        return spikeConvolution.apply(spike, weight, bias, stride, padding, dilation, groups)
    # CPU tensors: fall back to the standard implementation.
    return F.conv2d(spike, weight, bias, stride, padding, dilation, groups)
7,604 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.cpp_extension import load_inline
from torch.cuda.amp import custom_fwd, custom_bwd
import logging
from . import tensor_cache
from torch import Tensor
from typing import Optional, Union
from torch.types import _int, _size
from torch.nn.modules.utils import _single, _pair, _triple
class spikeConvolution(torch.autograd.Function):
    """Memory-saving N-D convolution (N = 1, 2, 3) whose input is a spike tensor.

    Instead of saving the float input for backward, the 0/1 spikes are cached
    bit-packed through ``tensor_cache.BOOL_TENSOR_CACHE``, reducing memory use
    during training on CUDA devices.

    PyTorch only provides cudnn_convolution without bias, so the bias gradient
    is computed explicitly in :meth:`backward`.
    Refer to https://github.com/pytorch/pytorch/issues/3823 for more details.
    """

    # NOTE: autograd.Function's forward/backward are conventionally static.
    @staticmethod
    def forward(ctx, spike, weight, bias, stride, padding, dilation, groups):
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1] or ctx.needs_input_grad[2]:
            # Always record the input shape: backward() needs it both to
            # restore the cached spikes and -- fix -- to compute grad_spike
            # when only the input requires grad; the original read the
            # never-assigned attribute ``ctx.spike_shape`` in that branch.
            ctx.s_shape = spike.shape
            if ctx.needs_input_grad[1]:
                # Cache the 0/1 input bit-packed; restored as float in backward().
                ctx.s_tk = tensor_cache.BOOL_TENSOR_CACHE.store_bool(spike)
            if ctx.needs_input_grad[0]:
                ctx.save_for_backward(weight)
            ctx.padding = padding
            ctx.stride = stride
            ctx.dilation = dilation
            ctx.groups = groups
            ctx.weight_shape = weight.shape
        # Dispatch on spatial rank: [N, C, L] / [N, C, H, W] / [N, C, D, H, W].
        if spike.dim() == 3:
            return F.conv1d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 4:
            return F.conv2d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        elif spike.dim() == 5:
            return F.conv3d(input=spike, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
        else:
            # The original fell through and returned None for other ranks.
            raise NotImplementedError(f'spikeConvolution only supports 3/4/5-D inputs, got dim={spike.dim()}')

    @staticmethod
    def backward(ctx, grad_output):
        grad_spike = None
        grad_weight = None
        grad_bias = None
        if ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            weight = weight.to(grad_output.dtype)
            grad_spike, grad_weight = cpp_wrapper.cudnn_convolution_backward(
                spike, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32, (True, True))
        elif not ctx.needs_input_grad[0] and ctx.needs_input_grad[1]:
            spike = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
            grad_weight = cpp_wrapper.cudnn_convolution_backward_weight(
                ctx.weight_shape, grad_output, spike, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        elif ctx.needs_input_grad[0] and not ctx.needs_input_grad[1]:
            weight = ctx.saved_tensors[0]
            weight = weight.to(grad_output.dtype)
            # Uses ctx.s_shape recorded in forward() (was the unset ctx.spike_shape).
            grad_spike = cpp_wrapper.cudnn_convolution_backward_input(
                ctx.s_shape, grad_output, weight, ctx.padding, ctx.stride, ctx.dilation, ctx.groups,
                torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32)
        if ctx.needs_input_grad[2]:
            # grad_output.shape = [N, C, *spatial]; sum over batch and spatial dims.
            out_channels = grad_output.shape[1]
            grad_bias = grad_output.transpose(0, 1).reshape(out_channels, -1).sum(1)
        # One gradient slot per forward() argument; the four hyper-parameters get None.
        return grad_spike, grad_weight, grad_bias, None, None, None, None
The provided code snippet includes necessary dependencies for implementing the `spike_conv3d` function. Write a Python function `def spike_conv3d(spike: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor` to solve the following problem:
* :ref:`API in English <spike_conv3d-en>` .. _spike_conv3d-cn: :class:`torch.nn.functional.conv3d` 在输入为脉冲时的特例。 .. note:: 在CUDA设备上训练时拥有比 :class:`torch.nn.functional.conv3d` 更低的显存消耗。 .. warning:: `spike` 中的任何元素都必须为0或1。 * :ref:`中文API <spike_conv3d-cn>` .. _spike_conv3d-en: A specific case of :class:`torch.nn.functional.conv3d` with inputs are spikes. .. admonition:: Note :class: note This function has less memory consumption than :class:`torch.nn.functional.conv3d` when training on CUDA devices. .. admonition:: Warning :class: warning Any element in `spike` must be 0 or 1.
Here is the function:
def spike_conv3d(spike: Tensor, weight: Tensor, bias: Optional[Tensor]=None, stride: Union[_int, _size]=1, padding: str="valid", dilation: Union[_int, _size]=1, groups: _int=1) -> Tensor:
    """A drop-in replacement for :func:`torch.nn.functional.conv3d` specialized for spike inputs.

    On CPU tensors this delegates to ``F.conv3d``; on CUDA tensors it routes
    through :class:`spikeConvolution`, which caches the binary input in a
    compressed form and therefore uses less memory during training.

    .. warning::
        Every element of ``spike`` must be 0 or 1.
    """
    on_cuda = spike.get_device() >= 0
    if on_cuda:
        # CUDA tensors: use the memory-saving autograd function.
        return spikeConvolution.apply(spike, weight, bias, stride, padding, dilation, groups)
    # CPU tensors: fall back to the standard implementation.
    return F.conv3d(spike, weight, bias, stride, padding, dilation, groups)
7,605 | import torch
import numpy as np
from torch import nn
from typing import Callable, Any
from spikingjelly.activation_based import neuron
import threading
from torch.utils.tensorboard import SummaryWriter
import os
import time
import re
import datetime
def unpack_len1_tuple(x: Union[tuple, torch.Tensor]):
    """Return the single element of a length-1 tuple, or ``x`` unchanged.

    Fixes the original annotation ``tuple or torch.Tensor``, which Python
    evaluates at definition time to just ``tuple``; ``Union`` expresses the
    intended contract. Also uses ``len()`` instead of calling ``__len__``
    directly.

    :param x: a tuple (typically a module's multi-output) or a tensor
    :return: ``x[0]`` if ``x`` is a tuple with exactly one element, else ``x``
    """
    if isinstance(x, tuple) and len(x) == 1:
        return x[0]
    return x
7,606 | import torch
import torch.nn.functional as F
import threading
from .. import configure
from . import cuda_utils
import logging
try:
import cupy
except BaseException as e:
logging.info(f'spikingjelly.activation_based.tensor_cache: {e}')
cupy = None
class DataTypeConvertCUDACode:
    """Raw CUDA kernel sources used to (de)compress spike tensors.

    All spikes are 0 or 1, so 8 consecutive spikes are packed into a single
    ``unsigned char`` (lowest bit first). In every kernel ``N == numel // 8``
    and the caller has already padded the flat tensor so ``numel % 8 == 0``.
    """
    # Pack 8 consecutive float spikes (each 0.0 or 1.0) into one byte.
    float2bool = r'''
    extern "C" __global__
    void float2bool(const float* fs, unsigned char* bs, const int &N)
    {
        // assert N == numel / 8 and numel % 8 == 0
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            bs[index] = 0;
            const int mem_offset = (index << 3);
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                bs[index] += ( ((unsigned char) fs[mem_offset + i]) << i);
            }
        }
    }
    '''
    # Pack 8 consecutive half spikes into one byte (converted via __half2float).
    half2bool = r'''
    #include <cuda_fp16.h>
    extern "C" __global__
    void half2bool(const half* fs, unsigned char* bs, const int &N)
    {
        // assert N == numel / 8 and numel % 8 == 0
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            bs[index] = 0;
            const int mem_offset = (index << 3);
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                bs[index] += ( ((unsigned char) __half2float(fs[mem_offset + i])) << i);
            }
        }
    }
    '''
    # Unpack each byte back into 8 float spikes.
    bool2float = r'''
    extern "C" __global__
    void bool2float(const unsigned char* bs, float* fs, const int &N)
    {
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            const int mem_offset = (index << 3);
            unsigned char compressed_v = bs[index];
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                fs[mem_offset + i] = (float) (compressed_v % 2);
                compressed_v = (compressed_v >> 1);
            }
        }
    }
    '''
    # Unpack each byte back into 8 half spikes.
    bool2half = r'''
    #include <cuda_fp16.h>
    extern "C" __global__
    void bool2half(const unsigned char* bs, half* fs, const int &N)
    {
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            const int mem_offset = (index << 3);
            unsigned char compressed_v = bs[index];
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                fs[mem_offset + i] = __float2half((float) (compressed_v % 2));
                compressed_v = (compressed_v >> 1);
            }
        }
    }
    '''
The provided code snippet includes necessary dependencies for implementing the `float_spike_to_bool` function. Write a Python function `def float_spike_to_bool(spike: torch.Tensor)` to solve the following problem:
:param spike: a spike tensor whose ``dtype=torch.float`` or ``dtype=torch.half`` and all elements are 0 or 1 :type spike: torch.Tensor :return: (spike_b, s_dtype, s_shape, s_padding) spike_b: a compressed spike tensor with ``dtype=torch.uint8`` and each element stores 8 spikes s_dtype: the dtype of the original spike s_shape: the shape of the original spike s_padding: the number of padding elements :rtype: tuple Compress a float/half spike tensor ``spike`` to an uint8 tensor ``spike_b``. Each element in ``spike_b`` represents 8 elements of ``spike``.
Here is the function:
def float_spike_to_bool(spike: torch.Tensor):
    """
    :param spike: a spike tensor whose ``dtype=torch.float`` or ``dtype=torch.half`` and all elements are 0 or 1
    :type spike: torch.Tensor
    :return: (spike_b, s_dtype, s_shape, s_padding)

        spike_b: a compressed spike tensor with ``dtype=torch.uint8`` and each element stores 8 spikes

        s_dtype: the dtype of the original spike

        s_shape: the shape of the original spike

        s_padding: the number of padding elements (a value of 8 means no padding was applied)
    :rtype: tuple

    Compress a float/half spike tensor ``spike`` to an uint8 tensor ``spike_b``. Each element in ``spike_b``
    represents 8 elements of ``spike``.
    """
    s_dtype = spike.dtype
    # Select the CUDA packing kernel matching the input dtype (used on the CUDA path only).
    if s_dtype == torch.float:
        kernel_codes = DataTypeConvertCUDACode.float2bool
        kernel_name = 'float2bool'
    elif s_dtype == torch.half:
        kernel_codes = DataTypeConvertCUDACode.half2bool
        kernel_name = 'half2bool'
    else:
        raise NotImplementedError
    s_shape = spike.shape
    spike = spike.flatten()
    # Zeros needed to make the flat length a multiple of 8.
    # NOTE: when numel % 8 == 0 this is 8 and the pad below is skipped.
    s_padding = 8 - spike.numel() % 8
    if s_padding != 0 and s_padding != 8:
        spike = F.pad(spike, (0, s_padding))
    device_id = spike.get_device()
    # One output byte per 8 input spikes.
    spike_b = torch.zeros([spike.numel() // 8], device=spike.device, dtype=torch.uint8)
    if device_id >= 0 and cupy is not None:
        # CUDA path: launch the raw packing kernel through cupy.
        with cuda_utils.DeviceEnvironment(device_id):
            numel = spike_b.numel()
            blocks = cuda_utils.cal_blocks(numel)
            numel = cupy.asarray(numel)
            spike, spike_b, numel = cuda_utils.get_contiguous(spike, spike_b, numel)
            kernel_args = [spike, spike_b, numel]
            kernel = cupy.RawKernel(
                kernel_codes,
                kernel_name,
                options=configure.cuda_compiler_options, backend=configure.cuda_compiler_backend
            )
            kernel(
                (blocks,), (configure.cuda_threads,),
                cuda_utils.wrap_args_to_raw_kernel(
                    device_id,
                    *kernel_args
                )
            )
    else:
        # CPU fallback: view as (N, 8) and accumulate each column into the byte, lowest bit first.
        spike = spike.view(-1, 8).to(torch.uint8)
        for i in range(8):
            spike_b += spike[:, i] << i
    return spike_b, s_dtype, s_shape, s_padding
7,607 | import torch
import torch.nn.functional as F
import threading
from .. import configure
from . import cuda_utils
import logging
try:
import cupy
except BaseException as e:
logging.info(f'spikingjelly.activation_based.tensor_cache: {e}')
cupy = None
class DataTypeConvertCUDACode:
    """Raw CUDA kernel sources used to (de)compress spike tensors.

    All spikes are 0 or 1, so 8 consecutive spikes are packed into a single
    ``unsigned char`` (lowest bit first). In every kernel ``N == numel // 8``
    and the caller has already padded the flat tensor so ``numel % 8 == 0``.
    """
    # Pack 8 consecutive float spikes (each 0.0 or 1.0) into one byte.
    float2bool = r'''
    extern "C" __global__
    void float2bool(const float* fs, unsigned char* bs, const int &N)
    {
        // assert N == numel / 8 and numel % 8 == 0
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            bs[index] = 0;
            const int mem_offset = (index << 3);
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                bs[index] += ( ((unsigned char) fs[mem_offset + i]) << i);
            }
        }
    }
    '''
    # Pack 8 consecutive half spikes into one byte (converted via __half2float).
    half2bool = r'''
    #include <cuda_fp16.h>
    extern "C" __global__
    void half2bool(const half* fs, unsigned char* bs, const int &N)
    {
        // assert N == numel / 8 and numel % 8 == 0
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            bs[index] = 0;
            const int mem_offset = (index << 3);
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                bs[index] += ( ((unsigned char) __half2float(fs[mem_offset + i])) << i);
            }
        }
    }
    '''
    # Unpack each byte back into 8 float spikes.
    bool2float = r'''
    extern "C" __global__
    void bool2float(const unsigned char* bs, float* fs, const int &N)
    {
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            const int mem_offset = (index << 3);
            unsigned char compressed_v = bs[index];
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                fs[mem_offset + i] = (float) (compressed_v % 2);
                compressed_v = (compressed_v >> 1);
            }
        }
    }
    '''
    # Unpack each byte back into 8 half spikes.
    bool2half = r'''
    #include <cuda_fp16.h>
    extern "C" __global__
    void bool2half(const unsigned char* bs, half* fs, const int &N)
    {
        const int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < N)
        {
            const int mem_offset = (index << 3);
            unsigned char compressed_v = bs[index];
            #pragma unroll
            for(int i = 0; i < 8; i++)
            {
                fs[mem_offset + i] = __float2half((float) (compressed_v % 2));
                compressed_v = (compressed_v >> 1);
            }
        }
    }
    '''
The provided code snippet includes necessary dependencies for implementing the `bool_spike_to_float` function. Write a Python function `def bool_spike_to_float(spike_b: torch.Tensor, s_dtype: torch.dtype, s_shape: torch.Size, s_padding: int = 0)` to solve the following problem:
:param spike_b: a compressed spike tensor with ``dtype=torch.uint8`` and each element stores 8 spikes :type spike_b: torch.Tensor :param s_dtype: the dtype of the original spike :type s_dtype: torch.dtype :param s_shape: the shape of the original spike :type s_shape: torch.Size :param s_padding: the number of padding elements :type s_padding: int :return: the original tensor :rtype: torch.Tensor
Here is the function:
def bool_spike_to_float(spike_b: torch.Tensor, s_dtype: torch.dtype, s_shape: torch.Size, s_padding: int = 0):
    """
    :param spike_b: a compressed spike tensor with ``dtype=torch.uint8`` and each element stores 8 spikes
    :type spike_b: torch.Tensor
    :param s_dtype: the dtype of the original spike (``torch.float`` or ``torch.half``)
    :type s_dtype: torch.dtype
    :param s_shape: the shape of the original spike
    :type s_shape: torch.Size
    :param s_padding: the number of padding elements appended during compression
        (a value of 8 means the original length was already a multiple of 8)
    :type s_padding: int
    :return: the original tensor
    :rtype: torch.Tensor

    Inverse of ``float_spike_to_bool``: unpacks each uint8 byte back into
    8 float/half spikes (lowest bit first), strips the padding and restores
    the original shape.
    """
    device_id = spike_b.get_device()
    # Allocate the flat output; each compressed byte expands to 8 elements.
    spike = torch.zeros(spike_b.numel() * 8, device=spike_b.device, dtype=s_dtype)
    if s_dtype == torch.float:
        kernel_codes = DataTypeConvertCUDACode.bool2float
        kernel_name = 'bool2float'
    elif s_dtype == torch.half:
        kernel_codes = DataTypeConvertCUDACode.bool2half
        kernel_name = 'bool2half'
    else:
        raise NotImplementedError
    if device_id >= 0 and cupy is not None:
        # CUDA path: launch the raw unpacking kernel through cupy.
        with cuda_utils.DeviceEnvironment(device_id):
            numel = spike_b.numel()
            blocks = cuda_utils.cal_blocks(numel)
            numel = cupy.asarray(numel)
            spike_b, spike, numel = cuda_utils.get_contiguous(spike_b, spike, numel)
            kernel_args = [spike_b, spike, numel]
            kernel = cupy.RawKernel(
                kernel_codes,
                kernel_name,
                options=configure.cuda_compiler_options, backend=configure.cuda_compiler_backend
            )
            kernel(
                (blocks,), (configure.cuda_threads,),
                cuda_utils.wrap_args_to_raw_kernel(
                    device_id,
                    *kernel_args
                )
            )
    else:
        # CPU fallback: unpack bit-by-bit into an (N, 8) view, lowest bit first.
        # ``>>`` creates a new tensor, so the caller's spike_b is not mutated.
        spike = spike.view(-1, 8)
        for i in range(8):
            spike[:, i] = spike_b % 2
            spike_b = spike_b >> 1
        # Fix: restore the flat layout. The original kept the (N, 8) view here,
        # so the padding slice below removed rows instead of elements and the
        # final reshape failed whenever s_padding was in 1..7.
        spike = spike.flatten()
    if s_padding != 0 and s_padding != 8:
        # Drop the zero padding appended by float_spike_to_bool.
        spike = spike[0: spike.numel() - s_padding]
    return spike.reshape(s_shape)
7,608 | import torch
import torch.nn.functional as F
import threading
from .. import configure
from . import cuda_utils
import logging
def tensor_key(x: torch.Tensor):
    """Build a lightweight identity key for a tensor.

    The key is ``(address of first element, address of last element, number
    of elements)``, taken on the flattened view, so tensors sharing the same
    storage and layout map to the same key.

    :param x: any tensor
    :return: a ``(first_ptr, last_ptr, numel)`` tuple
    """
    flat = x.flatten()
    first_ptr = flat.data_ptr()
    last_ptr = flat[-1].data_ptr()
    return first_ptr, last_ptr, flat.numel()
7,609 | from spikingjelly.activation_based.auto_cuda.generator import analyse_graph, gen_forward_codes, gen_backward_codes
from spikingjelly.activation_based import surrogate
import torch
def lif_charge(x: torch.Tensor, v_last: torch.Tensor, tau: float, v_reset: float):
    """Neuronal charge step of a LIF neuron.

    Implements ``h = v_last + (x - (v_last - v_reset)) / tau``.

    :param x: input current at the current time step
    :param v_last: membrane potential after the previous time step
    :param tau: membrane time constant
    :param v_reset: reset potential (resting baseline)
    :return: the charged (pre-spike) membrane potential ``h``
    """
    deviation = v_last - v_reset
    return v_last + (x - deviation) / tau
7,610 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import re
import sys
import copy
from typing import Callable
import numpy as np
class VarNode:
    """A variable node extracted from a TorchScript graph.

    Keeps both the raw jit name (``debug_name``, e.g. ``%8`` or ``v_last.1``)
    and a sanitized identifier (``name``) that is legal in generated CUDA code.
    """

    def __init__(self, prefix: str, name: str, instance: object, value=None):
        # Raw name as produced by torch.jit, e.g. %8, v_last.1.
        self.debug_name = name
        # Sanitized identifier: '<prefix>_' + name with '.' replaced by '_'.
        self.name = prefix + '_' + name.replace('.', '_')
        # String form of the node's type; for intermediate Tensor nodes this
        # may later be rewritten to 'float' during forward-code generation.
        self.instance = str(instance)
        # Non-None only for compile-time constants (prim::Constant nodes).
        self.value = value
        self.requires_grad = False
        # Per-timestep suffix (e.g. '_t') appended to CUDA variable names.
        self.cu_var_suffix = ''

    def name_bp(self):
        """Identifier of this node's gradient."""
        return 'grad_' + self.name

    def cu_var(self):
        """Variable/literal as it appears in the generated forward CUDA code."""
        if self.value is not None:
            # A constant: ints keep the bare name, float constants get the
            # C 'f' literal suffix.
            if self.instance == 'int':
                return self.name
            elif self.instance == 'float':
                return self.name + 'f'
            else:
                raise ValueError(self.instance)
        # A real variable.
        return self.name + self.cu_var_suffix

    def cu_var_bp(self):
        """Gradient variable name in the generated backward CUDA code."""
        if self.value is not None:
            # Constants carry no gradient.
            raise ValueError
        # Fix: the original concatenated the bound method ``self.cu_var``
        # instead of calling it, raising TypeError on every use.
        return 'grad_' + self.cu_var()

    def __repr__(self):
        return f'({self.debug_name}, {self.name}, {self.instance}, value={self.value}, rg={self.requires_grad})'
def analyse_graph(custom_fun, requires_grad: tuple):
    """Parse the TorchScript graph of ``custom_fun`` into variable nodes and commands.

    :param custom_fun: a python function of the form
        ``custom_fun(x: torch.Tensor, v_last: torch.Tensor, ...)``; it is
        scripted with ``torch.jit.script`` and must return a single Tensor
    :param requires_grad: one bool per input, marking which Tensor inputs
        need gradients
    :return: ``(input_nodes, inter_nodes, output_nodes, cmds)`` where the
        first three are insertion-ordered dicts mapping ``VarNode.debug_name``
        to :class:`VarNode`, and ``cmds`` is a list of
        ``(output_node, op_kind, input_nodes_tuple)`` triples in graph order
    """
    graph: torch.Graph = torch.jit.script(custom_fun).graph
    logging.debug(f'\ngraph = {graph}')
    # Build the input / intermediate / output node tables.
    assert sys.version_info.major >= 3 and sys.version_info.minor >= 6
    # For python >= 3.6 plain dicts preserve insertion order.
    # Key is VarNode.debug_name, value is the VarNode.
    input_nodes = {}
    output_nodes = {}
    inter_nodes = {}
    assert custom_fun.__annotations__.__len__() >= 2
    for i, (item, name) in enumerate(zip(graph.inputs(), custom_fun.__annotations__.keys())):
        # custom_fun must have the form custom_fun(x: torch.Tensor, v_last: torch.Tensor, ...).
        if i == 0:
            assert str(item.type()) == 'Tensor' and name == 'x'
        elif i == 1:
            assert str(item.type()) == 'Tensor' and name == 'v_last'
        # Override the jit-generated name with the python parameter name.
        # Only inputs are renamed; intermediate names stay as jit assigned them.
        item.setDebugName(name)
        node = VarNode(prefix='input', name=item.debugName(), instance=item.type())
        if node.instance == 'Tensor' and requires_grad[i]:
            node.requires_grad = True
        logging.debug(f'\ninput node [{i}] = {node}')
        assert node not in input_nodes
        input_nodes[node.debug_name] = node
    for i, item in enumerate(graph.outputs()):
        if i == 0:
            assert str(item.type()) == 'Tensor'
            item.setDebugName('h')
        elif i > 0:
            raise NotImplementedError('For the moment, we only support for single output!')
        node = VarNode(prefix='output', name=item.debugName(), instance=item.type())
        logging.debug(f'\noutput node [{i}] = {node}')
        assert node not in output_nodes
        output_nodes[node.debug_name] = node
    cmds = []
    # Each element of cmds is a tuple (output, fun, inputs):
    # output is a VarNode, fun is a str op kind, inputs is a tuple of VarNode.
    for node in graph.nodes():
        # node: torch.Node
        fun = node.kind()
        if fun == 'prim::Constant':
            item = node.output()
            assert item.debugName() not in input_nodes and item.debugName() not in output_nodes
            i_node = VarNode(prefix='inter', name=item.debugName(), instance=item.type())
            value = None
            # Extract the constant value from the node's string form.
            if i_node.instance == 'int':
                pattern = re.compile(r'.*prim::Constant\[value=([0-9]+)\]')
                m = pattern.match(str(node))
                value = int(m.groups()[0])
            elif i_node.instance == 'float':
                pattern = re.compile(r'.*prim::Constant\[value=([0-9\.]+)\]')
                m = pattern.match(str(node))
                value = float(m.groups()[0])
            else:
                raise NotImplementedError
            i_node.value = value
            assert i_node.debug_name not in input_nodes
            assert i_node.debug_name not in output_nodes
            if i_node.debug_name not in inter_nodes:
                inter_nodes[i_node.debug_name] = i_node
            cmds.append((i_node, fun, ()))
        else:
            inputs = []
            for item in node.inputs():
                if item.debugName() in input_nodes:
                    i_node = input_nodes[item.debugName()]
                elif item.debugName() in output_nodes:
                    i_node = output_nodes[item.debugName()]
                else:
                    # Only a node that is neither an input nor an output is
                    # treated as an intermediate node.
                    if item.debugName() in inter_nodes:
                        i_node = inter_nodes[item.debugName()]
                    else:
                        i_node = VarNode(prefix='inter', name=item.debugName(), instance=item.type())
                        inter_nodes[i_node.debug_name] = i_node
                inputs.append(i_node)
            item = node.output()
            if item.debugName() in input_nodes:
                i_node = input_nodes[item.debugName()]
            elif item.debugName() in output_nodes:
                i_node = output_nodes[item.debugName()]
            else:
                # Only a node that is neither an input nor an output is
                # treated as an intermediate node.
                if item.debugName() in inter_nodes:
                    i_node = inter_nodes[item.debugName()]
                else:
                    i_node = VarNode(prefix='inter', name=item.debugName(), instance=item.type())
                    inter_nodes[i_node.debug_name] = i_node
            cmds.append((i_node, fun, tuple(inputs)))
    for i, node in enumerate(inter_nodes.values()):
        logging.debug(f'\ninter node [{i}] = {node}')
    return input_nodes, inter_nodes, output_nodes, cmds
7,611 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import re
import sys
import copy
from typing import Callable
import numpy as np
def hash_str(x: object):
def gen_forward_codes(input_nodes: dict, inter_nodes: dict, output_nodes: dict, cmds: list, hard_reset: bool):
# 暂时只支持单个输出
assert output_nodes.__len__() == 1
# 代码生成
codes = '\n'
codes += ' '
codes += '{\n'
for node in input_nodes.values():
# 赋值到代码段的变量
if node.debug_name == 'x':
codes += ' '
codes += f'const float {node.cu_var} = x_seq[t];\n'
elif node.debug_name == 'v_last':
codes += ' '
codes += f'const float {node.cu_var} = v_v_seq[t];\n'
else:
if node.instance == 'Tensor':
node.cu_var_suffix = '_t'
codes += ' '
codes += f'const float {node.cu_var} = {node.name}[t];\n'
# instance为float的不需要提前赋值,因为不需要索引(直接从cuda函数的参数中取出即可)
# 记录在自动生成的cuda代码段中,哪些cu_var是已经声明的
code_block_nodes = {}
cuda_cmds = []
for item in cmds:
output, fun, inputs = item
codes += ' '
if fun == 'prim::Constant':
gen_cmd = '\n'
elif fun in ['aten::add', 'aten::sub']:
# z = x + y * alpha
x, y, alpha = inputs
z = output
z.requires_grad = x.requires_grad or y.requires_grad
if z.cu_var not in code_block_nodes:
code_block_nodes[z.cu_var] = z
codes += 'float '
if fun == 'aten::add':
op = '+'
else:
op = '-'
if alpha.value == 1:
gen_cmd = f'{z.cu_var} = {x.cu_var} {op} {y.cu_var};\n'
else:
gen_cmd = f'{z.cu_var} = {x.cu_var} {op} {y.cu_var} * {alpha.cu_var};\n'
elif fun in ['aten::mul', 'aten::div']:
x, y = inputs
z = output
z.requires_grad = x.requires_grad or y.requires_grad
if z.cu_var not in code_block_nodes:
code_block_nodes[z.cu_var] = z
codes += 'float '
if fun == 'aten::mul':
op = '*'
else:
op = '/'
gen_cmd = f'{z.cu_var} = {x.cu_var} {op} {y.cu_var};\n'
else:
raise NotImplementedError(fun)
codes += gen_cmd
cuda_cmds.append(gen_cmd)
for i, node in enumerate(output_nodes.values()):
# 代码段的变量赋值到输出
if i == 0:
codes += ' '
codes += f'h_seq[t] = {node.name};\n'
codes += ' '
codes += '}\n'
# CUDA函数的参数
params = [
('x_seq', 'const float *'),
('v_v_seq', 'float *'),
('h_seq', 'float *'),
('spike_seq', 'float *'),
('v_threshold', 'const float &')
]
if hard_reset:
params.append(('v_reset', 'const float &'))
params.extend([
('neuron_num', 'const int &'),
('numel', 'const int &'),
])
params_name = []
for item in params:
params_name.append(item[0])
# 在CUDA函数参数中增加参数,同时检测命名冲突
for node in inter_nodes.values():
assert node.name not in params_name
for node in input_nodes.values():
if node.debug_name in ['x', 'v_last']:
pass
else:
assert node.name not in params_name
if node.instance == 'Tensor':
param = (node.name, 'const float *')
elif node.instance == 'float':
param = (node.name, 'const float &')
else:
raise NotImplementedError
params.append(param)
for node in output_nodes.values():
assert node.name not in params_name
for i in range(params.__len__()):
param = params[i]
params[i] = param[1] + param[0]
head = ', '.join(params)
head = '(' + head + ')'
head += '''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < neuron_num)
{
const int dt = neuron_num;
for(int mem_offset = 0; mem_offset < numel; mem_offset += neuron_num)
{
const int t = index + mem_offset;
'''
tail = '''
if (h_seq[t] >= v_threshold)
{
spike_seq[t] = 1.0f;
v_v_seq[t + dt] = v_reset;
}
else
{
spike_seq[t] = 0.0f;
v_v_seq[t + dt] = h_seq[t];
}
}
}
}
'''
codes = head + codes + tail
kernel_name = f'forward_kernel_{hash_str(codes)}'
codes = f'''
extern "C" __global__
void {kernel_name}
''' + codes
return codes, kernel_name, cuda_cmds | null |
7,612 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import re
import sys
import copy
from typing import Callable
import numpy as np
def hash_str(x: object):
hash_code = hash(x)
if hash_code < 0:
return f'_{-hash_code}'
else:
return hash_code
The provided code snippet includes necessary dependencies for implementing the `gen_backward_codes` function. Write a Python function `def gen_backward_codes(cuda_cmds: list, input_nodes: dict, output_nodes: dict, cmds: list, hard_reset: bool, detach_reset: bool, surrogate_fuction)` to solve the following problem:
用户定义的前向传播函数为 h_seq[t] = fun(x_seq[t], v_v_seq[t], ...) 需要计算出 h_seq[t] -> x_seq[t] 的梯度和 h_seq[t] -> v_v_seq[t]的梯度 还需要考虑 ... 中如果有tensor,可以增加flag,决定是否计算h_seq[t]对其的梯度
Here is the function:
def gen_backward_codes(cuda_cmds: list, input_nodes: dict, output_nodes: dict, cmds: list, hard_reset: bool,
detach_reset: bool, surrogate_fuction):
'''
用户定义的前向传播函数为
h_seq[t] = fun(x_seq[t], v_v_seq[t], ...)
需要计算出 h_seq[t] -> x_seq[t] 的梯度和 h_seq[t] -> v_v_seq[t]的梯度
还需要考虑 ... 中如果有tensor,可以增加flag,决定是否计算h_seq[t]对其的梯度
'''
input_bp_nodes = {}
'''
在反向传播时,输入梯度是output_nodes的梯度
有些变量的梯度在计算时,需要用到其他变量,例如z = x * y,计算grad_x需要用到y
input_bp_nodes用来记录哪些node要用到
'''
# 记录在自动生成的cuda代码段中,哪些cu_var是已经声明的
code_block_nodes = {}
codes = '\n'
for i in range(cmds.__len__()):
output, fun, inputs = cmds[cmds.__len__() - 1 - i]
codes += '\n'
codes += ' '
codes += f'// {cuda_cmds[cmds.__len__() - 1 - i]}'
if fun == 'prim::Constant':
codes += '\n'
elif fun == 'aten::add':
# z = x + y * alpha
x, y, alpha = inputs
z = output
if alpha.value == 1:
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp};\n'
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += {z.cu_var_bp};\n'
else:
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp};\n'
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = {z.cu_var_bp} * {alpha.cu_var_bp};\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += {z.cu_var_bp} * {alpha.cu_var_bp};\n'
elif fun == 'aten::sub':
# z = x - y * alpha
x, y, alpha = inputs
z = output
if alpha.value == 1:
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp};\n'
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = - {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += - {z.cu_var_bp};\n'
else:
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp};\n'
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = - {z.cu_var_bp} * {alpha.cu_var_bp};\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += - {z.cu_var_bp} * {alpha.cu_var_bp};\n'
elif fun == 'aten::mul':
# z = x * y
x, y = inputs
z = output
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp} * {y.cu_var};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp} * {y.cu_var};\n'
input_bp_nodes[y.name] = y
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = {z.cu_var_bp} * {x.cu_var};\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += {z.cu_var_bp} * {x.cu_var};\n'
input_bp_nodes[x.name] = x
elif fun == 'aten::div':
# z = x / y
x, y = inputs
z = output
if x.requires_grad:
if x.cu_var_bp not in code_block_nodes:
code_block_nodes[x.cu_var_bp] = x
codes += ' '
codes += f'float {x.cu_var_bp} = {z.cu_var_bp} / {y.cu_var};\n'
else:
codes += ' '
codes += f'{x.cu_var_bp} += {z.cu_var_bp} / {y.cu_var};\n'
input_bp_nodes[y.name] = y
if y.requires_grad:
if y.cu_var_bp not in code_block_nodes:
code_block_nodes[y.cu_var_bp] = y
codes += ' '
codes += f'float {y.cu_var_bp} = - {z.cu_var_bp} * {x.cu_var} / ({y.cu_var} * {y.cu_var});\n'
else:
codes += ' '
codes += f'{y.cu_var_bp} += - {z.cu_var_bp} * {x.cu_var} / ({y.cu_var} * {y.cu_var});\n'
input_bp_nodes[x.name] = x
input_bp_nodes[y.name] = y
for i, node in enumerate(input_bp_nodes):
logging.debug(f'\ninput bp node [{i}] = {node}')
# CUDA函数的参数
cuda_params = {
'grad_spike_seq': 'const float *',
'grad_v_seq': 'const float *',
'h_seq': 'const float *',
'spike_seq': 'const float *',
'grad_x_seq': 'float *',
'grad_v_init': 'float *',
'v_threshold': 'const float &',
}
if hard_reset:
cuda_params['v_reset'] = 'const float &'
cuda_params['neuron_num'] = 'const int &'
cuda_params['numel'] = 'const int &'
# 在CUDA函数参数中增加参数,同时检测命名冲突
# 这里增加的是用户自定义的除了x和v_last外,其他需要梯度的python函数的参数
for i, node in enumerate(input_nodes.values()):
if i >= 2:
if node.name_bp not in cuda_params:
if node.requires_grad:
cuda_params[node.name_bp] = 'const float *'
# 这里增加的是反向传播所需要的参数
for node in input_bp_nodes.values():
if node.name not in cuda_params:
assert node.debug_name in input_nodes or node.debug_name in output_nodes
if node.instance == 'Tensor':
cuda_params[node.name] = 'const float *'
elif node.instance == 'float':
cuda_params[node.name] = 'const float &'
else:
raise NotImplementedError(node)
params = []
for cuda_param, cuda_param_instance in cuda_params.items():
params.append(cuda_param_instance + cuda_param)
head = ', '.join(params)
head = '(' + head + ')'
head += '''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < neuron_num)
{
float grad_output_h = 0.0f; // grad_output_h will be used recursively
for(int mem_offset = numel - neuron_num; mem_offset >= 0; mem_offset -= neuron_num)
{
const int t = index + mem_offset;
const float over_th = h_seq[t] - v_threshold;
'''
head += surrogate_fuction.cuda_code(x='over_th', y='grad_s_to_h', dtype='fp32')
head += ' '
if detach_reset:
if hard_reset:
head += 'const float grad_v_to_h = 1.0f - spike_seq[t];\n'
else:
head += 'const float grad_v_to_h = 1.0f;\n'
else:
if hard_reset:
head += 'const float grad_v_to_h = 1.0f - spike_seq[t] + (-h_seq[t] + v_reset) * grad_s_to_h;\n'
else:
head += 'const float grad_v_to_h = 1.0f - v_threshold * grad_s_to_h;\n'
tail = ''
# grad_input_x, grad_input_v_last是自动生成的代码计算出来的
tail += ' '
tail += 'grad_output_h = grad_spike_seq[t] * grad_s_to_h + (grad_v_seq[t] + grad_input_v_last) * grad_v_to_h;\n'
for i, node in enumerate(input_nodes.values()):
if i >= 2:
if node.requires_grad:
tail += ' '
tail += f'{node.name_bp}[t] = {node.cu_var_bp};\n'
tail += '''
}
'''
tail += codes
# += codes 是为了计算grad_v_init[index]
tail += '''
grad_v_init[index] = grad_input_v_last;
}
}
'''
codes = head + codes + tail
kernel_name = f'backward_kernel_{hash_str(codes)}'
codes = f'''
extern "C" __global__
void {kernel_name}
''' + codes
input_bp_vars = []
# input_bp_vars记录了python函数中的哪些输入变量,是计算反向传播所需的
for node in input_bp_nodes.values():
input_bp_vars.append(node.debug_name)
return codes, kernel_name, input_bp_vars | 用户定义的前向传播函数为 h_seq[t] = fun(x_seq[t], v_v_seq[t], ...) 需要计算出 h_seq[t] -> x_seq[t] 的梯度和 h_seq[t] -> v_v_seq[t]的梯度 还需要考虑 ... 中如果有tensor,可以增加flag,决定是否计算h_seq[t]对其的梯度 |
7,613 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def if_requires_grad(items: Iterable):
requires_grad = False
for item in items:
if isinstance(item, torch.Tensor):
if item.requires_grad:
requires_grad = True
break
return requires_grad | null |
7,614 | import torch
import torch.nn.functional as F
import numpy as np
import logging
try:
import cupy
except BaseException as e:
logging.info(f'spikingjelly.activation_based.auto_cuda.ss_neuronal_kernel: {e}')
cupy = None
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def scalar_to_cupy(py_dict: dict, ref: str = 'x'):
device = py_dict[ref].get_device()
dtype = py_dict[ref].dtype
with cuda_utils.DeviceEnvironment(device):
for key, value in py_dict.items():
if isinstance(value, float):
if dtype == torch.float32:
value = cupy.asarray(value, dtype=np.float32)
elif dtype == torch.float16:
value = cupy.asarray([value, value], dtype=np.float16)
else:
raise NotImplementedError(dtype)
py_dict[key] = value
elif isinstance(value, int):
py_dict[key] = cupy.asarray(value) | null |
7,615 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def new_tensors(news: tuple, py_dict: dict, ref: str = 'x'):
ref = py_dict[ref]
zero_shape = list(ref.shape)
zero_shape[0] *= news.__len__()
for i, item in enumerate(torch.split(torch.zeros(zero_shape, device=ref.device, dtype=ref.dtype),ref.shape[0])):
py_dict[news[i]] = item | null |
7,616 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def neuronal_hard_reset(v_next: str, h: str, spike: str, v_reset: str, dtype: str = 'float'):
if dtype == 'float':
return f'{v_next} = {h} * (1.0f - {spike}) + {v_reset} * {spike};'
elif dtype == 'half2':
return f'{v_next} = __hfma2({h}, __hsub2(__float2half2_rn(1.0f), {spike}), __hmul2(v_reset, {spike}));'
else:
raise NotImplementedError(dtype) | null |
7,617 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def neuronal_soft_reset(v_next: str, h: str, spike: str, v_th: str, dtype: str = 'float'):
if dtype == 'float':
return f'{v_next} = {h} - {v_th} * {spike};'
elif dtype == 'half2':
return f'{v_next} = __hsub2({h}, __hmul2({v_th}, {spike}));'
else:
raise NotImplementedError(dtype) | null |
7,618 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def neuronal_fire(spike: str, v: str, v_th: str, dtype: str = 'float'):
if dtype == 'float':
return cfunction.heaviside(y=spike, x=f'({v} - {v_th})', dtype=dtype)
elif dtype == 'half2':
return cfunction.heaviside(y=spike, x=f'__hsub2({v}, {v_th})', dtype=dtype)
else:
raise NotImplementedError(dtype) | null |
7,619 | def wrap_return_codes(y: str or None, codes: str):
if y is None:
return f'({codes})'
else:
return f'{y} = {codes};'
def float2half2(y: str or None, x: str):
codes = f'__float2half2_rn({x})'
return wrap_return_codes(y, codes) | null |
7,620 | def wrap_return_codes(y: str or None, codes: str):
if y is None:
return f'({codes})'
else:
return f'{y} = {codes};'
def maximum(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'max({x}, {y})'
elif dtype == 'half2':
codes = f'__hmax2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes) | null |
7,621 | def wrap_return_codes(y: str or None, codes: str):
if y is None:
return f'({codes})'
else:
return f'{y} = {codes};'
def neg(y: str or None, x: str, dtype: str):
if dtype == 'float':
codes = f'- {x}'
elif dtype == 'half2':
codes = f'__hneg2({x})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes) | null |
7,622 | def wrap_return_codes(y: str or None, codes: str):
if y is None:
return f'({codes})'
else:
return f'{y} = {codes};'
def exp(y: str or None, x: str, dtype: str):
if dtype == 'float':
codes = f'expf({x})'
elif dtype == 'half2':
codes = f'h2exp({x})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes) | null |
7,623 | def constant(y: str or None, x: float, dtype: str):
def atan_backward(y: str, x: str, alpha: float, dtype: str):
assert y is not None
alpha = constant(None, alpha, dtype)
if dtype == 'float':
codes = f'const float atan_backward__alpha_x = ((float) 1.57079632679489661923) * {alpha} * {x};'
codes += f'{y} = {alpha} / 2.0f / (1.0f + atan_backward__alpha_x * atan_backward__alpha_x);'
return codes
elif dtype == 'half2':
codes = f'const half2 atan_backward__alpha_x = __hmul2(__hmul2(__float2half2_rn((float) 1.57079632679489661923), {alpha}), {x});'
codes += f'{y} = __h2div({alpha}, __hmul2(__float2half2_rn(2.0f), __hfma2(atan_backward__alpha_x, atan_backward__alpha_x, __float2half2_rn(1.0f))));'
return codes
else:
raise NotImplementedError(dtype) | null |
7,624 | def constant(y: str or None, x: float, dtype: str):
def abs(y: str or None, x: str, dtype: str):
def if_else(z: str or None, x: str, y: str, mask: str, dtype: str):
def greater_equal(z: str or None, x: str, y: str, dtype: str):
def piecewise_leaky_relu_backward(y: str, x: str, w: float, c: float, dtype: str):
assert y is not None
w_inv = constant(None, 1. / w, dtype)
w = constant(None, w, dtype)
c = constant(None, c, dtype)
codes = greater_equal(z=f'const {dtype} piecewise_leaky_relu_backward__mask', x=w, y=abs(y=None, x=x, dtype=dtype), dtype=dtype)
codes += if_else(z=y, x=w_inv, y=c, mask=f'piecewise_leaky_relu_backward__mask', dtype=dtype)
return codes | null |
7,625 | def constant(y: str or None, x: float, dtype: str):
if dtype == 'float':
codes = f'{x}f'
elif dtype == 'half2':
codes = f'__float2half2_rn({x}f)'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes)
def if_else(z: str or None, x: str, y: str, mask: str, dtype: str):
# z = x * mask + y * (1. - mask)
if dtype == 'float':
codes = f'{x} * {mask} + {y} * (1.0f - {mask})'
elif dtype == 'half2':
codes = f'__hfma2({x}, {mask}, __hmul2({y}, __hsub2(__float2half2_rn(1.0f), {mask})))'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def greater_than(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'(float) ({x} > {y})'
elif dtype == 'half2':
codes = f'__hgtu2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def add(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if x == '0.0f':
codes = f'{y}'
elif y == '0.0f':
codes = f'{x}'
else:
codes = f'{x} + {y}'
elif dtype == 'half2':
if x == '__float2half2_rn(0.0f)':
codes = f'{y}'
elif y == '__float2half2_rn(0.0f)':
codes = f'{x}'
else:
codes = f'__hadd2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def div(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if y == '1.0f':
codes = f'{x}'
else:
codes = f'{x} / {y}'
elif dtype == 'half2':
if y == '__float2half2_rn(1.0f)':
codes = f'{x}'
else:
codes = f'__h2div({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def sigmoid_backward(y: str, x: str, alpha: float, dtype: str):
assert y is not None
codes = sigmoid(y=f'const {dtype} sigmoid_backward__sigmoid_ax', x=x, alpha=alpha, dtype=dtype) + '\n'
alpha = constant(None, alpha, dtype)
if dtype == 'float':
codes += f'{y} = (1.0f - sigmoid_backward__sigmoid_ax) * sigmoid_backward__sigmoid_ax * {alpha};'
return codes
elif dtype == 'half2':
codes += f'{y} = __hmul2(__hmul2(__hsub2(__float2half2_rn(1.0f), sigmoid_backward__sigmoid_ax), sigmoid_backward__sigmoid_ax), {alpha});'
return codes
else:
raise NotImplementedError(dtype)
def s2nn_backward(y: str, x: str, alpha: float, beta: float, dtype: str):
assert y is not None
codes = sigmoid_backward(y=f'const {dtype} s2nn_backward__sgax', x=x, alpha=alpha, dtype=dtype)
codes += greater_than(z=f'const {dtype} s2nn_backward__mask', x=constant(None, 0., dtype), y=x, dtype=dtype)
codes += if_else(z=y, x=f's2nn_backward__sgax', y=div(z=None, x=constant(None, beta, dtype), y=add(z=None, x=x, y=constant(None, 1., dtype), dtype=dtype), dtype=dtype), mask=f's2nn_backward__mask', dtype=dtype)
return codes | null |
7,626 | def constant(y: str or None, x: float, dtype: str):
if dtype == 'float':
codes = f'{x}f'
elif dtype == 'half2':
codes = f'__float2half2_rn({x}f)'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes)
def power(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'__powf({x, y})'
elif dtype == 'half2':
# CUDA FP16 does not provide powf function. We use z = 2 ** (log2(x) * y)
codes = f'h2exp(__hmul2(h2log2({x}), {y}))'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def q_pseudo_spike_backward(y: str, x: str, alpha: float, dtype: str):
assert y is not None
alpha = constant(None, alpha, dtype)
if dtype == 'float':
return f'{y} = __powf(2.0f * fabsf({x}) / ({alpha} - 1.0f) + 1.0f, - {alpha});'
elif dtype == 'half2':
return power(z=y, x=f'__hadd2(__h2div(__hmul2(__float2half2_rn(2.0f), __habs2({x})), __hsub2({alpha}, __float2half2_rn(1.0f))), __float2half2_rn(1.0f))', y=f'__hneg2({alpha})', dtype=dtype) | null |
7,627 | def constant(y: str or None, x: float, dtype: str):
if dtype == 'float':
codes = f'{x}f'
elif dtype == 'half2':
codes = f'__float2half2_rn({x}f)'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes)
def if_else(z: str or None, x: str, y: str, mask: str, dtype: str):
# z = x * mask + y * (1. - mask)
if dtype == 'float':
codes = f'{x} * {mask} + {y} * (1.0f - {mask})'
elif dtype == 'half2':
codes = f'__hfma2({x}, {mask}, __hmul2({y}, __hsub2(__float2half2_rn(1.0f), {mask})))'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def greater_equal(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'(float) ({x} >= {y})'
elif dtype == 'half2':
codes = f'__hgeu2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def leaky_k_relu_backward(y: str, x: str, leak: float, k: float, dtype: str):
assert y is not None
leak = constant(None, leak, dtype)
k = constant(None, k, dtype)
codes = greater_equal(z=f'const {dtype} leaky_k_relu_backward__mask', x=x, y=constant(None, 0., dtype), dtype=dtype)
codes += if_else(z=y, x=k, y=leak, mask=f'leaky_k_relu_backward__mask', dtype=dtype)
return codes | null |
7,628 | def constant(y: str or None, x: float, dtype: str):
if dtype == 'float':
codes = f'{x}f'
elif dtype == 'half2':
codes = f'__float2half2_rn({x}f)'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes)
def greater_equal(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'(float) ({x} >= {y})'
elif dtype == 'half2':
codes = f'__hgeu2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def minimal(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'min({x}, {y})'
elif dtype == 'half2':
codes = f'__hmin2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def sub(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if y == '0.0f':
codes = f'{x}'
else:
codes = f'{x} - {y}'
elif dtype == 'half2':
if y == '__float2half2_rn(0.0f)':
codes = f'{x}'
else:
codes = f'__hsub2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def mul(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if x == '1.0f':
codes = f'{y}'
elif y == '1.0f':
codes = f'{x}'
else:
codes = f'{x} * {y}'
elif dtype == 'half2':
if x == '__float2half2_rn(1.0f)':
codes = f'{y}'
elif y == '__float2half2_rn(1.0f)':
codes = f'{x}'
else:
codes = f'__hmul2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def div(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if y == '1.0f':
codes = f'{x}'
else:
codes = f'{x} / {y}'
elif dtype == 'half2':
if y == '__float2half2_rn(1.0f)':
codes = f'{x}'
else:
codes = f'__h2div({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def fake_numerical_gradient_backward(y: str, x: str, alpha: float, dtype: str):
assert y is not None
alpha = constant(None, alpha, dtype)
codes = greater_equal(z=f'{dtype} fake_numerical_gradient_backward__mask', x=x, y=constant(None, 0., dtype), dtype=dtype)
codes += mul(z='fake_numerical_gradient_backward__mask', x='fake_numerical_gradient_backward__mask', y=constant(None, 2., dtype), dtype=dtype)
codes += sub(z='fake_numerical_gradient_backward__mask', x='fake_numerical_gradient_backward__mask', y=constant(None, 1., dtype), dtype=dtype)
codes += div(z='fake_numerical_gradient_backward__mask', x='fake_numerical_gradient_backward__mask', y=x, dtype=dtype)
codes += minimal(z=y, x='fake_numerical_gradient_backward__mask', y=alpha, dtype=dtype)
return codes | null |
7,629 | def constant(y: str or None, x: float, dtype: str):
if dtype == 'float':
codes = f'{x}f'
elif dtype == 'half2':
codes = f'__float2half2_rn({x}f)'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(y, codes)
def if_else_else(w: str or None, x: str, y: str, z: str, mask_x: str, mask_y: str, dtype: str):
# w = mask_x * x + mask_y * y + (1. - mask_x * mask_y) * z
if dtype == 'float':
codes = f'{mask_x} * {x} + {mask_y} * {y} + (1. - {mask_x} * {mask_y}) * {z}'
else:
codes = f'__hadd2(__hadd2(__hmul2({mask_x}, {x}), __hmul2({mask_y}, {y})), __hmul2({z}, __hsub2(__float2half_rn(1.0f), __hmul2({mask_x}, {mask_y}))))'
return wrap_return_codes(w, codes)
def greater_equal(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'(float) ({x} >= {y})'
elif dtype == 'half2':
codes = f'__hgeu2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def greater_than(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
codes = f'(float) ({x} > {y})'
elif dtype == 'half2':
codes = f'__hgtu2({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def div(z: str or None, x: str, y: str, dtype: str):
if dtype == 'float':
if y == '1.0f':
codes = f'{x}'
else:
codes = f'{x} / {y}'
elif dtype == 'half2':
if y == '__float2half2_rn(1.0f)':
codes = f'{x}'
else:
codes = f'__h2div({x}, {y})'
else:
raise NotImplementedError(dtype)
return wrap_return_codes(z, codes)
def log_tailed_relu_backward(y: str, x: str, alpha: float, dtype: str):
alpha = constant(None, alpha, dtype)
codes = greater_equal(z=f'const {dtype} log_tailed_relu_backward__mask_le0', x=constant(None, 0., dtype), y=x, dtype=dtype)
codes += greater_than(z=f'const {dtype} log_tailed_relu_backward__mask_gt1', x=x, y=constant(None, 1, dtype), dtype=dtype)
codes += if_else_else(w=y, x=alpha, y=div(z=None, x=constant(None, 1., dtype), y=x, dtype=dtype), z=constant(None, 1., dtype), mask_x=f'const {dtype} log_tailed_relu_backward__mask_le0', mask_y=f'const {dtype} log_tailed_relu_backward__mask_gt1', dtype=dtype)
return codes | null |
7,634 | import torch
import torch.nn.functional as F
import numpy as np
import logging
try:
import cupy
except BaseException as e:
logging.info(f'spikingjelly.activation_based.auto_cuda.neuronal_kernel: {e}')
cupy = None
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def scalar_to_cupy(py_dict: dict, ref: str = 'x_seq'):
device = py_dict[ref].get_device()
dtype = py_dict[ref].dtype
with cuda_utils.DeviceEnvironment(device):
for key, value in py_dict.items():
if isinstance(value, float):
if dtype == torch.float32:
value = cupy.asarray(value, dtype=np.float32)
elif dtype == torch.float16:
value = cupy.asarray([value, value], dtype=np.float16)
else:
raise NotImplementedError(dtype)
py_dict[key] = value
elif isinstance(value, int):
py_dict[key] = cupy.asarray(value) | null |
7,635 | import torch
import torch.nn.functional as F
import numpy as np
import logging
from .. import cuda_utils, surrogate
from ... import configure
from typing import Callable, Iterable
from . import base, cfunction
import math
def new_tensors(news: tuple, py_dict: dict, ref: str = 'x_seq'):
ref = py_dict[ref]
zero_shape = list(ref.shape)
zero_shape[0] *= news.__len__()
for i, item in enumerate(torch.split(torch.zeros(zero_shape, device=ref.device, dtype=ref.dtype),ref.shape[0])):
py_dict[news[i]] = item | null |
7,636 | import numpy as np
import logging
import torch
import torch.nn.functional as F
import sys
import logging
from .. import cuda_utils
from ... import configure
def wrap_with_comment(code: str, comment: str):
if logging.DEBUG >= logging.root.level:
return '\n//------' + comment + ' start------\n' + code + '\n//------' + comment + ' end--------\n\n'
else:
return code | null |
7,637 | import numpy as np
import logging
import torch
import torch.nn.functional as F
import sys
import logging
from .. import cuda_utils
from ... import configure
def startswiths(x: str, prefixes: tuple):
ret = False
for prefix in prefixes:
if x.startswith(prefix):
ret = True
return ret | null |
7,638 | import torch
import torch.nn as nn
import torch.nn.functional as F
from spikingjelly.activation_based import surrogate, layer
import math
def directional_rnn_cell_forward(cell: nn.Module, x: torch.Tensor,
states: torch.Tensor):
T = x.shape[0]
ss = states
output = []
for t in range(T):
ss = cell(x[t], ss)
if states.dim() == 2:
output.append(ss)
elif states.dim() == 3:
output.append(ss[0])
# 当RNN cell具有多个隐藏状态时,通常第0个隐藏状态是其输出
return torch.stack(output), ss | null |
7,639 | import torch
import torch.nn as nn
import torch.nn.functional as F
from spikingjelly.activation_based import surrogate, layer
import math
The provided code snippet includes necessary dependencies for implementing the `bidirectional_rnn_cell_forward` function. Write a Python function `def bidirectional_rnn_cell_forward(cell: nn.Module, cell_reverse: nn.Module, x: torch.Tensor, states: torch.Tensor, states_reverse: torch.Tensor)` to solve the following problem:
:param cell: 正向RNN cell,输入是正向序列 :type cell: nn.Module :param cell_reverse: 反向的RNN cell,输入是反向序列 :type cell_reverse: nn.Module :param x: ``shape = [T, batch_size, input_size]`` 的输入 :type x: torch.Tensor :param states: 正向RNN cell的起始状态 若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ; 否则 ``shape = [states_num, batch_size, hidden_size]`` :type states: torch.Tensor :param states_reverse: 反向RNN cell的起始状态 若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ; 否则 ``shape = [states_num, batch_size, hidden_size]`` :type states: torch.Tensor :return: y, ss, ss_r y: torch.Tensor ``shape = [T, batch_size, 2 * hidden_size]`` 的输出。``y[t]`` 由正向cell在 ``t`` 时刻和反向cell在 ``T - t - 1`` 时刻的输出拼接而来 ss: torch.Tensor ``shape`` 与 ``states`` 相同,正向cell在 ``T-1`` 时刻的状态 ss_r: torch.Tensor ``shape`` 与 ``states_reverse`` 相同,反向cell在 ``0`` 时刻的状态 计算单个正向和反向RNN cell沿着时间维度的循环并输出结果和两个cell的最终状态。
Here is the function:
def bidirectional_rnn_cell_forward(cell: nn.Module, cell_reverse: nn.Module, x: torch.Tensor,
states: torch.Tensor, states_reverse: torch.Tensor):
'''
:param cell: 正向RNN cell,输入是正向序列
:type cell: nn.Module
:param cell_reverse: 反向的RNN cell,输入是反向序列
:type cell_reverse: nn.Module
:param x: ``shape = [T, batch_size, input_size]`` 的输入
:type x: torch.Tensor
:param states: 正向RNN cell的起始状态
若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ;
否则 ``shape = [states_num, batch_size, hidden_size]``
:type states: torch.Tensor
:param states_reverse: 反向RNN cell的起始状态
若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ;
否则 ``shape = [states_num, batch_size, hidden_size]``
:type states: torch.Tensor
:return: y, ss, ss_r
y: torch.Tensor
``shape = [T, batch_size, 2 * hidden_size]`` 的输出。``y[t]`` 由正向cell在 ``t`` 时刻和反向cell在 ``T - t - 1``
时刻的输出拼接而来
ss: torch.Tensor
``shape`` 与 ``states`` 相同,正向cell在 ``T-1`` 时刻的状态
ss_r: torch.Tensor
``shape`` 与 ``states_reverse`` 相同,反向cell在 ``0`` 时刻的状态
计算单个正向和反向RNN cell沿着时间维度的循环并输出结果和两个cell的最终状态。
'''
T = x.shape[0]
ss = states
ss_r = states_reverse
output = []
output_r = []
for t in range(T):
ss = cell(x[t], ss)
ss_r = cell_reverse(x[T - t - 1], ss_r)
if states.dim() == 2:
output.append(ss)
output_r.append(ss_r)
elif states.dim() == 3:
output.append(ss[0])
output_r.append(ss_r[0])
# 当RNN cell具有多个隐藏状态时,通常第0个隐藏状态是其输出
ret = []
for t in range(T):
ret.append(torch.cat((output[t], output_r[T - t - 1]), dim=-1))
return torch.stack(ret), ss, ss_r | :param cell: 正向RNN cell,输入是正向序列 :type cell: nn.Module :param cell_reverse: 反向的RNN cell,输入是反向序列 :type cell_reverse: nn.Module :param x: ``shape = [T, batch_size, input_size]`` 的输入 :type x: torch.Tensor :param states: 正向RNN cell的起始状态 若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ; 否则 ``shape = [states_num, batch_size, hidden_size]`` :type states: torch.Tensor :param states_reverse: 反向RNN cell的起始状态 若RNN cell只有单个隐藏状态,则 ``shape = [batch_size, hidden_size]`` ; 否则 ``shape = [states_num, batch_size, hidden_size]`` :type states: torch.Tensor :return: y, ss, ss_r y: torch.Tensor ``shape = [T, batch_size, 2 * hidden_size]`` 的输出。``y[t]`` 由正向cell在 ``t`` 时刻和反向cell在 ``T - t - 1`` 时刻的输出拼接而来 ss: torch.Tensor ``shape`` 与 ``states`` 相同,正向cell在 ``T-1`` 时刻的状态 ss_r: torch.Tensor ``shape`` 与 ``states_reverse`` 相同,反向cell在 ``0`` 时刻的状态 计算单个正向和反向RNN cell沿着时间维度的循环并输出结果和两个cell的最终状态。 |
7,640 | import logging
import torch
import time
import numpy as np
from .. import configure
from typing import Callable
def cpu_timer(f: Callable, *args, **kwargs):
    """
    Returns the used time for calling ``f(*args, **kwargs)`` on the CPU.

    :param f: the function to time
    :type f: Callable
    :return: used time in milliseconds
    :rtype: float

    .. note::
        ``time.perf_counter()`` measures in seconds. The result is converted to
        milliseconds so the returned unit matches this docstring and
        ``cuda_timer`` (whose ``Event.elapsed_time`` reports milliseconds);
        previously the two timers returned different units, which skewed any
        code averaging or comparing their results.
    """
    start = time.perf_counter()
    f(*args, **kwargs)
    # convert seconds -> milliseconds for consistency with cuda_timer
    return (time.perf_counter() - start) * 1000.0
def cuda_timer(device: torch.device or int, f: Callable, *args, **kwargs):
    """
    Returns the used time for calling ``f(*args, **kwargs)`` on a CUDA device.

    :param device: the CUDA device on which ``f`` runs
    :type device: torch.device or int
    :param f: the function to time
    :type f: Callable
    :return: used time in milliseconds
    :rtype: float

    Timing is done with CUDA events so that asynchronous kernel launches
    issued by ``f`` are accounted for; ``torch.cuda.synchronize`` ensures both
    events have completed before the elapsed time is read.
    """
    torch.cuda.set_device(device)
    evt_start = torch.cuda.Event(enable_timing=True)
    evt_stop = torch.cuda.Event(enable_timing=True)
    evt_start.record()
    f(*args, **kwargs)
    evt_stop.record()
    torch.cuda.synchronize(device)
    return evt_start.elapsed_time(evt_stop)
The provided code snippet includes necessary dependencies for implementing the `cal_fun_t` function. Write a Python function `def cal_fun_t(n: int, device: str or torch.device or int, f: Callable, *args, **kwargs)` to solve the following problem:
* :ref:`API in English <cal_fun_t-en>` .. _cal_fun_t-cn: 测量在 ``device`` 上执行 ``n`` 次 ``f(*args, **kwargs)`` 的平均用时 .. note:: 当 ``n > 1`` 时,实际上会执行 ``2n`` 次,然后返回后 ``n`` 次的平均用时,以减小误差。 :param n: 重复的次数 :type n: int :param device: ``f`` 执行的设备,可以为 'cpu' 或CUDA设备 :type device: str or torch.device or int :param f: 函数 :type f: Callable :return: 用时,单位是毫秒 :rtype: float * :ref:`中文 API <cal_fun_t-cn>` .. _cal_fun_t-en: Returns the used time averaged by calling ``f(*args, **kwargs)`` over ``n`` times .. admonition:: Note :class: note If ``n > 1``, this function will call ``f`` for ``2n`` times and return the average used time by the last ``n`` times to reduce the measure error. :param n: repeat times :type n: int :param device: on which cuda device that ``f`` is running. It can be 'cpu' or a cuda device :type device: str or torch.device or int :param f: function :type f: Callable :return: used time in milliseconds :rtype: float
Here is the function:
def cal_fun_t(n: int, device: str or torch.device or int, f: Callable, *args, **kwargs):
    """
    Returns the average used time of calling ``f(*args, **kwargs)`` over ``n``
    times on ``device``.

    :param n: repeat times
    :type n: int
    :param device: on which device ``f`` is running. It can be 'cpu' (given as
        the string ``'cpu'`` or ``torch.device('cpu')``) or a CUDA device
    :type device: str or torch.device or int
    :param f: the function to time
    :type f: Callable
    :return: average used time in milliseconds
    :rtype: float

    .. admonition:: Note
        :class: note

        If ``n > 1``, this function will call ``f`` for ``2n`` times (after one
        extra warm-up call) and return the average used time of the last ``n``
        calls to reduce the measurement error.
    """
    # Normalize the device check: ``torch.device('cpu')`` does not compare
    # equal to the string 'cpu', but both must use the CPU timer.  The old
    # ``device == 'cpu'`` sent torch.device('cpu') down the CUDA path.
    on_cpu = str(device) == 'cpu'

    def run_once():
        # One timed invocation of f on the requested device.
        if on_cpu:
            return cpu_timer(f, *args, **kwargs)
        return cuda_timer(device, f, *args, **kwargs)

    if n == 1:
        return run_once()

    # warm up once to exclude one-time costs (caches, lazy initialization)
    run_once()

    t_list = np.asarray([run_once() for _ in range(n * 2)])
    # keep only the last n measurements to reduce the measurement error
    return t_list[n:].mean()
7,641 | from abc import abstractmethod
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import logging
from . import surrogate, base
from .auto_cuda import neuron_kernel as ac_neuron_kernel
from .auto_cuda import ss_neuron_kernel as ss_ac_neuron_kernel
import torch.distributed as dist
from typing import Union, Iterable, Optional
from numpy import sqrt, newaxis, integer
from numpy.fft import irfft, rfftfreq
from numpy.random import default_rng, Generator, RandomState
from numpy import sum as npsum
def _get_normal_distribution(random_state: Optional[Union[int, Generator, RandomState]]):
normal_dist = None
if isinstance(random_state, (integer, int)) or random_state is None:
random_state = default_rng(random_state)
normal_dist = random_state.normal
elif isinstance(random_state, (Generator, RandomState)):
normal_dist = random_state.normal
else:
raise ValueError(
"random_state must be one of integer, numpy.random.Generator, "
"numpy.random.Randomstate"
)
return normal_dist
The provided code snippet includes necessary dependencies for implementing the `powerlaw_psd_gaussian` function. Write a Python function `def powerlaw_psd_gaussian( exponent: float, size: Union[int, Iterable[int]], fmin: float = 0.0, random_state: Optional[Union[int, Generator, RandomState]] = None )` to solve the following problem:
Gaussian (1/f)**beta noise. Based on the algorithm in: Timmer, J. and Koenig, M.: On generating power law noise. Astron. Astrophys. 300, 707-710 (1995) Normalised to unit variance Parameters: ----------- exponent : float The power-spectrum of the generated noise is proportional to S(f) = (1 / f)**beta flicker / pink noise: exponent beta = 1 brown noise: exponent beta = 2 Furthermore, the autocorrelation decays proportional to lag**-gamma with gamma = 1 - beta for 0 < beta < 1. There may be finite-size issues for beta close to one. shape : int or iterable The output has the given shape, and the desired power spectrum in the last coordinate. That is, the last dimension is taken as time, and all other components are independent. fmin : float, optional Low-frequency cutoff. Default: 0 corresponds to original paper. The power-spectrum below fmin is flat. fmin is defined relative to a unit sampling rate (see numpy's rfftfreq). For convenience, the passed value is mapped to max(fmin, 1/samples) internally since 1/samples is the lowest possible finite frequency in the sample. The largest possible value is fmin = 0.5, the Nyquist frequency. The output for this value is white noise. random_state : int, numpy.integer, numpy.random.Generator, numpy.random.RandomState, optional Optionally sets the state of NumPy's underlying random number generator. Integer-compatible values or None are passed to np.random.default_rng. np.random.RandomState or np.random.Generator are used directly. Default: None. Returns ------- out : array The samples. Examples: --------- # generate 1/f noise == pink noise == flicker noise >>> import colorednoise as cn >>> y = cn.powerlaw_psd_gaussian(1, 5)
Here is the function:
def powerlaw_psd_gaussian(
        exponent: float,
        size: Union[int, Iterable[int]],
        fmin: float = 0.0,
        random_state: Optional[Union[int, Generator, RandomState]] = None
):
    """Gaussian (1/f)**beta noise.

    Based on the algorithm in:
    Timmer, J. and Koenig, M.:
    On generating power law noise.
    Astron. Astrophys. 300, 707-710 (1995)

    Normalised to unit variance.

    Parameters:
    -----------
    exponent : float
        The power-spectrum of the generated noise is proportional to
        S(f) = (1 / f)**beta
        flicker / pink noise: exponent beta = 1
        brown noise: exponent beta = 2
        Furthermore, the autocorrelation decays proportional to lag**-gamma
        with gamma = 1 - beta for 0 < beta < 1.
        There may be finite-size issues for beta close to one.
    size : int or iterable
        The output has the given shape, and the desired power spectrum in
        the last coordinate. That is, the last dimension is taken as time,
        and all other components are independent.
    fmin : float, optional
        Low-frequency cutoff.
        Default: 0 corresponds to original paper.
        The power-spectrum below fmin is flat. fmin is defined relative
        to a unit sampling rate (see numpy's rfftfreq). For convenience,
        the passed value is mapped to max(fmin, 1/samples) internally
        since 1/samples is the lowest possible finite frequency in the
        sample. The largest possible value is fmin = 0.5, the Nyquist
        frequency. The output for this value is white noise.
    random_state : int, numpy.integer, numpy.random.Generator,
        numpy.random.RandomState, optional
        Optionally sets the state of NumPy's underlying random number
        generator. Integer-compatible values or None are passed to
        np.random.default_rng. np.random.RandomState or np.random.Generator
        are used directly. Default: None.

    Returns
    -------
    out : array
        The samples.

    Examples:
    ---------
    # generate 1/f noise == pink noise == flicker noise
    >>> import colorednoise as cn
    >>> y = cn.powerlaw_psd_gaussian(1, 5)
    """
    # Make sure size is a list so we can iterate it and assign to it.
    if isinstance(size, (integer, int)):
        size = [size]
    elif isinstance(size, Iterable):
        size = list(size)
    else:
        raise ValueError("Size must be of type int or Iterable[int]")

    # The number of samples in each generated time series (last axis).
    samples = size[-1]

    # Calculate frequencies (we assume a unit sample rate).
    # Use fft functions for real output (-> hermitian spectrum).
    f = rfftfreq(samples)  # type: ignore # mypy 1.5.1 has problems here

    # Validate / normalise fmin: 1/samples is the lowest possible finite
    # frequency in the sample, 0.5 (Nyquist) is the highest.
    if 0 <= fmin <= 0.5:
        fmin = max(fmin, 1./samples)  # Low frequency cutoff
    else:
        raise ValueError("fmin must be chosen between 0 and 0.5.")

    # Build scaling factors for all frequencies: S(f) ~ f**(-beta/2),
    # held flat below the cutoff fmin (as in the Timmer & Koenig paper).
    s_scale = f
    ix = npsum(s_scale < fmin)  # Index of the cutoff
    if ix and ix < len(s_scale):
        s_scale[:ix] = s_scale[ix]
    s_scale = s_scale**(-exponent/2.)

    # Calculate theoretical output standard deviation from scaling.
    w = s_scale[1:].copy()
    w[-1] *= (1 + (samples % 2)) / 2.  # correct f = +-0.5
    sigma = 2 * sqrt(npsum(w**2)) / samples

    # Adjust size to generate one Fourier component per frequency.
    size[-1] = len(f)

    # Add empty dimension(s) to broadcast s_scale along last
    # dimension of generated random power + phase (below).
    dims_to_add = len(size) - 1
    s_scale = s_scale[(newaxis,) * dims_to_add + (Ellipsis,)]

    # prepare random number generator
    normal_dist = _get_normal_distribution(random_state)

    # Generate scaled random power + phase (real and imaginary parts).
    sr = normal_dist(scale=s_scale, size=size)
    si = normal_dist(scale=s_scale, size=size)

    # If the signal length is even, frequencies +/- 0.5 are equal
    # so the coefficient must be real.
    if not (samples % 2):
        si[..., -1] = 0
        sr[..., -1] *= sqrt(2)    # Fix magnitude

    # Regardless of signal length, the DC component must be real.
    si[..., 0] = 0
    sr[..., 0] *= sqrt(2)    # Fix magnitude

    # Combine power + corrected phase to Fourier components.
    s = sr + 1J * si

    # Transform to real time series & scale to unit variance.
    y = irfft(s, n=samples, axis=-1) / sigma

    return y
7,642 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `set_backend` function. Write a Python function `def set_backend(net: nn.Module, backend: str, instance: object or tuple = (nn.Module, ))` to solve the following problem:
* :ref:`API in English <set_backend-en>` .. _set_backend-cn: :param net: 一个神经网络 :type net: nn.Module :param backend: 使用哪个后端 :type backend: str :param instance: 类型为 ``instance`` 的模块后端会被改变 :type instance: nn.Module or tuple[nn.Module] :return: None 将 ``net`` 中 所有类型为 ``instance`` 的模块后端更改为 ``backend`` * :ref:`中文 API <set_backend-cn>` .. _set_backend-en: :param net: a network :type net: nn.Module :param backend: the backend to be set :type backend: str :param instance: the backend of which instance will be changed :type instance: nn.Module or tuple[nn.Module] :return: None Sets backends of all modules whose instance is ``instance`` in ``net`` to ``backend``
Here is the function:
def set_backend(net: nn.Module, backend: str, instance: object or tuple = (nn.Module, )):
    """
    Set the computational backend of every module in ``net`` whose type
    matches ``instance``.

    :param net: a network
    :type net: nn.Module
    :param backend: the backend to be set
    :type backend: str
    :param instance: only modules that are instances of ``instance`` are changed
    :type instance: nn.Module or tuple[nn.Module]
    :return: None

    Modules without a ``backend`` attribute are left untouched.  A warning is
    logged when a module exposes ``backend`` but is not a
    ``base.MemoryModule``, and when a module does not support the requested
    backend (its current backend is then kept).
    """
    for m in net.modules():
        if not isinstance(m, instance):
            continue
        if not hasattr(m, 'backend'):
            continue
        if not isinstance(m, base.MemoryModule):
            logging.warning(
                f'Trying to set the backend for {m}, which is not spikingjelly.activation_based.base.MemoryModule')
        if backend in m.supported_backends:
            m.backend = backend
        else:
            logging.warning(f'{m} does not supports for backend={backend}. It will still use backend={m.backend}.')
7,643 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
def kernel_dot_product(x: Tensor, y: Tensor, kernel='linear', *args):
    """
    Compute pairwise inner products of the rows of ``x`` and ``y`` in a kernel
    space.

    :param x: tensor of shape=[N, M], viewed as N M-dim vectors
    :param y: tensor of shape=[R, M], viewed as R M-dim vectors (R may differ
        from N; previously the gaussian kernel silently assumed R == N)
    :param str kernel: kernel used for the inner product
    :param args: extra kernel parameters
    :return: ``ret``, tensor of shape=[N, R] where ``ret[i][j]`` is the inner
        product of ``x[i]`` and ``y[j]``

    Denoting two M-dim rows by :math:`\\boldsymbol{x_{i}}` and
    :math:`\\boldsymbol{y_{j}}`, ``kernel`` selects the inner product:

    - 'linear', :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}}`.
    - 'polynomial', :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = (\\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}})^{d}`, where :math:`d = args[0]`.
    - 'sigmoid', :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{sigmoid}(\\alpha \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}})`, where :math:`\\alpha = args[0]`.
    - 'gaussian', :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{exp}(- \\frac{||\\boldsymbol{x_{i}} - \\boldsymbol{y_{j}}||^{2}}{2\\sigma^{2}})`, where :math:`\\sigma = args[0]`.

    :raises NotImplementedError: for an unknown ``kernel``
    """
    if kernel == 'linear':
        return x.mm(y.t())
    elif kernel == 'polynomial':
        d = args[0]
        return x.mm(y.t()).pow(d)
    elif kernel == 'sigmoid':
        alpha = args[0]
        return torch.sigmoid(alpha * x.mm(y.t()))
    elif kernel == 'gaussian':
        sigma = args[0]
        x2 = x.square().sum(dim=1)  # shape=[N]
        y2 = y.square().sum(dim=1)  # shape=[R]
        xy = x.mm(y.t())            # shape=[N, R]
        # Squared Euclidean distance via broadcasting:
        # d_xy[i][j] = ||x[i]||^2 + ||y[j]||^2 - 2<x[i], y[j]> = ||x[i] - y[j]||^2.
        # Broadcasting replaces the old explicit ``repeat`` (which hard-coded
        # N = x.shape[0] for both axes) and also works when x and y have
        # different numbers of rows.
        d_xy = x2.unsqueeze(1) + y2.unsqueeze(0) - 2 * xy
        return torch.exp(- d_xy / (2 * sigma * sigma))
    else:
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `spike_similar_loss` function. Write a Python function `def spike_similar_loss(spikes: Tensor, labels: Tensor, kernel_type='linear', loss_type='mse', *args)` to solve the following problem:
* :ref:`API in English <spike_similar_loss-en>` .. _spike_similar_loss-cn: :param spikes: shape=[N, M, T],N个数据生成的脉冲 :param labels: shape=[N, C],N个数据的标签,\ ``labels[i][k] == 1``\ 表示数据i属于第k类,反之亦然,允许多标签 :param str kernel_type: 使用内积来衡量两个脉冲之间的相似性,\ ``kernel_type``\ 是计算内积时,所使用的核函数种类 :param str loss_type: 返回哪种损失,可以为'mse', 'l1', 'bce' :param args: 用于计算内积的额外参数 :return: shape=[1]的tensor,相似损失 将N个数据输入到输出层有M个神经元的SNN,运行T步,得到shape=[N, M, T]的脉冲。这N个数据的标签为shape=[N, C]的\ ``labels``。 用shape=[N, N]的矩阵\ ``sim``\ 表示\ **实际相似度矩阵**,\ ``sim[i][j] == 1``\ 表示数据i与数据j相似,反之亦然。若\\ \ ``labels[i]``\ 与\ ``labels[j]``\ 共享至少同一个标签,则认为他们相似,否则不相似。 用shape=[N, N]的矩阵\ ``sim_p``\ 表示\ **输出相似度矩阵**,\ ``sim_p[i][j]``\ 的取值为0到1,值越大表示数据i与数据j的脉冲越相似。 使用内积来衡量两个脉冲之间的相似性,\ ``kernel_type``\ 是计算内积时,所使用的核函数种类: - 'linear',线性内积,:math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}}`。 - 'sigmoid',Sigmoid内积,:math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{sigmoid}(\\alpha \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}})`,其中 :math:`\\alpha = args[0]`。 - 'gaussian',高斯内积,:math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{exp}(- \\frac{||\\boldsymbol{x_{i}} - \\boldsymbol{y_{j}}||^{2}}{2\\sigma^{2}})`,其中 :math:`\\sigma = args[0]`。 当使用Sigmoid或高斯内积时,内积的取值范围均在[0, 1]之间;而使用线性内积时,为了保证内积取值仍然在[0, 1]之间,会进行归一化:\\ 按照 :math:`\\text{sim_p}[i][j]=\\frac{\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}})}{||\\boldsymbol{x_{i}}|| · ||\\boldsymbol{y_{j}}||}`。 对于相似的数据,根据输入的\ ``loss_type``,返回度量\ ``sim``\ 与\ ``sim_p``\ 差异的损失: - 'mse' -- 返回sim与sim_p的均方误差(也就是l2误差)。 - 'l1' -- 返回sim与sim_p的l1误差。 - 'bce' -- 返回sim与sim_p的二值交叉熵误差。 .. note:: 脉冲向量稀疏、离散,最好先使用高斯核进行平滑,然后再计算相似度。 * :ref:`中文API <spike_similar_loss-cn>` .. _spike_similar_loss-en: :param spikes: shape=[N, M, T], output spikes corresponding to a batch of N inputs :param labels: shape=[N, C], labels of inputs, ``labels[i][k] == 1`` means the i-th input belongs to the k-th category and vice versa. Multi-label input is allowed. 
:param str kernel_type: Type of kernel function used when calculating inner products. The inner product is the similarity measure of two spikes. :param str loss_type: Type of loss returned. Can be: 'mse', 'l1', 'bce' :param args: Extra parameters for inner product :return: shape=[1], similarity loss A SNN consisting M neurons will receive a batch of N input data in each timestep (from 0 to T-1) and output a spike tensor of shape=[N, M, T]. The label is a tensor of shape=[N, C]. The **groundtruth similarity matrix** ``sim`` has a shape of [N, N]. ``sim[i][j] == 1`` indicates that input i is similar to input j and vice versa. If and only if ``labels[i]`` and ``labels[j]`` have at least one common label, they are viewed as similar. The **output similarity matrix** ``sim_p`` has a shape of [N, N]. The value of ``sim_p[i][j]`` ranges from 0 to 1, represents the similarity between output spike from both input i and input j. The similarity is measured by inner product of two spikes. ``kernel_type`` is the type of kernel function when calculating inner product: - 'linear', Linear kernel, :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}}`. - 'sigmoid', Sigmoid kernel, :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{sigmoid}(\\alpha \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}})`, where :math:`\\alpha = args[0]`. - 'gaussian', Gaussian kernel,:math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{exp}(- \\frac{||\\boldsymbol{x_{i}} - \\boldsymbol{y_{j}}||^{2}}{2\\sigma^{2}})`, where :math:`\\sigma = args[0]`. When Sigmoid or Gaussian kernel is applied, the inner product naturally lies in :math:`[0, 1]`. To make the value consistent when using linear kernel, the result will be normalized as: :math:`\\text{sim_p}[i][j]=\\frac{\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}})}{||\\boldsymbol{x_{i}}|| · ||\\boldsymbol{y_{j}}||}`. 
For similar data, return the specified discrepancy loss between ``sim`` and ``sim_p`` according to ``loss_type``. - 'mse' -- Return the Mean-Square Error (squared L2 norm) between sim and sim_p. - 'l1' -- Return the L1 error between sim and sim_p. - 'bce' -- Return the Binary Cross Entropy between sim and sim_p. .. admonition:: Note :class: note Since spike vectors are usually discrete and sparse, it would be better to apply Gaussian filter first to smooth the vectors before calculating similarities.
Here is the function:
def spike_similar_loss(spikes: Tensor, labels: Tensor, kernel_type='linear', loss_type='mse', *args):
    """
    Similarity loss between output spikes and a label-derived similarity
    matrix.

    :param spikes: shape=[N, M, T], output spikes of N samples
    :param labels: shape=[N, C], one-hot labels (multi-label allowed); two
        samples are considered similar iff they share at least one label
    :param str kernel_type: kernel used to measure spike similarity
        ('linear', 'sigmoid', 'gaussian', ...); see ``kernel_dot_product``
    :param str loss_type: which loss to return: 'mse', 'l1' or 'bce'
    :param args: extra kernel parameters
    :return: scalar tensor, the similarity loss
    :raises NotImplementedError: for an unknown ``loss_type``

    The predicted similarity ``sim_p[i][j]`` is the kernel inner product of
    the flattened spike trains of samples i and j; with the linear kernel it
    is normalized by the product of the spike norms so it stays in [0, 1].
    The ground-truth ``sim[i][j]`` is 1 iff samples i and j share a label.

    .. admonition:: Note
        :class: note

        Spike vectors are usually discrete and sparse; applying a Gaussian
        kernel to smooth them before measuring similarity works better.
    """
    flat = spikes.flatten(start_dim=1)
    sim_p = kernel_dot_product(flat, flat, kernel_type, *args)
    if kernel_type == 'linear':
        # Normalize so linear inner products also lie in [0, 1]
        # (cosine similarity); 1e-8 guards against all-zero spike trains.
        norms = flat.norm(p=2, dim=1, keepdim=True)
        sim_p = sim_p / ((norms.mm(norms.t())) + 1e-8)

    labels = labels.float()
    # labels.mm(labels.t())[i][j] counts the labels shared by samples i and j;
    # sharing at least one label means "similar", so clamp the count to 1.
    sim = labels.mm(labels.t()).clamp_max(1)

    loss_fns = {
        'mse': F.mse_loss,
        'l1': F.l1_loss,
        'bce': F.binary_cross_entropy,
    }
    if loss_type not in loss_fns:
        raise NotImplementedError
    return loss_fns[loss_type](sim_p, sim)
Multi-label input is allowed. :param str kernel_type: Type of kernel function used when calculating inner products. The inner product is the similarity measure of two spikes. :param str loss_type: Type of loss returned. Can be: 'mse', 'l1', 'bce' :param args: Extra parameters for inner product :return: shape=[1], similarity loss A SNN consisting M neurons will receive a batch of N input data in each timestep (from 0 to T-1) and output a spike tensor of shape=[N, M, T]. The label is a tensor of shape=[N, C]. The **groundtruth similarity matrix** ``sim`` has a shape of [N, N]. ``sim[i][j] == 1`` indicates that input i is similar to input j and vice versa. If and only if ``labels[i]`` and ``labels[j]`` have at least one common label, they are viewed as similar. The **output similarity matrix** ``sim_p`` has a shape of [N, N]. The value of ``sim_p[i][j]`` ranges from 0 to 1, represents the similarity between output spike from both input i and input j. The similarity is measured by inner product of two spikes. ``kernel_type`` is the type of kernel function when calculating inner product: - 'linear', Linear kernel, :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}}`. - 'sigmoid', Sigmoid kernel, :math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{sigmoid}(\\alpha \\boldsymbol{x_{i}}^{T}\\boldsymbol{y_{j}})`, where :math:`\\alpha = args[0]`. - 'gaussian', Gaussian kernel,:math:`\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}}) = \\mathrm{exp}(- \\frac{||\\boldsymbol{x_{i}} - \\boldsymbol{y_{j}}||^{2}}{2\\sigma^{2}})`, where :math:`\\sigma = args[0]`. When Sigmoid or Gaussian kernel is applied, the inner product naturally lies in :math:`[0, 1]`. To make the value consistent when using linear kernel, the result will be normalized as: :math:`\\text{sim_p}[i][j]=\\frac{\\kappa(\\boldsymbol{x_{i}}, \\boldsymbol{y_{j}})}{||\\boldsymbol{x_{i}}|| · ||\\boldsymbol{y_{j}}||}`. 
For similar data, return the specified discrepancy loss between ``sim`` and ``sim_p`` according to ``loss_type``. - 'mse' -- Return the Mean-Square Error (squared L2 norm) between sim and sim_p. - 'l1' -- Return the L1 error between sim and sim_p. - 'bce' -- Return the Binary Cross Entropy between sim and sim_p. .. admonition:: Note :class: note Since spike vectors are usually discrete and sparse, it would be better to apply Gaussian filter first to smooth the vectors before calculating similarities. |
7,644 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `set_threshold_margin` function. Write a Python function `def set_threshold_margin(output_layer: neuron.BaseNode, label_one_hot: Tensor, eval_threshold=1.0, threshold0=0.9, threshold1=1.1)` to solve the following problem:
* :ref:`API in English <set_threshold_margin-en>` .. _set_threshold_margin-cn: :param output_layer: 用于分类的网络的输出层,输出层输出shape=[batch_size, C] :param label_one_hot: one hot格式的样本标签,shape=[batch_size, C] :param float eval_threshold: 输出层神经元在测试(推理)时使用的电压阈值 :param float threshold0: 输出层神经元在训练时,负样本的电压阈值 :param float threshold1: 输出层神经元在训练时,正样本的电压阈值 :return: None 对于用来分类的网络,为输出层神经元的电压阈值设置一定的裕量,以获得更好的分类性能。 类别总数为C,网络的输出层共有C个神经元。网络在训练时,当输入真实类别为i的数据,输出层中第i个神经元的电压阈值会被设置成\\ ``threshold1``,而其他神经元的电压阈值会被设置成\ ``threshold0``。而在测试(推理)时,输出层中神经元的电压阈值被统一设置成\ ``eval_threshold``。 * :ref:`中文API <set_threshold_margin-cn>` .. _set_threshold_margin-en: :param output_layer: The output layer of classification network, where the shape of output should be [batch_size, C] :param label_one_hot: Labels in one-hot format, shape=[batch_size, C] :param float eval_threshold: Voltage threshold of neurons in output layer when evaluating (inference) :param float threshold0: Voltage threshold of the corresponding neurons of **negative** samples in output layer when training :param float threshold1: Voltage threshold of the corresponding neurons of **positive** samples in output layer when training :return: None Set voltage threshold margin for neurons in the output layer to reach better performance in classification task. When there are C different classes, the output layer contains C neurons. During training, when the input with groundtruth label i are sent into the network, the voltage threshold of the i-th neurons in the output layer will be set to ``threshold1`` and the remaining will be set to ``threshold0``. During inference, the voltage thresholds of **ALL** neurons in the output layer will be set to ``eval_threshold``.
Here is the function:
def set_threshold_margin(output_layer: neuron.BaseNode, label_one_hot: Tensor,
                         eval_threshold=1.0, threshold0=0.9, threshold1=1.1):
    """
    Set a voltage-threshold margin on the output layer of a classification SNN.

    During training, the output neuron matching each sample's ground-truth
    class gets the (higher) threshold ``threshold1`` while every other output
    neuron gets ``threshold0``; this margin tends to improve classification
    performance. During evaluation (inference), one scalar threshold
    ``eval_threshold`` is shared by all output neurons.

    :param output_layer: output layer of the classification network; its
        output has shape=[batch_size, C] for C classes
    :param label_one_hot: labels in one-hot format, shape=[batch_size, C]
    :param float eval_threshold: voltage threshold used by all output neurons
        at evaluation (inference) time
    :param float threshold0: training-time voltage threshold for the neurons
        of **negative** classes
    :param float threshold1: training-time voltage threshold for the neurons
        of **positive** (ground-truth) classes
    :return: None
    """
    if not output_layer.training:
        # Inference: a single scalar threshold for the whole layer.
        output_layer.v_threshold = eval_threshold
        return
    # Training: per-neuron thresholds, raised only at the ground-truth class.
    per_neuron = torch.ones_like(label_one_hot) * threshold0
    per_neuron[label_one_hot == 1] = threshold1
    output_layer.v_threshold = per_neuron
7,645 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `redundant_one_hot` function. Write a Python function `def redundant_one_hot(labels: Tensor, num_classes: int, n: int)` to solve the following problem:
* :ref:`API in English <redundant_one_hot-en>` .. _redundant_one_hot-cn: :param labels: shape=[batch_size]的tensor,表示\ ``batch_size``\ 个标签 :param int num_classes: 类别总数 :param int n: 表示每个类别所用的编码数量 :return: shape=[batch_size, num_classes * n]的tensor 对数据进行冗余的one-hot编码,每一类用 ``n`` 个1和 ``(num_classes - 1) * n`` 个0来编码。 示例: .. code-block:: python >>> num_classes = 3 >>> n = 2 >>> labels = torch.randint(0, num_classes, [4]) >>> labels tensor([0, 1, 1, 0]) >>> codes = functional.redundant_one_hot(labels, num_classes, n) >>> codes tensor([[1., 1., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0.], [0., 0., 1., 1., 0., 0.], [1., 1., 0., 0., 0., 0.]]) * :ref:`中文API <redundant_one_hot-cn>` .. _redundant_one_hot-en: :param labels: Tensor of shape=[batch_size], ``batch_size`` labels :param int num_classes: The total number of classes. :param int n: The encoding length for each class. :return: Tensor of shape=[batch_size, num_classes * n] Redundant one-hot encoding for data. Each class is encoded to ``n`` 1's and ``(num_classes - 1) * n`` 0's e.g.: .. code-block:: python >>> num_classes = 3 >>> n = 2 >>> labels = torch.randint(0, num_classes, [4]) >>> labels tensor([0, 1, 1, 0]) >>> codes = functional.redundant_one_hot(labels, num_classes, n) >>> codes tensor([[1., 1., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0.], [0., 0., 1., 1., 0., 0.], [1., 1., 0., 0., 0., 0.]])
Here is the function:
def redundant_one_hot(labels: Tensor, num_classes: int, n: int):
    """
    Encode labels with redundant one-hot codes.

    Every class occupies ``n`` consecutive positions of the code, so a sample
    of class ``i`` is encoded by ``n`` ones at columns ``[i * n, (i + 1) * n)``
    and ``(num_classes - 1) * n`` zeros elsewhere, giving codes of length
    ``num_classes * n``.

    :param labels: tensor of shape=[batch_size] holding class indices
    :param int num_classes: the total number of classes
    :param int n: the encoding length used per class
    :return: float tensor of shape=[batch_size, num_classes * n]

    e.g.:

    .. code-block:: python

        >>> labels = torch.tensor([0, 1, 1, 0])
        >>> codes = functional.redundant_one_hot(labels, num_classes=3, n=2)
        >>> codes
        tensor([[1., 1., 0., 0., 0., 0.],
                [0., 0., 1., 1., 0., 0.],
                [0., 0., 1., 1., 0., 0.],
                [1., 1., 0., 0., 0., 0.]])
    """
    code_length = num_classes * n
    # For each sample, columns [label * n, label * n + n) are set to 1.
    columns = labels.unsqueeze(1) * n + torch.arange(n, device=labels.device)
    codes = torch.zeros(size=[labels.shape[0], code_length], device=labels.device)
    return codes.scatter_(1, columns, 1.)
7,646 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `first_spike_index` function. Write a Python function `def first_spike_index(spikes: Tensor)` to solve the following problem:
* :ref:`API in English <first_spike_index-en>` .. _first_spike_index-cn: :param spikes: shape=[*, T],表示任意个神经元在t=0, 1, ..., T-1,共T个时刻的输出脉冲 :return: index, shape=[*, T],为 ``True`` 的位置表示该神经元首次释放脉冲的时刻 输入若干个神经元的输出脉冲,返回一个与输入相同shape的 ``bool`` 类型的index。index为 ``True`` 的位置,表示该神经元首次释放脉冲的时刻。 示例: .. code-block:: python >>> spikes = (torch.rand(size=[2, 3, 8]) >= 0.8).float() >>> spikes tensor([[[0., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 1., 0.], [0., 1., 0., 0., 0., 1., 0., 1.]], [[0., 0., 1., 1., 0., 0., 0., 1.], [1., 1., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0.]]]) >>> first_spike_index(spikes) tensor([[[False, False, False, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, True, False, False, False, False, False, False]], [[False, False, True, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, False, False, True, False, False, False, False]]]) * :ref:`中文API <first_spike_index-cn>` .. _first_spike_index-en: :param spikes: shape=[*, T], indicates the output spikes of some neurons when t=0, 1, ..., T-1. :return: index, shape=[*, T], the index of ``True`` represents the moment of first spike. Return an ``index`` tensor of the same shape of input tensor, which is the output spike of some neurons. The index of ``True`` represents the moment of first spike. e.g.: .. 
code-block:: python >>> spikes = (torch.rand(size=[2, 3, 8]) >= 0.8).float() >>> spikes tensor([[[0., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 1., 0.], [0., 1., 0., 0., 0., 1., 0., 1.]], [[0., 0., 1., 1., 0., 0., 0., 1.], [1., 1., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0.]]]) >>> first_spike_index(spikes) tensor([[[False, False, False, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, True, False, False, False, False, False, False]], [[False, False, True, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, False, False, True, False, False, False, False]]])
Here is the function:
def first_spike_index(spikes: Tensor):
    """
    Locate the moment of the first spike of each neuron.

    :param spikes: shape=[*, T], the output spikes of any number of neurons at
        time-steps t=0, 1, ..., T-1
    :return: bool tensor with the same shape=[*, T]; ``True`` marks the moment
        each neuron fires its first spike (all ``False`` for a silent neuron)

    e.g.:

    .. code-block:: python

        >>> spikes = torch.tensor([[0., 1., 0., 1.],
        ...                        [0., 0., 0., 0.]])
        >>> first_spike_index(spikes)
        tensor([[False,  True, False, False],
                [False, False, False, False]])
    """
    with torch.no_grad():
        # After two cumulative sums over the time dimension, the position
        # holding exactly 1 is the first spike: every later time-step of a
        # firing neuron accumulates to a value > 1.
        once = spikes.cumsum(dim=-1)
        twice = once.cumsum(dim=-1)
        return twice.eq(1)
code-block:: python >>> spikes = (torch.rand(size=[2, 3, 8]) >= 0.8).float() >>> spikes tensor([[[0., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 1., 0.], [0., 1., 0., 0., 0., 1., 0., 1.]], [[0., 0., 1., 1., 0., 0., 0., 1.], [1., 1., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0.]]]) >>> first_spike_index(spikes) tensor([[[False, False, False, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, True, False, False, False, False, False, False]], [[False, False, True, False, False, False, False, False], [ True, False, False, False, False, False, False, False], [False, False, False, True, False, False, False, False]]]) |
7,647 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `multi_step_forward` function. Write a Python function `def multi_step_forward(x_seq: Tensor, single_step_module: nn.Module or list[nn.Module] or tuple[nn.Module] or nn.Sequential or Callable)` to solve the following problem:
* :ref:`API in English <multi_step_forward-en>` .. _multi_step_forward-cn: :param x_seq: ``shape=[T, batch_size, ...]`` 的输入tensor :type x_seq: Tensor :param single_step_module: 一个或多个单步模块 :type single_step_module: torch.nn.Module or list[nn.Module] or tuple[nn.Module] or torch.nn.Sequential or Callable :return: ``shape=[T, batch_size, ...]`` 的输出tensor :rtype: torch.Tensor 在单步模块 ``single_step_module`` 上使用多步前向传播。 * :ref:`中文 API <multi_step_forward-cn>` .. _multi_step_forward-en: :param x_seq: the input tensor with ``shape=[T, batch_size, ...]`` :type x_seq: torch.Tensor :param single_step_module: one or many single-step modules :type single_step_module: torch.nn.Module or list[nn.Module] or tuple[nn.Module] or torch.nn.Sequential or Callable :return: the output tensor with ``shape=[T, batch_size, ...]`` :rtype: torch.Tensor Applies multi-step forward on ``single_step_module``.
Here is the function:
def multi_step_forward(x_seq: Tensor, single_step_module: nn.Module or list[nn.Module] or tuple[nn.Module] or nn.Sequential or Callable):
    """
    Apply multi-step forward on single-step module(s).

    :param x_seq: the input tensor with ``shape=[T, batch_size, ...]``
    :type x_seq: torch.Tensor
    :param single_step_module: one or many single-step modules; when several
        are given they are applied in order at every time-step
    :type single_step_module: torch.nn.Module or list[nn.Module] or tuple[nn.Module] or torch.nn.Sequential or Callable
    :return: the output tensor with ``shape=[T, batch_size, ...]``
    :rtype: torch.Tensor
    """
    if isinstance(single_step_module, (list, tuple, nn.Sequential)):
        def run_step(x):
            # Chain all given modules at one time-step.
            for m in single_step_module:
                x = m(x)
            return x
    else:
        run_step = single_step_module
    # Iterating a tensor walks dim 0, i.e. the time dimension.
    return torch.stack([run_step(x_t) for x_t in x_seq])
7,648 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `chunk_multi_step_forward` function. Write a Python function `def chunk_multi_step_forward(split_size: int, x_seq: Tensor, multi_step_module: nn.Module)` to solve the following problem:
* :ref:`API in English <chunk_multi_step_forward-en>` .. _chunk_multi_step_forward-cn: :param split_size: 分割的尺寸 :type split_size: int :param x_seq: 输入 :type x_seq: Tensor :param multi_step_module: 一个使用多步传播模式的网络 :type multi_step_module: nn.Module :return: 输出 :rtype: Tensor 将 ``shape = [T, *]`` 的输入 ``x_seq`` 拆分成多个 ``shape = [split_size, *]`` 的小tensor(若 ``T % split_size != 0``,最后\ 一个tensor的 ``shape[0]`` 会小于 ``split_size``),然后逐个输入到 ``multi_step_module`` 中,再将输出重新拼接为 ``shape = [split_size, *]``。\ ``chunk_multi_step_forward`` 可以在使用很大的 ``T`` 进行不带梯度的推理(例如ANN2SNN)时使用,能够减少内存消耗量。 示例代码: .. code-block:: python import torch import torch.nn as nn from spikingjelly.activation_based import neuron, layer, functional net = nn.Sequential( layer.Linear(8, 4), neuron.IFNode(step_mode='m'), layer.Linear(4, 2), neuron.IFNode(step_mode='m'), ) x_seq = torch.rand([1024, 8]) with torch.no_grad(): y_seq = functional.chunk_multi_step_forward(16, x_seq, net) print(y_seq.shape) # torch.Size([1024, 2]) * :ref:`中文 API <chunk_multi_step_forward-cn>` .. _chunk_multi_step_forward-en: :param split_size: the split size :type split_size: int :param x_seq: the input tensor :type x_seq: Tensor :param multi_step_module: :type multi_step_module: nn.Module :return: the output tensor :rtype: Tensor Splits the input ``x_seq`` with ``shape = [T, *]`` to many tensor chunks with ``shape = [split_size, *]`` (if ``T % split_size != 0``,\ ``shape[0]`` of the last tensor chunk will be smaller than ``split_size``), and sends chunks to ``multi_step_module``,\ then concatenates the outputs to ``shape = [split_size, *]``. ``chunk_multi_step_forward`` can be used for inference with a large ``T`` (e.g., ANN2SNN) to reduce the memory consumption. Codes example: .. 
code-block:: python import torch import torch.nn as nn from spikingjelly.activation_based import neuron, layer, functional net = nn.Sequential( layer.Linear(8, 4), neuron.IFNode(step_mode='m'), layer.Linear(4, 2), neuron.IFNode(step_mode='m'), ) x_seq = torch.rand([1024, 8]) with torch.no_grad(): y_seq = functional.chunk_multi_step_forward(16, x_seq, net) print(y_seq.shape) # torch.Size([1024, 2])
Here is the function:
def chunk_multi_step_forward(split_size: int, x_seq: Tensor, multi_step_module: nn.Module):
    """
    Run a multi-step module chunk-by-chunk over the time dimension.

    ``x_seq`` with ``shape = [T, *]`` is split into chunks of
    ``shape = [split_size, *]`` (when ``T % split_size != 0`` the last chunk
    is shorter); each chunk is fed through ``multi_step_module`` and the
    outputs are concatenated back along dim 0. This is useful for inference
    without gradients with a very large ``T`` (e.g., ANN2SNN) to reduce the
    memory consumption.

    :param split_size: the split size along the time dimension
    :type split_size: int
    :param x_seq: the input tensor with ``shape = [T, *]``
    :type x_seq: Tensor
    :param multi_step_module: a network using the multi-step propagation mode
    :type multi_step_module: nn.Module
    :return: the output tensor
    :rtype: Tensor

    Codes example:

    .. code-block:: python

        import torch
        import torch.nn as nn
        from spikingjelly.activation_based import neuron, layer, functional
        net = nn.Sequential(
            layer.Linear(8, 4),
            neuron.IFNode(step_mode='m'),
            layer.Linear(4, 2),
            neuron.IFNode(step_mode='m'),
        )
        x_seq = torch.rand([1024, 8])
        with torch.no_grad():
            y_seq = functional.chunk_multi_step_forward(16, x_seq, net)
            print(y_seq.shape)
            # torch.Size([1024, 2])
    """
    chunk_outputs = [multi_step_module(chunk) for chunk in torch.split(x_seq, split_size)]
    return torch.cat(chunk_outputs, 0)
code-block:: python import torch import torch.nn as nn from spikingjelly.activation_based import neuron, layer, functional net = nn.Sequential( layer.Linear(8, 4), neuron.IFNode(step_mode='m'), layer.Linear(4, 2), neuron.IFNode(step_mode='m'), ) x_seq = torch.rand([1024, 8]) with torch.no_grad(): y_seq = functional.chunk_multi_step_forward(16, x_seq, net) print(y_seq.shape) # torch.Size([1024, 2]) |
7,649 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `scale_fused_conv2d_weight_of_convbn2d` function. Write a Python function `def scale_fused_conv2d_weight_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d, k=None, b=None)` to solve the following problem:
* :ref:`API in English <scale_fused_conv2d_weight_of_convbn2d-en>` .. _scale_fused_conv2d_weight_of_convbn2d-cn: :param conv2d: 一个2D卷积层 :type conv2d: torch.nn.Conv2d :param bn2d: 一个2D的BN层 :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor ``{Conv2d-BatchNorm2d}`` 模块可以合并为一个单个的 ``{Conv2d}``,其中``BatchNorm2d`` 的参数会被吸收进 ``Conv2d``。 本函数对 ``{Conv2d-BatchNorm2d}`` 模块整体的等效权重进行 ``weight = k * weight + b`` 的线性变换。 .. note:: 这里按照 ``conv2d.bias`` 为 ``None`` 进行处理。原因参见 `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ 。 * :ref:`中文 API <scale_fused_conv2d_weight_of_convbn2d-cn>` .. _scale_fused_conv2d_weight_of_convbn2d-en: :param conv2d: a Conv2d layer :type conv2d: torch.nn.Conv2d :param bn2d: a BatchNorm2d layer :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor A ``{Conv2d-BatchNorm2d}`` can be fused to a ``{Conv2d}`` module with ``BatchNorm2d`` 's parameters being absorbed into ``Conv2d``. This function applies a linear transform ``weight = k * weight + b`` on the equivalent weight of the whole ``{Conv2d-BatchNorm2d}``. .. admonition:: Note :class: note We assert ``conv2d.bias`` is ``None``. See `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ for more details.
Here is the function:
def scale_fused_conv2d_weight_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d, k=None, b=None):
    """
    Apply the linear transform ``weight = k * weight + b`` in place on the
    equivalent weight of a fused ``{Conv2d-BatchNorm2d}`` module.

    A ``{Conv2d-BatchNorm2d}`` pair can be fused into a single ``Conv2d`` with
    the ``BatchNorm2d`` parameters absorbed into the convolution; the
    equivalent fused weight lives in ``conv2d.weight``, which this helper
    rescales.

    :param conv2d: a Conv2d layer; its ``bias`` must be ``None``. See
        `Disable bias for convolutions directly followed by a batch norm
        <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_
        for the rationale.
    :type conv2d: torch.nn.Conv2d
    :param bn2d: the BatchNorm2d layer following ``conv2d``. It is not read or
        modified by this transform; presumably kept for API symmetry with the
        other conv-bn fusion helpers — TODO confirm against callers.
    :type bn2d: torch.nn.BatchNorm2d
    :param k: multiplicative factor; skipped when ``None``
    :param b: additive offset; skipped when ``None``
    :return: the (modified) weight of this fused module, i.e. ``conv2d.weight``
    :rtype: Tensor
    """
    assert conv2d.bias is None
    if k is not None:
        conv2d.weight.data *= k
    if b is not None:
        conv2d.weight.data += b
    # Fix: the documented contract promises the fused weight, but the function
    # previously returned None. Returning it is backward-compatible.
    return conv2d.weight
7,650 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `scale_fused_conv2d_bias_of_convbn2d` function. Write a Python function `def scale_fused_conv2d_bias_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d, k=None, b=None)` to solve the following problem:
* :ref:`API in English <scale_fused_conv2d_bias_of_convbn2d-en>` .. _scale_fused_conv2d_bias_of_convbn2d-cn: :param conv2d: 一个2D卷积层 :type conv2d: torch.nn.Conv2d :param bn2d: 一个2D的BN层 :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor ``{Conv2d-BatchNorm2d}`` 模块可以合并为一个单个的 ``{Conv2d}``,其中``BatchNorm2d`` 的参数会被吸收进 ``Conv2d``。 本函数对 ``{Conv2d-BatchNorm2d}`` 模块整体的等效偏置项进行 ``bias = k * bias + b`` 的线性变换。 .. note:: 这里按照 ``conv2d.bias`` 为 ``None`` 进行处理。原因参见 `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ 。 * :ref:`中文 API <scale_fused_conv2d_bias_of_convbn2d-cn>` .. _scale_fused_conv2d_bias_of_convbn2d-en: :param conv2d: a Conv2d layer :type conv2d: torch.nn.Conv2d :param bn2d: a BatchNorm2d layer :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor A ``{Conv2d-BatchNorm2d}`` can be fused to a ``{Conv2d}`` module with ``BatchNorm2d`` 's parameters being absorbed into ``Conv2d``. This function applies a linear transform ``bias = k * bias + b`` on the equivalent bias of the whole ``{Conv2d-BatchNorm2d}``. .. admonition:: Note :class: note We assert ``conv2d.bias`` is ``None``. See `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ for more details.
Here is the function:
def scale_fused_conv2d_bias_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d, k=None, b=None):
    """
    Apply the linear transform ``bias = k * bias + b`` to the equivalent bias of a
    ``{Conv2d-BatchNorm2d}`` pair, in place.

    A ``{Conv2d-BatchNorm2d}`` pair can be fused into a single ``Conv2d`` whose
    equivalent bias is ``bn.bias - bn.running_mean * bn.weight / sqrt(bn.running_var + bn.eps)``.
    Multiplying both ``bn.bias`` and ``bn.running_mean`` by ``k`` scales that
    equivalent bias by ``k``; adding ``b`` to ``bn.bias`` shifts it by ``b``.

    :param conv2d: a Conv2d layer whose ``bias`` must be ``None`` (see `Disable bias
        for convolutions directly followed by a batch norm
        <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_)
    :type conv2d: torch.nn.Conv2d
    :param bn2d: the BatchNorm2d layer following ``conv2d``; modified in place
    :type bn2d: torch.nn.BatchNorm2d
    :param k: optional multiplicative factor
    :param b: optional additive term
    :return: None
    """
    assert conv2d.bias is None
    if k is not None:
        bn2d.bias.data.mul_(k)
        bn2d.running_mean.mul_(k)
    if b is not None:
        bn2d.bias.data.add_(b)
7,651 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
def fused_conv2d_weight_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d):
    """
    Return the weight of the single ``Conv2d`` equivalent to a
    ``{Conv2d-BatchNorm2d}`` pair.

    Each output channel ``c`` of the convolution weight is scaled by
    ``bn.weight[c] / sqrt(bn.running_var[c] + bn.eps)``.

    :param conv2d: a Conv2d layer whose ``bias`` must be ``None`` (see `Disable bias
        for convolutions directly followed by a batch norm
        <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_)
    :type conv2d: torch.nn.Conv2d
    :param bn2d: the BatchNorm2d layer following ``conv2d``
    :type bn2d: torch.nn.BatchNorm2d
    :return: the weight of the fused module
    :rtype: Tensor
    """
    assert conv2d.bias is None
    # Per-output-channel scale factor; reshape so it broadcasts over dim 0
    # (out_channels) of the [out, in, kH, kW] weight tensor.
    scale = bn2d.weight / torch.sqrt(bn2d.running_var + bn2d.eps)
    return conv2d.weight * scale.reshape(-1, 1, 1, 1)
def fused_conv2d_bias_of_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d):
    """
    Return the bias of the single ``Conv2d`` equivalent to a
    ``{Conv2d-BatchNorm2d}`` pair.

    The equivalent bias is
    ``bn.bias - bn.weight * bn.running_mean / sqrt(bn.running_var + bn.eps)``.

    :param conv2d: a Conv2d layer whose ``bias`` must be ``None`` (see `Disable bias
        for convolutions directly followed by a batch norm
        <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_)
    :type conv2d: torch.nn.Conv2d
    :param bn2d: the BatchNorm2d layer following ``conv2d``
    :type bn2d: torch.nn.BatchNorm2d
    :return: the bias of the fused module
    :rtype: Tensor
    """
    assert conv2d.bias is None
    std = torch.sqrt(bn2d.running_var + bn2d.eps)
    return bn2d.bias - bn2d.weight * bn2d.running_mean / std
The provided code snippet includes necessary dependencies for implementing the `fuse_convbn2d` function. Write a Python function `def fuse_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d)` to solve the following problem:
* :ref:`API in English <fuse_convbn2d-en>` .. _fuse_convbn2d-cn: :param conv2d: 一个2D卷积层 :type conv2d: torch.nn.Conv2d :param bn2d: 一个2D的BN层 :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor ``{Conv2d-BatchNorm2d}`` 模块可以合并为一个单个的 ``{Conv2d}``,其中``BatchNorm2d`` 的参数会被吸收进 ``Conv2d``。 本函数对返回这个等效的合并后的 ``{Conv2d}``。 .. note:: 这里按照 ``conv2d.bias`` 为 ``None`` 进行处理。原因参见 `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ 。 * :ref:`中文 API <fuse_convbn2d-cn>` .. _fuse_convbn2d-en: :param conv2d: a Conv2d layer :type conv2d: torch.nn.Conv2d :param bn2d: a BatchNorm2d layer :type bn2d: torch.nn.BatchNorm2d :return: the weight of this fused module :rtype: Tensor A ``{Conv2d-BatchNorm2d}`` can be fused to a ``{Conv2d}`` module with ``BatchNorm2d`` 's parameters being absorbed into ``Conv2d``. This function returns the fused ``{Conv2d}`` merged by ``{Conv2d-BatchNorm2d}``. .. admonition:: Note :class: note We assert ``conv2d.bias`` is ``None``. See `Disable bias for convolutions directly followed by a batch norm <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_ for more details.
Here is the function:
def fuse_convbn2d(conv2d: nn.Conv2d, bn2d: nn.BatchNorm2d):
    """
    Build the single ``Conv2d`` equivalent to a ``{Conv2d-BatchNorm2d}`` pair.

    The BatchNorm2d parameters are absorbed into the convolution's weight and
    bias via :func:`fused_conv2d_weight_of_convbn2d` and
    :func:`fused_conv2d_bias_of_convbn2d`.

    :param conv2d: a Conv2d layer whose ``bias`` must be ``None`` (see `Disable bias
        for convolutions directly followed by a batch norm
        <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm>`_)
    :type conv2d: torch.nn.Conv2d
    :param bn2d: the BatchNorm2d layer following ``conv2d``
    :type bn2d: torch.nn.BatchNorm2d
    :return: the fused Conv2d module (created with ``bias=True``)
    :rtype: torch.nn.Conv2d
    """
    # Mirror every geometric/config attribute of the original convolution,
    # but enable the bias so the absorbed BN shift has somewhere to live.
    fused = nn.Conv2d(
        in_channels=conv2d.in_channels,
        out_channels=conv2d.out_channels,
        kernel_size=conv2d.kernel_size,
        stride=conv2d.stride,
        padding=conv2d.padding,
        dilation=conv2d.dilation,
        groups=conv2d.groups,
        bias=True,
        padding_mode=conv2d.padding_mode,
    )
    fused.weight.data = fused_conv2d_weight_of_convbn2d(conv2d, bn2d)
    fused.bias.data = fused_conv2d_bias_of_convbn2d(conv2d, bn2d)
    return fused
7,652 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `temporal_efficient_training_cross_entropy` function. Write a Python function `def temporal_efficient_training_cross_entropy(x_seq: Tensor, target: torch.Tensor)` to solve the following problem:
* :ref:`API in English <temporal_efficient_training_cross_entropy-en>` .. _temporal_efficient_training_cross_entropy-cn: :param x_seq: ``shape=[T, N, C, *]`` 的预测值,其中 ``C`` 是类别总数 :type x_seq: torch.Tensor :param target: ``shape=[N]`` 的真实值,其中 ``target[i]`` 是真实类别 :type target: torch.Tensor :return: the temporal efficient training cross entropy :rtype: torch.Tensor Temporal efficient training (TET) 交叉熵损失, 是每个时间步的交叉熵损失的平均。 示例代码: .. code-block:: python def tet_ce_for_loop_version(x_seq: torch.Tensor, target: torch.LongTensor): loss = 0. for t in range(x_seq.shape[0]): loss += F.cross_entropy(x_seq[t], target) return loss / x_seq.shape[0] T = 8 N = 4 C = 10 x_seq = torch.rand([T, N, C]) target = torch.randint(low=0, high=C - 1, size=[N]) print(f'max error = {(tet_ce_for_loop_version(x_seq, target) - temporal_efficient_training_cross_entropy(x_seq, target)).abs().max()}') # max error < 1e-6 .. note:: TET交叉熵是 `Temporal Efficient Training of Spiking Neural Network via Gradient Re-weighting <https://openreview.net/forum?id=_XNtisL32jv>`_ 一文提出的。 * :ref:`中文 API <temporal_efficient_training_cross_entropy-cn>` .. _temporal_efficient_training_cross_entropy-en: :param x_seq: the predicted value with ``shape=[T, N, C, *]``, where ``C`` is the number of classes :type x_seq: torch.Tensor :param target: the ground truth tensor with ``shape=[N]``, where ``target[i]`` is the label :type target: torch.Tensor :return: the temporal efficient training cross entropy :rtype: torch.Tensor The temporal efficient training (TET) cross entropy, which is the mean of cross entropy of each time-step. Codes example: .. code-block:: python def tet_ce_for_loop_version(x_seq: torch.Tensor, target: torch.LongTensor): loss = 0. 
for t in range(x_seq.shape[0]): loss += F.cross_entropy(x_seq[t], target) return loss / x_seq.shape[0] T = 8 N = 4 C = 10 x_seq = torch.rand([T, N, C]) target = torch.randint(low=0, high=C - 1, size=[N]) print(f'max error = {(tet_ce_for_loop_version(x_seq, target) - temporal_efficient_training_cross_entropy(x_seq, target)).abs().max()}') # max error < 1e-6 .. admonition:: Note :class: note The TET cross entropy is proposed by `Temporal Efficient Training of Spiking Neural Network via Gradient Re-weighting <https://openreview.net/forum?id=_XNtisL32jv>`_.
Here is the function:
def temporal_efficient_training_cross_entropy(x_seq: Tensor, target: torch.Tensor):
    """
    Temporal efficient training (TET) cross entropy: the mean over all
    time-steps of the per-step cross entropy, computed with a single
    ``F.cross_entropy`` call.

    Equivalent to::

        loss = 0.
        for t in range(x_seq.shape[0]):
            loss += F.cross_entropy(x_seq[t], target)
        loss /= x_seq.shape[0]

    Proposed by `Temporal Efficient Training of Spiking Neural Network via
    Gradient Re-weighting <https://openreview.net/forum?id=_XNtisL32jv>`_.

    :param x_seq: the predicted value with ``shape=[T, N, C, *]``, where ``C``
        is the number of classes
    :type x_seq: torch.Tensor
    :param target: the ground truth with ``shape=[N, *]``, where ``target[i]``
        is the class index
    :type target: torch.Tensor
    :return: the TET cross entropy (scalar tensor)
    :rtype: torch.Tensor
    """
    # Rearrange [T, N, C, *] -> [N, C, T, *]: cross_entropy then treats T (and
    # any trailing dims) as extra "spatial" dimensions and averages over them.
    x_seq = x_seq.permute(1, 2, 0, *range(3, x_seq.dim()))
    T = x_seq.shape[2]
    # Replicate the labels along the inserted time dimension:
    # [N, *] -> [N, 1, *] -> [N, T, *].
    repeats = [1] * (target.dim() + 1)
    repeats[1] = T
    target = target.unsqueeze(1).repeat(repeats)
    return F.cross_entropy(x_seq, target)
for t in range(x_seq.shape[0]): loss += F.cross_entropy(x_seq[t], target) return loss / x_seq.shape[0] T = 8 N = 4 C = 10 x_seq = torch.rand([T, N, C]) target = torch.randint(low=0, high=C - 1, size=[N]) print(f'max error = {(tet_ce_for_loop_version(x_seq, target) - temporal_efficient_training_cross_entropy(x_seq, target)).abs().max()}') # max error < 1e-6 .. admonition:: Note :class: note The TET cross entropy is proposed by `Temporal Efficient Training of Spiking Neural Network via Gradient Re-weighting <https://openreview.net/forum?id=_XNtisL32jv>`_. |
7,653 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `kaiming_normal_conv_linear_weight` function. Write a Python function `def kaiming_normal_conv_linear_weight(net: nn.Module)` to solve the following problem:
* :ref:`API in English <kaiming_normal_conv_linear_weight-en>` .. _kaiming_normal_conv_linear_weight-cn: :param net: 任何属于 ``nn.Module`` 子类的网络 :return: None 使用kaiming normal初始化 ``net` `中的所有 :class:`torch.nn._ConvNd` 和 :class:`torch.nn.Linear` 的权重(不包括偏置项)。参见 :class:`torch.nn.init.kaiming_normal_`。 * :ref:`中文API <kaiming_normal_conv_linear_weight-cn>` .. _kaiming_normal_conv_linear_weight-en: :param net: Any network inherits from ``nn.Module`` :return: None initialize all weights (not including bias) of :class:`torch.nn._ConvNd` and :class:`torch.nn.Linear` in ``net`` by the kaiming normal. See :class:`torch.nn.init.kaiming_normal_` for more details.
Here is the function:
def kaiming_normal_conv_linear_weight(net: nn.Module):
    """
    Initialize the weights (not the biases) of every ``Conv1d``, ``Conv2d``,
    ``Conv3d`` and ``Linear`` module inside ``net`` with the Kaiming normal
    initializer. See :class:`torch.nn.init.kaiming_normal_` for details.

    :param net: any network inheriting from ``nn.Module``
    :return: None
    """
    target_types = (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear)
    for module in net.modules():
        if isinstance(module, target_types):
            # a=sqrt(5) matches PyTorch's own default Conv/Linear init slope.
            nn.init.kaiming_normal_(module.weight, a=math.sqrt(5))
7,654 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `delay` function. Write a Python function `def delay(x_seq: torch.Tensor, delay_steps: int)` to solve the following problem:
* :ref:`API in English <delay.__init__-en>` .. _delay.__init__-cn: :param x_seq: 输入的序列,``shape = [T, *]`` :type x_seq: torch.Tensor :param delay_steps: 延迟的时间步数 :type delay_steps: int :return: 延迟后的序列 :rtype: torch.Tensor 延迟函数,可以用来延迟输入,使得 ``y[t] = x[t - delay_steps]``。缺失的数据用0填充。 代码示例: .. code-block:: python x = torch.rand([5, 2]) x[3:].zero_() x.requires_grad = True y = delay(x, 1) print('x=') print(x) print('y=') print(y) y.sum().backward() print('x.grad=') print(x.grad) 输出为: .. code-block:: bash x= tensor([[0.1084, 0.5698], [0.4563, 0.3623], [0.0556, 0.4704], [0.0000, 0.0000], [0.0000, 0.0000]], requires_grad=True) y= tensor([[0.0000, 0.0000], [0.1084, 0.5698], [0.4563, 0.3623], [0.0556, 0.4704], [0.0000, 0.0000]], grad_fn=<CatBackward0>) x.grad= tensor([[1., 1.], [1., 1.], [1., 1.], [1., 1.], [0., 0.]]) * :ref:`中文API <delay.__init__-cn>` .. _delay.__init__-en: :param x_seq: the input sequence with ``shape = [T, *]`` :type x_seq: torch.Tensor :param delay_steps: the number of delayed time-steps :type delay_steps: int :return: the delayed sequence :rtype: torch.Tensor A delay function that can delay inputs and makes ``y[t] = x[t - delay_steps]``. The nonexistent data will be regarded as 0. Codes example: .. code-block:: python x = torch.rand([5, 2]) x[3:].zero_() x.requires_grad = True y = delay(x, 1) print('x=') print(x) print('y=') print(y) y.sum().backward() print('x.grad=') print(x.grad) The outputs are: .. code-block:: bash x= tensor([[0.1084, 0.5698], [0.4563, 0.3623], [0.0556, 0.4704], [0.0000, 0.0000], [0.0000, 0.0000]], requires_grad=True) y= tensor([[0.0000, 0.0000], [0.1084, 0.5698], [0.4563, 0.3623], [0.0556, 0.4704], [0.0000, 0.0000]], grad_fn=<CatBackward0>) x.grad= tensor([[1., 1.], [1., 1.], [1., 1.], [1., 1.], [0., 0.]])
Here is the function:
def delay(x_seq: torch.Tensor, delay_steps: int):
    """
    Delay a sequence by ``delay_steps`` time-steps, so that
    ``y[t] = x[t - delay_steps]``. The first ``delay_steps`` outputs, which have
    no corresponding input, are filled with zeros; no gradient flows through
    that zero padding (nor into the last ``delay_steps`` inputs, which are
    dropped).

    Example::

        x = torch.rand([5, 2])
        y = delay(x, 1)
        # y[0] == 0, y[t] == x[t - 1] for t >= 1

    :param x_seq: the input sequence with ``shape = [T, *]``
    :type x_seq: torch.Tensor
    :param delay_steps: the number of delayed time-steps
    :type delay_steps: int
    :return: the delayed sequence, same shape as ``x_seq``
    :rtype: torch.Tensor
    """
    # Zero padding for the leading steps; built from .data so the padding is
    # detached from the autograd graph.
    padding = torch.zeros_like(x_seq[:delay_steps].data)
    # Drop the trailing steps that are shifted past the end of the sequence.
    kept = x_seq[: x_seq.shape[0] - delay_steps]
    return torch.cat((padding, kept), dim=0)
7,655 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
def fptt_online_training_init_w_ra(optimizer: torch.optim.Optimizer) -> list:
    """
    Collect the ``.data`` tensor of every parameter managed by ``optimizer``,
    to serve as the initial running average of weights for FPTT online
    training.

    :param optimizer: the optimizer whose parameters are gathered
    :return: a flat list of parameter data tensors, in param-group order
    """
    # NOTE(review): the tensors are not cloned, so they share storage with the
    # parameters themselves — confirm this aliasing is intended by the caller.
    return [w.data for group in optimizer.param_groups for w in group['params']]
7,656 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
def detach_net(net: nn.Module):
    """
    Detach the whole network from the computation graph of previous
    time-steps.

    Walks every sub-module ``m`` of ``net`` and calls ``m.detach()`` whenever
    the method exists. A warning is logged for modules that expose
    ``detach()`` without inheriting from
    ``spikingjelly.activation_based.base.MemoryModule``, since ``detach()``
    is only expected on memory modules.

    :param net: any network inheriting from ``nn.Module``
    :return: None
    """
    for module in net.modules():
        detach_fn = getattr(module, 'detach', None)
        if detach_fn is None:
            continue
        if not isinstance(module, base.MemoryModule):
            logging.warning(f'Trying to call `detach()` of {module}, which is not spikingjelly.activation_based.base'
                            f'.MemoryModule')
        detach_fn()
The provided code snippet includes necessary dependencies for implementing the `fptt_online_training` function. Write a Python function `def fptt_online_training(model: nn.Module, optimizer: torch.optim.Optimizer, x_seq: torch.Tensor, target_seq: torch.Tensor, f_loss_t: Callable, alpha: float, w_ra: list) -> None` to solve the following problem:
:param model: the neural network :type model: nn.Module :param optimizer: the optimizer for the network :type optimizer: torch.optim.Optimizer :param x_seq: the input sequence :type x_seq: torch.Tensor :param target_seq: the output sequence :type target_seq: torch.Tensor :param f_loss_t: the loss function, which should has the formulation of ``def f_loss_t(x_t, y_t) -> torch.Tensor`` :type f_loss_t: Callable :param alpha: the hyper-parameter :type alpha: float :param w_ra: the running average of params, which can be initialized by :class:`spikingjelly.activation_based.functional.fptt_online_training_init_w_ra` :type w_ra: list The FPTT online learning method proposed by `Training Recurrent Neural Networks via Forward Propagation Through Time <https://proceedings.mlr.press/v139/kag21a.html>`_ and used for SNN in `Accurate online training of dynamical spiking neural networks through Forward Propagation Through Time <https://arxiv.org/abs/2112.11231>`_ . Example: .. code-block:: python from spikingjelly.activation_based import neuron net = nn.Sequential( nn.Linear(8, 4), neuron.IFNode(), nn.Linear(4, 2), neuron.IFNode() ) optimizer = torch.optim.SGD(net.parameters(), lr=0.1) T = 4 N = 2 w_ra = fptt_online_training_init_w_ra(optimizer) for epoch in range(2): x_seq = torch.rand([T, N, 8]) target_seq = torch.rand([T, N, 2]) fptt_online_training(model=net, optimizer=optimizer, x_seq=x_seq, target_seq=target_seq, f_loss_t=F.mse_loss, alpha=0.1, w_ra=w_ra) functional.reset_net(net)
Here is the function:
def fptt_online_training(model: nn.Module, optimizer: torch.optim.Optimizer, x_seq: torch.Tensor, target_seq: torch.Tensor, f_loss_t: Callable, alpha: float, w_ra: list) -> None:
    """
    Run one FPTT (Forward Propagation Through Time) online-training pass over a sequence.

    :param model: the neural network
    :type model: nn.Module
    :param optimizer: the optimizer for the network
    :type optimizer: torch.optim.Optimizer
    :param x_seq: the input sequence (time-major: ``x_seq[t]`` is the input at step ``t``)
    :type x_seq: torch.Tensor
    :param target_seq: the target sequence (time-major)
    :type target_seq: torch.Tensor
    :param f_loss_t: the per-step loss function, which should have the formulation
        ``def f_loss_t(y_t, target_t) -> torch.Tensor``
    :type f_loss_t: Callable
    :param alpha: the FPTT regularization hyper-parameter
    :type alpha: float
    :param w_ra: the running average of the parameters, e.g. initialized by
        ``fptt_online_training_init_w_ra``; updated in place
    :type w_ra: list

    The FPTT online learning method proposed by `Training Recurrent Neural Networks via
    Forward Propagation Through Time <https://proceedings.mlr.press/v139/kag21a.html>`_
    and used for SNNs in `Accurate online training of dynamical spiking neural networks
    through Forward Propagation Through Time <https://arxiv.org/abs/2112.11231>`_ .
    """
    T = x_seq.shape[0]
    # Gradient of the previous step's task loss w.r.t. each parameter
    # (0. placeholder before the first step), indexed in param_groups order.
    grad__l_t_last__to__w_t = []
    for item in optimizer.param_groups:
        for w in item['params']:
            grad__l_t_last__to__w_t.append(0.)
    for t in range(T):
        optimizer.zero_grad()
        y_t = model(x_seq[t])
        loss_t = f_loss_t(y_t, target_seq[t])
        # Dynamic regularizer: pull each param towards w_ra[i] + grad/(2*alpha).
        loss_reg = 0.
        i = 0
        for item in optimizer.param_groups:
            for w in item['params']:
                loss_reg = loss_reg + F.mse_loss(w, w_ra[i] + grad__l_t_last__to__w_t[i] / (2. * alpha))
                i += 1
        loss_reg = loss_reg * (alpha / 2.)
        loss = loss_t + loss_reg
        loss.backward()
        # update params
        optimizer.step()
        # Cut the autograd graph of the neurons' hidden states so each step is online.
        detach_net(model)
        # store hidden states
        states = []
        i = 0
        for m in model.modules():
            if isinstance(m, base.MemoryModule):
                states.append(copy.deepcopy(m._memories))
                i += 1
        # update w_ra
        optimizer.zero_grad()
        if t < T - 1:
            # Extra forward/backward at the same step t to obtain grad(l_t) w.r.t.
            # the *updated* parameters, consumed by the next step's regularizer.
            y_t = model(x_seq[t])
            loss_t = f_loss_t(y_t, target_seq[t])
            loss_t.backward()
            with torch.no_grad():
                i = 0
                for item in optimizer.param_groups:
                    for w in item['params']:
                        # NOTE(review): stores a *reference* to w.grad; this assumes the
                        # subsequent zero_grad() sets grads to None (set_to_none) instead
                        # of zeroing the same tensors in place -- confirm for the torch
                        # version in use.
                        grad__l_t_last__to__w_t[i] = w.grad
                        w_ra[i] = (w_ra[i] + w) / 2. - w.grad / (2. * alpha)
                        i += 1
            optimizer.zero_grad()
            # Recover hidden states mutated by the extra forward pass above.
            i = 0
            for m in model.modules():
                if isinstance(m, base.MemoryModule):
                    m._memories = states[i]
                    i += 1
7,657 | import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Callable
from . import neuron, base
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `ottt_online_training` function. Write a Python function `def ottt_online_training(model: nn.Module, optimizer: torch.optim.Optimizer, x_seq: torch.Tensor, target_seq: torch.Tensor, f_loss_t: Callable, online: bool) -> None` to solve the following problem:
:param model: the neural network :type model: nn.Module :param optimizer: the optimizer for the network :type optimizer: torch.optim.Optimizer :param x_seq: the input sequence :type x_seq: torch.Tensor :param target_seq: the output sequence :type target_seq: torch.Tensor :param f_loss_t: the loss function, which should has the formulation of ``def f_loss_t(x_t, y_t) -> torch.Tensor`` :type f_loss_t: Callable :param online: whether online update parameters or accumulate gradients through time steps :type online: bool The OTTT online training method is proposed by `Online Training Through Time for Spiking Neural Networks <https://openreview.net/forum?id=Siv3nHYHheI>`_. This function can also be used for SLTT training method proposed by `Towards Memory- and Time-Efficient Backpropagation for Training Spiking Neural Networks <https://openaccess.thecvf.com/content/ICCV2023/html/Meng_Towards_Memory-_and_Time-Efficient_Backpropagation_for_Training_Spiking_Neural_Networks_ICCV_2023_paper.html>`_ . Example: .. code-block:: python from spikingjelly.activation_based import neuron, layer, functional net = layer.OTTTSequential( nn.Linear(8, 4), neuron.OTTTLIFNode(), nn.Linear(4, 2), neuron.LIFNode() ) optimizer = torch.optim.SGD(net.parameters(), lr=0.1) T = 4 N = 2 online = True for epoch in range(2): x_seq = torch.rand([N, T, 8]) target_seq = torch.rand([N, T, 2]) functional.ottt_online_training(model=net, optimizer=optimizer, x_seq=x_seq, target_seq=target_seq, f_loss_t=F.mse_loss, online=online) functional.reset_net(net)
Here is the function:
def ottt_online_training(model: nn.Module, optimizer: torch.optim.Optimizer, x_seq: torch.Tensor, target_seq: torch.Tensor, f_loss_t: Callable, online: bool) -> None:
    """
    Run one OTTT (Online Training Through Time) pass over a batch of sequences.

    :param model: the spiking network to train
    :type model: nn.Module
    :param optimizer: the optimizer holding ``model``'s parameters
    :type optimizer: torch.optim.Optimizer
    :param x_seq: inputs with shape ``[B, T, ...]``
    :type x_seq: torch.Tensor
    :param target_seq: targets with shape ``[B, T, ...]``
    :type target_seq: torch.Tensor
    :param f_loss_t: the per-step loss, called as ``f_loss_t(y_t, target_t) -> torch.Tensor``
    :type f_loss_t: Callable
    :param online: if ``True``, step the optimizer after every time step;
        otherwise accumulate gradients over all steps and step once at the end
    :type online: bool
    :return: ``(batch_loss, y_all)`` -- the summed detached per-step loss and the
        stacked detached outputs with shape ``[B, T, ...]`` (note: the annotation
        says ``None``, but the implementation returns this tuple)

    The OTTT method is proposed by `Online Training Through Time for Spiking Neural
    Networks <https://openreview.net/forum?id=Siv3nHYHheI>`_ and this function can
    also be used for the SLTT training method.
    """
    # Work in time-major order: [B, T, ...] -> [T, B, ...]
    x_seq = x_seq.transpose(0, 1)
    target_seq = target_seq.transpose(0, 1)

    batch_loss = 0.
    outputs = []

    if not online:
        # Accumulate gradients across the whole sequence; update once afterwards.
        optimizer.zero_grad()

    for x_t, tgt_t in zip(x_seq, target_seq):
        if online:
            optimizer.zero_grad()
        y_t = model(x_t)
        step_loss = f_loss_t(y_t, tgt_t.contiguous())
        step_loss.backward()
        if online:
            # Online mode: apply the parameter update immediately after each step.
            optimizer.step()
        batch_loss += step_loss.data
        outputs.append(y_t.detach())

    if not online:
        optimizer.step()

    # Stack back to batch-major layout: [B, T, ...]
    return batch_loss, torch.stack(outputs, dim=1)
7,658 | import torch
class round_atgf(torch.autograd.Function):
    """``torch.round`` with a straight-through surrogate gradient (dy/dx = 1)."""

    # Fix: torch.autograd.Function requires forward/backward to be static methods.
    @staticmethod
    def forward(ctx, x: torch.Tensor):
        # Nothing is saved for backward: the surrogate gradient is a constant 1.
        return torch.round(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Straight-through estimator: pass the upstream gradient unchanged.
        return grad_output
The provided code snippet includes necessary dependencies for implementing the `round` function. Write a Python function `def round(x: torch.Tensor)` to solve the following problem:
:param x: the input tensor :type x: torch.Tensor :return: the output tensor :rtype: torch.Tensor Apply ``y = torch.round(x)`` with re-defining gradient as :math:`\\frac{\\partial y}{\\partial x} = 1`.
Here is the function:
def round(x: torch.Tensor):
    """
    :param x: the input tensor
    :type x: torch.Tensor
    :return: ``torch.round(x)``
    :rtype: torch.Tensor

    Element-wise rounding whose gradient is re-defined as the straight-through
    estimator :math:`\\frac{\\partial y}{\\partial x} = 1`.
    """
    return round_atgf.apply(x)
7,659 | import torch
class ceil_atgf(torch.autograd.Function):
    """``torch.ceil`` with a straight-through surrogate gradient (dy/dx = 1)."""

    # Fix: torch.autograd.Function requires forward/backward to be static methods.
    @staticmethod
    def forward(ctx, x: torch.Tensor):
        # Nothing is saved for backward: the surrogate gradient is a constant 1.
        return torch.ceil(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Straight-through estimator: pass the upstream gradient unchanged.
        return grad_output
The provided code snippet includes necessary dependencies for implementing the `ceil` function. Write a Python function `def ceil(x: torch.Tensor)` to solve the following problem:
:param x: the input tensor :type x: torch.Tensor :return: the output tensor :rtype: torch.Tensor Apply ``y = torch.ceil(x)`` with re-defining gradient as :math:`\\frac{\\partial y}{\\partial x} = 1`.
Here is the function:
def ceil(x: torch.Tensor):
    """
    :param x: the input tensor
    :type x: torch.Tensor
    :return: ``torch.ceil(x)``
    :rtype: torch.Tensor

    Element-wise ceiling whose gradient is re-defined as the straight-through
    estimator :math:`\\frac{\\partial y}{\\partial x} = 1`.
    """
    return ceil_atgf.apply(x)
7,660 | import torch
class floor_atgf(torch.autograd.Function):
    """``torch.floor`` with a straight-through surrogate gradient (dy/dx = 1)."""

    # Fix: torch.autograd.Function requires forward/backward to be static methods.
    @staticmethod
    def forward(ctx, x: torch.Tensor):
        # Nothing is saved for backward: the surrogate gradient is a constant 1.
        return torch.floor(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Straight-through estimator: pass the upstream gradient unchanged.
        return grad_output
The provided code snippet includes necessary dependencies for implementing the `floor` function. Write a Python function `def floor(x: torch.Tensor)` to solve the following problem:
:param x: the input tensor :type x: torch.Tensor :return: the output tensor :rtype: torch.Tensor Apply ``y = torch.floor(x)`` with re-defining gradient as :math:`\\frac{\\partial y}{\\partial x} = 1`.
Here is the function:
def floor(x: torch.Tensor):
    """
    :param x: the input tensor
    :type x: torch.Tensor
    :return: ``torch.floor(x)``
    :rtype: torch.Tensor

    Element-wise floor whose gradient is re-defined as the straight-through
    estimator :math:`\\frac{\\partial y}{\\partial x} = 1`.
    """
    return floor_atgf.apply(x)
7,661 | import torch
def clamp_backward(grad_output: torch.Tensor, x: torch.Tensor, min_value: float, max_value: float):
    # Surrogate gradient of clamp: pass-through where x lies inside
    # [min_value, max_value], zero everywhere else.
    inside = ((x >= min_value) & (x <= max_value)).to(x)
    return grad_output * inside
7,662 | import torch
class clamp_atgf(torch.autograd.Function):
    """``torch.clamp`` whose gradient is 1 inside ``[min_value, max_value]`` and 0 outside."""

    # Fix: torch.autograd.Function requires forward/backward to be static methods.
    @staticmethod
    def forward(ctx, x: torch.Tensor, min_value: float, max_value: float):
        if x.requires_grad:
            # x is needed in backward to mask out positions outside the clamp range.
            ctx.save_for_backward(x)
            ctx.min_value = min_value
            ctx.max_value = max_value
        return torch.clamp(x, min_value, max_value)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # None gradients for the two non-tensor inputs (min_value, max_value).
        return clamp_backward(grad_output, ctx.saved_tensors[0], ctx.min_value, ctx.max_value), None, None
The provided code snippet includes necessary dependencies for implementing the `clamp` function. Write a Python function `def clamp(x: torch.Tensor, min_value: float, max_value: float)` to solve the following problem:
:param x: the input tensor :type x: torch.Tensor :param min_value: lower-bound of the range to be clamped to :type min_value: float :param max_value: upper-bound of the range to be clamped to :type max_value: torch.Tensor :return: the output tensor :rtype: torch.Tensor Apply ``y = torch.clamp(x, min_value, max_value)`` with re-defining gradient as: .. math:: \\frac{\\partial y}{\\partial x} = \\begin{cases} 1, \\rm{min\\_value} \\leq x \\leq \\rm{max\\_value} \\\\ 0, \\rm{otherwise} \\end{cases}
Here is the function:
def clamp(x: torch.Tensor, min_value: float, max_value: float):
    """
    :param x: the input tensor
    :type x: torch.Tensor
    :param min_value: lower bound of the clamp range
    :type min_value: float
    :param max_value: upper bound of the clamp range
    :type max_value: float
    :return: ``torch.clamp(x, min_value, max_value)``
    :rtype: torch.Tensor

    Element-wise clamp whose gradient is re-defined as:

    .. math::
        \\frac{\\partial y}{\\partial x} = \\begin{cases}
            1, \\rm{min\\_value} \\leq x \\leq \\rm{max\\_value} \\\\
            0, \\rm{otherwise}
        \\end{cases}
    """
    return clamp_atgf.apply(x, min_value, max_value)
7,663 | import torch
def step_quantize_forward(x: torch.Tensor, step: float):
    # Snap every element to the nearest integer multiple of ``step``.
    quotient = torch.round(x / step)
    return quotient * step
7,664 | import torch
class step_quantize_atgf(torch.autograd.Function):
    """Quantize to integer multiples of ``step`` with a straight-through gradient."""

    # Fix: torch.autograd.Function requires forward/backward to be static methods.
    @staticmethod
    def forward(ctx, x: torch.Tensor, step: float):
        return step_quantize_forward(x, step)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Straight-through gradient of 1 w.r.t. x; None for the float step.
        return grad_output, None
The provided code snippet includes necessary dependencies for implementing the `step_quantize` function. Write a Python function `def step_quantize(x: torch.Tensor, step: float)` to solve the following problem:
:param x: the input tensor :type x: torch.Tensor :param step: the quantize step :type step: float :return: the quantized tensor :rtype: torch.Tensor Quantize ``x`` to the nearest ``i * step``, where ``i`` is an integer. Note that the gradient is defined by :math:`\\frac{\\partial y}{\\partial x} = 1`. .. image:: ../_static/API/activation_based//quantize/step_quantize.* :width: 100%
Here is the function:
def step_quantize(x: torch.Tensor, step: float):
    """
    :param x: the input tensor
    :type x: torch.Tensor
    :param step: the quantization step
    :type step: float
    :return: the quantized tensor
    :rtype: torch.Tensor

    Quantize ``x`` to the nearest ``i * step`` (``i`` an integer), with the
    gradient re-defined as :math:`\\frac{\\partial y}{\\partial x} = 1`.

    .. image:: ../_static/API/activation_based//quantize/step_quantize.*
        :width: 100%
    """
    return step_quantize_atgf.apply(x, step)
7,665 | import torch
def k_bit_quantize_forward(x: torch.Tensor, k: int):
    # Number of quantization intervals: 2**k - 1.
    levels = float(1 << k) - 1.
    # Round onto the grid {0, 1/levels, 2/levels, ..., 1}.
    return torch.round(x * levels) / levels
7,666 | import torch
def k_bit_quantize(x: torch.Tensor, k: int):
    """
    :param x: a float tensor whose range is ``[0, 1]``
    :type x: torch.Tensor
    :param k: the bit width of the output
    :type k: int
    :return: ``round((2 ** k - 1) * x) / (2 ** k - 1)``
    :rtype: torch.Tensor

    The k-bit quantizer defined in `DoReFa-Net: Training Low Bitwidth Convolutional
    Neural Networks with Low Bitwidth Gradients <https://arxiv.org/abs/1606.06160>`_:
    inputs in ``[0, 1]`` are snapped to the nearest ``i / (2 ** k - 1)`` for
    ``i = 0, 1, ..., 2 ** k - 1``.  The gradient is re-defined as
    :math:`\\frac{\\partial y}{\\partial x} = 1` (straight-through).

    To map an unbounded input into ``[0, 1]`` first, use :class:`torch.sigmoid`,
    :class:`torch.nn.Hardtanh`, or one of the ``clamp_*`` helpers in
    ``spikingjelly.activation_based.quantize``.

    .. image:: ../_static/API/activation_based//quantize/k_bit_quantize.*
        :width: 100%

    Example:

    .. code-block:: python

        x = torch.rand(8)
        y = k_bit_quantize(x, 2)
        # x=tensor([0.6965, 0.5697, 0.9883, 0.0438, 0.1332, 0.7613, 0.9704, 0.2384])
        # y=tensor([0.6667, 0.6667, 1.0000, 0.0000, 0.0000, 0.6667, 1.0000, 0.3333])
    """
    return k_bit_quantize_atgf.apply(x, k)
The provided code snippet includes necessary dependencies for implementing the `affine_k_bit_quantize` function. Write a Python function `def affine_k_bit_quantize(x: torch.Tensor, k: int, w: torch.Tensor, b: torch.Tensor)` to solve the following problem:
:param x: a float tensor whose range is ``[0, 1]``. :type x: torch.Tensor :param k: the bit number of output :type k: int :param w: the weight of the affine transform :type w: torch.Tensor :param b: the bias of the affine transform :type b: torch.Tensor :return: ``y = w * round((2 ** k - 1) * x) / (2 ** k - 1) + b`` :rtype: torch.Tensor Apply an affine quantization with ``y = w * round((2 ** k - 1) * x) / (2 ** k - 1) + b``.
Here is the function:
def affine_k_bit_quantize(x: torch.Tensor, k: int, w: torch.Tensor, b: torch.Tensor):
    """
    :param x: a float tensor whose range is ``[0, 1]``
    :type x: torch.Tensor
    :param k: the bit width of the quantizer
    :type k: int
    :param w: the weight of the affine transform
    :type w: torch.Tensor
    :param b: the bias of the affine transform
    :type b: torch.Tensor
    :return: ``w * round((2 ** k - 1) * x) / (2 ** k - 1) + b``
    :rtype: torch.Tensor

    Affine k-bit quantization: the quantized value is scaled by ``w`` and shifted by ``b``.
    """
    return w * k_bit_quantize(x, k) + b
7,667 | import torch
The provided code snippet includes necessary dependencies for implementing the `clamp_by_linear` function. Write a Python function `def clamp_by_linear(x: torch.Tensor, eps: float = 1e-5)` to solve the following problem:
:param x: the input tensor to be normed, whose range is ``(-inf, inf)`` :type x: torch.Tensor :param eps: a value added to the denominator for numerical stability. The default value is ``1e-5`` :type eps: float :type max_value: float :return: the normed tensor, whose range is ``[min_value, max_value]`` :rtype: torch.Tensor Using the linear transform to clamp the input range from ``(-inf, inf)`` to ``[0., 1.]``: .. math:: y = \\frac{x - \\rm{min}(x)}{\\rm{max}(x) - \\rm{min}(x) + eps}
Here is the function:
def clamp_by_linear(x: torch.Tensor, eps: float = 1e-5):
    """
    Linearly rescale ``x`` from ``(-inf, inf)`` into ``[0, 1]``.

    :param x: the tensor to be normalized
    :type x: torch.Tensor
    :param eps: a small value added to the denominator for numerical stability
    :type eps: float
    :return: ``(x - min(x)) / (max(x) - min(x) + eps)``
    :rtype: torch.Tensor

    .. math::
        y = \\frac{x - \\rm{min}(x)}{\\rm{max}(x) - \\rm{min}(x) + eps}
    """
    lower = torch.min(x)
    span = torch.max(x) + eps - lower
    return (x - lower) / span
7,668 | import logging
import torch
import torch.nn.functional as F
from . import cuda_utils, surrogate, tensor_cache
from .. import configure
import numpy as np
def check_multi_step_neuron_output_and_grad(device, multi_step_neuron, shape = [65, 15, 511], *neu_args, **neu_kwargs):
    """
    Debug harness: cross-check a multi-step neuron's outputs and gradients between
    the ``'torch'`` and ``'cupy'`` backends, over all combinations of hard/soft
    reset, detach_reset on/off, and fp32/fp16, printing the max absolute errors.

    :param device: device the checks run on (the cupy backend requires CUDA)
    :param multi_step_neuron: neuron *class* to instantiate; assumes instances
        expose ``v_seq``, ``backend``, and ``reset()`` -- confirm for custom neurons
    :param shape: input shape ``[T, N, ...]``; NOTE(review): mutable default
        argument -- harmless here since it is only read, never mutated
    :param neu_args: extra positional args forwarded to the neuron constructor
    :param neu_kwargs: extra keyword args forwarded to the neuron constructor
    """
    @torch.no_grad()
    def max_error(x, y):
        # Largest element-wise absolute difference, as a Python float.
        return (x - y).abs().max().item()

    def fbptt(m, x: torch.Tensor):
        # One forward + BPTT pass; returns outputs, membrane trace and all grads,
        # then zeroes the grads and resets the neuron so the next run is clean.
        x = x.detach()
        x.requires_grad_(True)
        spike_seq = m(x)
        # Loss couples spikes and membrane potential so both paths get gradients.
        (spike_seq * m.v_seq ** 2).sum().backward()
        ret = {
            'spike_seq': spike_seq.detach().clone(),
            'v_seq': m.v_seq.detach().clone(),
            'x.grad': x.grad.clone()
        }
        for i, param in enumerate(m.parameters()):
            ret[f'param_{i}.grad'] = param.grad.detach().clone()
            param.grad.zero_()
        x.grad.zero_()
        m.reset()
        return ret

    for hard_reset in [True, False]:
        for detach_reset in [False, True]:
            for dtype in ['fp32', 'fp16']:
                # Inputs roughly in [-1.5, 1.5) so thresholds are actually crossed.
                x = (torch.rand(shape, device=device) - 0.5) * 3.
                if dtype == 'fp16':
                    x = x.half()
                print(f'hard_reset={hard_reset}, detach_reset={detach_reset}, dtype={dtype}')
                # v_reset=None selects soft reset; 0. selects hard reset.
                model = multi_step_neuron(v_reset=0. if hard_reset else None, detach_reset=detach_reset, *neu_args,
                                          **neu_kwargs)
                # print(model)
                model.to(device)
                if dtype == 'fp16':
                    model = model.half()
                # Run the identical input through both backends and compare.
                model.backend = 'torch'
                y_torch = fbptt(model, x)
                model.backend = 'cupy'
                y_cupy = fbptt(model, x)
                for key in y_torch.keys():
                    me = max_error(y_torch[key], y_cupy[key])
                    print(key, 'max error', me)
                    if me > 0.5:
                        # Dump the full tensors when the discrepancy is suspiciously large.
                        print(f'y_torch[{key}]={y_torch[key]}, y_cupy[{key}]={y_cupy[key]}')
                print('\n')
7,669 | import logging
import torch
import torch.nn.functional as F
from . import cuda_utils, surrogate, tensor_cache
from .. import configure
import numpy as np
def check_single_step_neuron_output_and_grad(device, single_step_neuron, shape = [65, 15, 511], *neu_args, **neu_kwargs):
    """
    Debug harness: cross-check a single-step neuron's outputs and gradients between
    the ``'torch'`` and ``'cupy'`` backends, over all combinations of hard/soft
    reset, detach_reset on/off, and fp32/fp16, printing the max absolute errors.

    :param device: device the checks run on (the cupy backend requires CUDA)
    :param single_step_neuron: neuron *class* to instantiate with ``step_mode='s'``;
        assumes instances expose ``v``, ``backend``, and ``reset()`` -- confirm for
        custom neurons
    :param shape: input shape ``[T, N, ...]``; the neuron is stepped T times;
        NOTE(review): mutable default argument -- harmless here since it is only read
    :param neu_args: extra positional args forwarded to the neuron constructor
    :param neu_kwargs: extra keyword args forwarded to the neuron constructor
    """
    @torch.no_grad()
    def max_error(x, y):
        # Largest element-wise absolute difference, as a Python float.
        return (x - y).abs().max().item()

    def fbp(m, x: torch.Tensor):
        # Step the neuron through the sequence, backprop from the final step,
        # collect outputs and grads, then zero grads and reset the neuron.
        x = x.detach()
        x.requires_grad_(True)
        T = x.size(0)
        for i in range(T):
            spike = m(x[i])
        # Backward once, after the last step (membrane state links all steps).
        (spike * m.v ** 2).sum().backward()
        ret = {
            'spike': spike.detach().clone(),
            'v': m.v.detach().clone(),
            'x.grad': x.grad.clone()
        }
        for i, param in enumerate(m.parameters()):
            ret[f'param_{i}.grad'] = param.grad.detach().clone()
            param.grad.zero_()
        x.grad.zero_()
        m.reset()
        return ret

    for hard_reset in [True, False]:
        for detach_reset in [False, True]:
            for dtype in ['fp32', 'fp16']:
                # Inputs roughly in [-1.5, 1.5) so thresholds are actually crossed.
                x = (torch.rand(shape, device=device) - 0.5) * 3.
                if dtype == 'fp16':
                    x = x.half()
                print(f'hard_reset={hard_reset}, detach_reset={detach_reset}, dtype={dtype}')
                # v_reset=None selects soft reset; 0. selects hard reset.
                model = single_step_neuron(v_reset=0. if hard_reset else None, detach_reset=detach_reset, step_mode='s', *neu_args,
                                           **neu_kwargs)
                # print(model)
                model.to(device)
                if dtype == 'fp16':
                    model = model.half()
                # Run the identical input through both backends and compare.
                model.backend = 'torch'
                y_torch = fbp(model, x)
                model.backend = 'cupy'
                y_cupy = fbp(model, x)
                for key in y_torch.keys():
                    me = max_error(y_torch[key], y_cupy[key])
                    print(key, 'max error', me)
                    if me > 0.5:
                        # Dump the full tensors when the discrepancy is suspiciously large.
                        print(f'y_torch[{key}]={y_torch[key]}, y_cupy[{key}]={y_cupy[key]}')
                print('\n')
7,670 | import logging
import torch
import torch.nn.functional as F
from . import cuda_utils, surrogate, tensor_cache
from .. import configure
import numpy as np
class MultiStepIFNodePTT(torch.autograd.Function):
def create_fptt_kernel(hard_reset: bool, dtype: str):
kernel_name = f'IFNode_fptt_{"hard" if hard_reset else "soft"}Reset_{dtype}'
if dtype == 'fp32':
code = rf'''
extern "C" __global__
void {kernel_name}(const float* x_seq, float* v_v_seq, float* h_seq, float* spike_seq,
const float & v_threshold, {'const float & v_reset,' if hard_reset else ''}
const int & neuron_num, const int & numel)
'''
code += r'''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < neuron_num)
{
const int dt = neuron_num;
for(int mem_offset = 0; mem_offset < numel; mem_offset += neuron_num)
{
const int t = index + mem_offset;
h_seq[t] = v_v_seq[t] + x_seq[t];
if (h_seq[t] >= v_threshold)
'''
if hard_reset:
code += r'''
{
spike_seq[t] = 1.0f;
v_v_seq[t + dt] = v_reset;
}
'''
else:
code += r'''
{
spike_seq[t] = 1.0f;
v_v_seq[t + dt] = h_seq[t] - v_threshold;
}
'''
code += r'''
else
{
spike_seq[t] = 0.0f;
v_v_seq[t + dt] = h_seq[t];
}
}
}
}
'''
elif dtype == 'fp16':
code = rf'''
#include <cuda_fp16.h>
extern "C" __global__
void {kernel_name}(const half2* x_seq, half2* v_v_seq, half2* h_seq, half2* spike_seq,
const half & v_threshold, {'const half & v_reset,' if hard_reset else ''}
const int & neuron_num, const int & numel)
'''
code += r'''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = neuron_num >> 1;
if (index < stride)
{
const int numel_2 = numel >> 1;
const half2 v_threshold_half2 = __half2half2(v_threshold);
'''
if hard_reset:
code += r'''
const half2 v_reset_half2 = __half2half2(v_reset);
'''
code += r'''
for(int mem_offset = 0; mem_offset < numel_2; mem_offset += stride)
{
const int t = index + mem_offset;
h_seq[t] = __hadd2(v_v_seq[t], x_seq[t]);
spike_seq[t] = __hgeu2(h_seq[t], v_threshold_half2);
'''
if hard_reset:
code += r'''
v_v_seq[t + stride] = __hadd2(__hmul2(spike_seq[t], v_reset_half2), __hmul2(__hsub2(__float2half2_rn(1.0f), spike_seq[t]), h_seq[t]));
'''
else:
code += r'''
v_v_seq[t + stride] = __hadd2(__hmul2(spike_seq[t], __hsub2(h_seq[t], v_threshold_half2)), __hmul2(__hsub2(__float2half2_rn(1.0f), spike_seq[t]), h_seq[t]));
'''
code += r'''
}
}
}
'''
else:
raise TypeError
return cupy.RawKernel(code, kernel_name, options=configure.cuda_compiler_options, backend=configure.cuda_compiler_backend)
def create_bptt_kernel(sg_cuda_code_fun, hard_reset: bool, detach_reset: bool, dtype: str):
kernel_name = f'IFNode_bptt_{"hard" if hard_reset else "soft"}Reset_{"detachReset" if detach_reset else ""}_{dtype}'
code_grad_s_to_h = sg_cuda_code_fun(x='over_th', y='grad_s_to_h', dtype=dtype)
if dtype == 'fp32':
code = fr'''
extern "C" __global__
void {kernel_name}(
const float* grad_spike_seq, const float* grad_v_seq, const float* h_seq, const float* spike_seq,
float* grad_x_seq, float* grad_v_init,
const float & v_threshold, {'const float & v_reset,' if hard_reset else ''}
const int & neuron_num, const int & numel)
'''
code += r'''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < neuron_num)
{
float grad_h = 0.0f; // grad_h will be used recursively
for(int mem_offset = numel - neuron_num; mem_offset >= 0; mem_offset -= neuron_num)
{
const int t = index + mem_offset;
const float over_th = h_seq[t] - v_threshold;
'''
code += code_grad_s_to_h
if detach_reset:
if hard_reset:
code_grad_v_to_h = r'''
const float grad_v_to_h = 1.0f - spike_seq[t];
'''
else:
code_grad_v_to_h = r'''
const float grad_v_to_h = 1.0f;
'''
else:
if hard_reset:
code_grad_v_to_h = r'''
const float grad_v_to_h = 1.0f - spike_seq[t] + (v_reset - h_seq[t]) * grad_s_to_h;
// const float grad_v_to_h = fmaf(grad_s_to_h, v_reset - h_seq[t], 1.0f - spike_seq[t]);
'''
else:
code_grad_v_to_h = r'''
const float grad_v_to_h = 1.0f - v_threshold * grad_s_to_h;
// const float grad_v_to_h = fmaf(-grad_s_to_h, v_threshold, 1.0f);
'''
code += code_grad_v_to_h
code += r'''
grad_h = grad_spike_seq[t] * grad_s_to_h + (grad_v_seq[t] + grad_h) * grad_v_to_h;
// grad_h = fmaf(grad_spike_seq[t], grad_s_to_h, (grad_v_seq[t] + grad_h) * grad_v_to_h);
grad_x_seq[t] = grad_h;
}
grad_v_init[index] = grad_h;
}
}
'''
elif dtype == 'fp16':
code = fr'''
#include <cuda_fp16.h>
extern "C" __global__
void {kernel_name}(
const half2* grad_spike_seq, const half2* grad_v_seq, const half2* h_seq, const half2* spike_seq,
half2* grad_x_seq, half2* grad_v_init,
const half & v_threshold, {'const half & v_reset,' if hard_reset else ''}
const int & neuron_num, const int & numel)
'''
code += r'''
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = neuron_num >> 1;
if (index < stride)
{
const half2 v_threshold_half2 = __half2half2(v_threshold);
'''
if hard_reset:
code += r'''
const half2 v_reset_half2 = __half2half2(v_reset);
'''
code += r'''
half2 grad_h = __float2half2_rn(0.0f); // grad_h will be used recursively
for(int mem_offset = (numel >> 1) - stride; mem_offset >= 0; mem_offset -= stride)
{
const int t = index + mem_offset;
const half2 over_th = __hsub2(h_seq[t], v_threshold_half2);
'''
code += code_grad_s_to_h
if detach_reset:
if hard_reset:
code_grad_v_to_h = r'''
const half2 grad_v_to_h = __hsub2(__float2half2_rn(1.0f), spike_seq[t]);
'''
else:
code_grad_v_to_h = r'''
const half2 grad_v_to_h = __float2half2_rn(1.0f);
'''
else:
if hard_reset:
code_grad_v_to_h = r'''
const half2 grad_v_to_h = __hfma2(__hsub2(v_reset_half2, h_seq[t]), grad_s_to_h, __hsub2(__float2half2_rn(1.0f), spike_seq[t]));
'''
else:
code_grad_v_to_h = r'''
const half2 grad_v_to_h = __hsub2(__float2half2_rn(1.0f), __hmul2(v_threshold_half2, grad_s_to_h));
'''
code += code_grad_v_to_h
code += r'''
grad_h = __hfma2(__hadd2(grad_v_seq[t], grad_h), grad_v_to_h, __hmul2(grad_spike_seq[t], grad_s_to_h));
grad_x_seq[t] = grad_h;
}
grad_v_init[index] = grad_h;
}
}
'''
else:
raise TypeError
return cupy.RawKernel(code, kernel_name, options=configure.cuda_compiler_options, backend=configure.cuda_compiler_backend)
def forward(ctx, x_seq: torch.Tensor, v_init: torch.Tensor, v_threshold: float, v_reset: float,
            detach_reset: bool, sg_cuda_code_fun):
    """Fused CUDA forward pass of a multi-step IF neuron.

    :param x_seq: input sequence, shape ``[T, N]`` (flattened neurons per step)
    :param v_init: membrane potential before step 0, shape ``[N]``
    :param v_threshold: firing threshold
    :param v_reset: hard-reset potential; ``None`` selects the soft-reset kernel
    :param detach_reset: stored on ``ctx`` for the backward kernel
    :param sg_cuda_code_fun: surrogate-gradient CUDA code generator, stored on ``ctx``
    :return: ``(spike_seq, v_seq)``, both shaped like ``x_seq``
    """
    # Populate ctx below only if autograd actually needs gradients.
    requires_grad = x_seq.requires_grad or v_init.requires_grad
    device = x_seq.get_device()
    # Select the kernel variant and the matching cupy scalar dtype.
    if x_seq.dtype == torch.float32:
        dtype = 'fp32'
        cp_dtype = np.float32
    elif x_seq.dtype == torch.float16:
        dtype = 'fp16'
        cp_dtype = np.half
    else:
        raise NotImplementedError

    use_pad = False
    if dtype == 'fp16' and v_init.numel() % 2 != 0:
        # only fp16 needs an even neuron count because the kernel packs two
        # neurons into one half2; when numel is odd, pad one extra column
        use_pad = True
        x_seq = F.pad(x_seq, (0, 1))  # [T, N] -> [T, N + 1]
        v_init = F.pad(v_init, (0, 1))  # [N] -> [N + 1]

    # One contiguous zero buffer, split into three [T, ...] views.
    zero_shape = list(x_seq.shape)
    zero_shape[0] *= 3
    v_seq, h_seq, spike_seq = torch.split(torch.zeros(zero_shape, device=x_seq.device, dtype=x_seq.dtype), x_seq.shape[0])
    # v_v_seq[0] is the initial potential; v_v_seq[1:] receives the T post-step potentials.
    v_v_seq = torch.cat((v_init.unsqueeze(0), v_seq))

    with cuda_utils.DeviceEnvironment(device):
        numel = x_seq.numel()
        neuron_num = numel // x_seq.shape[0]

        threads = configure.cuda_threads
        if dtype == 'fp16':
            assert neuron_num % 2 == 0
            blocks = cuda_utils.cal_blocks(neuron_num >> 1)
            # we will take two neurons to calculate as one neuron in cuda half2
        else:
            blocks = cuda_utils.cal_blocks(neuron_num)

        # Scalars are wrapped as cupy arrays so they can be passed to the raw kernel.
        cp_numel = cupy.asarray(numel)
        cp_neuron_num = cupy.asarray(neuron_num)
        cp_v_threshold = cupy.asarray(v_threshold, dtype=cp_dtype)

        if v_reset is None:
            # Soft reset: the kernel signature carries no v_reset argument.
            cp_v_reset = None
            hard_reset = False
            x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_neuron_num, cp_numel = cuda_utils.get_contiguous(
                x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_neuron_num, cp_numel)
            kernel_args = [x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_neuron_num, cp_numel]
        else:
            cp_v_reset = cupy.asarray(v_reset, dtype=cp_dtype)
            hard_reset = True
            x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_v_reset, cp_neuron_num, cp_numel = cuda_utils.get_contiguous(
                x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_v_reset, cp_neuron_num, cp_numel)
            kernel_args = [x_seq, v_v_seq, h_seq, spike_seq, cp_v_threshold, cp_v_reset, cp_neuron_num,
                           cp_numel]

        kernel = MultiStepIFNodePTT.create_fptt_kernel(hard_reset, dtype)

        kernel(
            (blocks,), (threads,),
            cuda_utils.wrap_args_to_raw_kernel(
                device,
                *kernel_args
            )
        )

    if requires_grad:
        ctx.use_pad = use_pad
        if configure.save_spike_as_bool_in_neuron_kernel:
            # Spikes are stored through the bool tensor cache (presumably to save
            # memory, since spikes are 0/1); backward re-materializes them as float.
            ctx.s_shape = spike_seq.shape
            ctx.s_tk = tensor_cache.BOOL_TENSOR_CACHE.store_bool(spike_seq)
            ctx.save_for_backward(h_seq)
        else:
            ctx.save_for_backward(h_seq, spike_seq)
        ctx.blocks = blocks
        ctx.threads = threads
        ctx.cp_numel = cp_numel
        ctx.cp_neuron_num = cp_neuron_num
        ctx.cp_v_threshold = cp_v_threshold
        ctx.cp_v_reset = cp_v_reset
        ctx.detach_reset = detach_reset
        ctx.sg_cuda_code_fun = sg_cuda_code_fun

    if use_pad:
        # Strip the fp16 padding column before returning to the caller.
        return spike_seq[..., :-1], v_v_seq[1:, ..., :-1]
    else:
        return spike_seq, v_v_seq[1:, ]
def backward(ctx, grad_spike_seq, grad_v_seq):
    """Fused CUDA backward pass matching ``forward``.

    Returns gradients for the six forward inputs:
    ``(grad_x_seq, grad_v_init, None, None, None, None)``.
    """
    if ctx.use_pad:
        # grad_spike_seq.shape = [T, N]
        # grad_v_seq.shape = [T, N]
        # h_seq.shape = [T, N + 1]
        # spike_seq.shape = [T, N + 1]
        # forward padded the saved tensors for fp16, so pad the incoming grads to match
        grad_spike_seq = F.pad(grad_spike_seq, (0, 1))
        grad_v_seq = F.pad(grad_v_seq, (0, 1))

    device = grad_spike_seq.get_device()
    if configure.save_spike_as_bool_in_neuron_kernel:
        h_seq = ctx.saved_tensors[0]
        # re-materialize the float spike tensor from the bool cache
        spike_seq = tensor_cache.BOOL_TENSOR_CACHE.get_float(ctx.s_tk, ctx.s_shape)
    else:
        h_seq, spike_seq = ctx.saved_tensors

    # One contiguous buffer: rows [0, T) are grad_x_seq, the last row is grad_v_init.
    zero_shape = list(grad_spike_seq.shape)
    zero_shape[0] += 1
    zero_data = torch.zeros(zero_shape, device=grad_spike_seq.device, dtype=grad_spike_seq.dtype)
    grad_x_seq = zero_data[0: -1]
    grad_v_init = zero_data[-1]

    # Hard vs soft reset was recorded implicitly by whether cp_v_reset was created.
    if ctx.cp_v_reset is None:
        hard_reset = False
    else:
        hard_reset = True

    if grad_spike_seq.dtype == torch.float32:
        dtype = 'fp32'
    elif grad_spike_seq.dtype == torch.float16:
        dtype = 'fp16'
    else:
        raise NotImplementedError

    kernel = MultiStepIFNodePTT.create_bptt_kernel(ctx.sg_cuda_code_fun, hard_reset, ctx.detach_reset, dtype)

    with cuda_utils.DeviceEnvironment(device):
        if hard_reset:
            grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init, ctx.cp_v_threshold, ctx.cp_v_reset, ctx.cp_neuron_num, ctx.cp_numel = cuda_utils.get_contiguous(
                grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init, ctx.cp_v_threshold,
                ctx.cp_v_reset, ctx.cp_neuron_num, ctx.cp_numel)
            kernel_args = [grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init,
                           ctx.cp_v_threshold, ctx.cp_v_reset, ctx.cp_neuron_num, ctx.cp_numel]
        else:
            grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init, ctx.cp_v_threshold, ctx.cp_neuron_num, ctx.cp_numel = cuda_utils.get_contiguous(
                grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init, ctx.cp_v_threshold,
                ctx.cp_neuron_num, ctx.cp_numel)
            kernel_args = [grad_spike_seq, grad_v_seq, h_seq, spike_seq, grad_x_seq, grad_v_init,
                           ctx.cp_v_threshold, ctx.cp_neuron_num, ctx.cp_numel]

        kernel(
            (ctx.blocks,), (ctx.threads,),
            cuda_utils.wrap_args_to_raw_kernel(
                device,
                *kernel_args
            )
        )

    if ctx.use_pad:
        # strip the fp16 padding column from both gradients
        return grad_x_seq[..., :-1], grad_v_init[..., :-1], None, None, None, None
    else:
        return grad_x_seq, grad_v_init, None, None, None, None
def save_cuda_codes(cu_file_path: str = './spikingjelly/activation_based/neuron_kernel_sample.cu'):
    """Dump every generated CUDA kernel (all surrogate-gradient, reset and dtype
    combinations) into one ``.cu`` file for human inspection only."""
    with open(cu_file_path, 'w+') as cu_file:
        write = cu_file.write
        write('// This file is created by spikingjelly.activation_based.neuron_kernel.save_cuda_codes.\n')
        write('// Note that codes in this file will not be executed This file is just created for reading.\n')
        for neu_cls in [MultiStepIFNodePTT]:
            write('\n// ' + neu_cls.__name__ + '\n')
            for sg_cls in surrogate._has_cuda_:
                for hard_reset in (True, False):
                    for dtype in ('fp32', 'fp16'):
                        # forward kernel does not depend on the surrogate gradient
                        write(f'\n// {neu_cls.__name__} fptt {sg_cls.__name__}, hard_reset={hard_reset}, dtype={dtype}\n')
                        write(neu_cls.create_fptt_kernel(hard_reset, dtype).code)
                        for detach_reset in (True, False):
                            write(f'\n// {neu_cls.__name__} bptt {sg_cls.__name__}, hard_reset={hard_reset}, dtype={dtype}, detach_reset={detach_reset}\n')
                            write(neu_cls.create_bptt_kernel(sg_cls().cuda_code, hard_reset, detach_reset, dtype).code)
7,671 | import torch
import torch.nn as nn
import copy
import logging
from abc import abstractmethod
try:
import cupy
except BaseException as e:
logging.info(f'spikingjelly.activation_based.base: {e}')
cupy = None
The provided code snippet includes necessary dependencies for implementing the `check_backend_library` function. Write a Python function `def check_backend_library(backend: str)` to solve the following problem:
* :ref:`API in English <check_backend_library-en>` .. _check_backend_library-cn: :param backend: ``'torch'``, ``'cupy'`` 或 ``'lava'`` :type backend: str 检查某个后端的python库是否已经安装。若未安装则此函数会报错。 * :ref:`中文 API <check_backend_library-cn>` .. _check_backend_library-en: :param backend: ``'torch'``, ``'cupy'`` or ``'lava'`` :type backend: str Check whether the python lib for backend is installed. If not, this function will raise an error.
Here is the function:
def check_backend_library(backend: str):
    """Check that the Python library required by ``backend`` is installed.

    :param backend: ``'torch'``, ``'cupy'`` or ``'lava'``
    :type backend: str

    Raises ``ImportError`` when the library backing ``backend`` failed to import
    at module load time. ``'torch'`` needs no extra library, and any
    unrecognized backend name is accepted silently.
    """
    if backend == 'cupy' and cupy is None:
        raise ImportError('CuPy is not installed! You can install it from "https://github.com/cupy/cupy".')
    if backend == 'lava' and slayer is None:
        raise ImportError('Lava-DL is not installed! You can install it from ' \
                          '"https://github.com/lava-nc/lava-dl". ')
7,672 | from typing import Callable, Union
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import neuron, monitor, base
def mstdp_linear_single_step(
    fc: nn.Linear, in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[float, torch.Tensor, None],
    trace_post: Union[float, torch.Tensor, None],
    tau_pre: float, tau_post: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """One MSTDP step for a linear layer.

    Traces decay by ``exp(-1/tau)`` and then accumulate the current spikes;
    ``None`` traces start from zero. Returns the updated traces and the
    per-synapse eligibility of shape ``[batch_size, C_out, C_in]``.
    """
    w = fc.weight.data
    decayed_pre = (0. if trace_pre is None else trace_pre) * math.exp(-1 / tau_pre)
    decayed_post = (0. if trace_post is None else trace_post) * math.exp(-1 / tau_post)
    trace_pre = decayed_pre + in_spike      # [batch_size, C_in]
    trace_post = decayed_post + out_spike   # [batch_size, C_out]

    # potentiation: pre trace meets a postsynaptic spike; depression: vice versa
    ltp = f_post(w) * (trace_pre.unsqueeze(1) * out_spike.unsqueeze(2))
    ltd = f_pre(w) * (trace_post.unsqueeze(2) * in_spike.unsqueeze(1))
    return trace_pre, trace_post, ltp - ltd
7,673 | from typing import Callable, Union
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import neuron, monitor, base
def mstdpet_linear_single_step(
    fc: nn.Linear, in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[float, torch.Tensor, None],
    trace_post: Union[float, torch.Tensor, None],
    tau_pre: float, tau_post: float, tau_trace: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """One MSTDP-ET step for a linear layer on 1-D spike vectors.

    ``torch.outer`` is used, so ``in_spike``/``out_spike`` carry no batch
    dimension. Traces decay by ``exp(-1/tau)`` and accumulate the spikes.

    NOTE(review): ``tau_trace`` is accepted but unused in this step —
    presumably the caller applies the eligibility-trace decay; confirm
    against the learner that drives this function.
    """
    w = fc.weight.data
    decayed_pre = (0. if trace_pre is None else trace_pre) * math.exp(-1 / tau_pre)
    decayed_post = (0. if trace_post is None else trace_post) * math.exp(-1 / tau_post)
    trace_pre = decayed_pre + in_spike
    trace_post = decayed_post + out_spike

    ltp = f_post(w) * torch.outer(out_spike, trace_pre)
    ltd = f_pre(w) * torch.outer(trace_post, in_spike)
    return trace_pre, trace_post, ltp - ltd
7,674 | from typing import Callable, Union
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import neuron, monitor, base
def stdp_linear_single_step(
    fc: nn.Linear, in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[float, torch.Tensor, None],
    trace_post: Union[float, torch.Tensor, None],
    tau_pre: float, tau_post: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """One STDP step for a linear layer.

    Traces are leaky integrators (decay ``1/tau`` per step, then add the
    spikes); ``None`` traces start from zero. Returns the updated traces and
    the batch-summed weight change of shape ``[N_out, N_in]``.
    """
    if trace_pre is None:
        trace_pre = 0.
    if trace_post is None:
        trace_post = 0.

    w = fc.weight.data
    trace_pre = trace_pre - trace_pre / tau_pre + in_spike       # [batch_size, N_in]
    trace_post = trace_post - trace_post / tau_post + out_spike  # [batch_size, N_out]

    # sum the batch dimension away: [batch_size, N_out, N_in] -> [N_out, N_in]
    depression = -f_pre(w) * (trace_post.unsqueeze(2) * in_spike.unsqueeze(1)).sum(0)
    potentiation = f_post(w) * (trace_pre.unsqueeze(1) * out_spike.unsqueeze(2)).sum(0)
    return trace_pre, trace_post, depression + potentiation
def stdp_conv2d_single_step(
    conv: nn.Conv2d, in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[torch.Tensor, None], trace_post: Union[torch.Tensor, None],
    tau_pre: float, tau_post: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """One STDP step for a Conv2d layer.

    :param conv: convolution whose weight is adapted; dilation and groups must be 1
    :param in_spike: presynaptic spikes, shape ``[batch_size, C_in, H_in, W_in]``
    :param out_spike: postsynaptic spikes, shape ``[batch_size, C_out, H_out, W_out]``
    :param trace_pre: presynaptic trace from the previous step, or ``None`` to start at zero
    :param trace_post: postsynaptic trace from the previous step, or ``None`` to start at zero
    :param tau_pre: decay time constant of the presynaptic trace
    :param tau_post: decay time constant of the postsynaptic trace
    :param f_pre: weight-dependence function for the depression (pre) term
    :param f_post: weight-dependence function for the potentiation (post) term
    :return: ``(trace_pre, trace_post, delta_w)``; ``delta_w`` has ``conv.weight``'s shape
    """
    # Only the plain convolution case is supported.
    if conv.dilation != (1, 1):
        raise NotImplementedError(
            'STDP with dilation != 1 for Conv2d has not been implemented!'
        )
    if conv.groups != 1:
        raise NotImplementedError(
            'STDP with groups != 1 for Conv2d has not been implemented!'
        )

    stride_h = conv.stride[0]
    stride_w = conv.stride[1]

    if conv.padding == (0, 0):
        pass
    else:
        # Pad the input the same way the convolution does, so the strided
        # windows below line up with conv's output positions.
        pH = conv.padding[0]
        pW = conv.padding[1]
        if conv.padding_mode != 'zeros':
            in_spike = F.pad(
                in_spike, conv._reversed_padding_repeated_twice,
                mode=conv.padding_mode
            )
        else:
            in_spike = F.pad(in_spike, pad=(pW, pW, pH, pH))

    # Traces start from zero, matching the (padded) input / output shapes.
    if trace_pre is None:
        trace_pre = torch.zeros_like(
            in_spike, device=in_spike.device, dtype=in_spike.dtype
        )
    if trace_post is None:
        trace_post = torch.zeros_like(
            out_spike, device = in_spike.device, dtype=in_spike.dtype
        )

    # Leaky-integrator traces: decay by 1/tau per step, then add the spikes.
    trace_pre = trace_pre - trace_pre / tau_pre + in_spike
    trace_post = trace_post - trace_post / tau_post + out_spike

    delta_w = torch.zeros_like(conv.weight.data)
    # Accumulate the update per kernel position (h, w): each position sees
    # exactly the input window it multiplies, strided like the convolution.
    for h in range(conv.weight.shape[2]):
        for w in range(conv.weight.shape[3]):
            h_end = in_spike.shape[2] - conv.weight.shape[2] + 1 + h
            w_end = in_spike.shape[3] - conv.weight.shape[3] + 1 + w
            pre_spike = in_spike[:, :, h:h_end:stride_h, w:w_end:stride_w]  # shape = [batch_size, C_in, h_out, w_out]
            post_spike = out_spike  # shape = [batch_size, C_out, h_out, w_out]
            weight = conv.weight.data[:, :, h, w]  # shape = [C_out, C_in]

            tr_pre = trace_pre[:, :, h:h_end:stride_h, w:w_end:stride_w]  # shape = [batch_size, C_in, h_out, w_out]
            tr_post = trace_post  # shape = [batch_size, C_out, h_out, w_out]

            # depression: post trace coincides with a presynaptic spike
            delta_w_pre = - (f_pre(weight) *
                             (tr_post.unsqueeze(2) * pre_spike.unsqueeze(1))
                             .permute([1, 2, 0, 3, 4]).sum(dim = [2, 3, 4]))
            # potentiation: pre trace coincides with a postsynaptic spike
            delta_w_post = f_post(weight) * \
                           (tr_pre.unsqueeze(1) * post_spike.unsqueeze(2)) \
                           .permute([1, 2, 0, 3, 4]).sum(dim = [2, 3, 4])
            delta_w[:, :, h, w] += delta_w_pre + delta_w_post

    return trace_pre, trace_post, delta_w
def stdp_conv1d_single_step(
    conv: nn.Conv1d, in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[torch.Tensor, None], trace_post: Union[torch.Tensor, None],
    tau_pre: float, tau_post: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """One STDP step for a Conv1d layer.

    :param conv: convolution whose weight is adapted; dilation and groups must be 1
    :param in_spike: presynaptic spikes, shape ``[batch_size, C_in, L_in]``
    :param out_spike: postsynaptic spikes, shape ``[batch_size, C_out, L_out]``
    :param trace_pre: presynaptic trace from the previous step, or ``None`` to start at zero
    :param trace_post: postsynaptic trace from the previous step, or ``None`` to start at zero
    :param tau_pre: decay time constant of the presynaptic trace
    :param tau_post: decay time constant of the postsynaptic trace
    :param f_pre: weight-dependence function for the depression (pre) term
    :param f_post: weight-dependence function for the potentiation (post) term
    :return: ``(trace_pre, trace_post, delta_w)``; ``delta_w`` has ``conv.weight``'s shape
    """
    # Only the plain convolution case is supported.
    if conv.dilation != (1, ):
        raise NotImplementedError(
            'STDP with dilation != 1 for Conv1d has not been implemented!'
        )
    if conv.groups != 1:
        raise NotImplementedError(
            'STDP with groups != 1 for Conv1d has not been implemented!'
        )

    stride_l = conv.stride[0]

    if conv.padding == (0, ):
        pass
    else:
        # Pad the input the same way the convolution does, so the strided
        # windows below line up with conv's output positions.
        pL = conv.padding[0]
        if conv.padding_mode != 'zeros':
            in_spike = F.pad(
                in_spike, conv._reversed_padding_repeated_twice,
                mode=conv.padding_mode
            )
        else:
            in_spike = F.pad(in_spike, pad=(pL, pL))

    # Traces start from zero, matching the (padded) input / output shapes.
    if trace_pre is None:
        trace_pre = torch.zeros_like(
            in_spike, device=in_spike.device, dtype=in_spike.dtype
        )
    if trace_post is None:
        trace_post = torch.zeros_like(
            out_spike, device=in_spike.device, dtype=in_spike.dtype
        )

    # Leaky-integrator traces: decay by 1/tau per step, then add the spikes.
    trace_pre = trace_pre - trace_pre / tau_pre + in_spike
    trace_post = trace_post - trace_post / tau_post + out_spike

    delta_w = torch.zeros_like(conv.weight.data)
    # Accumulate the update per kernel position l, strided like the convolution.
    for l in range(conv.weight.shape[2]):
        l_end = in_spike.shape[2] - conv.weight.shape[2] + 1 + l
        pre_spike = in_spike[:, :, l:l_end:stride_l]  # shape = [batch_size, C_in, l_out]
        post_spike = out_spike  # shape = [batch_size, C_out, l_out]
        weight = conv.weight.data[:, :, l]  # shape = [C_out, C_in]

        tr_pre = trace_pre[:, :, l:l_end:stride_l]  # shape = [batch_size, C_in, l_out]
        tr_post = trace_post  # shape = [batch_size, C_out, l_out]

        # depression: post trace coincides with a presynaptic spike
        delta_w_pre = - (f_pre(weight) *
                         (tr_post.unsqueeze(2) * pre_spike.unsqueeze(1))
                         .permute([1, 2, 0, 3]).sum(dim = [2, 3]))
        # potentiation: pre trace coincides with a postsynaptic spike
        delta_w_post = f_post(weight) * \
                       (tr_pre.unsqueeze(1) * post_spike.unsqueeze(2)) \
                       .permute([1, 2, 0, 3]).sum(dim = [2, 3])
        delta_w[:, :, l] += delta_w_pre + delta_w_post

    return trace_pre, trace_post, delta_w
def stdp_multi_step(
    layer: Union[nn.Linear, nn.Conv1d, nn.Conv2d],
    in_spike: torch.Tensor, out_spike: torch.Tensor,
    trace_pre: Union[float, torch.Tensor, None],
    trace_post: Union[float, torch.Tensor, None],
    tau_pre: float, tau_post: float,
    f_pre: Callable = lambda x: x, f_post: Callable = lambda x: x
):
    """Run the single-step STDP rule over a time-first spike sequence.

    ``in_spike`` and ``out_spike`` have shape ``[T, ...]``; the traces are
    threaded through the steps and returned together with the accumulated
    weight change (same shape as ``layer.weight``).
    """
    if isinstance(layer, nn.Linear):
        step_fn = stdp_linear_single_step
    elif isinstance(layer, nn.Conv1d):
        step_fn = stdp_conv1d_single_step
    elif isinstance(layer, nn.Conv2d):
        step_fn = stdp_conv2d_single_step
    # an unsupported layer type leaves step_fn unbound and raises at the call below

    delta_w = torch.zeros_like(layer.weight.data)
    for step in range(in_spike.shape[0]):
        trace_pre, trace_post, dw = step_fn(
            layer, in_spike[step], out_spike[step], trace_pre, trace_post,
            tau_pre, tau_post, f_pre, f_post
        )
        delta_w += dw
    return trace_pre, trace_post, delta_w
7,675 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
The provided code snippet includes necessary dependencies for implementing the `check_manual_grad` function. Write a Python function `def check_manual_grad(primitive_function, spiking_function, *args, **kwargs)` to solve the following problem:
:param primitive_function: 梯度替代函数的原函数 :type primitive_function: callable :param spiking_function: 梯度替代函数 :type spiking_function: callable 梯度替代函数的反向传播一般是手写的,可以用此函数去检查手写梯度是否正确。 此函数检查梯度替代函数spiking_function的反向传播,与原函数primitive_function的反向传播结果是否一致。“一致”被定义为,两者的误差不超过eps。 示例代码: .. code-block:: python def s2nn_apply(x, alpha, beta): return surrogate.s2nn.apply(x, alpha, beta) surrogate.check_manual_grad(surrogate.S2NN.primitive_function, s2nn_apply, alpha=4., beta=1.)
Here is the function:
def check_manual_grad(primitive_function, spiking_function, *args, **kwargs):
    '''
    :param primitive_function: the original differentiable function of a surrogate gradient
    :type primitive_function: callable
    :param spiking_function: the surrogate-gradient (spiking) function
    :type spiking_function: callable

    The backward of a surrogate-gradient function is usually hand-written; use
    this helper to check it. It backpropagates both functions over the same
    inputs and prints the gradients plus the point of maximum absolute error
    between the manual gradient and the autograd reference.

    Example:

    .. code-block:: python

        def s2nn_apply(x, alpha, beta):
            return surrogate.s2nn.apply(x, alpha, beta)

        surrogate.check_manual_grad(surrogate.S2NN.primitive_function, s2nn_apply, alpha=4., beta=1.)
    '''
    x = torch.arange(-2, 2, 32 / 8192)
    x.requires_grad_(True)

    # reference gradient via autograd on the primitive function
    primitive_function(x, *args, **kwargs).sum().backward()
    grad_auto = x.grad.clone()

    # hand-written gradient via the spiking function
    x.grad.zero_()
    spiking_function(x, *args, **kwargs).sum().backward()
    grad_manual = x.grad.clone()

    print('auto grad', grad_auto)
    print('manual grad', grad_manual)

    abs_error = (grad_manual - grad_auto).abs()
    worst = abs_error.argmax()
    print('max error', abs_error[worst], 'occurs at')
    print(f'x[{worst}] = {x[worst]}')
    print('auto grad', grad_auto[worst])
    print('manual grad', grad_manual[worst])
7,676 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def check_cuda_grad(neu, surrogate_function, device, *args, **kwargs):
    """Compare the gradients produced by a neuron's 'torch' and 'cupy' backends
    on a CUDA device, for both float32 and float16, and print the point of
    maximum absolute difference.

    Example::

        check_cuda_grad(neuron.IFNode, surrogate.S2NN, device='cuda:1', alpha=4., beta=1.)
    """
    for dtype in (torch.float, torch.half):
        print(dtype)
        net = neu(surrogate_function=surrogate_function(*args, **kwargs), step_mode='m')
        net.to(device)

        x = torch.arange(-2, 2, 32 / 8192, device=device, dtype=dtype)
        x.requires_grad_(True)

        # reference gradient from the pure-PyTorch backend
        net.backend = 'torch'
        net(x.unsqueeze(0)).sum().backward()
        grad_torch = x.grad.clone()

        # rerun the same input through the fused CuPy kernels
        x.grad.zero_()
        net.reset()
        net.backend = 'cupy'
        net(x.unsqueeze(0)).sum().backward()
        grad_cupy = x.grad.clone()

        abs_error = (grad_cupy - grad_torch).abs()
        idx = abs_error.argmax()
        print('max error', abs_error[idx], 'occurs at')
        print(f'x[{idx}] = {x[idx]}')
        print('python grad', grad_torch[idx])
        print('cupy grad', grad_cupy[idx])
7,677 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .auto_cuda import cfunction
def piecewise_quadratic_backward(grad_output: torch.Tensor, x: torch.Tensor, alpha: float):
    """Backward of the piecewise-quadratic surrogate function.

    The surrogate gradient is the triangular kernel ``alpha - alpha^2 * |x|``
    inside ``|x| <= 1/alpha`` and zero outside; the second return value is the
    gradient w.r.t. ``alpha`` (not computed, hence ``None``).
    """
    x_abs = x.abs()
    surrogate_grad = alpha - (alpha ** 2) * x_abs  # peaks at alpha for x = 0
    grad_x = (grad_output * surrogate_grad).masked_fill(x_abs > (1 / alpha), 0)
    return grad_x, None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.